Merge branches 'release' and 'hp-cid' into release
[deliverable/linux.git] / drivers / net / wireless / ipw2200.c
1 /******************************************************************************
2
3 Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved.
4
5 802.11 status code portion of this file from ethereal-0.10.6:
6 Copyright 2000, Axis Communications AB
7 Ethereal - Network traffic analyzer
8 By Gerald Combs <gerald@ethereal.com>
9 Copyright 1998 Gerald Combs
10
11 This program is free software; you can redistribute it and/or modify it
12 under the terms of version 2 of the GNU General Public License as
13 published by the Free Software Foundation.
14
15 This program is distributed in the hope that it will be useful, but WITHOUT
16 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 more details.
19
20 You should have received a copy of the GNU General Public License along with
21 this program; if not, write to the Free Software Foundation, Inc., 59
22 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23
24 The full GNU General Public License is included in this distribution in the
25 file called LICENSE.
26
27 Contact Information:
28 James P. Ketrenos <ipw2100-admin@linux.intel.com>
29 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30
31 ******************************************************************************/
32
33 #include "ipw2200.h"
34 #include <linux/version.h>
35
36
37 #ifndef KBUILD_EXTMOD
38 #define VK "k"
39 #else
40 #define VK
41 #endif
42
43 #ifdef CONFIG_IPW2200_DEBUG
44 #define VD "d"
45 #else
46 #define VD
47 #endif
48
49 #ifdef CONFIG_IPW2200_MONITOR
50 #define VM "m"
51 #else
52 #define VM
53 #endif
54
55 #ifdef CONFIG_IPW2200_PROMISCUOUS
56 #define VP "p"
57 #else
58 #define VP
59 #endif
60
61 #ifdef CONFIG_IPW2200_RADIOTAP
62 #define VR "r"
63 #else
64 #define VR
65 #endif
66
67 #ifdef CONFIG_IPW2200_QOS
68 #define VQ "q"
69 #else
70 #define VQ
71 #endif
72
73 #define IPW2200_VERSION "1.2.2" VK VD VM VP VR VQ
74 #define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2200/2915 Network Driver"
75 #define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation"
76 #define DRV_VERSION IPW2200_VERSION
77
78 #define ETH_P_80211_STATS (ETH_P_80211_RAW + 1)
79
80 MODULE_DESCRIPTION(DRV_DESCRIPTION);
81 MODULE_VERSION(DRV_VERSION);
82 MODULE_AUTHOR(DRV_COPYRIGHT);
83 MODULE_LICENSE("GPL");
84
/* Module-parameter backing variables.  The module_param() declarations
 * that expose these at load time live elsewhere in this file (not in
 * this chunk), so the exact user-visible semantics should be confirmed
 * there. */
static int cmdlog = 0;
static int debug = 0;
static int channel = 0;
static int mode = 0;

static u32 ipw_debug_level;	/* debug bitmask tested by printk_buf() */
static int associate = 1;
static int auto_create = 1;
static int led = 0;
static int disable = 0;
static int bt_coexist = 0;
static int hwcrypto = 0;
static int roaming = 1;
/* one-character labels for the 802.11 bands; '?' = unknown */
static const char ipw_modes[] = {
	'a', 'b', 'g', '?'
};
static int antenna = CFG_SYS_ANTENNA_BOTH;
102
103 #ifdef CONFIG_IPW2200_PROMISCUOUS
104 static int rtap_iface = 0; /* def: 0 -- do not create rtap interface */
105 #endif
106
107
108 #ifdef CONFIG_IPW2200_QOS
109 static int qos_enable = 0;
110 static int qos_burst_enable = 0;
111 static int qos_no_ack_mask = 0;
112 static int burst_duration_CCK = 0;
113 static int burst_duration_OFDM = 0;
114
115 static struct ieee80211_qos_parameters def_qos_parameters_OFDM = {
116 {QOS_TX0_CW_MIN_OFDM, QOS_TX1_CW_MIN_OFDM, QOS_TX2_CW_MIN_OFDM,
117 QOS_TX3_CW_MIN_OFDM},
118 {QOS_TX0_CW_MAX_OFDM, QOS_TX1_CW_MAX_OFDM, QOS_TX2_CW_MAX_OFDM,
119 QOS_TX3_CW_MAX_OFDM},
120 {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
121 {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
122 {QOS_TX0_TXOP_LIMIT_OFDM, QOS_TX1_TXOP_LIMIT_OFDM,
123 QOS_TX2_TXOP_LIMIT_OFDM, QOS_TX3_TXOP_LIMIT_OFDM}
124 };
125
126 static struct ieee80211_qos_parameters def_qos_parameters_CCK = {
127 {QOS_TX0_CW_MIN_CCK, QOS_TX1_CW_MIN_CCK, QOS_TX2_CW_MIN_CCK,
128 QOS_TX3_CW_MIN_CCK},
129 {QOS_TX0_CW_MAX_CCK, QOS_TX1_CW_MAX_CCK, QOS_TX2_CW_MAX_CCK,
130 QOS_TX3_CW_MAX_CCK},
131 {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
132 {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
133 {QOS_TX0_TXOP_LIMIT_CCK, QOS_TX1_TXOP_LIMIT_CCK, QOS_TX2_TXOP_LIMIT_CCK,
134 QOS_TX3_TXOP_LIMIT_CCK}
135 };
136
137 static struct ieee80211_qos_parameters def_parameters_OFDM = {
138 {DEF_TX0_CW_MIN_OFDM, DEF_TX1_CW_MIN_OFDM, DEF_TX2_CW_MIN_OFDM,
139 DEF_TX3_CW_MIN_OFDM},
140 {DEF_TX0_CW_MAX_OFDM, DEF_TX1_CW_MAX_OFDM, DEF_TX2_CW_MAX_OFDM,
141 DEF_TX3_CW_MAX_OFDM},
142 {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
143 {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
144 {DEF_TX0_TXOP_LIMIT_OFDM, DEF_TX1_TXOP_LIMIT_OFDM,
145 DEF_TX2_TXOP_LIMIT_OFDM, DEF_TX3_TXOP_LIMIT_OFDM}
146 };
147
148 static struct ieee80211_qos_parameters def_parameters_CCK = {
149 {DEF_TX0_CW_MIN_CCK, DEF_TX1_CW_MIN_CCK, DEF_TX2_CW_MIN_CCK,
150 DEF_TX3_CW_MIN_CCK},
151 {DEF_TX0_CW_MAX_CCK, DEF_TX1_CW_MAX_CCK, DEF_TX2_CW_MAX_CCK,
152 DEF_TX3_CW_MAX_CCK},
153 {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
154 {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
155 {DEF_TX0_TXOP_LIMIT_CCK, DEF_TX1_TXOP_LIMIT_CCK, DEF_TX2_TXOP_LIMIT_CCK,
156 DEF_TX3_TXOP_LIMIT_CCK}
157 };
158
159 static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 };
160
161 static int from_priority_to_tx_queue[] = {
162 IPW_TX_QUEUE_1, IPW_TX_QUEUE_2, IPW_TX_QUEUE_2, IPW_TX_QUEUE_1,
163 IPW_TX_QUEUE_3, IPW_TX_QUEUE_3, IPW_TX_QUEUE_4, IPW_TX_QUEUE_4
164 };
165
166 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv);
167
168 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct ieee80211_qos_parameters
169 *qos_param);
170 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct ieee80211_qos_information_element
171 *qos_param);
172 #endif /* CONFIG_IPW2200_QOS */
173
174 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev);
175 static void ipw_remove_current_network(struct ipw_priv *priv);
176 static void ipw_rx(struct ipw_priv *priv);
177 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
178 struct clx2_tx_queue *txq, int qindex);
179 static int ipw_queue_reset(struct ipw_priv *priv);
180
181 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
182 int len, int sync);
183
184 static void ipw_tx_queue_free(struct ipw_priv *);
185
186 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *);
187 static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *);
188 static void ipw_rx_queue_replenish(void *);
189 static int ipw_up(struct ipw_priv *);
190 static void ipw_bg_up(struct work_struct *work);
191 static void ipw_down(struct ipw_priv *);
192 static void ipw_bg_down(struct work_struct *work);
193 static int ipw_config(struct ipw_priv *);
194 static int init_supported_rates(struct ipw_priv *priv,
195 struct ipw_supported_rates *prates);
196 static void ipw_set_hwcrypto_keys(struct ipw_priv *);
197 static void ipw_send_wep_keys(struct ipw_priv *, int);
198
199 static int snprint_line(char *buf, size_t count,
200 const u8 * data, u32 len, u32 ofs)
201 {
202 int out, i, j, l;
203 char c;
204
205 out = snprintf(buf, count, "%08X", ofs);
206
207 for (l = 0, i = 0; i < 2; i++) {
208 out += snprintf(buf + out, count - out, " ");
209 for (j = 0; j < 8 && l < len; j++, l++)
210 out += snprintf(buf + out, count - out, "%02X ",
211 data[(i * 8 + j)]);
212 for (; j < 8; j++)
213 out += snprintf(buf + out, count - out, " ");
214 }
215
216 out += snprintf(buf + out, count - out, " ");
217 for (l = 0, i = 0; i < 2; i++) {
218 out += snprintf(buf + out, count - out, " ");
219 for (j = 0; j < 8 && l < len; j++, l++) {
220 c = data[(i * 8 + j)];
221 if (!isascii(c) || !isprint(c))
222 c = '.';
223
224 out += snprintf(buf + out, count - out, "%c", c);
225 }
226
227 for (; j < 8; j++)
228 out += snprintf(buf + out, count - out, " ");
229 }
230
231 return out;
232 }
233
234 static void printk_buf(int level, const u8 * data, u32 len)
235 {
236 char line[81];
237 u32 ofs = 0;
238 if (!(ipw_debug_level & level))
239 return;
240
241 while (len) {
242 snprint_line(line, sizeof(line), &data[ofs],
243 min(len, 16U), ofs);
244 printk(KERN_DEBUG "%s\n", line);
245 ofs += 16;
246 len -= min(len, 16U);
247 }
248 }
249
/* Hex/ASCII-dump up to "len" bytes of "data" into the caller's
 * "output" buffer of "size" bytes, 16 input bytes per formatted line.
 * Returns the total number of characters produced. */
static int snprintk_buf(u8 * output, size_t size, const u8 * data, size_t len)
{
	size_t out = size;
	u32 ofs = 0;
	int total = 0;

	while (size && len) {
		out = snprint_line(output, size, &data[ofs],
				   min_t(size_t, len, 16U), ofs);

		ofs += 16;
		output += out;
		/* NOTE(review): assumes snprint_line() never reports more
		 * than "size" characters; if it did, this unsigned
		 * subtraction would wrap — verify against snprint_line's
		 * return semantics */
		size -= out;
		len -= min_t(size_t, len, 16U);
		total += out;
	}
	return total;
}
268
269 /* alias for 32-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
270 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg);
271 #define ipw_read_reg32(a, b) _ipw_read_reg32(a, b)
272
273 /* alias for 8-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
274 static u8 _ipw_read_reg8(struct ipw_priv *ipw, u32 reg);
275 #define ipw_read_reg8(a, b) _ipw_read_reg8(a, b)
276
277 /* 8-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
278 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value);
279 static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c)
280 {
281 IPW_DEBUG_IO("%s %d: write_indirect8(0x%08X, 0x%08X)\n", __FILE__,
282 __LINE__, (u32) (b), (u32) (c));
283 _ipw_write_reg8(a, b, c);
284 }
285
286 /* 16-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
287 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value);
288 static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c)
289 {
290 IPW_DEBUG_IO("%s %d: write_indirect16(0x%08X, 0x%08X)\n", __FILE__,
291 __LINE__, (u32) (b), (u32) (c));
292 _ipw_write_reg16(a, b, c);
293 }
294
295 /* 32-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
296 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value);
297 static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c)
298 {
299 IPW_DEBUG_IO("%s %d: write_indirect32(0x%08X, 0x%08X)\n", __FILE__,
300 __LINE__, (u32) (b), (u32) (c));
301 _ipw_write_reg32(a, b, c);
302 }
303
/* 8-bit direct write (low 4K) */
#define _ipw_write8(ipw, ofs, val) writeb((val), (ipw)->hw_base + (ofs))

/* 8-bit direct write (for low 4K of SRAM/regs), with debug wrapper.
 *
 * The debug wrappers are wrapped in do { } while (0) so each macro
 * expands to a single statement.  The previous two-statement expansion
 * meant that
 *	if (cond)
 *		ipw_write8(...);
 * gated only the IPW_DEBUG_IO() trace on "cond" and performed the
 * hardware write unconditionally. */
#define ipw_write8(ipw, ofs, val) do { \
	IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
	_ipw_write8(ipw, ofs, val); \
} while (0)

/* 16-bit direct write (low 4K) */
#define _ipw_write16(ipw, ofs, val) writew((val), (ipw)->hw_base + (ofs))

/* 16-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
#define ipw_write16(ipw, ofs, val) do { \
	IPW_DEBUG_IO("%s %d: write_direct16(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
	_ipw_write16(ipw, ofs, val); \
} while (0)

/* 32-bit direct write (low 4K) */
#define _ipw_write32(ipw, ofs, val) writel((val), (ipw)->hw_base + (ofs))

/* 32-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
#define ipw_write32(ipw, ofs, val) do { \
	IPW_DEBUG_IO("%s %d: write_direct32(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
	_ipw_write32(ipw, ofs, val); \
} while (0)
327
328 /* 8-bit direct read (low 4K) */
329 #define _ipw_read8(ipw, ofs) readb((ipw)->hw_base + (ofs))
330
331 /* 8-bit direct read (low 4K), with debug wrapper */
332 static inline u8 __ipw_read8(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
333 {
334 IPW_DEBUG_IO("%s %d: read_direct8(0x%08X)\n", f, l, (u32) (ofs));
335 return _ipw_read8(ipw, ofs);
336 }
337
338 /* alias to 8-bit direct read (low 4K of SRAM/regs), with debug wrapper */
339 #define ipw_read8(ipw, ofs) __ipw_read8(__FILE__, __LINE__, ipw, ofs)
340
341 /* 16-bit direct read (low 4K) */
342 #define _ipw_read16(ipw, ofs) readw((ipw)->hw_base + (ofs))
343
344 /* 16-bit direct read (low 4K), with debug wrapper */
345 static inline u16 __ipw_read16(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
346 {
347 IPW_DEBUG_IO("%s %d: read_direct16(0x%08X)\n", f, l, (u32) (ofs));
348 return _ipw_read16(ipw, ofs);
349 }
350
351 /* alias to 16-bit direct read (low 4K of SRAM/regs), with debug wrapper */
352 #define ipw_read16(ipw, ofs) __ipw_read16(__FILE__, __LINE__, ipw, ofs)
353
354 /* 32-bit direct read (low 4K) */
355 #define _ipw_read32(ipw, ofs) readl((ipw)->hw_base + (ofs))
356
357 /* 32-bit direct read (low 4K), with debug wrapper */
358 static inline u32 __ipw_read32(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
359 {
360 IPW_DEBUG_IO("%s %d: read_direct32(0x%08X)\n", f, l, (u32) (ofs));
361 return _ipw_read32(ipw, ofs);
362 }
363
364 /* alias to 32-bit direct read (low 4K of SRAM/regs), with debug wrapper */
365 #define ipw_read32(ipw, ofs) __ipw_read32(__FILE__, __LINE__, ipw, ofs)
366
367 /* multi-byte read (above 4K), with debug wrapper */
368 static void _ipw_read_indirect(struct ipw_priv *, u32, u8 *, int);
369 static inline void __ipw_read_indirect(const char *f, int l,
370 struct ipw_priv *a, u32 b, u8 * c, int d)
371 {
372 IPW_DEBUG_IO("%s %d: read_indirect(0x%08X) %d bytes\n", f, l, (u32) (b),
373 d);
374 _ipw_read_indirect(a, b, c, d);
375 }
376
377 /* alias to multi-byte read (SRAM/regs above 4K), with debug wrapper */
378 #define ipw_read_indirect(a, b, c, d) __ipw_read_indirect(__FILE__, __LINE__, a, b, c, d)
379
380 /* alias to multi-byte read (SRAM/regs above 4K), with debug wrapper */
381 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * data,
382 int num);
383 #define ipw_write_indirect(a, b, c, d) \
384 IPW_DEBUG_IO("%s %d: write_indirect(0x%08X) %d bytes\n", __FILE__, __LINE__, (u32)(b), d); \
385 _ipw_write_indirect(a, b, c, d)
386
/* 32-bit indirect write (above 4K): latch the target address into the
 * indirect-address register, then write the value through the
 * indirect-data window. */
static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value)
{
	IPW_DEBUG_IO(" %p : reg = 0x%8X : value = 0x%8X\n", priv, reg, value);
	_ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
	_ipw_write32(priv, IPW_INDIRECT_DATA, value);
}
394
/* 8-bit indirect write (above 4K): latch the dword-aligned address,
 * then write the byte at its offset within the indirect-data window. */
static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value)
{
	u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK;	/* dword align */
	u32 dif_len = reg - aligned_addr;	/* byte offset within the dword */

	IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
	_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
	_ipw_write8(priv, IPW_INDIRECT_DATA + dif_len, value);
}
405
/* 16-bit indirect write (above 4K): latch the dword-aligned address,
 * then write the halfword at its offset within the indirect-data
 * window. */
static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value)
{
	u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK;	/* dword align */
	/* clearing bit 0 forces a halfword-aligned offset; an odd "reg"
	 * is silently rounded down — presumably callers only pass
	 * 16-bit-aligned addresses (TODO confirm) */
	u32 dif_len = (reg - aligned_addr) & (~0x1ul);

	IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
	_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
	_ipw_write16(priv, IPW_INDIRECT_DATA + dif_len, value);
}
416
/* 8-bit indirect read (above 4K): read the containing dword through the
 * indirect window, then extract the addressed byte. */
static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg)
{
	u32 word;
	_ipw_write32(priv, IPW_INDIRECT_ADDR, reg & IPW_INDIRECT_ADDR_MASK);
	IPW_DEBUG_IO(" reg = 0x%8X : \n", reg);
	word = _ipw_read32(priv, IPW_INDIRECT_DATA);
	/* select the byte within the dword via the low two address bits */
	return (word >> ((reg & 0x3) * 8)) & 0xff;
}
426
/* 32-bit indirect read (above 4K): latch the address, then read the
 * value back through the indirect-data window.  Unlike the 8-bit
 * variant, no alignment masking is applied — presumably callers pass
 * dword-aligned addresses (TODO confirm). */
static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg)
{
	u32 value;

	IPW_DEBUG_IO("%p : reg = 0x%08x\n", priv, reg);

	_ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
	value = _ipw_read32(priv, IPW_INDIRECT_DATA);
	IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x \n", reg, value);
	return value;
}
439
440 /* General purpose, no alignment requirement, iterative (multi-byte) read, */
441 /* for area above 1st 4K of SRAM/reg space */
442 static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
443 int num)
444 {
445 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */
446 u32 dif_len = addr - aligned_addr;
447 u32 i;
448
449 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
450
451 if (num <= 0) {
452 return;
453 }
454
455 /* Read the first dword (or portion) byte by byte */
456 if (unlikely(dif_len)) {
457 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
458 /* Start reading at aligned_addr + dif_len */
459 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--)
460 *buf++ = _ipw_read8(priv, IPW_INDIRECT_DATA + i);
461 aligned_addr += 4;
462 }
463
464 /* Read all of the middle dwords as dwords, with auto-increment */
465 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
466 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
467 *(u32 *) buf = _ipw_read32(priv, IPW_AUTOINC_DATA);
468
469 /* Read the last dword (or portion) byte by byte */
470 if (unlikely(num)) {
471 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
472 for (i = 0; num > 0; i++, num--)
473 *buf++ = ipw_read8(priv, IPW_INDIRECT_DATA + i);
474 }
475 }
476
/* General purpose, no alignment requirement, iterative (multi-byte)
 * write, for area above 1st 4K of SRAM/reg space.
 *
 * Mirror image of _ipw_read_indirect(): head and tail partial dwords
 * go byte-by-byte through the indirect window, the aligned middle as
 * dwords through the auto-increment window. */
static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
				int num)
{
	u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK;	/* dword align */
	u32 dif_len = addr - aligned_addr;
	u32 i;

	IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);

	if (num <= 0) {
		return;
	}

	/* Write the first dword (or portion) byte by byte */
	if (unlikely(dif_len)) {
		_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
		/* Start writing at aligned_addr + dif_len */
		for (i = dif_len; ((i < 4) && (num > 0)); i++, num--, buf++)
			_ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
		aligned_addr += 4;
	}

	/* Write all of the middle dwords as dwords, with auto-increment */
	_ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
	for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
		_ipw_write32(priv, IPW_AUTOINC_DATA, *(u32 *) buf);

	/* Write the last dword (or portion) byte by byte */
	if (unlikely(num)) {
		_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
		for (i = 0; num > 0; i++, num--, buf++)
			_ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
	}
}
513
/* General purpose, no alignment requirement, iterative (multi-byte) write, */
/* for 1st 4K of SRAM/regs space */
static void ipw_write_direct(struct ipw_priv *priv, u32 addr, void *buf,
			     int num)
{
	/* plain MMIO copy; addr is an offset into the low 4K window */
	memcpy_toio((priv->hw_base + addr), buf, num);
}
521
/* Set bit(s) in low 4K of SRAM/regs */
static inline void ipw_set_bit(struct ipw_priv *priv, u32 reg, u32 mask)
{
	/* read-modify-write; NOTE(review): the value argument is expanded
	 * again inside ipw_write32()'s debug trace, so with IO debugging
	 * enabled this performs a second MMIO read — verify intended */
	ipw_write32(priv, reg, ipw_read32(priv, reg) | mask);
}
527
/* Clear bit(s) in low 4K of SRAM/regs */
static inline void ipw_clear_bit(struct ipw_priv *priv, u32 reg, u32 mask)
{
	/* read-modify-write; NOTE(review): the value argument is expanded
	 * again inside ipw_write32()'s debug trace, so with IO debugging
	 * enabled this performs a second MMIO read — verify intended */
	ipw_write32(priv, reg, ipw_read32(priv, reg) & ~mask);
}
533
/* Unmask all device interrupts unless already enabled.  Lockless:
 * callers serialize via priv->irq_lock (see ipw_enable_interrupts). */
static inline void __ipw_enable_interrupts(struct ipw_priv *priv)
{
	if (priv->status & STATUS_INT_ENABLED)
		return;
	priv->status |= STATUS_INT_ENABLED;
	ipw_write32(priv, IPW_INTA_MASK_R, IPW_INTA_MASK_ALL);
}
541
/* Mask all device interrupts unless already disabled.  Lockless:
 * callers serialize via priv->irq_lock (see ipw_disable_interrupts). */
static inline void __ipw_disable_interrupts(struct ipw_priv *priv)
{
	if (!(priv->status & STATUS_INT_ENABLED))
		return;
	priv->status &= ~STATUS_INT_ENABLED;
	ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
}
549
/* Locked wrapper: take the IRQ lock and unmask device interrupts. */
static inline void ipw_enable_interrupts(struct ipw_priv *priv)
{
	unsigned long flags;

	spin_lock_irqsave(&priv->irq_lock, flags);
	__ipw_enable_interrupts(priv);
	spin_unlock_irqrestore(&priv->irq_lock, flags);
}
558
/* Locked wrapper: take the IRQ lock and mask device interrupts. */
static inline void ipw_disable_interrupts(struct ipw_priv *priv)
{
	unsigned long flags;

	spin_lock_irqsave(&priv->irq_lock, flags);
	__ipw_disable_interrupts(priv);
	spin_unlock_irqrestore(&priv->irq_lock, flags);
}
567
568 static char *ipw_error_desc(u32 val)
569 {
570 switch (val) {
571 case IPW_FW_ERROR_OK:
572 return "ERROR_OK";
573 case IPW_FW_ERROR_FAIL:
574 return "ERROR_FAIL";
575 case IPW_FW_ERROR_MEMORY_UNDERFLOW:
576 return "MEMORY_UNDERFLOW";
577 case IPW_FW_ERROR_MEMORY_OVERFLOW:
578 return "MEMORY_OVERFLOW";
579 case IPW_FW_ERROR_BAD_PARAM:
580 return "BAD_PARAM";
581 case IPW_FW_ERROR_BAD_CHECKSUM:
582 return "BAD_CHECKSUM";
583 case IPW_FW_ERROR_NMI_INTERRUPT:
584 return "NMI_INTERRUPT";
585 case IPW_FW_ERROR_BAD_DATABASE:
586 return "BAD_DATABASE";
587 case IPW_FW_ERROR_ALLOC_FAIL:
588 return "ALLOC_FAIL";
589 case IPW_FW_ERROR_DMA_UNDERRUN:
590 return "DMA_UNDERRUN";
591 case IPW_FW_ERROR_DMA_STATUS:
592 return "DMA_STATUS";
593 case IPW_FW_ERROR_DINO_ERROR:
594 return "DINO_ERROR";
595 case IPW_FW_ERROR_EEPROM_ERROR:
596 return "EEPROM_ERROR";
597 case IPW_FW_ERROR_SYSASSERT:
598 return "SYSASSERT";
599 case IPW_FW_ERROR_FATAL_ERROR:
600 return "FATAL_ERROR";
601 default:
602 return "UNKNOWN_ERROR";
603 }
604 }
605
/* Dump a previously captured firmware error log: the global
 * status/config words, then every error element and event-log entry.
 * (priv is currently unused but kept for call-site symmetry.) */
static void ipw_dump_error_log(struct ipw_priv *priv,
			       struct ipw_fw_error *error)
{
	u32 i;

	if (!error) {
		IPW_ERROR("Error allocating and capturing error log. "
			  "Nothing to dump.\n");
		return;
	}

	IPW_ERROR("Start IPW Error Log Dump:\n");
	IPW_ERROR("Status: 0x%08X, Config: %08X\n",
		  error->status, error->config);

	for (i = 0; i < error->elem_len; i++)
		IPW_ERROR("%s %i 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
			  ipw_error_desc(error->elem[i].desc),
			  error->elem[i].time,
			  error->elem[i].blink1,
			  error->elem[i].blink2,
			  error->elem[i].link1,
			  error->elem[i].link2, error->elem[i].data);
	for (i = 0; i < error->log_len; i++)
		IPW_ERROR("%i\t0x%08x\t%i\n",
			  error->log[i].time,
			  error->log[i].data, error->log[i].event);
}
634
635 static inline int ipw_is_init(struct ipw_priv *priv)
636 {
637 return (priv->status & STATUS_INIT) ? 1 : 0;
638 }
639
/*
 * Read the value of a device ordinal (statistic/config datum) into
 * "val".  On entry *len is the caller's buffer size in bytes; on
 * success it is updated to the number of bytes stored.
 *
 * The ordinal space is split into three tables selected by the
 * table-id bits of "ord":
 *   table 0 - direct-mapped u32 values
 *   table 1 - indirectly addressed u32 values
 *   table 2 - variable-sized records (length/count packed in a dword)
 *
 * Returns 0 on success, -EINVAL on bad arguments, uninitialized
 * tables, an out-of-range ordinal, or a too-small buffer (for table 2,
 * *len is then set to the required size).
 */
static int ipw_get_ordinal(struct ipw_priv *priv, u32 ord, void *val, u32 * len)
{
	u32 addr, field_info, field_len, field_count, total_len;

	IPW_DEBUG_ORD("ordinal = %i\n", ord);

	if (!priv || !val || !len) {
		IPW_DEBUG_ORD("Invalid argument\n");
		return -EINVAL;
	}

	/* verify device ordinal tables have been initialized
	 * (ipw_init_ordinals() fills these in) */
	if (!priv->table0_addr || !priv->table1_addr || !priv->table2_addr) {
		IPW_DEBUG_ORD("Access ordinals before initialization\n");
		return -EINVAL;
	}

	switch (IPW_ORD_TABLE_ID_MASK & ord) {
	case IPW_ORD_TABLE_0_MASK:
		/*
		 * TABLE 0: Direct access to a table of 32 bit values
		 *
		 * This is a very simple table with the data directly
		 * read from the table
		 */

		/* remove the table id from the ordinal */
		ord &= IPW_ORD_TABLE_VALUE_MASK;

		/* boundary check
		 * NOTE(review): ">" accepts ord == table0_len; confirm
		 * whether the table length is a count (then ">=" would be
		 * the strict check) or a last-valid index */
		if (ord > priv->table0_len) {
			IPW_DEBUG_ORD("ordinal value (%i) longer then "
				      "max (%i)\n", ord, priv->table0_len);
			return -EINVAL;
		}

		/* verify we have enough room to store the value */
		if (*len < sizeof(u32)) {
			IPW_DEBUG_ORD("ordinal buffer length too small, "
				      "need %zd\n", sizeof(u32));
			return -EINVAL;
		}

		IPW_DEBUG_ORD("Reading TABLE0[%i] from offset 0x%08x\n",
			      ord, priv->table0_addr + (ord << 2));

		*len = sizeof(u32);
		ord <<= 2;	/* u32 index -> byte offset */
		*((u32 *) val) = ipw_read32(priv, priv->table0_addr + ord);
		break;

	case IPW_ORD_TABLE_1_MASK:
		/*
		 * TABLE 1: Indirect access to a table of 32 bit values
		 *
		 * This is a fairly large table of u32 values each
		 * representing starting addr for the data (which is
		 * also a u32)
		 */

		/* remove the table id from the ordinal */
		ord &= IPW_ORD_TABLE_VALUE_MASK;

		/* boundary check */
		if (ord > priv->table1_len) {
			IPW_DEBUG_ORD("ordinal value too long\n");
			return -EINVAL;
		}

		/* verify we have enough room to store the value */
		if (*len < sizeof(u32)) {
			IPW_DEBUG_ORD("ordinal buffer length too small, "
				      "need %zd\n", sizeof(u32));
			return -EINVAL;
		}

		*((u32 *) val) =
		    ipw_read_reg32(priv, (priv->table1_addr + (ord << 2)));
		*len = sizeof(u32);
		break;

	case IPW_ORD_TABLE_2_MASK:
		/*
		 * TABLE 2: Indirect access to a table of variable sized values
		 *
		 * This table consist of six values, each containing
		 * - dword containing the starting offset of the data
		 * - dword containing the length in the first 16bits
		 *   and the count in the second 16bits
		 */

		/* remove the table id from the ordinal */
		ord &= IPW_ORD_TABLE_VALUE_MASK;

		/* boundary check */
		if (ord > priv->table2_len) {
			IPW_DEBUG_ORD("ordinal value too long\n");
			return -EINVAL;
		}

		/* get the address of statistic */
		addr = ipw_read_reg32(priv, priv->table2_addr + (ord << 3));

		/* get the second DW of statistics ;
		 * two 16-bit words - first is length, second is count */
		field_info =
		    ipw_read_reg32(priv,
				   priv->table2_addr + (ord << 3) +
				   sizeof(u32));

		/* get each entry length
		 * NOTE(review): picking halves via u16 pointer punning
		 * relies on host little-endian layout — verify */
		field_len = *((u16 *) & field_info);

		/* get number of entries */
		field_count = *(((u16 *) & field_info) + 1);

		/* abort if not enough memory */
		total_len = field_len * field_count;
		if (total_len > *len) {
			*len = total_len;	/* report required size */
			return -EINVAL;
		}

		*len = total_len;
		if (!total_len)
			return 0;

		IPW_DEBUG_ORD("addr = 0x%08x, total_len = %i, "
			      "field_info = 0x%08x\n",
			      addr, total_len, field_info);
		ipw_read_indirect(priv, addr, val, total_len);
		break;

	default:
		IPW_DEBUG_ORD("Invalid ordinal!\n");
		return -EINVAL;

	}

	return 0;
}
781
/*
 * Cache the device ordinal table addresses and lengths in priv.
 * Table 0 lives at a fixed SRAM address; tables 1 and 2 are located
 * through pointers stored at fixed addresses.  Must run before
 * ipw_get_ordinal() (which checks these fields).
 */
static void ipw_init_ordinals(struct ipw_priv *priv)
{
	priv->table0_addr = IPW_ORDINALS_TABLE_LOWER;
	priv->table0_len = ipw_read32(priv, priv->table0_addr);

	IPW_DEBUG_ORD("table 0 offset at 0x%08x, len = %i\n",
		      priv->table0_addr, priv->table0_len);

	priv->table1_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_1);
	priv->table1_len = ipw_read_reg32(priv, priv->table1_addr);

	IPW_DEBUG_ORD("table 1 offset at 0x%08x, len = %i\n",
		      priv->table1_addr, priv->table1_len);

	priv->table2_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_2);
	priv->table2_len = ipw_read_reg32(priv, priv->table2_addr);
	priv->table2_len &= 0x0000ffff;	/* use first two bytes */

	IPW_DEBUG_ORD("table 2 offset at 0x%08x, len = %i\n",
		      priv->table2_addr, priv->table2_len);

}
804
805 static u32 ipw_register_toggle(u32 reg)
806 {
807 reg &= ~IPW_START_STANDBY;
808 if (reg & IPW_GATE_ODMA)
809 reg &= ~IPW_GATE_ODMA;
810 if (reg & IPW_GATE_IDMA)
811 reg &= ~IPW_GATE_IDMA;
812 if (reg & IPW_GATE_ADMA)
813 reg &= ~IPW_GATE_ADMA;
814 return reg;
815 }
816
817 /*
818 * LED behavior:
819 * - On radio ON, turn on any LEDs that require to be on during start
820 * - On initialization, start unassociated blink
821 * - On association, disable unassociated blink
822 * - On disassociation, start unassociated blink
823 * - On radio OFF, turn off any LEDs started during radio on
824 *
825 */
826 #define LD_TIME_LINK_ON msecs_to_jiffies(300)
827 #define LD_TIME_LINK_OFF msecs_to_jiffies(2700)
828 #define LD_TIME_ACT_ON msecs_to_jiffies(250)
829
/*
 * Turn the link/association LED on; if not yet associated, schedule the
 * matching "off" work so the LED blinks while unassociated.
 * Takes priv->lock.
 */
static void ipw_led_link_on(struct ipw_priv *priv)
{
	unsigned long flags;
	u32 led;

	/* If configured to not use LEDs, or nic_type is 1,
	 * then we don't toggle a LINK led */
	if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
		return;

	spin_lock_irqsave(&priv->lock, flags);

	/* skip when RF-killed or the LED is already on */
	if (!(priv->status & STATUS_RF_KILL_MASK) &&
	    !(priv->status & STATUS_LED_LINK_ON)) {
		IPW_DEBUG_LED("Link LED On\n");
		led = ipw_read_reg32(priv, IPW_EVENT_REG);
		led |= priv->led_association_on;

		led = ipw_register_toggle(led);

		IPW_DEBUG_LED("Reg: 0x%08X\n", led);
		ipw_write_reg32(priv, IPW_EVENT_REG, led);

		priv->status |= STATUS_LED_LINK_ON;

		/* If we aren't associated, schedule turning the LED off */
		if (!(priv->status & STATUS_ASSOCIATED))
			queue_delayed_work(priv->workqueue,
					   &priv->led_link_off,
					   LD_TIME_LINK_ON);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
}
864
865 static void ipw_bg_led_link_on(struct work_struct *work)
866 {
867 struct ipw_priv *priv =
868 container_of(work, struct ipw_priv, led_link_on.work);
869 mutex_lock(&priv->mutex);
870 ipw_led_link_on(priv);
871 mutex_unlock(&priv->mutex);
872 }
873
/*
 * Turn the link/association LED off; if unassociated with the radio on,
 * schedule the matching "on" work so the LED keeps blinking.
 * Takes priv->lock.
 */
static void ipw_led_link_off(struct ipw_priv *priv)
{
	unsigned long flags;
	u32 led;

	/* If configured not to use LEDs, or nic type is 1,
	 * then we don't toggle the LINK led. */
	if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
		return;

	spin_lock_irqsave(&priv->lock, flags);

	if (priv->status & STATUS_LED_LINK_ON) {
		led = ipw_read_reg32(priv, IPW_EVENT_REG);
		led &= priv->led_association_off;
		led = ipw_register_toggle(led);

		IPW_DEBUG_LED("Reg: 0x%08X\n", led);
		ipw_write_reg32(priv, IPW_EVENT_REG, led);

		IPW_DEBUG_LED("Link LED Off\n");

		priv->status &= ~STATUS_LED_LINK_ON;

		/* If we aren't associated and the radio is on, schedule
		 * turning the LED on (blink while unassociated) */
		if (!(priv->status & STATUS_RF_KILL_MASK) &&
		    !(priv->status & STATUS_ASSOCIATED))
			queue_delayed_work(priv->workqueue, &priv->led_link_on,
					   LD_TIME_LINK_OFF);

	}

	spin_unlock_irqrestore(&priv->lock, flags);
}
909
910 static void ipw_bg_led_link_off(struct work_struct *work)
911 {
912 struct ipw_priv *priv =
913 container_of(work, struct ipw_priv, led_link_off.work);
914 mutex_lock(&priv->mutex);
915 ipw_led_link_off(priv);
916 mutex_unlock(&priv->mutex);
917 }
918
919 static void __ipw_led_activity_on(struct ipw_priv *priv)
920 {
921 u32 led;
922
923 if (priv->config & CFG_NO_LED)
924 return;
925
926 if (priv->status & STATUS_RF_KILL_MASK)
927 return;
928
929 if (!(priv->status & STATUS_LED_ACT_ON)) {
930 led = ipw_read_reg32(priv, IPW_EVENT_REG);
931 led |= priv->led_activity_on;
932
933 led = ipw_register_toggle(led);
934
935 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
936 ipw_write_reg32(priv, IPW_EVENT_REG, led);
937
938 IPW_DEBUG_LED("Activity LED On\n");
939
940 priv->status |= STATUS_LED_ACT_ON;
941
942 cancel_delayed_work(&priv->led_act_off);
943 queue_delayed_work(priv->workqueue, &priv->led_act_off,
944 LD_TIME_ACT_ON);
945 } else {
946 /* Reschedule LED off for full time period */
947 cancel_delayed_work(&priv->led_act_off);
948 queue_delayed_work(priv->workqueue, &priv->led_act_off,
949 LD_TIME_ACT_ON);
950 }
951 }
952
953 #if 0
954 void ipw_led_activity_on(struct ipw_priv *priv)
955 {
956 unsigned long flags;
957 spin_lock_irqsave(&priv->lock, flags);
958 __ipw_led_activity_on(priv);
959 spin_unlock_irqrestore(&priv->lock, flags);
960 }
961 #endif /* 0 */
962
/* Turn the activity LED off if it is on.  Takes priv->lock. */
static void ipw_led_activity_off(struct ipw_priv *priv)
{
	unsigned long flags;
	u32 led;

	if (priv->config & CFG_NO_LED)
		return;

	spin_lock_irqsave(&priv->lock, flags);

	if (priv->status & STATUS_LED_ACT_ON) {
		led = ipw_read_reg32(priv, IPW_EVENT_REG);
		led &= priv->led_activity_off;

		led = ipw_register_toggle(led);

		IPW_DEBUG_LED("Reg: 0x%08X\n", led);
		ipw_write_reg32(priv, IPW_EVENT_REG, led);

		IPW_DEBUG_LED("Activity LED Off\n");

		priv->status &= ~STATUS_LED_ACT_ON;
	}

	spin_unlock_irqrestore(&priv->lock, flags);
}
989
990 static void ipw_bg_led_activity_off(struct work_struct *work)
991 {
992 struct ipw_priv *priv =
993 container_of(work, struct ipw_priv, led_act_off.work);
994 mutex_lock(&priv->mutex);
995 ipw_led_activity_off(priv);
996 mutex_unlock(&priv->mutex);
997 }
998
/*
 * Light the mode ("band") LED pattern for the associated network's
 * 802.11 band (a/b/g).  Only nic type 1 has mode LEDs, and an active
 * assoc_network is required.  Takes priv->lock.
 */
static void ipw_led_band_on(struct ipw_priv *priv)
{
	unsigned long flags;
	u32 led;

	/* Only nic type 1 supports mode LEDs */
	if (priv->config & CFG_NO_LED ||
	    priv->nic_type != EEPROM_NIC_TYPE_1 || !priv->assoc_network)
		return;

	spin_lock_irqsave(&priv->lock, flags);

	led = ipw_read_reg32(priv, IPW_EVENT_REG);
	if (priv->assoc_network->mode == IEEE_A) {
		/* 802.11a: OFDM on, association LED off */
		led |= priv->led_ofdm_on;
		led &= priv->led_association_off;
		IPW_DEBUG_LED("Mode LED On: 802.11a\n");
	} else if (priv->assoc_network->mode == IEEE_G) {
		/* 802.11g: both OFDM and association LEDs on */
		led |= priv->led_ofdm_on;
		led |= priv->led_association_on;
		IPW_DEBUG_LED("Mode LED On: 802.11g\n");
	} else {
		/* 802.11b: OFDM off, association LED on */
		led &= priv->led_ofdm_off;
		led |= priv->led_association_on;
		IPW_DEBUG_LED("Mode LED On: 802.11b\n");
	}

	led = ipw_register_toggle(led);

	IPW_DEBUG_LED("Reg: 0x%08X\n", led);
	ipw_write_reg32(priv, IPW_EVENT_REG, led);

	spin_unlock_irqrestore(&priv->lock, flags);
}
1033
1034 static void ipw_led_band_off(struct ipw_priv *priv)
1035 {
1036 unsigned long flags;
1037 u32 led;
1038
1039 /* Only nic type 1 supports mode LEDs */
1040 if (priv->config & CFG_NO_LED || priv->nic_type != EEPROM_NIC_TYPE_1)
1041 return;
1042
1043 spin_lock_irqsave(&priv->lock, flags);
1044
1045 led = ipw_read_reg32(priv, IPW_EVENT_REG);
1046 led &= priv->led_ofdm_off;
1047 led &= priv->led_association_off;
1048
1049 led = ipw_register_toggle(led);
1050
1051 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1052 ipw_write_reg32(priv, IPW_EVENT_REG, led);
1053
1054 spin_unlock_irqrestore(&priv->lock, flags);
1055 }
1056
/* Radio on: signalled by re-using the link-LED "on" path. */
static void ipw_led_radio_on(struct ipw_priv *priv)
{
	ipw_led_link_on(priv);
}
1061
/* Radio off: extinguish both the activity and link LEDs. */
static void ipw_led_radio_off(struct ipw_priv *priv)
{
	ipw_led_activity_off(priv);
	ipw_led_link_off(priv);
}
1067
/* Association established: light the link LED. */
static void ipw_led_link_up(struct ipw_priv *priv)
{
	/* Set the Link Led on for all nic types */
	ipw_led_link_on(priv);
}
1073
/* Link lost: turn activity and link LEDs off; if any RF-kill switch is
 * engaged, also run the radio-off LED path. */
static void ipw_led_link_down(struct ipw_priv *priv)
{
	ipw_led_activity_off(priv);
	ipw_led_link_off(priv);

	if (priv->status & STATUS_RF_KILL_MASK)
		ipw_led_radio_off(priv);
}
1082
1083 static void ipw_led_init(struct ipw_priv *priv)
1084 {
1085 priv->nic_type = priv->eeprom[EEPROM_NIC_TYPE];
1086
1087 /* Set the default PINs for the link and activity leds */
1088 priv->led_activity_on = IPW_ACTIVITY_LED;
1089 priv->led_activity_off = ~(IPW_ACTIVITY_LED);
1090
1091 priv->led_association_on = IPW_ASSOCIATED_LED;
1092 priv->led_association_off = ~(IPW_ASSOCIATED_LED);
1093
1094 /* Set the default PINs for the OFDM leds */
1095 priv->led_ofdm_on = IPW_OFDM_LED;
1096 priv->led_ofdm_off = ~(IPW_OFDM_LED);
1097
1098 switch (priv->nic_type) {
1099 case EEPROM_NIC_TYPE_1:
1100 /* In this NIC type, the LEDs are reversed.... */
1101 priv->led_activity_on = IPW_ASSOCIATED_LED;
1102 priv->led_activity_off = ~(IPW_ASSOCIATED_LED);
1103 priv->led_association_on = IPW_ACTIVITY_LED;
1104 priv->led_association_off = ~(IPW_ACTIVITY_LED);
1105
1106 if (!(priv->config & CFG_NO_LED))
1107 ipw_led_band_on(priv);
1108
1109 /* And we don't blink link LEDs for this nic, so
1110 * just return here */
1111 return;
1112
1113 case EEPROM_NIC_TYPE_3:
1114 case EEPROM_NIC_TYPE_2:
1115 case EEPROM_NIC_TYPE_4:
1116 case EEPROM_NIC_TYPE_0:
1117 break;
1118
1119 default:
1120 IPW_DEBUG_INFO("Unknown NIC type from EEPROM: %d\n",
1121 priv->nic_type);
1122 priv->nic_type = EEPROM_NIC_TYPE_0;
1123 break;
1124 }
1125
1126 if (!(priv->config & CFG_NO_LED)) {
1127 if (priv->status & STATUS_ASSOCIATED)
1128 ipw_led_link_on(priv);
1129 else
1130 ipw_led_link_off(priv);
1131 }
1132 }
1133
/* Quiesce LED state for shutdown: force every LED off and cancel any
 * pending LED work items so they cannot fire after teardown. */
static void ipw_led_shutdown(struct ipw_priv *priv)
{
	ipw_led_activity_off(priv);
	ipw_led_link_off(priv);
	ipw_led_band_off(priv);
	cancel_delayed_work(&priv->led_link_on);
	cancel_delayed_work(&priv->led_link_off);
	cancel_delayed_work(&priv->led_act_off);
}
1143
1144 /*
1145 * The following adds a new attribute to the sysfs representation
1146 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/ipw/)
 * used for controlling the debug level.
1148 *
1149 * See the level definitions in ipw for details.
1150 */
/* Driver-attribute read: report the current debug bitmask as 0x%08X. */
static ssize_t show_debug_level(struct device_driver *d, char *buf)
{
	return sprintf(buf, "0x%08X\n", ipw_debug_level);
}
1155
1156 static ssize_t store_debug_level(struct device_driver *d, const char *buf,
1157 size_t count)
1158 {
1159 char *p = (char *)buf;
1160 u32 val;
1161
1162 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1163 p++;
1164 if (p[0] == 'x' || p[0] == 'X')
1165 p++;
1166 val = simple_strtoul(p, &p, 16);
1167 } else
1168 val = simple_strtoul(p, &p, 10);
1169 if (p == buf)
1170 printk(KERN_INFO DRV_NAME
1171 ": %s is not in hex or decimal form.\n", buf);
1172 else
1173 ipw_debug_level = val;
1174
1175 return strnlen(buf, count);
1176 }
1177
1178 static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO,
1179 show_debug_level, store_debug_level);
1180
/* Number of entries in the firmware event log: the first dword at the
 * device address held in IPW_EVENT_LOG. */
static inline u32 ipw_get_event_log_len(struct ipw_priv *priv)
{
	/* length = 1st dword in log */
	return ipw_read_reg32(priv, ipw_read32(priv, IPW_EVENT_LOG));
}
1186
/* Copy log_len firmware events into 'log', skipping the leading length
 * dword plus one more dword (presumably a header field -- verify against
 * the firmware log layout). */
static void ipw_capture_event_log(struct ipw_priv *priv,
				  u32 log_len, struct ipw_event *log)
{
	u32 base;

	if (log_len) {
		base = ipw_read32(priv, IPW_EVENT_LOG);
		ipw_read_indirect(priv, base + sizeof(base) + sizeof(u32),
				  (u8 *) log, sizeof(*log) * log_len);
	}
}
1198
/*
 * Snapshot the firmware error state into one kmalloc'd ipw_fw_error.
 * Layout: header struct, then elem_len error elements, then log_len
 * event-log entries, all carved out of error->payload.  GFP_ATOMIC
 * because this runs from the irq tasklet.  Caller owns (frees) the
 * returned buffer; returns NULL on allocation failure.
 *
 * NOTE(review): elem_len/log_len come straight from the hardware; the
 * size arithmetic assumes they are sane (no overflow check) -- confirm
 * the firmware bounds these.
 */
static struct ipw_fw_error *ipw_alloc_error_log(struct ipw_priv *priv)
{
	struct ipw_fw_error *error;
	u32 log_len = ipw_get_event_log_len(priv);
	u32 base = ipw_read32(priv, IPW_ERROR_LOG);
	u32 elem_len = ipw_read_reg32(priv, base);

	error = kmalloc(sizeof(*error) +
			sizeof(*error->elem) * elem_len +
			sizeof(*error->log) * log_len, GFP_ATOMIC);
	if (!error) {
		IPW_ERROR("Memory allocation for firmware error log "
			  "failed.\n");
		return NULL;
	}
	error->jiffies = jiffies;
	error->status = priv->status;
	error->config = priv->config;
	error->elem_len = elem_len;
	error->log_len = log_len;
	error->elem = (struct ipw_error_elem *)error->payload;
	error->log = (struct ipw_event *)(error->elem + elem_len);

	ipw_capture_event_log(priv, log_len, error->log);

	if (elem_len)
		ipw_read_indirect(priv, base + sizeof(base), (u8 *) error->elem,
				  sizeof(*error->elem) * elem_len);

	return error;
}
1230
1231 static ssize_t show_event_log(struct device *d,
1232 struct device_attribute *attr, char *buf)
1233 {
1234 struct ipw_priv *priv = dev_get_drvdata(d);
1235 u32 log_len = ipw_get_event_log_len(priv);
1236 u32 log_size;
1237 struct ipw_event *log;
1238 u32 len = 0, i;
1239
1240 /* not using min() because of its strict type checking */
1241 log_size = PAGE_SIZE / sizeof(*log) > log_len ?
1242 sizeof(*log) * log_len : PAGE_SIZE;
1243 log = kzalloc(log_size, GFP_KERNEL);
1244 if (!log) {
1245 IPW_ERROR("Unable to allocate memory for log\n");
1246 return 0;
1247 }
1248 log_len = log_size / sizeof(*log);
1249 ipw_capture_event_log(priv, log_len, log);
1250
1251 len += snprintf(buf + len, PAGE_SIZE - len, "%08X", log_len);
1252 for (i = 0; i < log_len; i++)
1253 len += snprintf(buf + len, PAGE_SIZE - len,
1254 "\n%08X%08X%08X",
1255 log[i].time, log[i].event, log[i].data);
1256 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1257 kfree(log);
1258 return len;
1259 }
1260
1261 static DEVICE_ATTR(event_log, S_IRUGO, show_event_log, NULL);
1262
1263 static ssize_t show_error(struct device *d,
1264 struct device_attribute *attr, char *buf)
1265 {
1266 struct ipw_priv *priv = dev_get_drvdata(d);
1267 u32 len = 0, i;
1268 if (!priv->error)
1269 return 0;
1270 len += snprintf(buf + len, PAGE_SIZE - len,
1271 "%08lX%08X%08X%08X",
1272 priv->error->jiffies,
1273 priv->error->status,
1274 priv->error->config, priv->error->elem_len);
1275 for (i = 0; i < priv->error->elem_len; i++)
1276 len += snprintf(buf + len, PAGE_SIZE - len,
1277 "\n%08X%08X%08X%08X%08X%08X%08X",
1278 priv->error->elem[i].time,
1279 priv->error->elem[i].desc,
1280 priv->error->elem[i].blink1,
1281 priv->error->elem[i].blink2,
1282 priv->error->elem[i].link1,
1283 priv->error->elem[i].link2,
1284 priv->error->elem[i].data);
1285
1286 len += snprintf(buf + len, PAGE_SIZE - len,
1287 "\n%08X", priv->error->log_len);
1288 for (i = 0; i < priv->error->log_len; i++)
1289 len += snprintf(buf + len, PAGE_SIZE - len,
1290 "\n%08X%08X%08X",
1291 priv->error->log[i].time,
1292 priv->error->log[i].event,
1293 priv->error->log[i].data);
1294 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1295 return len;
1296 }
1297
/* sysfs write: discard the captured firmware error snapshot.  Any write
 * clears it; kfree(NULL) is a safe no-op. */
static ssize_t clear_error(struct device *d,
			   struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct ipw_priv *priv = dev_get_drvdata(d);

	kfree(priv->error);
	priv->error = NULL;
	return count;
}

static DEVICE_ATTR(error, S_IRUGO | S_IWUSR, show_error, clear_error);
1310
1311 static ssize_t show_cmd_log(struct device *d,
1312 struct device_attribute *attr, char *buf)
1313 {
1314 struct ipw_priv *priv = dev_get_drvdata(d);
1315 u32 len = 0, i;
1316 if (!priv->cmdlog)
1317 return 0;
1318 for (i = (priv->cmdlog_pos + 1) % priv->cmdlog_len;
1319 (i != priv->cmdlog_pos) && (PAGE_SIZE - len);
1320 i = (i + 1) % priv->cmdlog_len) {
1321 len +=
1322 snprintf(buf + len, PAGE_SIZE - len,
1323 "\n%08lX%08X%08X%08X\n", priv->cmdlog[i].jiffies,
1324 priv->cmdlog[i].retcode, priv->cmdlog[i].cmd.cmd,
1325 priv->cmdlog[i].cmd.len);
1326 len +=
1327 snprintk_buf(buf + len, PAGE_SIZE - len,
1328 (u8 *) priv->cmdlog[i].cmd.param,
1329 priv->cmdlog[i].cmd.len);
1330 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1331 }
1332 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1333 return len;
1334 }
1335
1336 static DEVICE_ATTR(cmd_log, S_IRUGO, show_cmd_log, NULL);
1337
1338 #ifdef CONFIG_IPW2200_PROMISCUOUS
1339 static void ipw_prom_free(struct ipw_priv *priv);
1340 static int ipw_prom_alloc(struct ipw_priv *priv);
1341 static ssize_t store_rtap_iface(struct device *d,
1342 struct device_attribute *attr,
1343 const char *buf, size_t count)
1344 {
1345 struct ipw_priv *priv = dev_get_drvdata(d);
1346 int rc = 0;
1347
1348 if (count < 1)
1349 return -EINVAL;
1350
1351 switch (buf[0]) {
1352 case '0':
1353 if (!rtap_iface)
1354 return count;
1355
1356 if (netif_running(priv->prom_net_dev)) {
1357 IPW_WARNING("Interface is up. Cannot unregister.\n");
1358 return count;
1359 }
1360
1361 ipw_prom_free(priv);
1362 rtap_iface = 0;
1363 break;
1364
1365 case '1':
1366 if (rtap_iface)
1367 return count;
1368
1369 rc = ipw_prom_alloc(priv);
1370 if (!rc)
1371 rtap_iface = 1;
1372 break;
1373
1374 default:
1375 return -EINVAL;
1376 }
1377
1378 if (rc) {
1379 IPW_ERROR("Failed to register promiscuous network "
1380 "device (error %d).\n", rc);
1381 }
1382
1383 return count;
1384 }
1385
/* sysfs read: name of the radiotap interface, or "-1" when disabled. */
static ssize_t show_rtap_iface(struct device *d,
			struct device_attribute *attr,
			char *buf)
{
	struct ipw_priv *priv = dev_get_drvdata(d);
	if (rtap_iface)
		return sprintf(buf, "%s", priv->prom_net_dev->name);
	else {
		buf[0] = '-';
		buf[1] = '1';
		buf[2] = '\0';
		/* NOTE(review): the returned length 3 includes the trailing
		 * NUL, so userspace reads a 3-byte value ending in '\0'. */
		return 3;
	}
}

static DEVICE_ATTR(rtap_iface, S_IWUSR | S_IRUSR, show_rtap_iface,
		   store_rtap_iface);
1403
/* sysfs write: set the promiscuous capture filter bitmask.  Requires the
 * rtap interface (prom_priv) to exist; parse failures silently store 0
 * (simple_strtol's result is not validated). */
static ssize_t store_rtap_filter(struct device *d,
			 struct device_attribute *attr,
			 const char *buf, size_t count)
{
	struct ipw_priv *priv = dev_get_drvdata(d);

	if (!priv->prom_priv) {
		IPW_ERROR("Attempting to set filter without "
			  "rtap_iface enabled.\n");
		return -EPERM;
	}

	priv->prom_priv->filter = simple_strtol(buf, NULL, 0);

	IPW_DEBUG_INFO("Setting rtap filter to " BIT_FMT16 "\n",
		       BIT_ARG16(priv->prom_priv->filter));

	return count;
}
1423
/* sysfs read: current capture filter bitmask (0 when rtap disabled). */
static ssize_t show_rtap_filter(struct device *d,
			struct device_attribute *attr,
			char *buf)
{
	struct ipw_priv *priv = dev_get_drvdata(d);
	return sprintf(buf, "0x%04X",
		       priv->prom_priv ? priv->prom_priv->filter : 0);
}

static DEVICE_ATTR(rtap_filter, S_IWUSR | S_IRUSR, show_rtap_filter,
		   store_rtap_filter);
1435 #endif
1436
/* sysfs read: scan-result aging threshold from the ieee80211 layer. */
static ssize_t show_scan_age(struct device *d, struct device_attribute *attr,
			     char *buf)
{
	struct ipw_priv *priv = dev_get_drvdata(d);
	return sprintf(buf, "%d\n", priv->ieee->scan_age);
}
1443
1444 static ssize_t store_scan_age(struct device *d, struct device_attribute *attr,
1445 const char *buf, size_t count)
1446 {
1447 struct ipw_priv *priv = dev_get_drvdata(d);
1448 struct net_device *dev = priv->net_dev;
1449 char buffer[] = "00000000";
1450 unsigned long len =
1451 (sizeof(buffer) - 1) > count ? count : sizeof(buffer) - 1;
1452 unsigned long val;
1453 char *p = buffer;
1454
1455 IPW_DEBUG_INFO("enter\n");
1456
1457 strncpy(buffer, buf, len);
1458 buffer[len] = 0;
1459
1460 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1461 p++;
1462 if (p[0] == 'x' || p[0] == 'X')
1463 p++;
1464 val = simple_strtoul(p, &p, 16);
1465 } else
1466 val = simple_strtoul(p, &p, 10);
1467 if (p == buffer) {
1468 IPW_DEBUG_INFO("%s: user supplied invalid value.\n", dev->name);
1469 } else {
1470 priv->ieee->scan_age = val;
1471 IPW_DEBUG_INFO("set scan_age = %u\n", priv->ieee->scan_age);
1472 }
1473
1474 IPW_DEBUG_INFO("exit\n");
1475 return len;
1476 }
1477
1478 static DEVICE_ATTR(scan_age, S_IWUSR | S_IRUGO, show_scan_age, store_scan_age);
1479
/* sysfs read: 1 if LED control is enabled, 0 if CFG_NO_LED is set. */
static ssize_t show_led(struct device *d, struct device_attribute *attr,
			char *buf)
{
	struct ipw_priv *priv = dev_get_drvdata(d);
	return sprintf(buf, "%d\n", (priv->config & CFG_NO_LED) ? 0 : 1);
}
1486
1487 static ssize_t store_led(struct device *d, struct device_attribute *attr,
1488 const char *buf, size_t count)
1489 {
1490 struct ipw_priv *priv = dev_get_drvdata(d);
1491
1492 IPW_DEBUG_INFO("enter\n");
1493
1494 if (count == 0)
1495 return 0;
1496
1497 if (*buf == 0) {
1498 IPW_DEBUG_LED("Disabling LED control.\n");
1499 priv->config |= CFG_NO_LED;
1500 ipw_led_shutdown(priv);
1501 } else {
1502 IPW_DEBUG_LED("Enabling LED control.\n");
1503 priv->config &= ~CFG_NO_LED;
1504 ipw_led_init(priv);
1505 }
1506
1507 IPW_DEBUG_INFO("exit\n");
1508 return count;
1509 }
1510
1511 static DEVICE_ATTR(led, S_IWUSR | S_IRUGO, show_led, store_led);
1512
1513 static ssize_t show_status(struct device *d,
1514 struct device_attribute *attr, char *buf)
1515 {
1516 struct ipw_priv *p = d->driver_data;
1517 return sprintf(buf, "0x%08x\n", (int)p->status);
1518 }
1519
1520 static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
1521
1522 static ssize_t show_cfg(struct device *d, struct device_attribute *attr,
1523 char *buf)
1524 {
1525 struct ipw_priv *p = d->driver_data;
1526 return sprintf(buf, "0x%08x\n", (int)p->config);
1527 }
1528
1529 static DEVICE_ATTR(cfg, S_IRUGO, show_cfg, NULL);
1530
1531 static ssize_t show_nic_type(struct device *d,
1532 struct device_attribute *attr, char *buf)
1533 {
1534 struct ipw_priv *priv = d->driver_data;
1535 return sprintf(buf, "TYPE: %d\n", priv->nic_type);
1536 }
1537
1538 static DEVICE_ATTR(nic_type, S_IRUGO, show_nic_type, NULL);
1539
1540 static ssize_t show_ucode_version(struct device *d,
1541 struct device_attribute *attr, char *buf)
1542 {
1543 u32 len = sizeof(u32), tmp = 0;
1544 struct ipw_priv *p = d->driver_data;
1545
1546 if (ipw_get_ordinal(p, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len))
1547 return 0;
1548
1549 return sprintf(buf, "0x%08x\n", tmp);
1550 }
1551
1552 static DEVICE_ATTR(ucode_version, S_IWUSR | S_IRUGO, show_ucode_version, NULL);
1553
1554 static ssize_t show_rtc(struct device *d, struct device_attribute *attr,
1555 char *buf)
1556 {
1557 u32 len = sizeof(u32), tmp = 0;
1558 struct ipw_priv *p = d->driver_data;
1559
1560 if (ipw_get_ordinal(p, IPW_ORD_STAT_RTC, &tmp, &len))
1561 return 0;
1562
1563 return sprintf(buf, "0x%08x\n", tmp);
1564 }
1565
1566 static DEVICE_ATTR(rtc, S_IWUSR | S_IRUGO, show_rtc, NULL);
1567
1568 /*
1569 * Add a device attribute to view/control the delay between eeprom
1570 * operations.
1571 */
1572 static ssize_t show_eeprom_delay(struct device *d,
1573 struct device_attribute *attr, char *buf)
1574 {
1575 int n = ((struct ipw_priv *)d->driver_data)->eeprom_delay;
1576 return sprintf(buf, "%i\n", n);
1577 }
1578 static ssize_t store_eeprom_delay(struct device *d,
1579 struct device_attribute *attr,
1580 const char *buf, size_t count)
1581 {
1582 struct ipw_priv *p = d->driver_data;
1583 sscanf(buf, "%i", &p->eeprom_delay);
1584 return strnlen(buf, count);
1585 }
1586
1587 static DEVICE_ATTR(eeprom_delay, S_IWUSR | S_IRUGO,
1588 show_eeprom_delay, store_eeprom_delay);
1589
1590 static ssize_t show_command_event_reg(struct device *d,
1591 struct device_attribute *attr, char *buf)
1592 {
1593 u32 reg = 0;
1594 struct ipw_priv *p = d->driver_data;
1595
1596 reg = ipw_read_reg32(p, IPW_INTERNAL_CMD_EVENT);
1597 return sprintf(buf, "0x%08x\n", reg);
1598 }
1599 static ssize_t store_command_event_reg(struct device *d,
1600 struct device_attribute *attr,
1601 const char *buf, size_t count)
1602 {
1603 u32 reg;
1604 struct ipw_priv *p = d->driver_data;
1605
1606 sscanf(buf, "%x", &reg);
1607 ipw_write_reg32(p, IPW_INTERNAL_CMD_EVENT, reg);
1608 return strnlen(buf, count);
1609 }
1610
1611 static DEVICE_ATTR(command_event_reg, S_IWUSR | S_IRUGO,
1612 show_command_event_reg, store_command_event_reg);
1613
1614 static ssize_t show_mem_gpio_reg(struct device *d,
1615 struct device_attribute *attr, char *buf)
1616 {
1617 u32 reg = 0;
1618 struct ipw_priv *p = d->driver_data;
1619
1620 reg = ipw_read_reg32(p, 0x301100);
1621 return sprintf(buf, "0x%08x\n", reg);
1622 }
1623 static ssize_t store_mem_gpio_reg(struct device *d,
1624 struct device_attribute *attr,
1625 const char *buf, size_t count)
1626 {
1627 u32 reg;
1628 struct ipw_priv *p = d->driver_data;
1629
1630 sscanf(buf, "%x", &reg);
1631 ipw_write_reg32(p, 0x301100, reg);
1632 return strnlen(buf, count);
1633 }
1634
1635 static DEVICE_ATTR(mem_gpio_reg, S_IWUSR | S_IRUGO,
1636 show_mem_gpio_reg, store_mem_gpio_reg);
1637
1638 static ssize_t show_indirect_dword(struct device *d,
1639 struct device_attribute *attr, char *buf)
1640 {
1641 u32 reg = 0;
1642 struct ipw_priv *priv = d->driver_data;
1643
1644 if (priv->status & STATUS_INDIRECT_DWORD)
1645 reg = ipw_read_reg32(priv, priv->indirect_dword);
1646 else
1647 reg = 0;
1648
1649 return sprintf(buf, "0x%08x\n", reg);
1650 }
1651 static ssize_t store_indirect_dword(struct device *d,
1652 struct device_attribute *attr,
1653 const char *buf, size_t count)
1654 {
1655 struct ipw_priv *priv = d->driver_data;
1656
1657 sscanf(buf, "%x", &priv->indirect_dword);
1658 priv->status |= STATUS_INDIRECT_DWORD;
1659 return strnlen(buf, count);
1660 }
1661
1662 static DEVICE_ATTR(indirect_dword, S_IWUSR | S_IRUGO,
1663 show_indirect_dword, store_indirect_dword);
1664
1665 static ssize_t show_indirect_byte(struct device *d,
1666 struct device_attribute *attr, char *buf)
1667 {
1668 u8 reg = 0;
1669 struct ipw_priv *priv = d->driver_data;
1670
1671 if (priv->status & STATUS_INDIRECT_BYTE)
1672 reg = ipw_read_reg8(priv, priv->indirect_byte);
1673 else
1674 reg = 0;
1675
1676 return sprintf(buf, "0x%02x\n", reg);
1677 }
1678 static ssize_t store_indirect_byte(struct device *d,
1679 struct device_attribute *attr,
1680 const char *buf, size_t count)
1681 {
1682 struct ipw_priv *priv = d->driver_data;
1683
1684 sscanf(buf, "%x", &priv->indirect_byte);
1685 priv->status |= STATUS_INDIRECT_BYTE;
1686 return strnlen(buf, count);
1687 }
1688
1689 static DEVICE_ATTR(indirect_byte, S_IWUSR | S_IRUGO,
1690 show_indirect_byte, store_indirect_byte);
1691
1692 static ssize_t show_direct_dword(struct device *d,
1693 struct device_attribute *attr, char *buf)
1694 {
1695 u32 reg = 0;
1696 struct ipw_priv *priv = d->driver_data;
1697
1698 if (priv->status & STATUS_DIRECT_DWORD)
1699 reg = ipw_read32(priv, priv->direct_dword);
1700 else
1701 reg = 0;
1702
1703 return sprintf(buf, "0x%08x\n", reg);
1704 }
1705 static ssize_t store_direct_dword(struct device *d,
1706 struct device_attribute *attr,
1707 const char *buf, size_t count)
1708 {
1709 struct ipw_priv *priv = d->driver_data;
1710
1711 sscanf(buf, "%x", &priv->direct_dword);
1712 priv->status |= STATUS_DIRECT_DWORD;
1713 return strnlen(buf, count);
1714 }
1715
1716 static DEVICE_ATTR(direct_dword, S_IWUSR | S_IRUGO,
1717 show_direct_dword, store_direct_dword);
1718
1719 static int rf_kill_active(struct ipw_priv *priv)
1720 {
1721 if (0 == (ipw_read32(priv, 0x30) & 0x10000))
1722 priv->status |= STATUS_RF_KILL_HW;
1723 else
1724 priv->status &= ~STATUS_RF_KILL_HW;
1725
1726 return (priv->status & STATUS_RF_KILL_HW) ? 1 : 0;
1727 }
1728
1729 static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr,
1730 char *buf)
1731 {
1732 /* 0 - RF kill not enabled
1733 1 - SW based RF kill active (sysfs)
1734 2 - HW based RF kill active
1735 3 - Both HW and SW baed RF kill active */
1736 struct ipw_priv *priv = d->driver_data;
1737 int val = ((priv->status & STATUS_RF_KILL_SW) ? 0x1 : 0x0) |
1738 (rf_kill_active(priv) ? 0x2 : 0x0);
1739 return sprintf(buf, "%i\n", val);
1740 }
1741
/*
 * Change the software RF-kill state.  Returns 0 if the requested state
 * matches the current one (no work queued), 1 if a transition started.
 * Enabling kill cancels pending scan work and queues the 'down' work;
 * disabling queues the 'up' work, unless the hardware switch still
 * blocks the radio, in which case the rf_kill poll timer is re-armed.
 */
static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
{
	if ((disable_radio ? 1 : 0) ==
	    ((priv->status & STATUS_RF_KILL_SW) ? 1 : 0))
		return 0;

	IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO %s\n",
			  disable_radio ? "OFF" : "ON");

	if (disable_radio) {
		priv->status |= STATUS_RF_KILL_SW;

		/* workqueue may not exist yet during early init/teardown */
		if (priv->workqueue) {
			cancel_delayed_work(&priv->request_scan);
			cancel_delayed_work(&priv->scan_event);
		}
		queue_work(priv->workqueue, &priv->down);
	} else {
		priv->status &= ~STATUS_RF_KILL_SW;
		if (rf_kill_active(priv)) {
			IPW_DEBUG_RF_KILL("Can not turn radio back on - "
					  "disabled by HW switch\n");
			/* Make sure the RF_KILL check timer is running */
			cancel_delayed_work(&priv->rf_kill);
			queue_delayed_work(priv->workqueue, &priv->rf_kill,
					   round_jiffies_relative(2 * HZ));
		} else
			queue_work(priv->workqueue, &priv->up);
	}

	return 1;
}
1774
1775 static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr,
1776 const char *buf, size_t count)
1777 {
1778 struct ipw_priv *priv = d->driver_data;
1779
1780 ipw_radio_kill_sw(priv, buf[0] == '1');
1781
1782 return count;
1783 }
1784
1785 static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill);
1786
1787 static ssize_t show_speed_scan(struct device *d, struct device_attribute *attr,
1788 char *buf)
1789 {
1790 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1791 int pos = 0, len = 0;
1792 if (priv->config & CFG_SPEED_SCAN) {
1793 while (priv->speed_scan[pos] != 0)
1794 len += sprintf(&buf[len], "%d ",
1795 priv->speed_scan[pos++]);
1796 return len + sprintf(&buf[len], "\n");
1797 }
1798
1799 return sprintf(buf, "0\n");
1800 }
1801
1802 static ssize_t store_speed_scan(struct device *d, struct device_attribute *attr,
1803 const char *buf, size_t count)
1804 {
1805 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1806 int channel, pos = 0;
1807 const char *p = buf;
1808
1809 /* list of space separated channels to scan, optionally ending with 0 */
1810 while ((channel = simple_strtol(p, NULL, 0))) {
1811 if (pos == MAX_SPEED_SCAN - 1) {
1812 priv->speed_scan[pos] = 0;
1813 break;
1814 }
1815
1816 if (ieee80211_is_valid_channel(priv->ieee, channel))
1817 priv->speed_scan[pos++] = channel;
1818 else
1819 IPW_WARNING("Skipping invalid channel request: %d\n",
1820 channel);
1821 p = strchr(p, ' ');
1822 if (!p)
1823 break;
1824 while (*p == ' ' || *p == '\t')
1825 p++;
1826 }
1827
1828 if (pos == 0)
1829 priv->config &= ~CFG_SPEED_SCAN;
1830 else {
1831 priv->speed_scan_pos = 0;
1832 priv->config |= CFG_SPEED_SCAN;
1833 }
1834
1835 return count;
1836 }
1837
1838 static DEVICE_ATTR(speed_scan, S_IWUSR | S_IRUGO, show_speed_scan,
1839 store_speed_scan);
1840
1841 static ssize_t show_net_stats(struct device *d, struct device_attribute *attr,
1842 char *buf)
1843 {
1844 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1845 return sprintf(buf, "%c\n", (priv->config & CFG_NET_STATS) ? '1' : '0');
1846 }
1847
1848 static ssize_t store_net_stats(struct device *d, struct device_attribute *attr,
1849 const char *buf, size_t count)
1850 {
1851 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1852 if (buf[0] == '1')
1853 priv->config |= CFG_NET_STATS;
1854 else
1855 priv->config &= ~CFG_NET_STATS;
1856
1857 return count;
1858 }
1859
1860 static DEVICE_ATTR(net_stats, S_IWUSR | S_IRUGO,
1861 show_net_stats, store_net_stats);
1862
/*
 * sysfs read: human-readable listing of the 2.4GHz and 5.2GHz channels
 * in the current geography, with radar/IBSS/passive flags decoded.
 * NOTE(review): output is built with unbounded sprintf; this relies on
 * bg_channels/a_channels being small enough that the text stays well
 * under PAGE_SIZE -- confirm against the geo tables.
 */
static ssize_t show_channels(struct device *d,
			     struct device_attribute *attr,
			     char *buf)
{
	struct ipw_priv *priv = dev_get_drvdata(d);
	const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
	int len = 0, i;

	len = sprintf(&buf[len],
		      "Displaying %d channels in 2.4Ghz band "
		      "(802.11bg):\n", geo->bg_channels);

	for (i = 0; i < geo->bg_channels; i++) {
		len += sprintf(&buf[len], "%d: BSS%s%s, %s, Band %s.\n",
			       geo->bg[i].channel,
			       geo->bg[i].flags & IEEE80211_CH_RADAR_DETECT ?
			       " (radar spectrum)" : "",
			       ((geo->bg[i].flags & IEEE80211_CH_NO_IBSS) ||
				(geo->bg[i].flags & IEEE80211_CH_RADAR_DETECT))
			       ? "" : ", IBSS",
			       geo->bg[i].flags & IEEE80211_CH_PASSIVE_ONLY ?
			       "passive only" : "active/passive",
			       geo->bg[i].flags & IEEE80211_CH_B_ONLY ?
			       "B" : "B/G");
	}

	len += sprintf(&buf[len],
		       "Displaying %d channels in 5.2Ghz band "
		       "(802.11a):\n", geo->a_channels);
	for (i = 0; i < geo->a_channels; i++) {
		len += sprintf(&buf[len], "%d: BSS%s%s, %s.\n",
			       geo->a[i].channel,
			       geo->a[i].flags & IEEE80211_CH_RADAR_DETECT ?
			       " (radar spectrum)" : "",
			       ((geo->a[i].flags & IEEE80211_CH_NO_IBSS) ||
				(geo->a[i].flags & IEEE80211_CH_RADAR_DETECT))
			       ? "" : ", IBSS",
			       geo->a[i].flags & IEEE80211_CH_PASSIVE_ONLY ?
			       "passive only" : "active/passive");
	}

	return len;
}

static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL);
1908
/* Emit a SIOCGIWAP wireless-extensions event: the current BSSID while
 * associated, or the all-zero address to signal disassociation. */
static void notify_wx_assoc_event(struct ipw_priv *priv)
{
	union iwreq_data wrqu;
	wrqu.ap_addr.sa_family = ARPHRD_ETHER;
	if (priv->status & STATUS_ASSOCIATED)
		memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN);
	else
		memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
	wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
}
1919
/*
 * Bottom-half interrupt handler.  Reads the pending INTA bits (plus any
 * cached by the hard-irq handler in isr_inta), then dispatches each
 * cause under priv->lock.  Unhandled bits are logged.  Chip interrupts
 * are re-enabled on exit.
 */
static void ipw_irq_tasklet(struct ipw_priv *priv)
{
	u32 inta, inta_mask, handled = 0;
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&priv->irq_lock, flags);

	inta = ipw_read32(priv, IPW_INTA_RW);
	inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
	inta &= (IPW_INTA_MASK_ALL & inta_mask);

	/* Add any cached INTA values that need to be handled */
	inta |= priv->isr_inta;

	spin_unlock_irqrestore(&priv->irq_lock, flags);

	spin_lock_irqsave(&priv->lock, flags);

	/* handle all the justifications for the interrupt */
	if (inta & IPW_INTA_BIT_RX_TRANSFER) {
		ipw_rx(priv);
		handled |= IPW_INTA_BIT_RX_TRANSFER;
	}

	if (inta & IPW_INTA_BIT_TX_CMD_QUEUE) {
		IPW_DEBUG_HC("Command completed.\n");
		rc = ipw_queue_tx_reclaim(priv, &priv->txq_cmd, -1);
		priv->status &= ~STATUS_HCMD_ACTIVE;
		/* unblock any __ipw_send_cmd() waiter */
		wake_up_interruptible(&priv->wait_command_queue);
		handled |= IPW_INTA_BIT_TX_CMD_QUEUE;
	}

	if (inta & IPW_INTA_BIT_TX_QUEUE_1) {
		IPW_DEBUG_TX("TX_QUEUE_1\n");
		rc = ipw_queue_tx_reclaim(priv, &priv->txq[0], 0);
		handled |= IPW_INTA_BIT_TX_QUEUE_1;
	}

	if (inta & IPW_INTA_BIT_TX_QUEUE_2) {
		IPW_DEBUG_TX("TX_QUEUE_2\n");
		rc = ipw_queue_tx_reclaim(priv, &priv->txq[1], 1);
		handled |= IPW_INTA_BIT_TX_QUEUE_2;
	}

	if (inta & IPW_INTA_BIT_TX_QUEUE_3) {
		IPW_DEBUG_TX("TX_QUEUE_3\n");
		rc = ipw_queue_tx_reclaim(priv, &priv->txq[2], 2);
		handled |= IPW_INTA_BIT_TX_QUEUE_3;
	}

	if (inta & IPW_INTA_BIT_TX_QUEUE_4) {
		IPW_DEBUG_TX("TX_QUEUE_4\n");
		rc = ipw_queue_tx_reclaim(priv, &priv->txq[3], 3);
		handled |= IPW_INTA_BIT_TX_QUEUE_4;
	}

	if (inta & IPW_INTA_BIT_STATUS_CHANGE) {
		IPW_WARNING("STATUS_CHANGE\n");
		handled |= IPW_INTA_BIT_STATUS_CHANGE;
	}

	if (inta & IPW_INTA_BIT_BEACON_PERIOD_EXPIRED) {
		IPW_WARNING("TX_PERIOD_EXPIRED\n");
		handled |= IPW_INTA_BIT_BEACON_PERIOD_EXPIRED;
	}

	if (inta & IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE) {
		IPW_WARNING("HOST_CMD_DONE\n");
		handled |= IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE;
	}

	if (inta & IPW_INTA_BIT_FW_INITIALIZATION_DONE) {
		IPW_WARNING("FW_INITIALIZATION_DONE\n");
		handled |= IPW_INTA_BIT_FW_INITIALIZATION_DONE;
	}

	if (inta & IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE) {
		IPW_WARNING("PHY_OFF_DONE\n");
		handled |= IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE;
	}

	if (inta & IPW_INTA_BIT_RF_KILL_DONE) {
		/* hardware kill switch engaged: drop the association and
		 * start polling for the switch to be released */
		IPW_DEBUG_RF_KILL("RF_KILL_DONE\n");
		priv->status |= STATUS_RF_KILL_HW;
		wake_up_interruptible(&priv->wait_command_queue);
		priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING);
		cancel_delayed_work(&priv->request_scan);
		cancel_delayed_work(&priv->scan_event);
		schedule_work(&priv->link_down);
		queue_delayed_work(priv->workqueue, &priv->rf_kill, 2 * HZ);
		handled |= IPW_INTA_BIT_RF_KILL_DONE;
	}

	if (inta & IPW_INTA_BIT_FATAL_ERROR) {
		IPW_WARNING("Firmware error detected.  Restarting.\n");
		if (priv->error) {
			IPW_DEBUG_FW("Sysfs 'error' log already exists.\n");
			if (ipw_debug_level & IPW_DL_FW_ERRORS) {
				struct ipw_fw_error *error =
				    ipw_alloc_error_log(priv);
				ipw_dump_error_log(priv, error);
				kfree(error);
			}
		} else {
			priv->error = ipw_alloc_error_log(priv);
			if (priv->error)
				IPW_DEBUG_FW("Sysfs 'error' log captured.\n");
			else
				IPW_DEBUG_FW("Error allocating sysfs 'error' "
					     "log.\n");
			if (ipw_debug_level & IPW_DL_FW_ERRORS)
				ipw_dump_error_log(priv, priv->error);
		}

		/* XXX: If hardware encryption is for WPA/WPA2,
		 * we have to notify the supplicant. */
		if (priv->ieee->sec.encrypt) {
			priv->status &= ~STATUS_ASSOCIATED;
			notify_wx_assoc_event(priv);
		}

		/* Keep the restart process from trying to send host
		 * commands by clearing the INIT status bit */
		priv->status &= ~STATUS_INIT;

		/* Cancel currently queued command. */
		priv->status &= ~STATUS_HCMD_ACTIVE;
		wake_up_interruptible(&priv->wait_command_queue);

		queue_work(priv->workqueue, &priv->adapter_restart);
		handled |= IPW_INTA_BIT_FATAL_ERROR;
	}

	if (inta & IPW_INTA_BIT_PARITY_ERROR) {
		IPW_ERROR("Parity error\n");
		handled |= IPW_INTA_BIT_PARITY_ERROR;
	}

	if (handled != inta) {
		IPW_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	/* enable all interrupts */
	ipw_enable_interrupts(priv);
}
2068
/* Expands to a case label returning the stringified command name. */
#define IPW_CMD(x) case IPW_CMD_ ## x : return #x
/* Map a host-command opcode to its symbolic name for debug output;
 * unknown opcodes yield "UNKNOWN". */
static char *get_cmd_string(u8 cmd)
{
	switch (cmd) {
		IPW_CMD(HOST_COMPLETE);
		IPW_CMD(POWER_DOWN);
		IPW_CMD(SYSTEM_CONFIG);
		IPW_CMD(MULTICAST_ADDRESS);
		IPW_CMD(SSID);
		IPW_CMD(ADAPTER_ADDRESS);
		IPW_CMD(PORT_TYPE);
		IPW_CMD(RTS_THRESHOLD);
		IPW_CMD(FRAG_THRESHOLD);
		IPW_CMD(POWER_MODE);
		IPW_CMD(WEP_KEY);
		IPW_CMD(TGI_TX_KEY);
		IPW_CMD(SCAN_REQUEST);
		IPW_CMD(SCAN_REQUEST_EXT);
		IPW_CMD(ASSOCIATE);
		IPW_CMD(SUPPORTED_RATES);
		IPW_CMD(SCAN_ABORT);
		IPW_CMD(TX_FLUSH);
		IPW_CMD(QOS_PARAMETERS);
		IPW_CMD(DINO_CONFIG);
		IPW_CMD(RSN_CAPABILITIES);
		IPW_CMD(RX_KEY);
		IPW_CMD(CARD_DISABLE);
		IPW_CMD(SEED_NUMBER);
		IPW_CMD(TX_POWER);
		IPW_CMD(COUNTRY_INFO);
		IPW_CMD(AIRONET_INFO);
		IPW_CMD(AP_TX_POWER);
		IPW_CMD(CCKM_INFO);
		IPW_CMD(CCX_VER_INFO);
		IPW_CMD(SET_CALIBRATION);
		IPW_CMD(SENSITIVITY_CALIB);
		IPW_CMD(RETRY_LIMIT);
		IPW_CMD(IPW_PRE_POWER_DOWN);
		IPW_CMD(VAP_BEACON_TEMPLATE);
		IPW_CMD(VAP_DTIM_PERIOD);
		IPW_CMD(EXT_SUPPORTED_RATES);
		IPW_CMD(VAP_LOCAL_TX_PWR_CONSTRAINT);
		IPW_CMD(VAP_QUIET_INTERVALS);
		IPW_CMD(VAP_CHANNEL_SWITCH);
		IPW_CMD(VAP_MANDATORY_CHANNELS);
		IPW_CMD(VAP_CELL_PWR_LIMIT);
		IPW_CMD(VAP_CF_PARAM_SET);
		IPW_CMD(VAP_SET_BEACONING_STATE);
		IPW_CMD(MEASUREMENT);
		IPW_CMD(POWER_CAPABILITY);
		IPW_CMD(SUPPORTED_CHANNELS);
		IPW_CMD(TPC_REPORT);
		IPW_CMD(WME_INFO);
		IPW_CMD(PRODUCTION_COMMAND);
	default:
		return "UNKNOWN";
	}
}
2127
2128 #define HOST_COMPLETE_TIMEOUT HZ
2129
/*
 * Send one host command to the firmware and wait (up to
 * HOST_COMPLETE_TIMEOUT) for its completion.
 *
 * Only one command may be in flight at a time; this is serialized via
 * the STATUS_HCMD_ACTIVE bit under priv->lock.  The interrupt handler
 * clears that bit and wakes priv->wait_command_queue when the firmware
 * acknowledges the command.
 *
 * Returns 0 on success, -EAGAIN if another command is already in
 * flight, -EIO on timeout or RF-kill, or the error from
 * ipw_queue_tx_hcmd().
 */
static int __ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
{
	int rc = 0;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	if (priv->status & STATUS_HCMD_ACTIVE) {
		IPW_ERROR("Failed to send %s: Already sending a command.\n",
			  get_cmd_string(cmd->cmd));
		spin_unlock_irqrestore(&priv->lock, flags);
		return -EAGAIN;
	}

	priv->status |= STATUS_HCMD_ACTIVE;

	/* Record the command in the rolling cmdlog ring, if enabled.
	 * retcode stays -1 until the command completes (see exit:). */
	if (priv->cmdlog) {
		priv->cmdlog[priv->cmdlog_pos].jiffies = jiffies;
		priv->cmdlog[priv->cmdlog_pos].cmd.cmd = cmd->cmd;
		priv->cmdlog[priv->cmdlog_pos].cmd.len = cmd->len;
		memcpy(priv->cmdlog[priv->cmdlog_pos].cmd.param, cmd->param,
		       cmd->len);
		priv->cmdlog[priv->cmdlog_pos].retcode = -1;
	}

	IPW_DEBUG_HC("%s command (#%d) %d bytes: 0x%08X\n",
		     get_cmd_string(cmd->cmd), cmd->cmd, cmd->len,
		     priv->status);

	/* By default do not dump WEP key material into the debug log;
	 * define DEBUG_CMD_WEP_KEY to get the raw payload anyway. */
#ifndef DEBUG_CMD_WEP_KEY
	if (cmd->cmd == IPW_CMD_WEP_KEY)
		IPW_DEBUG_HC("WEP_KEY command masked out for secure.\n");
	else
#endif
		printk_buf(IPW_DL_HOST_COMMAND, (u8 *) cmd->param, cmd->len);

	rc = ipw_queue_tx_hcmd(priv, cmd->cmd, cmd->param, cmd->len, 0);
	if (rc) {
		priv->status &= ~STATUS_HCMD_ACTIVE;
		IPW_ERROR("Failed to send %s: Reason %d\n",
			  get_cmd_string(cmd->cmd), rc);
		spin_unlock_irqrestore(&priv->lock, flags);
		goto exit;
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	/* Wait for the ISR to clear STATUS_HCMD_ACTIVE; rc == 0 means
	 * the timeout elapsed, rc > 0 means we were woken in time. */
	rc = wait_event_interruptible_timeout(priv->wait_command_queue,
					      !(priv->
						status & STATUS_HCMD_ACTIVE),
					      HOST_COMPLETE_TIMEOUT);
	if (rc == 0) {
		/* Re-check under the lock: the command may have completed
		 * in the window between timeout and here. */
		spin_lock_irqsave(&priv->lock, flags);
		if (priv->status & STATUS_HCMD_ACTIVE) {
			IPW_ERROR("Failed to send %s: Command timed out.\n",
				  get_cmd_string(cmd->cmd));
			priv->status &= ~STATUS_HCMD_ACTIVE;
			spin_unlock_irqrestore(&priv->lock, flags);
			rc = -EIO;
			goto exit;
		}
		spin_unlock_irqrestore(&priv->lock, flags);
	} else
		rc = 0;

	if (priv->status & STATUS_RF_KILL_HW) {
		IPW_ERROR("Failed to send %s: Aborted due to RF kill switch.\n",
			  get_cmd_string(cmd->cmd));
		rc = -EIO;
		goto exit;
	}

      exit:
	/* Store the final return code in the cmdlog slot and advance
	 * the ring position. */
	if (priv->cmdlog) {
		priv->cmdlog[priv->cmdlog_pos++].retcode = rc;
		priv->cmdlog_pos %= priv->cmdlog_len;
	}
	return rc;
}
2207
2208 static int ipw_send_cmd_simple(struct ipw_priv *priv, u8 command)
2209 {
2210 struct host_cmd cmd = {
2211 .cmd = command,
2212 };
2213
2214 return __ipw_send_cmd(priv, &cmd);
2215 }
2216
2217 static int ipw_send_cmd_pdu(struct ipw_priv *priv, u8 command, u8 len,
2218 void *data)
2219 {
2220 struct host_cmd cmd = {
2221 .cmd = command,
2222 .len = len,
2223 .param = data,
2224 };
2225
2226 return __ipw_send_cmd(priv, &cmd);
2227 }
2228
2229 static int ipw_send_host_complete(struct ipw_priv *priv)
2230 {
2231 if (!priv) {
2232 IPW_ERROR("Invalid args\n");
2233 return -1;
2234 }
2235
2236 return ipw_send_cmd_simple(priv, IPW_CMD_HOST_COMPLETE);
2237 }
2238
2239 static int ipw_send_system_config(struct ipw_priv *priv)
2240 {
2241 return ipw_send_cmd_pdu(priv, IPW_CMD_SYSTEM_CONFIG,
2242 sizeof(priv->sys_config),
2243 &priv->sys_config);
2244 }
2245
2246 static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len)
2247 {
2248 if (!priv || !ssid) {
2249 IPW_ERROR("Invalid args\n");
2250 return -1;
2251 }
2252
2253 return ipw_send_cmd_pdu(priv, IPW_CMD_SSID, min(len, IW_ESSID_MAX_SIZE),
2254 ssid);
2255 }
2256
2257 static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac)
2258 {
2259 if (!priv || !mac) {
2260 IPW_ERROR("Invalid args\n");
2261 return -1;
2262 }
2263
2264 IPW_DEBUG_INFO("%s: Setting MAC to %s\n",
2265 priv->net_dev->name, print_mac(mac, mac));
2266
2267 return ipw_send_cmd_pdu(priv, IPW_CMD_ADAPTER_ADDRESS, ETH_ALEN, mac);
2268 }
2269
2270 /*
2271 * NOTE: This must be executed from our workqueue as it results in udelay
2272 * being called which may corrupt the keyboard if executed on default
2273 * workqueue
2274 */
static void ipw_adapter_restart(void *adapter)
{
	struct ipw_priv *priv = adapter;

	/* Nothing to restart while the radio is disabled. */
	if (priv->status & STATUS_RF_KILL_MASK)
		return;

	ipw_down(priv);

	/* For IBSS, drop our own network from the scan list so that we
	 * do not immediately re-join the old (now stale) entry. */
	if (priv->assoc_network &&
	    (priv->assoc_network->capability & WLAN_CAPABILITY_IBSS))
		ipw_remove_current_network(priv);

	if (ipw_up(priv)) {
		IPW_ERROR("Failed to up device\n");
		return;
	}
}
2293
2294 static void ipw_bg_adapter_restart(struct work_struct *work)
2295 {
2296 struct ipw_priv *priv =
2297 container_of(work, struct ipw_priv, adapter_restart);
2298 mutex_lock(&priv->mutex);
2299 ipw_adapter_restart(priv);
2300 mutex_unlock(&priv->mutex);
2301 }
2302
2303 #define IPW_SCAN_CHECK_WATCHDOG (5 * HZ)
2304
2305 static void ipw_scan_check(void *data)
2306 {
2307 struct ipw_priv *priv = data;
2308 if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) {
2309 IPW_DEBUG_SCAN("Scan completion watchdog resetting "
2310 "adapter after (%dms).\n",
2311 jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
2312 queue_work(priv->workqueue, &priv->adapter_restart);
2313 }
2314 }
2315
2316 static void ipw_bg_scan_check(struct work_struct *work)
2317 {
2318 struct ipw_priv *priv =
2319 container_of(work, struct ipw_priv, scan_check.work);
2320 mutex_lock(&priv->mutex);
2321 ipw_scan_check(priv);
2322 mutex_unlock(&priv->mutex);
2323 }
2324
2325 static int ipw_send_scan_request_ext(struct ipw_priv *priv,
2326 struct ipw_scan_request_ext *request)
2327 {
2328 return ipw_send_cmd_pdu(priv, IPW_CMD_SCAN_REQUEST_EXT,
2329 sizeof(*request), request);
2330 }
2331
2332 static int ipw_send_scan_abort(struct ipw_priv *priv)
2333 {
2334 if (!priv) {
2335 IPW_ERROR("Invalid args\n");
2336 return -1;
2337 }
2338
2339 return ipw_send_cmd_simple(priv, IPW_CMD_SCAN_ABORT);
2340 }
2341
2342 static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens)
2343 {
2344 struct ipw_sensitivity_calib calib = {
2345 .beacon_rssi_raw = cpu_to_le16(sens),
2346 };
2347
2348 return ipw_send_cmd_pdu(priv, IPW_CMD_SENSITIVITY_CALIB, sizeof(calib),
2349 &calib);
2350 }
2351
2352 static int ipw_send_associate(struct ipw_priv *priv,
2353 struct ipw_associate *associate)
2354 {
2355 if (!priv || !associate) {
2356 IPW_ERROR("Invalid args\n");
2357 return -1;
2358 }
2359
2360 return ipw_send_cmd_pdu(priv, IPW_CMD_ASSOCIATE, sizeof(*associate),
2361 associate);
2362 }
2363
2364 static int ipw_send_supported_rates(struct ipw_priv *priv,
2365 struct ipw_supported_rates *rates)
2366 {
2367 if (!priv || !rates) {
2368 IPW_ERROR("Invalid args\n");
2369 return -1;
2370 }
2371
2372 return ipw_send_cmd_pdu(priv, IPW_CMD_SUPPORTED_RATES, sizeof(*rates),
2373 rates);
2374 }
2375
2376 static int ipw_set_random_seed(struct ipw_priv *priv)
2377 {
2378 u32 val;
2379
2380 if (!priv) {
2381 IPW_ERROR("Invalid args\n");
2382 return -1;
2383 }
2384
2385 get_random_bytes(&val, sizeof(val));
2386
2387 return ipw_send_cmd_pdu(priv, IPW_CMD_SEED_NUMBER, sizeof(val), &val);
2388 }
2389
2390 static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off)
2391 {
2392 __le32 v = cpu_to_le32(phy_off);
2393 if (!priv) {
2394 IPW_ERROR("Invalid args\n");
2395 return -1;
2396 }
2397
2398 return ipw_send_cmd_pdu(priv, IPW_CMD_CARD_DISABLE, sizeof(v), &v);
2399 }
2400
2401 static int ipw_send_tx_power(struct ipw_priv *priv, struct ipw_tx_power *power)
2402 {
2403 if (!priv || !power) {
2404 IPW_ERROR("Invalid args\n");
2405 return -1;
2406 }
2407
2408 return ipw_send_cmd_pdu(priv, IPW_CMD_TX_POWER, sizeof(*power), power);
2409 }
2410
/*
 * Program per-channel TX power for every band the hardware supports.
 * Each channel gets the lesser of the regulatory max for that channel
 * (when non-zero) and the user-requested priv->tx_power.
 * Returns 0 on success, -EIO if any band's command fails.
 */
static int ipw_set_tx_power(struct ipw_priv *priv)
{
	const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
	struct ipw_tx_power tx_power;
	s8 max_power;
	int i;

	memset(&tx_power, 0, sizeof(tx_power));

	/* configure device for 'G' band */
	tx_power.ieee_mode = IPW_G_MODE;
	tx_power.num_channels = geo->bg_channels;
	for (i = 0; i < geo->bg_channels; i++) {
		/* max_power == 0 means no regulatory limit recorded;
		 * fall back to the user-requested power as-is. */
		max_power = geo->bg[i].max_power;
		tx_power.channels_tx_power[i].channel_number =
		    geo->bg[i].channel;
		tx_power.channels_tx_power[i].tx_power = max_power ?
		    min(max_power, priv->tx_power) : priv->tx_power;
	}
	if (ipw_send_tx_power(priv, &tx_power))
		return -EIO;

	/* configure device to also handle 'B' band -- deliberately
	 * reuses the channel table just built for 'G', changing only
	 * the mode field. */
	tx_power.ieee_mode = IPW_B_MODE;
	if (ipw_send_tx_power(priv, &tx_power))
		return -EIO;

	/* configure device to also handle 'A' band */
	if (priv->ieee->abg_true) {
		tx_power.ieee_mode = IPW_A_MODE;
		tx_power.num_channels = geo->a_channels;
		for (i = 0; i < tx_power.num_channels; i++) {
			max_power = geo->a[i].max_power;
			tx_power.channels_tx_power[i].channel_number =
			    geo->a[i].channel;
			tx_power.channels_tx_power[i].tx_power = max_power ?
			    min(max_power, priv->tx_power) : priv->tx_power;
		}
		if (ipw_send_tx_power(priv, &tx_power))
			return -EIO;
	}
	return 0;
}
2454
2455 static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts)
2456 {
2457 struct ipw_rts_threshold rts_threshold = {
2458 .rts_threshold = cpu_to_le16(rts),
2459 };
2460
2461 if (!priv) {
2462 IPW_ERROR("Invalid args\n");
2463 return -1;
2464 }
2465
2466 return ipw_send_cmd_pdu(priv, IPW_CMD_RTS_THRESHOLD,
2467 sizeof(rts_threshold), &rts_threshold);
2468 }
2469
2470 static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag)
2471 {
2472 struct ipw_frag_threshold frag_threshold = {
2473 .frag_threshold = cpu_to_le16(frag),
2474 };
2475
2476 if (!priv) {
2477 IPW_ERROR("Invalid args\n");
2478 return -1;
2479 }
2480
2481 return ipw_send_cmd_pdu(priv, IPW_CMD_FRAG_THRESHOLD,
2482 sizeof(frag_threshold), &frag_threshold);
2483 }
2484
2485 static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode)
2486 {
2487 __le32 param;
2488
2489 if (!priv) {
2490 IPW_ERROR("Invalid args\n");
2491 return -1;
2492 }
2493
2494 /* If on battery, set to 3, if AC set to CAM, else user
2495 * level */
2496 switch (mode) {
2497 case IPW_POWER_BATTERY:
2498 param = cpu_to_le32(IPW_POWER_INDEX_3);
2499 break;
2500 case IPW_POWER_AC:
2501 param = cpu_to_le32(IPW_POWER_MODE_CAM);
2502 break;
2503 default:
2504 param = cpu_to_le32(mode);
2505 break;
2506 }
2507
2508 return ipw_send_cmd_pdu(priv, IPW_CMD_POWER_MODE, sizeof(param),
2509 &param);
2510 }
2511
2512 static int ipw_send_retry_limit(struct ipw_priv *priv, u8 slimit, u8 llimit)
2513 {
2514 struct ipw_retry_limit retry_limit = {
2515 .short_retry_limit = slimit,
2516 .long_retry_limit = llimit
2517 };
2518
2519 if (!priv) {
2520 IPW_ERROR("Invalid args\n");
2521 return -1;
2522 }
2523
2524 return ipw_send_cmd_pdu(priv, IPW_CMD_RETRY_LIMIT, sizeof(retry_limit),
2525 &retry_limit);
2526 }
2527
2528 /*
2529 * The IPW device contains a Microwire compatible EEPROM that stores
2530 * various data like the MAC address. Usually the firmware has exclusive
2531 * access to the eeprom, but during device initialization (before the
2532 * device driver has sent the HostComplete command to the firmware) the
2533 * device driver has read access to the EEPROM by way of indirect addressing
2534 * through a couple of memory mapped registers.
2535 *
 * The following is a simplified implementation for pulling data out of
 * the eeprom, along with some helper functions to find information in
2538 * the per device private data's copy of the eeprom.
2539 *
2540 * NOTE: To better understand how these functions work (i.e what is a chip
2541 * select and why do have to keep driving the eeprom clock?), read
2542 * just about any data sheet for a Microwire compatible EEPROM.
2543 */
2544
2545 /* write a 32 bit value into the indirect accessor register */
2546 static inline void eeprom_write_reg(struct ipw_priv *p, u32 data)
2547 {
2548 ipw_write_reg32(p, FW_MEM_REG_EEPROM_ACCESS, data);
2549
2550 /* the eeprom requires some time to complete the operation */
2551 udelay(p->eeprom_delay);
2552
2553 return;
2554 }
2555
2556 /* perform a chip select operation */
/* perform a chip select operation: raise CS and pulse the clock once
 * so the EEPROM latches the select (Microwire wake-up sequence) */
static void eeprom_cs(struct ipw_priv *priv)
{
	eeprom_write_reg(priv, 0);
	eeprom_write_reg(priv, EEPROM_BIT_CS);
	eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
	eeprom_write_reg(priv, EEPROM_BIT_CS);
}
2564
2565 /* perform a chip select operation */
/* perform a chip deselect operation: drop CS, then give one final
 * clock pulse so the EEPROM returns to its idle state */
static void eeprom_disable_cs(struct ipw_priv *priv)
{
	eeprom_write_reg(priv, EEPROM_BIT_CS);
	eeprom_write_reg(priv, 0);
	eeprom_write_reg(priv, EEPROM_BIT_SK);
}
2572
2573 /* push a single bit down to the eeprom */
2574 static inline void eeprom_write_bit(struct ipw_priv *p, u8 bit)
2575 {
2576 int d = (bit ? EEPROM_BIT_DI : 0);
2577 eeprom_write_reg(p, EEPROM_BIT_CS | d);
2578 eeprom_write_reg(p, EEPROM_BIT_CS | d | EEPROM_BIT_SK);
2579 }
2580
2581 /* push an opcode followed by an address down to the eeprom */
/* push an opcode followed by an address down to the eeprom:
 * start bit (always 1), two opcode bits (MSB first), then the
 * 8-bit address, MSB first */
static void eeprom_op(struct ipw_priv *priv, u8 op, u8 addr)
{
	int i;

	eeprom_cs(priv);
	eeprom_write_bit(priv, 1);
	eeprom_write_bit(priv, op & 2);
	eeprom_write_bit(priv, op & 1);
	for (i = 7; i >= 0; i--) {
		eeprom_write_bit(priv, addr & (1 << i));
	}
}
2594
2595 /* pull 16 bits off the eeprom, one bit at a time */
/* pull 16 bits off the eeprom, one bit at a time */
static u16 eeprom_read_u16(struct ipw_priv *priv, u8 addr)
{
	int i;
	u16 r = 0;

	/* Send READ Opcode */
	eeprom_op(priv, EEPROM_CMD_READ, addr);

	/* Send dummy bit */
	eeprom_write_reg(priv, EEPROM_BIT_CS);

	/* Read the 16-bit word off the eeprom one bit at a time:
	 * pulse SK, then sample DO on the falling edge; bits arrive
	 * MSB first */
	for (i = 0; i < 16; i++) {
		u32 data = 0;
		eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
		eeprom_write_reg(priv, EEPROM_BIT_CS);
		data = ipw_read_reg32(priv, FW_MEM_REG_EEPROM_ACCESS);
		r = (r << 1) | ((data & EEPROM_BIT_DO) ? 1 : 0);
	}

	/* Send another dummy bit */
	eeprom_write_reg(priv, 0);
	eeprom_disable_cs(priv);

	return r;
}
2622
2623 /* helper function for pulling the mac address out of the private */
2624 /* data's copy of the eeprom data */
2625 static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac)
2626 {
2627 memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], 6);
2628 }
2629
2630 /*
2631 * Either the device driver (i.e. the host) or the firmware can
2632 * load eeprom data into the designated region in SRAM. If neither
2633 * happens then the FW will shutdown with a fatal error.
2634 *
 * In order to signal the FW to load the EEPROM itself, the
 * EEPROM_LOAD_DISABLE region of shared SRAM needs to be set non-zero.
2637 */
2638 static void ipw_eeprom_init_sram(struct ipw_priv *priv)
2639 {
2640 int i;
2641 __le16 *eeprom = (__le16 *) priv->eeprom;
2642
2643 IPW_DEBUG_TRACE(">>\n");
2644
2645 /* read entire contents of eeprom into private buffer */
2646 for (i = 0; i < 128; i++)
2647 eeprom[i] = cpu_to_le16(eeprom_read_u16(priv, (u8) i));
2648
2649 /*
2650 If the data looks correct, then copy it to our private
2651 copy. Otherwise let the firmware know to perform the operation
2652 on its own.
2653 */
2654 if (priv->eeprom[EEPROM_VERSION] != 0) {
2655 IPW_DEBUG_INFO("Writing EEPROM data into SRAM\n");
2656
2657 /* write the eeprom data to sram */
2658 for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
2659 ipw_write8(priv, IPW_EEPROM_DATA + i, priv->eeprom[i]);
2660
2661 /* Do not load eeprom data on fatal error or suspend */
2662 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
2663 } else {
2664 IPW_DEBUG_INFO("Enabling FW initializationg of SRAM\n");
2665
2666 /* Load eeprom data on fatal error or suspend */
2667 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 1);
2668 }
2669
2670 IPW_DEBUG_TRACE("<<\n");
2671 }
2672
2673 static void ipw_zero_memory(struct ipw_priv *priv, u32 start, u32 count)
2674 {
2675 count >>= 2;
2676 if (!count)
2677 return;
2678 _ipw_write32(priv, IPW_AUTOINC_ADDR, start);
2679 while (count--)
2680 _ipw_write32(priv, IPW_AUTOINC_DATA, 0);
2681 }
2682
2683 static inline void ipw_fw_dma_reset_command_blocks(struct ipw_priv *priv)
2684 {
2685 ipw_zero_memory(priv, IPW_SHARED_SRAM_DMA_CONTROL,
2686 CB_NUMBER_OF_ELEMENTS_SMALL *
2687 sizeof(struct command_block));
2688 }
2689
static int ipw_fw_dma_enable(struct ipw_priv *priv)
{				/* start dma engine but no transfers yet */

	IPW_DEBUG_FW(">> : \n");

	/* Start the dma: wipe any stale command blocks first */
	ipw_fw_dma_reset_command_blocks(priv);

	/* Write CB base address so the engine knows where the
	 * command-block array lives in shared SRAM */
	ipw_write_reg32(priv, IPW_DMA_I_CB_BASE, IPW_SHARED_SRAM_DMA_CONTROL);

	IPW_DEBUG_FW("<< : \n");
	return 0;
}
2704
/* Stop/abort any in-flight DMA and reset the host-side CB counter. */
static void ipw_fw_dma_abort(struct ipw_priv *priv)
{
	u32 control = 0;

	IPW_DEBUG_FW(">> :\n");

	/* set the Stop and Abort bit */
	control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_STOP_AND_ABORT;
	ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
	priv->sram_desc.last_cb_index = 0;

	IPW_DEBUG_FW("<< \n");
}
2718
2719 static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index,
2720 struct command_block *cb)
2721 {
2722 u32 address =
2723 IPW_SHARED_SRAM_DMA_CONTROL +
2724 (sizeof(struct command_block) * index);
2725 IPW_DEBUG_FW(">> :\n");
2726
2727 ipw_write_indirect(priv, address, (u8 *) cb,
2728 (int)sizeof(struct command_block));
2729
2730 IPW_DEBUG_FW("<< :\n");
2731 return 0;
2732
2733 }
2734
/* Flush all queued command blocks to shared SRAM and start the DMA. */
static int ipw_fw_dma_kick(struct ipw_priv *priv)
{
	u32 control = 0;
	u32 index = 0;

	IPW_DEBUG_FW(">> :\n");

	/* Copy every queued CB from the host shadow into device SRAM */
	for (index = 0; index < priv->sram_desc.last_cb_index; index++)
		ipw_fw_dma_write_command_block(priv, index,
					       &priv->sram_desc.cb_list[index]);

	/* Enable the DMA in the CSR register */
	ipw_clear_bit(priv, IPW_RESET_REG,
		      IPW_RESET_REG_MASTER_DISABLED |
		      IPW_RESET_REG_STOP_MASTER);

	/* Set the Start bit. */
	control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_START;
	ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);

	IPW_DEBUG_FW("<< :\n");
	return 0;
}
2758
2759 static void ipw_fw_dma_dump_command_block(struct ipw_priv *priv)
2760 {
2761 u32 address;
2762 u32 register_value = 0;
2763 u32 cb_fields_address = 0;
2764
2765 IPW_DEBUG_FW(">> :\n");
2766 address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2767 IPW_DEBUG_FW_INFO("Current CB is 0x%x \n", address);
2768
2769 /* Read the DMA Controlor register */
2770 register_value = ipw_read_reg32(priv, IPW_DMA_I_DMA_CONTROL);
2771 IPW_DEBUG_FW_INFO("IPW_DMA_I_DMA_CONTROL is 0x%x \n", register_value);
2772
2773 /* Print the CB values */
2774 cb_fields_address = address;
2775 register_value = ipw_read_reg32(priv, cb_fields_address);
2776 IPW_DEBUG_FW_INFO("Current CB ControlField is 0x%x \n", register_value);
2777
2778 cb_fields_address += sizeof(u32);
2779 register_value = ipw_read_reg32(priv, cb_fields_address);
2780 IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x \n", register_value);
2781
2782 cb_fields_address += sizeof(u32);
2783 register_value = ipw_read_reg32(priv, cb_fields_address);
2784 IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x \n",
2785 register_value);
2786
2787 cb_fields_address += sizeof(u32);
2788 register_value = ipw_read_reg32(priv, cb_fields_address);
2789 IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x \n", register_value);
2790
2791 IPW_DEBUG_FW(">> :\n");
2792 }
2793
2794 static int ipw_fw_dma_command_block_index(struct ipw_priv *priv)
2795 {
2796 u32 current_cb_address = 0;
2797 u32 current_cb_index = 0;
2798
2799 IPW_DEBUG_FW("<< :\n");
2800 current_cb_address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2801
2802 current_cb_index = (current_cb_address - IPW_SHARED_SRAM_DMA_CONTROL) /
2803 sizeof(struct command_block);
2804
2805 IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X \n",
2806 current_cb_index, current_cb_address);
2807
2808 IPW_DEBUG_FW(">> :\n");
2809 return current_cb_index;
2810
2811 }
2812
/*
 * Append one command block to the host-side shadow list.
 * Returns 0 on success, -1 if the CB array is full
 * (CB_NUMBER_OF_ELEMENTS_SMALL entries).
 */
static int ipw_fw_dma_add_command_block(struct ipw_priv *priv,
					u32 src_address,
					u32 dest_address,
					u32 length,
					int interrupt_enabled, int is_last)
{

	u32 control = CB_VALID | CB_SRC_LE | CB_DEST_LE | CB_SRC_AUTOINC |
	    CB_SRC_IO_GATED | CB_DEST_AUTOINC | CB_SRC_SIZE_LONG |
	    CB_DEST_SIZE_LONG;
	struct command_block *cb;
	u32 last_cb_element = 0;

	IPW_DEBUG_FW_INFO("src_address=0x%x dest_address=0x%x length=0x%x\n",
			  src_address, dest_address, length);

	if (priv->sram_desc.last_cb_index >= CB_NUMBER_OF_ELEMENTS_SMALL)
		return -1;

	/* Claim the next free slot in the CB array */
	last_cb_element = priv->sram_desc.last_cb_index;
	cb = &priv->sram_desc.cb_list[last_cb_element];
	priv->sram_desc.last_cb_index++;

	/* Calculate the new CB control word */
	if (interrupt_enabled)
		control |= CB_INT_ENABLED;

	if (is_last)
		control |= CB_LAST_VALID;

	control |= length;

	/* Calculate the CB Element's checksum value: XOR of control,
	 * source and destination words */
	cb->status = control ^ src_address ^ dest_address;

	/* Copy the Source and Destination addresses */
	cb->dest_addr = dest_address;
	cb->source_addr = src_address;

	/* Copy the Control Word last -- writing control marks the CB
	 * valid, so every other field must already be in place */
	cb->control = control;

	return 0;
}
2857
2858 static int ipw_fw_dma_add_buffer(struct ipw_priv *priv,
2859 u32 src_phys, u32 dest_address, u32 length)
2860 {
2861 u32 bytes_left = length;
2862 u32 src_offset = 0;
2863 u32 dest_offset = 0;
2864 int status = 0;
2865 IPW_DEBUG_FW(">> \n");
2866 IPW_DEBUG_FW_INFO("src_phys=0x%x dest_address=0x%x length=0x%x\n",
2867 src_phys, dest_address, length);
2868 while (bytes_left > CB_MAX_LENGTH) {
2869 status = ipw_fw_dma_add_command_block(priv,
2870 src_phys + src_offset,
2871 dest_address +
2872 dest_offset,
2873 CB_MAX_LENGTH, 0, 0);
2874 if (status) {
2875 IPW_DEBUG_FW_INFO(": Failed\n");
2876 return -1;
2877 } else
2878 IPW_DEBUG_FW_INFO(": Added new cb\n");
2879
2880 src_offset += CB_MAX_LENGTH;
2881 dest_offset += CB_MAX_LENGTH;
2882 bytes_left -= CB_MAX_LENGTH;
2883 }
2884
2885 /* add the buffer tail */
2886 if (bytes_left > 0) {
2887 status =
2888 ipw_fw_dma_add_command_block(priv, src_phys + src_offset,
2889 dest_address + dest_offset,
2890 bytes_left, 0, 0);
2891 if (status) {
2892 IPW_DEBUG_FW_INFO(": Failed on the buffer tail\n");
2893 return -1;
2894 } else
2895 IPW_DEBUG_FW_INFO
2896 (": Adding new cb - the buffer tail\n");
2897 }
2898
2899 IPW_DEBUG_FW("<< \n");
2900 return 0;
2901 }
2902
/*
 * Busy-wait until the DMA engine has consumed every queued command
 * block, then stop the engine.  A watchdog aborts the wait if the
 * current CB index makes no forward progress for ~400 polls
 * (50us apart).  Returns 0 on success, -1 on watchdog timeout.
 */
static int ipw_fw_dma_wait(struct ipw_priv *priv)
{
	u32 current_index = 0, previous_index;
	u32 watchdog = 0;

	IPW_DEBUG_FW(">> : \n");

	current_index = ipw_fw_dma_command_block_index(priv);
	IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%08X\n",
			  (int)priv->sram_desc.last_cb_index);

	while (current_index < priv->sram_desc.last_cb_index) {
		udelay(50);
		previous_index = current_index;
		current_index = ipw_fw_dma_command_block_index(priv);

		/* any forward progress resets the watchdog */
		if (previous_index < current_index) {
			watchdog = 0;
			continue;
		}
		if (++watchdog > 400) {
			IPW_DEBUG_FW_INFO("Timeout\n");
			ipw_fw_dma_dump_command_block(priv);
			ipw_fw_dma_abort(priv);
			return -1;
		}
	}

	/* all CBs consumed -- stop the engine cleanly */
	ipw_fw_dma_abort(priv);

	/*Disable the DMA in the CSR register */
	ipw_set_bit(priv, IPW_RESET_REG,
		    IPW_RESET_REG_MASTER_DISABLED | IPW_RESET_REG_STOP_MASTER);

	IPW_DEBUG_FW("<< dmaWaitSync \n");
	return 0;
}
2940
2941 static void ipw_remove_current_network(struct ipw_priv *priv)
2942 {
2943 struct list_head *element, *safe;
2944 struct ieee80211_network *network = NULL;
2945 unsigned long flags;
2946
2947 spin_lock_irqsave(&priv->ieee->lock, flags);
2948 list_for_each_safe(element, safe, &priv->ieee->network_list) {
2949 network = list_entry(element, struct ieee80211_network, list);
2950 if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
2951 list_del(element);
2952 list_add_tail(&network->list,
2953 &priv->ieee->network_free_list);
2954 }
2955 }
2956 spin_unlock_irqrestore(&priv->ieee->lock, flags);
2957 }
2958
2959 /**
2960 * Check that card is still alive.
2961 * Reads debug register from domain0.
2962 * If card is present, pre-defined value should
2963 * be found there.
2964 *
2965 * @param priv
2966 * @return 1 if card is present, 0 otherwise
2967 */
2968 static inline int ipw_alive(struct ipw_priv *priv)
2969 {
2970 return ipw_read32(priv, 0x90) == 0xd55555d5;
2971 }
2972
2973 /* timeout in msec, attempted in 10-msec quanta */
2974 static int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask,
2975 int timeout)
2976 {
2977 int i = 0;
2978
2979 do {
2980 if ((ipw_read32(priv, addr) & mask) == mask)
2981 return i;
2982 mdelay(10);
2983 i += 10;
2984 } while (i < timeout);
2985
2986 return -ETIME;
2987 }
2988
2989 /* These functions load the firmware and micro code for the operation of
2990 * the ipw hardware. It assumes the buffer has all the bits for the
2991 * image and the caller is handling the memory allocation and clean up.
2992 */
2993
/*
 * Ask the bus master to stop and wait (up to 100ms) for it to report
 * disabled.  Returns the elapsed wait in msec on success, -1 on
 * timeout.
 */
static int ipw_stop_master(struct ipw_priv *priv)
{
	int rc;

	IPW_DEBUG_TRACE(">> \n");
	/* stop master. typical delay - 0 */
	ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);

	/* timeout is in msec, polled in 10-msec quanta */
	rc = ipw_poll_bit(priv, IPW_RESET_REG,
			  IPW_RESET_REG_MASTER_DISABLED, 100);
	if (rc < 0) {
		IPW_ERROR("wait for stop master failed after 100ms\n");
		return -1;
	}

	IPW_DEBUG_INFO("stop master %dms\n", rc);

	return rc;
}
3014
/* Release the embedded ARC processor from reset.  The surrounding
 * mdelay()s are deliberate: the required timing is undocumented, so
 * delays pad both sides of the reset-bit clear for safety. */
static void ipw_arc_release(struct ipw_priv *priv)
{
	IPW_DEBUG_TRACE(">> \n");
	mdelay(5);

	ipw_clear_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);

	/* no one knows timing, for safety add some delay */
	mdelay(5);
}
3025
/* Header preceding each chunk in the firmware image: the device
 * address the chunk should be DMA'd to, and the payload length in
 * bytes.  Fields are little-endian as stored in the image file. */
struct fw_chunk {
	__le32 address;
	__le32 length;
};
3030
/*
 * Load the DINO microcode image ('len' bytes at 'data') into the
 * baseband processor and wait for its alive response.
 * Returns 0 on success, -EINVAL if the alive response is invalid,
 * -ETIME if no response arrives, or the error from ipw_stop_master().
 * NOTE: the register write sequence and delays below follow the
 * hardware bring-up procedure and must not be reordered.
 */
static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
{
	int rc = 0, i, addr;
	u8 cr = 0;
	__le16 *image;

	image = (__le16 *) data;

	IPW_DEBUG_TRACE(">> \n");

	rc = ipw_stop_master(priv);

	if (rc < 0)
		return rc;

	/* zero out the whole shared/domain-1 region before loading */
	for (addr = IPW_SHARED_LOWER_BOUND;
	     addr < IPW_REGISTER_DOMAIN1_END; addr += 4) {
		ipw_write32(priv, addr, 0);
	}

	/* no ucode (yet) */
	memset(&priv->dino_alive, 0, sizeof(priv->dino_alive));
	/* destroy DMA queues */
	/* reset sequence */

	ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_ON);
	ipw_arc_release(priv);
	ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_OFF);
	mdelay(1);

	/* reset PHY */
	ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, IPW_BASEBAND_POWER_DOWN);
	mdelay(1);

	ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, 0);
	mdelay(1);

	/* enable ucode store */
	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0x0);
	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_CS);
	mdelay(1);

	/* write ucode */
	/**
	 * @bug
	 * Do NOT set indirect address register once and then
	 * store data to indirect data register in the loop.
	 * It seems very reasonable, but in this case DINO do not
	 * accept ucode. It is essential to set address each time.
	 */
	/* load new ipw uCode */
	for (i = 0; i < len / 2; i++)
		ipw_write_reg16(priv, IPW_BASEBAND_CONTROL_STORE,
				le16_to_cpu(image[i]));

	/* enable DINO */
	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_SYSTEM);

	/* this is where the igx / win driver deviates from the VAP driver. */

	/* wait for alive response: poll up to 100ms for RX FIFO data */
	for (i = 0; i < 100; i++) {
		/* poll for incoming data */
		cr = ipw_read_reg8(priv, IPW_BASEBAND_CONTROL_STATUS);
		if (cr & DINO_RXFIFO_DATA)
			break;
		mdelay(1);
	}

	if (cr & DINO_RXFIFO_DATA) {
		/* alive_command_responce size is NOT multiple of 4 */
		__le32 response_buffer[(sizeof(priv->dino_alive) + 3) / 4];

		/* drain the response one 32-bit word at a time */
		for (i = 0; i < ARRAY_SIZE(response_buffer); i++)
			response_buffer[i] =
			    cpu_to_le32(ipw_read_reg32(priv,
						       IPW_BASEBAND_RX_FIFO_READ));
		memcpy(&priv->dino_alive, response_buffer,
		       sizeof(priv->dino_alive));
		if (priv->dino_alive.alive_command == 1
		    && priv->dino_alive.ucode_valid == 1) {
			rc = 0;
			IPW_DEBUG_INFO
			    ("Microcode OK, rev. %d (0x%x) dev. %d (0x%x) "
			     "of %02d/%02d/%02d %02d:%02d\n",
			     priv->dino_alive.software_revision,
			     priv->dino_alive.software_revision,
			     priv->dino_alive.device_identifier,
			     priv->dino_alive.device_identifier,
			     priv->dino_alive.time_stamp[0],
			     priv->dino_alive.time_stamp[1],
			     priv->dino_alive.time_stamp[2],
			     priv->dino_alive.time_stamp[3],
			     priv->dino_alive.time_stamp[4]);
		} else {
			IPW_DEBUG_INFO("Microcode is not alive\n");
			rc = -EINVAL;
		}
	} else {
		IPW_DEBUG_INFO("No alive response from DINO\n");
		rc = -ETIME;
	}

	/* disable DINO, otherwise for some reason
	   firmware have problem getting alive resp. */
	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);

	return rc;
}
3141
/*
 * DMA the chunked firmware image ('len' bytes at 'data') into the
 * device.  The image is a sequence of (struct fw_chunk header,
 * payload) pairs; each payload is queued as DMA command blocks from a
 * DMA-coherent copy of the whole image.
 * Returns 0 on success, -ENOMEM if the coherent buffer cannot be
 * allocated, or -1/-EIO style errors from the DMA helpers.
 */
static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len)
{
	int rc = -1;
	int offset = 0;
	struct fw_chunk *chunk;
	dma_addr_t shared_phys;
	u8 *shared_virt;

	IPW_DEBUG_TRACE("<< : \n");
	/* the DMA engine needs a device-visible copy of the image */
	shared_virt = pci_alloc_consistent(priv->pci_dev, len, &shared_phys);

	if (!shared_virt)
		return -ENOMEM;

	memmove(shared_virt, data, len);

	/* Start the Dma */
	rc = ipw_fw_dma_enable(priv);

	if (priv->sram_desc.last_cb_index > 0) {
		/* the DMA is already ready this would be a bug. */
		BUG();
		goto out;
	}

	/* walk the chunk headers, queueing one DMA transfer per chunk;
	 * 'offset' tracks our position in both 'data' (headers) and the
	 * coherent copy (payload source addresses) */
	do {
		chunk = (struct fw_chunk *)(data + offset);
		offset += sizeof(struct fw_chunk);
		/* build DMA packet and queue up for sending */
		/* dma to chunk->address, the chunk->length bytes from data +
		 * offeset*/
		/* Dma loading */
		rc = ipw_fw_dma_add_buffer(priv, shared_phys + offset,
					   le32_to_cpu(chunk->address),
					   le32_to_cpu(chunk->length));
		if (rc) {
			IPW_DEBUG_INFO("dmaAddBuffer Failed\n");
			goto out;
		}

		offset += le32_to_cpu(chunk->length);
	} while (offset < len);

	/* Run the DMA and wait for the answer */
	rc = ipw_fw_dma_kick(priv);
	if (rc) {
		IPW_ERROR("dmaKick Failed\n");
		goto out;
	}

	rc = ipw_fw_dma_wait(priv);
	if (rc) {
		IPW_ERROR("dmaWaitSync Failed\n");
		goto out;
	}
      out:
	pci_free_consistent(priv->pci_dev, len, shared_virt, shared_phys);
	return rc;
}
3201
3202 /* stop nic */
3203 static int ipw_stop_nic(struct ipw_priv *priv)
3204 {
3205 int rc = 0;
3206
3207 /* stop */
3208 ipw_write32(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
3209
3210 rc = ipw_poll_bit(priv, IPW_RESET_REG,
3211 IPW_RESET_REG_MASTER_DISABLED, 500);
3212 if (rc < 0) {
3213 IPW_ERROR("wait for reg master disabled failed after 500ms\n");
3214 return rc;
3215 }
3216
3217 ipw_set_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
3218
3219 return rc;
3220 }
3221
3222 static void ipw_start_nic(struct ipw_priv *priv)
3223 {
3224 IPW_DEBUG_TRACE(">>\n");
3225
3226 /* prvHwStartNic release ARC */
3227 ipw_clear_bit(priv, IPW_RESET_REG,
3228 IPW_RESET_REG_MASTER_DISABLED |
3229 IPW_RESET_REG_STOP_MASTER |
3230 CBD_RESET_REG_PRINCETON_RESET);
3231
3232 /* enable power management */
3233 ipw_set_bit(priv, IPW_GP_CNTRL_RW,
3234 IPW_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY);
3235
3236 IPW_DEBUG_TRACE("<<\n");
3237 }
3238
3239 static int ipw_init_nic(struct ipw_priv *priv)
3240 {
3241 int rc;
3242
3243 IPW_DEBUG_TRACE(">>\n");
3244 /* reset */
3245 /*prvHwInitNic */
3246 /* set "initialization complete" bit to move adapter to D0 state */
3247 ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3248
3249 /* low-level PLL activation */
3250 ipw_write32(priv, IPW_READ_INT_REGISTER,
3251 IPW_BIT_INT_HOST_SRAM_READ_INT_REGISTER);
3252
3253 /* wait for clock stabilization */
3254 rc = ipw_poll_bit(priv, IPW_GP_CNTRL_RW,
3255 IPW_GP_CNTRL_BIT_CLOCK_READY, 250);
3256 if (rc < 0)
3257 IPW_DEBUG_INFO("FAILED wait for clock stablization\n");
3258
3259 /* assert SW reset */
3260 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_SW_RESET);
3261
3262 udelay(10);
3263
3264 /* set "initialization complete" bit to move adapter to D0 state */
3265 ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3266
3267 IPW_DEBUG_TRACE(">>\n");
3268 return 0;
3269 }
3270
/* Call this function from process context, it will sleep in request_firmware.
 * Probe is an ok place to call this from.
 *
 * Re-initializes the NIC and wakes anyone waiting on a host command or a
 * scan, since after the reset those operations can no longer complete.
 * Returns the ipw_init_nic() result. */
static int ipw_reset_nic(struct ipw_priv *priv)
{
	int rc = 0;
	unsigned long flags;

	IPW_DEBUG_TRACE(">>\n");

	rc = ipw_init_nic(priv);

	/* priv->status is shared with the interrupt handler, so the bits
	 * are cleared under the lock. */
	spin_lock_irqsave(&priv->lock, flags);
	/* Clear the 'host command active' bit... */
	priv->status &= ~STATUS_HCMD_ACTIVE;
	wake_up_interruptible(&priv->wait_command_queue);
	/* ...and any in-flight scan state, then wake its waiters too. */
	priv->status &= ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
	wake_up_interruptible(&priv->wait_state);
	spin_unlock_irqrestore(&priv->lock, flags);

	IPW_DEBUG_TRACE("<<\n");
	return rc;
}
3294
3295
/* On-disk layout of an ipw2200 firmware file: a small header followed by
 * three concatenated images (boot loader, microcode, runtime firmware). */
struct ipw_fw {
	__le32 ver;		/* version; printed as (ver >> 16).(ver & 0xff) */
	__le32 boot_size;	/* byte length of the boot image */
	__le32 ucode_size;	/* byte length of the microcode image */
	__le32 fw_size;		/* byte length of the runtime firmware image */
	u8 data[0];		/* the three images, back to back */
};
3303
3304 static int ipw_get_fw(struct ipw_priv *priv,
3305 const struct firmware **raw, const char *name)
3306 {
3307 struct ipw_fw *fw;
3308 int rc;
3309
3310 /* ask firmware_class module to get the boot firmware off disk */
3311 rc = request_firmware(raw, name, &priv->pci_dev->dev);
3312 if (rc < 0) {
3313 IPW_ERROR("%s request_firmware failed: Reason %d\n", name, rc);
3314 return rc;
3315 }
3316
3317 if ((*raw)->size < sizeof(*fw)) {
3318 IPW_ERROR("%s is too small (%zd)\n", name, (*raw)->size);
3319 return -EINVAL;
3320 }
3321
3322 fw = (void *)(*raw)->data;
3323
3324 if ((*raw)->size < sizeof(*fw) + le32_to_cpu(fw->boot_size) +
3325 le32_to_cpu(fw->ucode_size) + le32_to_cpu(fw->fw_size)) {
3326 IPW_ERROR("%s is too small or corrupt (%zd)\n",
3327 name, (*raw)->size);
3328 return -EINVAL;
3329 }
3330
3331 IPW_DEBUG_INFO("Read firmware '%s' image v%d.%d (%zd bytes)\n",
3332 name,
3333 le32_to_cpu(fw->ver) >> 16,
3334 le32_to_cpu(fw->ver) & 0xff,
3335 (*raw)->size - sizeof(*fw));
3336 return 0;
3337 }
3338
3339 #define IPW_RX_BUF_SIZE (3000)
3340
3341 static void ipw_rx_queue_reset(struct ipw_priv *priv,
3342 struct ipw_rx_queue *rxq)
3343 {
3344 unsigned long flags;
3345 int i;
3346
3347 spin_lock_irqsave(&rxq->lock, flags);
3348
3349 INIT_LIST_HEAD(&rxq->rx_free);
3350 INIT_LIST_HEAD(&rxq->rx_used);
3351
3352 /* Fill the rx_used queue with _all_ of the Rx buffers */
3353 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
3354 /* In the reset function, these buffers may have been allocated
3355 * to an SKB, so we need to unmap and free potential storage */
3356 if (rxq->pool[i].skb != NULL) {
3357 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
3358 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
3359 dev_kfree_skb(rxq->pool[i].skb);
3360 rxq->pool[i].skb = NULL;
3361 }
3362 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
3363 }
3364
3365 /* Set us so that we have processed and used all buffers, but have
3366 * not restocked the Rx queue with fresh buffers */
3367 rxq->read = rxq->write = 0;
3368 rxq->processed = RX_QUEUE_SIZE - 1;
3369 rxq->free_count = 0;
3370 spin_unlock_irqrestore(&rxq->lock, flags);
3371 }
3372
#ifdef CONFIG_PM
/* With power management, the firmware image is cached in these file-scope
 * statics so resume does not have to re-read it from disk.
 * NOTE(review): the cache is shared by all adapters -- assumes the cached
 * image suits every instance's mode; confirm for multi-device setups. */
static int fw_loaded = 0;
static const struct firmware *raw = NULL;

/* Drop the cached firmware image, if any. */
static void free_firmware(void)
{
	if (fw_loaded) {
		release_firmware(raw);
		raw = NULL;
		fw_loaded = 0;
	}
}
#else
#define free_firmware() do {} while (0)
#endif
3388
/* Full firmware bring-up sequence: pick the image for the current mode,
 * fetch it, reset the NIC, DMA in boot loader, microcode and runtime
 * firmware in turn, initialize the Rx/Tx queues and enable interrupts.
 * A parity error during startup retries the whole sequence up to three
 * times.  Returns 0 on success, negative errno on failure; on failure
 * the Rx and Tx queues are torn down. */
static int ipw_load(struct ipw_priv *priv)
{
#ifndef CONFIG_PM
	/* Without PM there is no cached image; use a local handle. */
	const struct firmware *raw = NULL;
#endif
	struct ipw_fw *fw;
	u8 *boot_img, *ucode_img, *fw_img;
	u8 *name = NULL;
	int rc = 0, retries = 3;

	/* Each operating mode ships as a separate firmware file. */
	switch (priv->ieee->iw_mode) {
	case IW_MODE_ADHOC:
		name = "ipw2200-ibss.fw";
		break;
#ifdef CONFIG_IPW2200_MONITOR
	case IW_MODE_MONITOR:
		name = "ipw2200-sniffer.fw";
		break;
#endif
	case IW_MODE_INFRA:
		name = "ipw2200-bss.fw";
		break;
	}

	if (!name) {
		rc = -EINVAL;
		goto error;
	}

#ifdef CONFIG_PM
	/* With PM, reuse the cached image across suspend/resume. */
	if (!fw_loaded) {
#endif
		rc = ipw_get_fw(priv, &raw, name);
		if (rc < 0)
			goto error;
#ifdef CONFIG_PM
	}
#endif

	/* Locate the three images inside the validated file. */
	fw = (void *)raw->data;
	boot_img = &fw->data[0];
	ucode_img = &fw->data[le32_to_cpu(fw->boot_size)];
	fw_img = &fw->data[le32_to_cpu(fw->boot_size) +
			   le32_to_cpu(fw->ucode_size)];

	/* NOTE(review): rc was already checked above; this re-check looks
	 * redundant -- confirm before removing. */
	if (rc < 0)
		goto error;

	if (!priv->rxq)
		priv->rxq = ipw_rx_queue_alloc(priv);
	else
		ipw_rx_queue_reset(priv, priv->rxq);
	if (!priv->rxq) {
		IPW_ERROR("Unable to initialize Rx queue\n");
		goto error;
	}

      retry:
	/* Ensure interrupts are disabled */
	ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
	priv->status &= ~STATUS_INT_ENABLED;

	/* ack pending interrupts */
	ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);

	ipw_stop_nic(priv);

	rc = ipw_reset_nic(priv);
	if (rc < 0) {
		IPW_ERROR("Unable to reset NIC\n");
		goto error;
	}

	/* Start from clean SRAM. */
	ipw_zero_memory(priv, IPW_NIC_SRAM_LOWER_BOUND,
			IPW_NIC_SRAM_UPPER_BOUND - IPW_NIC_SRAM_LOWER_BOUND);

	/* DMA the initial boot firmware into the device */
	rc = ipw_load_firmware(priv, boot_img, le32_to_cpu(fw->boot_size));
	if (rc < 0) {
		IPW_ERROR("Unable to load boot firmware: %d\n", rc);
		goto error;
	}

	/* kick start the device */
	ipw_start_nic(priv);

	/* wait for the device to finish its initial startup sequence */
	rc = ipw_poll_bit(priv, IPW_INTA_RW,
			  IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
	if (rc < 0) {
		IPW_ERROR("device failed to boot initial fw image\n");
		goto error;
	}
	IPW_DEBUG_INFO("initial device response after %dms\n", rc);

	/* ack fw init done interrupt */
	ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);

	/* DMA the ucode into the device */
	rc = ipw_load_ucode(priv, ucode_img, le32_to_cpu(fw->ucode_size));
	if (rc < 0) {
		IPW_ERROR("Unable to load ucode: %d\n", rc);
		goto error;
	}

	/* stop nic */
	ipw_stop_nic(priv);

	/* DMA bss firmware into the device */
	rc = ipw_load_firmware(priv, fw_img, le32_to_cpu(fw->fw_size));
	if (rc < 0) {
		IPW_ERROR("Unable to load firmware: %d\n", rc);
		goto error;
	}
#ifdef CONFIG_PM
	/* Image fully loaded -- keep the cache for the next resume. */
	fw_loaded = 1;
#endif

	ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);

	rc = ipw_queue_reset(priv);
	if (rc < 0) {
		IPW_ERROR("Unable to initialize queues\n");
		goto error;
	}

	/* Ensure interrupts are disabled */
	ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
	/* ack pending interrupts */
	ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);

	/* kick start the device */
	ipw_start_nic(priv);

	/* A parity error at this point means the load went bad; retry the
	 * whole sequence a limited number of times. */
	if (ipw_read32(priv, IPW_INTA_RW) & IPW_INTA_BIT_PARITY_ERROR) {
		if (retries > 0) {
			IPW_WARNING("Parity error. Retrying init.\n");
			retries--;
			goto retry;
		}

		IPW_ERROR("TODO: Handle parity error -- schedule restart?\n");
		rc = -EIO;
		goto error;
	}

	/* wait for the device */
	rc = ipw_poll_bit(priv, IPW_INTA_RW,
			  IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
	if (rc < 0) {
		IPW_ERROR("device failed to start within 500ms\n");
		goto error;
	}
	IPW_DEBUG_INFO("device response after %dms\n", rc);

	/* ack fw init done interrupt */
	ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);

	/* read eeprom data and initialize the eeprom region of sram */
	priv->eeprom_delay = 1;
	ipw_eeprom_init_sram(priv);

	/* enable interrupts */
	ipw_enable_interrupts(priv);

	/* Ensure our queue has valid packets */
	ipw_rx_queue_replenish(priv);

	ipw_write32(priv, IPW_RX_READ_INDEX, priv->rxq->read);

	/* ack pending interrupts */
	ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);

#ifndef CONFIG_PM
	release_firmware(raw);
#endif
	return 0;

      error:
	if (priv->rxq) {
		ipw_rx_queue_free(priv, priv->rxq);
		priv->rxq = NULL;
	}
	ipw_tx_queue_free(priv);
	if (raw)
		release_firmware(raw);
#ifdef CONFIG_PM
	/* Invalidate the cache so the next attempt re-reads from disk. */
	fw_loaded = 0;
	raw = NULL;
#endif

	return rc;
}
3582
3583 /**
3584 * DMA services
3585 *
3586 * Theory of operation
3587 *
 * A queue is a circular buffer with 'Read' and 'Write' pointers.
 * Two empty entries are always kept in the buffer to protect it from overflow.
 *
 * For the Tx queue, there are low mark and high mark limits.  If, after
 * queuing a packet for Tx, the free space becomes < low mark, the Tx queue
 * is stopped.  When reclaiming packets (on the 'tx done' IRQ), if the free
 * space becomes > high mark, the Tx queue is resumed.
3595 *
3596 * The IPW operates with six queues, one receive queue in the device's
3597 * sram, one transmit queue for sending commands to the device firmware,
3598 * and four transmit queues for data.
3599 *
3600 * The four transmit queues allow for performing quality of service (qos)
3601 * transmissions as per the 802.11 protocol. Currently Linux does not
3602 * provide a mechanism to the user for utilizing prioritized queues, so
3603 * we only utilize the first data transmit queue (queue1).
3604 */
3605
3606 /**
3607 * Driver allocates buffers of this size for Rx
3608 */
3609
3610 static inline int ipw_queue_space(const struct clx2_queue *q)
3611 {
3612 int s = q->last_used - q->first_empty;
3613 if (s <= 0)
3614 s += q->n_bd;
3615 s -= 2; /* keep some reserve to not confuse empty and full situations */
3616 if (s < 0)
3617 s = 0;
3618 return s;
3619 }
3620
/* Advance a circular-buffer index by one, wrapping back to 0 at n_bd. */
static inline int ipw_queue_inc_wrap(int index, int n_bd)
{
	index++;
	if (index == n_bd)
		index = 0;
	return index;
}
3625
/**
 * Initialize common DMA queue structure
 *
 * @param priv   driver private data (used for the register writes)
 * @param q      queue to init
 * @param count  Number of BD's to allocate.  Should be a power of 2
 * @param read   device register holding the queue 'read' index
 *               (not offset within BAR, full address)
 * @param write  device register holding the queue 'write' index
 *               (not offset within BAR, full address)
 * @param base   device register that receives the queue's DMA base address
 *               (not offset within BAR, full address)
 * @param size   device register that receives the queue's entry count
 *               (not offset within BAR, full address)
 */
static void ipw_queue_init(struct ipw_priv *priv, struct clx2_queue *q,
			   int count, u32 read, u32 write, u32 base, u32 size)
{
	q->n_bd = count;

	/* Stop the queue when free space drops below low_mark (min 4)... */
	q->low_mark = q->n_bd / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	/* ...and restart it once free space exceeds high_mark (min 2). */
	q->high_mark = q->n_bd / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->first_empty = q->last_used = 0;
	q->reg_r = read;
	q->reg_w = write;

	/* Tell the hardware where the ring lives and zero both indexes. */
	ipw_write32(priv, base, q->dma_addr);
	ipw_write32(priv, size, count);
	ipw_write32(priv, read, 0);
	ipw_write32(priv, write, 0);

	/* presumably a post-write flush -- TODO confirm what 0x90 is */
	_ipw_read32(priv, 0x90);
}
3664
3665 static int ipw_queue_tx_init(struct ipw_priv *priv,
3666 struct clx2_tx_queue *q,
3667 int count, u32 read, u32 write, u32 base, u32 size)
3668 {
3669 struct pci_dev *dev = priv->pci_dev;
3670
3671 q->txb = kmalloc(sizeof(q->txb[0]) * count, GFP_KERNEL);
3672 if (!q->txb) {
3673 IPW_ERROR("vmalloc for auxilary BD structures failed\n");
3674 return -ENOMEM;
3675 }
3676
3677 q->bd =
3678 pci_alloc_consistent(dev, sizeof(q->bd[0]) * count, &q->q.dma_addr);
3679 if (!q->bd) {
3680 IPW_ERROR("pci_alloc_consistent(%zd) failed\n",
3681 sizeof(q->bd[0]) * count);
3682 kfree(q->txb);
3683 q->txb = NULL;
3684 return -ENOMEM;
3685 }
3686
3687 ipw_queue_init(priv, &q->q, count, read, write, base, size);
3688 return 0;
3689 }
3690
/**
 * Free one TFD, the one at index [txq->q.last_used].
 * Does NOT advance any indexes.
 *
 * @param priv  driver private data
 * @param txq   Tx queue owning the TFD
 */
static void ipw_queue_tx_free_tfd(struct ipw_priv *priv,
				  struct clx2_tx_queue *txq)
{
	struct tfd_frame *bd = &txq->bd[txq->q.last_used];
	struct pci_dev *dev = priv->pci_dev;
	int i;

	/* classify bd */
	if (bd->control_flags.message_type == TX_HOST_COMMAND_TYPE)
		/* nothing to cleanup after for host commands */
		return;

	/* sanity check */
	if (le32_to_cpu(bd->u.data.num_chunks) > NUM_TFD_CHUNKS) {
		IPW_ERROR("Too many chunks: %i\n",
			  le32_to_cpu(bd->u.data.num_chunks));
		/** @todo issue fatal error, it is quite serious situation */
		return;
	}

	/* unmap chunks if any */
	for (i = 0; i < le32_to_cpu(bd->u.data.num_chunks); i++) {
		pci_unmap_single(dev, le32_to_cpu(bd->u.data.chunk_ptr[i]),
				 le16_to_cpu(bd->u.data.chunk_len[i]),
				 PCI_DMA_TODEVICE);
		/* The txb is released on the first iteration that sees it;
		 * later iterations find NULL and skip it.
		 * NOTE(review): if num_chunks were 0 the txb would never be
		 * freed here -- confirm callers never queue such a TFD. */
		if (txq->txb[txq->q.last_used]) {
			ieee80211_txb_free(txq->txb[txq->q.last_used]);
			txq->txb[txq->q.last_used] = NULL;
		}
	}
}
3729
/**
 * Deallocate a DMA queue.
 *
 * Empty the queue by removing and destroying all BD's, free all buffers,
 * then release the descriptor ring and bookkeeping array and zero the
 * whole structure so it can be re-initialized.
 *
 * @param priv  driver private data
 * @param txq   Tx queue to destroy (no-op if never initialized, n_bd == 0)
 */
static void ipw_queue_tx_free(struct ipw_priv *priv, struct clx2_tx_queue *txq)
{
	struct clx2_queue *q = &txq->q;
	struct pci_dev *dev = priv->pci_dev;

	/* Never initialized -- nothing to release. */
	if (q->n_bd == 0)
		return;

	/* first, empty all BD's */
	for (; q->first_empty != q->last_used;
	     q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
		ipw_queue_tx_free_tfd(priv, txq);
	}

	/* free buffers belonging to queue itself */
	pci_free_consistent(dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd,
			    q->dma_addr);
	kfree(txq->txb);

	/* 0 fill whole structure */
	memset(txq, 0, sizeof(*txq));
}
3761
3762 /**
3763 * Destroy all DMA queues and structures
3764 *
3765 * @param priv
3766 */
3767 static void ipw_tx_queue_free(struct ipw_priv *priv)
3768 {
3769 /* Tx CMD queue */
3770 ipw_queue_tx_free(priv, &priv->txq_cmd);
3771
3772 /* Tx queues */
3773 ipw_queue_tx_free(priv, &priv->txq[0]);
3774 ipw_queue_tx_free(priv, &priv->txq[1]);
3775 ipw_queue_tx_free(priv, &priv->txq[2]);
3776 ipw_queue_tx_free(priv, &priv->txq[3]);
3777 }
3778
3779 static void ipw_create_bssid(struct ipw_priv *priv, u8 * bssid)
3780 {
3781 /* First 3 bytes are manufacturer */
3782 bssid[0] = priv->mac_addr[0];
3783 bssid[1] = priv->mac_addr[1];
3784 bssid[2] = priv->mac_addr[2];
3785
3786 /* Last bytes are random */
3787 get_random_bytes(&bssid[3], ETH_ALEN - 3);
3788
3789 bssid[0] &= 0xfe; /* clear multicast bit */
3790 bssid[0] |= 0x02; /* set local assignment bit (IEEE802) */
3791 }
3792
/* Look up @bssid in the ad-hoc station table, adding it (both to the
 * driver table and to the device's station table in SRAM) if not found.
 * Returns the station index, or IPW_INVALID_STATION if the table is full. */
static u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid)
{
	struct ipw_station_entry entry;
	int i;
	DECLARE_MAC_BUF(mac);

	for (i = 0; i < priv->num_stations; i++) {
		if (!memcmp(priv->stations[i], bssid, ETH_ALEN)) {
			/* Another node is active in network */
			priv->missed_adhoc_beacons = 0;
			if (!(priv->config & CFG_STATIC_CHANNEL))
				/* when other nodes drop out, we drop out */
				priv->config &= ~CFG_ADHOC_PERSIST;

			return i;
		}
	}

	/* Table full: the loop above ran to num_stations, which can reach
	 * MAX_STATIONS at most. */
	if (i == MAX_STATIONS)
		return IPW_INVALID_STATION;

	IPW_DEBUG_SCAN("Adding AdHoc station: %s\n", print_mac(mac, bssid));

	/* Mirror the new entry into the device's station table. */
	entry.reserved = 0;
	entry.support_mode = 0;
	memcpy(entry.mac_addr, bssid, ETH_ALEN);
	memcpy(priv->stations[i], bssid, ETH_ALEN);
	ipw_write_direct(priv, IPW_STATION_TABLE_LOWER + i * sizeof(entry),
			 &entry, sizeof(entry));
	priv->num_stations++;

	return i;
}
3826
3827 static u8 ipw_find_station(struct ipw_priv *priv, u8 * bssid)
3828 {
3829 int i;
3830
3831 for (i = 0; i < priv->num_stations; i++)
3832 if (!memcmp(priv->stations[i], bssid, ETH_ALEN))
3833 return i;
3834
3835 return IPW_INVALID_STATION;
3836 }
3837
/* Ask the firmware to drop the current association.  @quiet selects
 * HC_DISASSOC_QUIET instead of HC_DISASSOCIATE (presumably a silent
 * disassociation -- confirm against firmware documentation).  If an
 * association attempt is still in flight, the work is deferred to the
 * 'disassociate' work item instead. */
static void ipw_send_disassociate(struct ipw_priv *priv, int quiet)
{
	int err;
	DECLARE_MAC_BUF(mac);

	if (priv->status & STATUS_ASSOCIATING) {
		IPW_DEBUG_ASSOC("Disassociating while associating.\n");
		queue_work(priv->workqueue, &priv->disassociate);
		return;
	}

	if (!(priv->status & STATUS_ASSOCIATED)) {
		IPW_DEBUG_ASSOC("Disassociating while not associated.\n");
		return;
	}

	IPW_DEBUG_ASSOC("Disassocation attempt from %s "
			"on channel %d.\n",
			print_mac(mac, priv->assoc_request.bssid),
			priv->assoc_request.channel);

	/* Mark the transition before issuing the command. */
	priv->status &= ~(STATUS_ASSOCIATING | STATUS_ASSOCIATED);
	priv->status |= STATUS_DISASSOCIATING;

	if (quiet)
		priv->assoc_request.assoc_type = HC_DISASSOC_QUIET;
	else
		priv->assoc_request.assoc_type = HC_DISASSOCIATE;

	err = ipw_send_associate(priv, &priv->assoc_request);
	if (err) {
		IPW_DEBUG_HC("Attempt to send [dis]associate command "
			     "failed.\n");
		return;
	}

}
3875
3876 static int ipw_disassociate(void *data)
3877 {
3878 struct ipw_priv *priv = data;
3879 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)))
3880 return 0;
3881 ipw_send_disassociate(data, 0);
3882 return 1;
3883 }
3884
3885 static void ipw_bg_disassociate(struct work_struct *work)
3886 {
3887 struct ipw_priv *priv =
3888 container_of(work, struct ipw_priv, disassociate);
3889 mutex_lock(&priv->mutex);
3890 ipw_disassociate(priv);
3891 mutex_unlock(&priv->mutex);
3892 }
3893
3894 static void ipw_system_config(struct work_struct *work)
3895 {
3896 struct ipw_priv *priv =
3897 container_of(work, struct ipw_priv, system_config);
3898
3899 #ifdef CONFIG_IPW2200_PROMISCUOUS
3900 if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
3901 priv->sys_config.accept_all_data_frames = 1;
3902 priv->sys_config.accept_non_directed_frames = 1;
3903 priv->sys_config.accept_all_mgmt_bcpr = 1;
3904 priv->sys_config.accept_all_mgmt_frames = 1;
3905 }
3906 #endif
3907
3908 ipw_send_system_config(priv);
3909 }
3910
/* Maps an 802.11 status code to a human-readable reason string. */
struct ipw_status_code {
	u16 status;		/* 802.11 status code value */
	const char *reason;	/* human-readable description */
};
3915
/* IEEE 802.11 status codes with human-readable reasons, as carried in
 * authentication and (re)association response frames.  Taken from
 * ethereal-0.10.6 (see the file header). */
static const struct ipw_status_code ipw_status_codes[] = {
	{0x00, "Successful"},
	{0x01, "Unspecified failure"},
	{0x0A, "Cannot support all requested capabilities in the "
	 "Capability information field"},
	{0x0B, "Reassociation denied due to inability to confirm that "
	 "association exists"},
	{0x0C, "Association denied due to reason outside the scope of this "
	 "standard"},
	{0x0D,
	 "Responding station does not support the specified authentication "
	 "algorithm"},
	{0x0E,
	 "Received an Authentication frame with authentication sequence "
	 "transaction sequence number out of expected sequence"},
	{0x0F, "Authentication rejected because of challenge failure"},
	{0x10, "Authentication rejected due to timeout waiting for next "
	 "frame in sequence"},
	{0x11, "Association denied because AP is unable to handle additional "
	 "associated stations"},
	{0x12,
	 "Association denied due to requesting station not supporting all "
	 "of the datarates in the BSSBasicServiceSet Parameter"},
	{0x13,
	 "Association denied due to requesting station not supporting "
	 "short preamble operation"},
	{0x14,
	 "Association denied due to requesting station not supporting "
	 "PBCC encoding"},
	{0x15,
	 "Association denied due to requesting station not supporting "
	 "channel agility"},
	{0x19,
	 "Association denied due to requesting station not supporting "
	 "short slot operation"},
	{0x1A,
	 "Association denied due to requesting station not supporting "
	 "DSSS-OFDM operation"},
	{0x28, "Invalid Information Element"},
	{0x29, "Group Cipher is not valid"},
	{0x2A, "Pairwise Cipher is not valid"},
	{0x2B, "AKMP is not valid"},
	{0x2C, "Unsupported RSN IE version"},
	{0x2D, "Invalid RSN IE Capabilities"},
	{0x2E, "Cipher suite is rejected per security policy"},
};
3962
3963 static const char *ipw_get_status_code(u16 status)
3964 {
3965 int i;
3966 for (i = 0; i < ARRAY_SIZE(ipw_status_codes); i++)
3967 if (ipw_status_codes[i].status == (status & 0xff))
3968 return ipw_status_codes[i].reason;
3969 return "Unknown status value.";
3970 }
3971
3972 static void inline average_init(struct average *avg)
3973 {
3974 memset(avg, 0, sizeof(*avg));
3975 }
3976
3977 #define DEPTH_RSSI 8
3978 #define DEPTH_NOISE 16
3979 static s16 exponential_average(s16 prev_avg, s16 val, u8 depth)
3980 {
3981 return ((depth-1)*prev_avg + val)/depth;
3982 }
3983
3984 static void average_add(struct average *avg, s16 val)
3985 {
3986 avg->sum -= avg->entries[avg->pos];
3987 avg->sum += val;
3988 avg->entries[avg->pos++] = val;
3989 if (unlikely(avg->pos == AVG_ENTRIES)) {
3990 avg->init = 1;
3991 avg->pos = 0;
3992 }
3993 }
3994
3995 static s16 average_value(struct average *avg)
3996 {
3997 if (!unlikely(avg->init)) {
3998 if (avg->pos)
3999 return avg->sum / avg->pos;
4000 return 0;
4001 }
4002
4003 return avg->sum / AVG_ENTRIES;
4004 }
4005
/* Reset all link-quality statistics, typically at (re)association time.
 * Firmware-maintained counters cannot be zeroed, so their current values
 * are captured as the new baseline instead. */
static void ipw_reset_stats(struct ipw_priv *priv)
{
	u32 len = sizeof(u32);

	priv->quality = 0;

	average_init(&priv->average_missed_beacons);
	/* Seed the exponential averages with plausible mid-range values. */
	priv->exp_avg_rssi = -60;
	priv->exp_avg_noise = -85 + 0x100;

	priv->last_rate = 0;
	priv->last_missed_beacons = 0;
	priv->last_rx_packets = 0;
	priv->last_tx_packets = 0;
	priv->last_tx_failures = 0;

	/* Firmware managed, reset only when NIC is restarted, so we have to
	 * normalize on the current value */
	ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC,
			&priv->last_rx_err, &len);
	ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE,
			&priv->last_tx_failures, &len);

	/* Driver managed, reset with each association */
	priv->missed_adhoc_beacons = 0;
	priv->missed_beacons = 0;
	priv->tx_packets = 0;
	priv->rx_packets = 0;

}
4036
/* Highest configured rate, in bits per second.  Scans the rate mask from
 * the top bit down and maps the first set bit to its bps value; falls
 * back to the mode's nominal maximum if no known bit is set. */
static u32 ipw_get_max_rate(struct ipw_priv *priv)
{
	u32 i = 0x80000000;
	u32 mask = priv->rates_mask;
	/* If currently associated in B mode, restrict the maximum
	 * rate match to B rates */
	if (priv->assoc_request.ieee_mode == IPW_B_MODE)
		mask &= IEEE80211_CCK_RATES_MASK;

	/* TODO: Verify that the rate is supported by the current rates
	 * list. */

	/* Find the highest set bit in the mask. */
	while (i && !(mask & i))
		i >>= 1;
	switch (i) {
	case IEEE80211_CCK_RATE_1MB_MASK:
		return 1000000;
	case IEEE80211_CCK_RATE_2MB_MASK:
		return 2000000;
	case IEEE80211_CCK_RATE_5MB_MASK:
		return 5500000;
	case IEEE80211_OFDM_RATE_6MB_MASK:
		return 6000000;
	case IEEE80211_OFDM_RATE_9MB_MASK:
		return 9000000;
	case IEEE80211_CCK_RATE_11MB_MASK:
		return 11000000;
	case IEEE80211_OFDM_RATE_12MB_MASK:
		return 12000000;
	case IEEE80211_OFDM_RATE_18MB_MASK:
		return 18000000;
	case IEEE80211_OFDM_RATE_24MB_MASK:
		return 24000000;
	case IEEE80211_OFDM_RATE_36MB_MASK:
		return 36000000;
	case IEEE80211_OFDM_RATE_48MB_MASK:
		return 48000000;
	case IEEE80211_OFDM_RATE_54MB_MASK:
		return 54000000;
	}

	/* No recognized rate bit: assume the mode's nominal maximum. */
	if (priv->ieee->mode == IEEE_B)
		return 11000000;
	else
		return 54000000;
}
4083
/* Current Tx rate in bits per second, or 0 when not associated.  The
 * firmware's measured rate is used only after enough traffic has flowed
 * to make it meaningful; before that the configured maximum is reported. */
static u32 ipw_get_current_rate(struct ipw_priv *priv)
{
	u32 rate, len = sizeof(rate);
	int err;

	if (!(priv->status & STATUS_ASSOCIATED))
		return 0;

	if (priv->tx_packets > IPW_REAL_RATE_RX_PACKET_THRESHOLD) {
		/* Enough traffic: ask the firmware for its measured rate. */
		err = ipw_get_ordinal(priv, IPW_ORD_STAT_TX_CURR_RATE, &rate,
				      &len);
		if (err) {
			IPW_DEBUG_INFO("failed querying ordinals.\n");
			return 0;
		}
	} else
		return ipw_get_max_rate(priv);

	/* Translate the firmware rate code to bits per second. */
	switch (rate) {
	case IPW_TX_RATE_1MB:
		return 1000000;
	case IPW_TX_RATE_2MB:
		return 2000000;
	case IPW_TX_RATE_5MB:
		return 5500000;
	case IPW_TX_RATE_6MB:
		return 6000000;
	case IPW_TX_RATE_9MB:
		return 9000000;
	case IPW_TX_RATE_11MB:
		return 11000000;
	case IPW_TX_RATE_12MB:
		return 12000000;
	case IPW_TX_RATE_18MB:
		return 18000000;
	case IPW_TX_RATE_24MB:
		return 24000000;
	case IPW_TX_RATE_36MB:
		return 36000000;
	case IPW_TX_RATE_48MB:
		return 48000000;
	case IPW_TX_RATE_54MB:
		return 54000000;
	}

	/* Unknown rate code. */
	return 0;
}
4131
#define IPW_STATS_INTERVAL (2 * HZ)
/* Periodically sample link statistics and derive an overall link quality
 * percentage in priv->quality.  Quality is the minimum of five separate
 * metrics (missed beacons, rate, Rx errors, Tx failures, signal level).
 * Reschedules itself every IPW_STATS_INTERVAL while associated. */
static void ipw_gather_stats(struct ipw_priv *priv)
{
	u32 rx_err, rx_err_delta, rx_packets_delta;
	u32 tx_failures, tx_failures_delta, tx_packets_delta;
	u32 missed_beacons_percent, missed_beacons_delta;
	u32 quality = 0;
	u32 len = sizeof(u32);
	s16 rssi;
	u32 beacon_quality, signal_quality, tx_quality, rx_quality,
	    rate_quality;
	u32 max_rate;

	if (!(priv->status & STATUS_ASSOCIATED)) {
		priv->quality = 0;
		return;
	}

	/* Update the statistics */
	ipw_get_ordinal(priv, IPW_ORD_STAT_MISSED_BEACONS,
			&priv->missed_beacons, &len);
	missed_beacons_delta = priv->missed_beacons - priv->last_missed_beacons;
	priv->last_missed_beacons = priv->missed_beacons;
	if (priv->assoc_request.beacon_interval) {
		/* Convert the per-interval miss count to a percentage of
		 * beacons expected during IPW_STATS_INTERVAL. */
		missed_beacons_percent = missed_beacons_delta *
		    (HZ * le16_to_cpu(priv->assoc_request.beacon_interval)) /
		    (IPW_STATS_INTERVAL * 10);
	} else {
		missed_beacons_percent = 0;
	}
	average_add(&priv->average_missed_beacons, missed_beacons_percent);

	/* Deltas against the previous sample for each firmware counter. */
	ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC, &rx_err, &len);
	rx_err_delta = rx_err - priv->last_rx_err;
	priv->last_rx_err = rx_err;

	ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE, &tx_failures, &len);
	tx_failures_delta = tx_failures - priv->last_tx_failures;
	priv->last_tx_failures = tx_failures;

	rx_packets_delta = priv->rx_packets - priv->last_rx_packets;
	priv->last_rx_packets = priv->rx_packets;

	tx_packets_delta = priv->tx_packets - priv->last_tx_packets;
	priv->last_tx_packets = priv->tx_packets;

	/* Calculate quality based on the following:
	 *
	 * Missed beacon: 100% = 0, 0% = 70% missed
	 * Rate: 60% = 1Mbs, 100% = Max
	 * Rx and Tx errors represent a straight % of total Rx/Tx
	 * RSSI: 100% = > -50,  0% = < -80
	 * Rx errors: 100% = 0, 0% = 50% missed
	 *
	 * The lowest computed quality is used.
	 *
	 */
#define BEACON_THRESHOLD 5
	beacon_quality = 100 - missed_beacons_percent;
	if (beacon_quality < BEACON_THRESHOLD)
		beacon_quality = 0;
	else
		beacon_quality = (beacon_quality - BEACON_THRESHOLD) * 100 /
		    (100 - BEACON_THRESHOLD);
	IPW_DEBUG_STATS("Missed beacon: %3d%% (%d%%)\n",
			beacon_quality, missed_beacons_percent);

	/* Scale rate between 60% (lowest) and 100% (max configured rate). */
	priv->last_rate = ipw_get_current_rate(priv);
	max_rate = ipw_get_max_rate(priv);
	rate_quality = priv->last_rate * 40 / max_rate + 60;
	IPW_DEBUG_STATS("Rate quality : %3d%% (%dMbs)\n",
			rate_quality, priv->last_rate / 1000000);

	/* Error ratios only count once there is meaningful traffic. */
	if (rx_packets_delta > 100 && rx_packets_delta + rx_err_delta)
		rx_quality = 100 - (rx_err_delta * 100) /
		    (rx_packets_delta + rx_err_delta);
	else
		rx_quality = 100;
	IPW_DEBUG_STATS("Rx quality   : %3d%% (%u errors, %u packets)\n",
			rx_quality, rx_err_delta, rx_packets_delta);

	if (tx_packets_delta > 100 && tx_packets_delta + tx_failures_delta)
		tx_quality = 100 - (tx_failures_delta * 100) /
		    (tx_packets_delta + tx_failures_delta);
	else
		tx_quality = 100;
	IPW_DEBUG_STATS("Tx quality   : %3d%% (%u errors, %u packets)\n",
			tx_quality, tx_failures_delta, tx_packets_delta);

	/* Map RSSI onto 0..100 with a non-linear curve between the
	 * configured worst and perfect RSSI values. */
	rssi = priv->exp_avg_rssi;
	signal_quality =
	    (100 *
	     (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
	     (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) -
	     (priv->ieee->perfect_rssi - rssi) *
	     (15 * (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) +
	      62 * (priv->ieee->perfect_rssi - rssi))) /
	    ((priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
	     (priv->ieee->perfect_rssi - priv->ieee->worst_rssi));
	if (signal_quality > 100)
		signal_quality = 100;
	else if (signal_quality < 1)
		signal_quality = 0;

	IPW_DEBUG_STATS("Signal level : %3d%% (%d dBm)\n",
			signal_quality, rssi);

	/* Overall quality is the worst individual metric. */
	quality = min(beacon_quality,
		      min(rate_quality,
			  min(tx_quality, min(rx_quality, signal_quality))));
	if (quality == beacon_quality)
		IPW_DEBUG_STATS("Quality (%d%%): Clamped to missed beacons.\n",
				quality);
	if (quality == rate_quality)
		IPW_DEBUG_STATS("Quality (%d%%): Clamped to rate quality.\n",
				quality);
	if (quality == tx_quality)
		IPW_DEBUG_STATS("Quality (%d%%): Clamped to Tx quality.\n",
				quality);
	if (quality == rx_quality)
		IPW_DEBUG_STATS("Quality (%d%%): Clamped to Rx quality.\n",
				quality);
	if (quality == signal_quality)
		IPW_DEBUG_STATS("Quality (%d%%): Clamped to signal quality.\n",
				quality);

	priv->quality = quality;

	/* Sample again after the next interval. */
	queue_delayed_work(priv->workqueue, &priv->gather_stats,
			   IPW_STATS_INTERVAL);
}
4263
4264 static void ipw_bg_gather_stats(struct work_struct *work)
4265 {
4266 struct ipw_priv *priv =
4267 container_of(work, struct ipw_priv, gather_stats.work);
4268 mutex_lock(&priv->mutex);
4269 ipw_gather_stats(priv);
4270 mutex_unlock(&priv->mutex);
4271 }
4272
4273 /* Missed beacon behavior:
4274 * 1st missed -> roaming_threshold, just wait, don't do any scan/roam.
4275 * roaming_threshold -> disassociate_threshold, scan and roam for better signal.
4276 * Above disassociate threshold, give up and stop scanning.
4277 * Roaming is disabled if disassociate_threshold <= roaming_threshold */
/* React to a firmware missed-beacon notification.
 *
 * @priv: driver private state
 * @missed_count: consecutive beacons missed, as reported by firmware
 *
 * Escalation (see the policy comment above this function):
 *   count > disassociate_threshold while associated -> disassociate;
 *   roaming_threshold < count <= disassociate_threshold -> start roaming
 *   (only if the "roaming" flag is set -- presumably the module-load
 *   parameter; confirm at its definition);
 *   otherwise just abort a stuck scan and log.
 * All actions are deferred to the driver workqueue; nothing heavyweight
 * runs in this (notification) context. */
static void ipw_handle_missed_beacon(struct ipw_priv *priv,
				     int missed_count)
{
	/* Remember the most recent count for stats/debug consumers. */
	priv->notif_missed_beacons = missed_count;

	if (missed_count > priv->disassociate_threshold &&
	    priv->status & STATUS_ASSOCIATED) {
		/* If associated and we've hit the missed
		 * beacon threshold, disassociate, turn
		 * off roaming, and abort any active scans */
		IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
			  IPW_DL_STATE | IPW_DL_ASSOC,
			  "Missed beacon: %d - disassociate\n", missed_count);
		priv->status &= ~STATUS_ROAMING;
		if (priv->status & STATUS_SCANNING) {
			IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
				  IPW_DL_STATE,
				  "Aborting scan with missed beacon.\n");
			queue_work(priv->workqueue, &priv->abort_scan);
		}

		queue_work(priv->workqueue, &priv->disassociate);
		return;
	}

	if (priv->status & STATUS_ROAMING) {
		/* If we are currently roaming, then just
		 * print a debug statement... */
		IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
			  "Missed beacon: %d - roam in progress\n",
			  missed_count);
		return;
	}

	if (roaming &&
	    (missed_count > priv->roaming_threshold &&
	     missed_count <= priv->disassociate_threshold)) {
		/* If we are not already roaming, set the ROAM
		 * bit in the status and kick off a scan.
		 * This can happen several times before we reach
		 * disassociate_threshold. */
		IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
			  "Missed beacon: %d - initiate "
			  "roaming\n", missed_count);
		if (!(priv->status & STATUS_ROAMING)) {
			priv->status |= STATUS_ROAMING;
			/* Kick a scan immediately unless one is running. */
			if (!(priv->status & STATUS_SCANNING))
				queue_delayed_work(priv->workqueue,
						   &priv->request_scan, 0);
		}
		return;
	}

	if (priv->status & STATUS_SCANNING) {
		/* Stop scan to keep fw from getting
		 * stuck (only if we aren't roaming --
		 * otherwise we'll never scan more than 2 or 3
		 * channels..) */
		IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | IPW_DL_STATE,
			  "Aborting scan with missed beacon.\n");
		queue_work(priv->workqueue, &priv->abort_scan);
	}

	IPW_DEBUG_NOTIF("Missed beacon: %d\n", missed_count);
}
4343
4344 static void ipw_scan_event(struct work_struct *work)
4345 {
4346 union iwreq_data wrqu;
4347
4348 struct ipw_priv *priv =
4349 container_of(work, struct ipw_priv, scan_event.work);
4350
4351 wrqu.data.length = 0;
4352 wrqu.data.flags = 0;
4353 wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL);
4354 }
4355
4356 static void handle_scan_event(struct ipw_priv *priv)
4357 {
4358 /* Only userspace-requested scan completion events go out immediately */
4359 if (!priv->user_requested_scan) {
4360 if (!delayed_work_pending(&priv->scan_event))
4361 queue_delayed_work(priv->workqueue, &priv->scan_event,
4362 round_jiffies_relative(msecs_to_jiffies(4000)));
4363 } else {
4364 union iwreq_data wrqu;
4365
4366 priv->user_requested_scan = 0;
4367 cancel_delayed_work(&priv->scan_event);
4368
4369 wrqu.data.length = 0;
4370 wrqu.data.flags = 0;
4371 wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL);
4372 }
4373 }
4374
4375 /**
4376 * Handle host notification packet.
4377 * Called from interrupt routine
4378 */
4379 static void ipw_rx_notification(struct ipw_priv *priv,
4380 struct ipw_rx_notification *notif)
4381 {
4382 DECLARE_MAC_BUF(mac);
4383 u16 size = le16_to_cpu(notif->size);
4384 notif->size = le16_to_cpu(notif->size);
4385
4386 IPW_DEBUG_NOTIF("type = %i (%d bytes)\n", notif->subtype, size);
4387
4388 switch (notif->subtype) {
4389 case HOST_NOTIFICATION_STATUS_ASSOCIATED:{
4390 struct notif_association *assoc = &notif->u.assoc;
4391
4392 switch (assoc->state) {
4393 case CMAS_ASSOCIATED:{
4394 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4395 IPW_DL_ASSOC,
4396 "associated: '%s' %s"
4397 " \n",
4398 escape_essid(priv->essid,
4399 priv->essid_len),
4400 print_mac(mac, priv->bssid));
4401
4402 switch (priv->ieee->iw_mode) {
4403 case IW_MODE_INFRA:
4404 memcpy(priv->ieee->bssid,
4405 priv->bssid, ETH_ALEN);
4406 break;
4407
4408 case IW_MODE_ADHOC:
4409 memcpy(priv->ieee->bssid,
4410 priv->bssid, ETH_ALEN);
4411
4412 /* clear out the station table */
4413 priv->num_stations = 0;
4414
4415 IPW_DEBUG_ASSOC
4416 ("queueing adhoc check\n");
4417 queue_delayed_work(priv->
4418 workqueue,
4419 &priv->
4420 adhoc_check,
4421 le16_to_cpu(priv->
4422 assoc_request.
4423 beacon_interval));
4424 break;
4425 }
4426
4427 priv->status &= ~STATUS_ASSOCIATING;
4428 priv->status |= STATUS_ASSOCIATED;
4429 queue_work(priv->workqueue,
4430 &priv->system_config);
4431
4432 #ifdef CONFIG_IPW2200_QOS
4433 #define IPW_GET_PACKET_STYPE(x) WLAN_FC_GET_STYPE( \
4434 le16_to_cpu(((struct ieee80211_hdr *)(x))->frame_ctl))
4435 if ((priv->status & STATUS_AUTH) &&
4436 (IPW_GET_PACKET_STYPE(&notif->u.raw)
4437 == IEEE80211_STYPE_ASSOC_RESP)) {
4438 if ((sizeof
4439 (struct
4440 ieee80211_assoc_response)
4441 <= size)
4442 && (size <= 2314)) {
4443 struct
4444 ieee80211_rx_stats
4445 stats = {
4446 .len = size - 1,
4447 };
4448
4449 IPW_DEBUG_QOS
4450 ("QoS Associate "
4451 "size %d\n", size);
4452 ieee80211_rx_mgt(priv->
4453 ieee,
4454 (struct
4455 ieee80211_hdr_4addr
4456 *)
4457 &notif->u.raw, &stats);
4458 }
4459 }
4460 #endif
4461
4462 schedule_work(&priv->link_up);
4463
4464 break;
4465 }
4466
4467 case CMAS_AUTHENTICATED:{
4468 if (priv->
4469 status & (STATUS_ASSOCIATED |
4470 STATUS_AUTH)) {
4471 struct notif_authenticate *auth
4472 = &notif->u.auth;
4473 IPW_DEBUG(IPW_DL_NOTIF |
4474 IPW_DL_STATE |
4475 IPW_DL_ASSOC,
4476 "deauthenticated: '%s' "
4477 "%s"
4478 ": (0x%04X) - %s \n",
4479 escape_essid(priv->
4480 essid,
4481 priv->
4482 essid_len),
4483 print_mac(mac, priv->bssid),
4484 ntohs(auth->status),
4485 ipw_get_status_code
4486 (ntohs
4487 (auth->status)));
4488
4489 priv->status &=
4490 ~(STATUS_ASSOCIATING |
4491 STATUS_AUTH |
4492 STATUS_ASSOCIATED);
4493
4494 schedule_work(&priv->link_down);
4495 break;
4496 }
4497
4498 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4499 IPW_DL_ASSOC,
4500 "authenticated: '%s' %s"
4501 "\n",
4502 escape_essid(priv->essid,
4503 priv->essid_len),
4504 print_mac(mac, priv->bssid));
4505 break;
4506 }
4507
4508 case CMAS_INIT:{
4509 if (priv->status & STATUS_AUTH) {
4510 struct
4511 ieee80211_assoc_response
4512 *resp;
4513 resp =
4514 (struct
4515 ieee80211_assoc_response
4516 *)&notif->u.raw;
4517 IPW_DEBUG(IPW_DL_NOTIF |
4518 IPW_DL_STATE |
4519 IPW_DL_ASSOC,
4520 "association failed (0x%04X): %s\n",
4521 ntohs(resp->status),
4522 ipw_get_status_code
4523 (ntohs
4524 (resp->status)));
4525 }
4526
4527 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4528 IPW_DL_ASSOC,
4529 "disassociated: '%s' %s"
4530 " \n",
4531 escape_essid(priv->essid,
4532 priv->essid_len),
4533 print_mac(mac, priv->bssid));
4534
4535 priv->status &=
4536 ~(STATUS_DISASSOCIATING |
4537 STATUS_ASSOCIATING |
4538 STATUS_ASSOCIATED | STATUS_AUTH);
4539 if (priv->assoc_network
4540 && (priv->assoc_network->
4541 capability &
4542 WLAN_CAPABILITY_IBSS))
4543 ipw_remove_current_network
4544 (priv);
4545
4546 schedule_work(&priv->link_down);
4547
4548 break;
4549 }
4550
4551 case CMAS_RX_ASSOC_RESP:
4552 break;
4553
4554 default:
4555 IPW_ERROR("assoc: unknown (%d)\n",
4556 assoc->state);
4557 break;
4558 }
4559
4560 break;
4561 }
4562
4563 case HOST_NOTIFICATION_STATUS_AUTHENTICATE:{
4564 struct notif_authenticate *auth = &notif->u.auth;
4565 switch (auth->state) {
4566 case CMAS_AUTHENTICATED:
4567 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4568 "authenticated: '%s' %s \n",
4569 escape_essid(priv->essid,
4570 priv->essid_len),
4571 print_mac(mac, priv->bssid));
4572 priv->status |= STATUS_AUTH;
4573 break;
4574
4575 case CMAS_INIT:
4576 if (priv->status & STATUS_AUTH) {
4577 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4578 IPW_DL_ASSOC,
4579 "authentication failed (0x%04X): %s\n",
4580 ntohs(auth->status),
4581 ipw_get_status_code(ntohs
4582 (auth->
4583 status)));
4584 }
4585 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4586 IPW_DL_ASSOC,
4587 "deauthenticated: '%s' %s\n",
4588 escape_essid(priv->essid,
4589 priv->essid_len),
4590 print_mac(mac, priv->bssid));
4591
4592 priv->status &= ~(STATUS_ASSOCIATING |
4593 STATUS_AUTH |
4594 STATUS_ASSOCIATED);
4595
4596 schedule_work(&priv->link_down);
4597 break;
4598
4599 case CMAS_TX_AUTH_SEQ_1:
4600 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4601 IPW_DL_ASSOC, "AUTH_SEQ_1\n");
4602 break;
4603 case CMAS_RX_AUTH_SEQ_2:
4604 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4605 IPW_DL_ASSOC, "AUTH_SEQ_2\n");
4606 break;
4607 case CMAS_AUTH_SEQ_1_PASS:
4608 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4609 IPW_DL_ASSOC, "AUTH_SEQ_1_PASS\n");
4610 break;
4611 case CMAS_AUTH_SEQ_1_FAIL:
4612 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4613 IPW_DL_ASSOC, "AUTH_SEQ_1_FAIL\n");
4614 break;
4615 case CMAS_TX_AUTH_SEQ_3:
4616 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4617 IPW_DL_ASSOC, "AUTH_SEQ_3\n");
4618 break;
4619 case CMAS_RX_AUTH_SEQ_4:
4620 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4621 IPW_DL_ASSOC, "RX_AUTH_SEQ_4\n");
4622 break;
4623 case CMAS_AUTH_SEQ_2_PASS:
4624 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4625 IPW_DL_ASSOC, "AUTH_SEQ_2_PASS\n");
4626 break;
4627 case CMAS_AUTH_SEQ_2_FAIL:
4628 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4629 IPW_DL_ASSOC, "AUT_SEQ_2_FAIL\n");
4630 break;
4631 case CMAS_TX_ASSOC:
4632 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4633 IPW_DL_ASSOC, "TX_ASSOC\n");
4634 break;
4635 case CMAS_RX_ASSOC_RESP:
4636 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4637 IPW_DL_ASSOC, "RX_ASSOC_RESP\n");
4638
4639 break;
4640 case CMAS_ASSOCIATED:
4641 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4642 IPW_DL_ASSOC, "ASSOCIATED\n");
4643 break;
4644 default:
4645 IPW_DEBUG_NOTIF("auth: failure - %d\n",
4646 auth->state);
4647 break;
4648 }
4649 break;
4650 }
4651
4652 case HOST_NOTIFICATION_STATUS_SCAN_CHANNEL_RESULT:{
4653 struct notif_channel_result *x =
4654 &notif->u.channel_result;
4655
4656 if (size == sizeof(*x)) {
4657 IPW_DEBUG_SCAN("Scan result for channel %d\n",
4658 x->channel_num);
4659 } else {
4660 IPW_DEBUG_SCAN("Scan result of wrong size %d "
4661 "(should be %zd)\n",
4662 size, sizeof(*x));
4663 }
4664 break;
4665 }
4666
4667 case HOST_NOTIFICATION_STATUS_SCAN_COMPLETED:{
4668 struct notif_scan_complete *x = &notif->u.scan_complete;
4669 if (size == sizeof(*x)) {
4670 IPW_DEBUG_SCAN
4671 ("Scan completed: type %d, %d channels, "
4672 "%d status\n", x->scan_type,
4673 x->num_channels, x->status);
4674 } else {
4675 IPW_ERROR("Scan completed of wrong size %d "
4676 "(should be %zd)\n",
4677 size, sizeof(*x));
4678 }
4679
4680 priv->status &=
4681 ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
4682
4683 wake_up_interruptible(&priv->wait_state);
4684 cancel_delayed_work(&priv->scan_check);
4685
4686 if (priv->status & STATUS_EXIT_PENDING)
4687 break;
4688
4689 priv->ieee->scans++;
4690
4691 #ifdef CONFIG_IPW2200_MONITOR
4692 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
4693 priv->status |= STATUS_SCAN_FORCED;
4694 queue_delayed_work(priv->workqueue,
4695 &priv->request_scan, 0);
4696 break;
4697 }
4698 priv->status &= ~STATUS_SCAN_FORCED;
4699 #endif /* CONFIG_IPW2200_MONITOR */
4700
4701 if (!(priv->status & (STATUS_ASSOCIATED |
4702 STATUS_ASSOCIATING |
4703 STATUS_ROAMING |
4704 STATUS_DISASSOCIATING)))
4705 queue_work(priv->workqueue, &priv->associate);
4706 else if (priv->status & STATUS_ROAMING) {
4707 if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
4708 /* If a scan completed and we are in roam mode, then
4709 * the scan that completed was the one requested as a
4710 * result of entering roam... so, schedule the
4711 * roam work */
4712 queue_work(priv->workqueue,
4713 &priv->roam);
4714 else
4715 /* Don't schedule if we aborted the scan */
4716 priv->status &= ~STATUS_ROAMING;
4717 } else if (priv->status & STATUS_SCAN_PENDING)
4718 queue_delayed_work(priv->workqueue,
4719 &priv->request_scan, 0);
4720 else if (priv->config & CFG_BACKGROUND_SCAN
4721 && priv->status & STATUS_ASSOCIATED)
4722 queue_delayed_work(priv->workqueue,
4723 &priv->request_scan,
4724 round_jiffies_relative(HZ));
4725
4726 /* Send an empty event to user space.
4727 * We don't send the received data on the event because
4728 * it would require us to do complex transcoding, and
4729 * we want to minimise the work done in the irq handler
4730 * Use a request to extract the data.
4731 * Also, we generate this even for any scan, regardless
4732 * on how the scan was initiated. User space can just
4733 * sync on periodic scan to get fresh data...
4734 * Jean II */
4735 if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
4736 handle_scan_event(priv);
4737 break;
4738 }
4739
4740 case HOST_NOTIFICATION_STATUS_FRAG_LENGTH:{
4741 struct notif_frag_length *x = &notif->u.frag_len;
4742
4743 if (size == sizeof(*x))
4744 IPW_ERROR("Frag length: %d\n",
4745 le16_to_cpu(x->frag_length));
4746 else
4747 IPW_ERROR("Frag length of wrong size %d "
4748 "(should be %zd)\n",
4749 size, sizeof(*x));
4750 break;
4751 }
4752
4753 case HOST_NOTIFICATION_STATUS_LINK_DETERIORATION:{
4754 struct notif_link_deterioration *x =
4755 &notif->u.link_deterioration;
4756
4757 if (size == sizeof(*x)) {
4758 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4759 "link deterioration: type %d, cnt %d\n",
4760 x->silence_notification_type,
4761 x->silence_count);
4762 memcpy(&priv->last_link_deterioration, x,
4763 sizeof(*x));
4764 } else {
4765 IPW_ERROR("Link Deterioration of wrong size %d "
4766 "(should be %zd)\n",
4767 size, sizeof(*x));
4768 }
4769 break;
4770 }
4771
4772 case HOST_NOTIFICATION_DINO_CONFIG_RESPONSE:{
4773 IPW_ERROR("Dino config\n");
4774 if (priv->hcmd
4775 && priv->hcmd->cmd != HOST_CMD_DINO_CONFIG)
4776 IPW_ERROR("Unexpected DINO_CONFIG_RESPONSE\n");
4777
4778 break;
4779 }
4780
4781 case HOST_NOTIFICATION_STATUS_BEACON_STATE:{
4782 struct notif_beacon_state *x = &notif->u.beacon_state;
4783 if (size != sizeof(*x)) {
4784 IPW_ERROR
4785 ("Beacon state of wrong size %d (should "
4786 "be %zd)\n", size, sizeof(*x));
4787 break;
4788 }
4789
4790 if (le32_to_cpu(x->state) ==
4791 HOST_NOTIFICATION_STATUS_BEACON_MISSING)
4792 ipw_handle_missed_beacon(priv,
4793 le32_to_cpu(x->
4794 number));
4795
4796 break;
4797 }
4798
4799 case HOST_NOTIFICATION_STATUS_TGI_TX_KEY:{
4800 struct notif_tgi_tx_key *x = &notif->u.tgi_tx_key;
4801 if (size == sizeof(*x)) {
4802 IPW_ERROR("TGi Tx Key: state 0x%02x sec type "
4803 "0x%02x station %d\n",
4804 x->key_state, x->security_type,
4805 x->station_index);
4806 break;
4807 }
4808
4809 IPW_ERROR
4810 ("TGi Tx Key of wrong size %d (should be %zd)\n",
4811 size, sizeof(*x));
4812 break;
4813 }
4814
4815 case HOST_NOTIFICATION_CALIB_KEEP_RESULTS:{
4816 struct notif_calibration *x = &notif->u.calibration;
4817
4818 if (size == sizeof(*x)) {
4819 memcpy(&priv->calib, x, sizeof(*x));
4820 IPW_DEBUG_INFO("TODO: Calibration\n");
4821 break;
4822 }
4823
4824 IPW_ERROR
4825 ("Calibration of wrong size %d (should be %zd)\n",
4826 size, sizeof(*x));
4827 break;
4828 }
4829
4830 case HOST_NOTIFICATION_NOISE_STATS:{
4831 if (size == sizeof(u32)) {
4832 priv->exp_avg_noise =
4833 exponential_average(priv->exp_avg_noise,
4834 (u8) (le32_to_cpu(notif->u.noise.value) & 0xff),
4835 DEPTH_NOISE);
4836 break;
4837 }
4838
4839 IPW_ERROR
4840 ("Noise stat is wrong size %d (should be %zd)\n",
4841 size, sizeof(u32));
4842 break;
4843 }
4844
4845 default:
4846 IPW_DEBUG_NOTIF("Unknown notification: "
4847 "subtype=%d,flags=0x%2x,size=%d\n",
4848 notif->subtype, notif->flags, size);
4849 }
4850 }
4851
4852 /**
4853 * Destroys all DMA structures and initialise them again
4854 *
4855 * @param priv
4856 * @return error code
4857 */
4858 static int ipw_queue_reset(struct ipw_priv *priv)
4859 {
4860 int rc = 0;
4861 /** @todo customize queue sizes */
4862 int nTx = 64, nTxCmd = 8;
4863 ipw_tx_queue_free(priv);
4864 /* Tx CMD queue */
4865 rc = ipw_queue_tx_init(priv, &priv->txq_cmd, nTxCmd,
4866 IPW_TX_CMD_QUEUE_READ_INDEX,
4867 IPW_TX_CMD_QUEUE_WRITE_INDEX,
4868 IPW_TX_CMD_QUEUE_BD_BASE,
4869 IPW_TX_CMD_QUEUE_BD_SIZE);
4870 if (rc) {
4871 IPW_ERROR("Tx Cmd queue init failed\n");
4872 goto error;
4873 }
4874 /* Tx queue(s) */
4875 rc = ipw_queue_tx_init(priv, &priv->txq[0], nTx,
4876 IPW_TX_QUEUE_0_READ_INDEX,
4877 IPW_TX_QUEUE_0_WRITE_INDEX,
4878 IPW_TX_QUEUE_0_BD_BASE, IPW_TX_QUEUE_0_BD_SIZE);
4879 if (rc) {
4880 IPW_ERROR("Tx 0 queue init failed\n");
4881 goto error;
4882 }
4883 rc = ipw_queue_tx_init(priv, &priv->txq[1], nTx,
4884 IPW_TX_QUEUE_1_READ_INDEX,
4885 IPW_TX_QUEUE_1_WRITE_INDEX,
4886 IPW_TX_QUEUE_1_BD_BASE, IPW_TX_QUEUE_1_BD_SIZE);
4887 if (rc) {
4888 IPW_ERROR("Tx 1 queue init failed\n");
4889 goto error;
4890 }
4891 rc = ipw_queue_tx_init(priv, &priv->txq[2], nTx,
4892 IPW_TX_QUEUE_2_READ_INDEX,
4893 IPW_TX_QUEUE_2_WRITE_INDEX,
4894 IPW_TX_QUEUE_2_BD_BASE, IPW_TX_QUEUE_2_BD_SIZE);
4895 if (rc) {
4896 IPW_ERROR("Tx 2 queue init failed\n");
4897 goto error;
4898 }
4899 rc = ipw_queue_tx_init(priv, &priv->txq[3], nTx,
4900 IPW_TX_QUEUE_3_READ_INDEX,
4901 IPW_TX_QUEUE_3_WRITE_INDEX,
4902 IPW_TX_QUEUE_3_BD_BASE, IPW_TX_QUEUE_3_BD_SIZE);
4903 if (rc) {
4904 IPW_ERROR("Tx 3 queue init failed\n");
4905 goto error;
4906 }
4907 /* statistics */
4908 priv->rx_bufs_min = 0;
4909 priv->rx_pend_max = 0;
4910 return rc;
4911
4912 error:
4913 ipw_tx_queue_free(priv);
4914 return rc;
4915 }
4916
4917 /**
4918 * Reclaim Tx queue entries no more used by NIC.
4919 *
4920 * When FW advances 'R' index, all entries between old and
4921 * new 'R' index need to be reclaimed. As result, some free space
4922 * forms. If there is enough free space (> low mark), wake Tx queue.
4923 *
4924 * @note Need to protect against garbage in 'R' index
4925 * @param priv
4926 * @param txq
4927 * @param qindex
4928 * @return Number of used entries remains in the queue
4929 */
4930 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
4931 struct clx2_tx_queue *txq, int qindex)
4932 {
4933 u32 hw_tail;
4934 int used;
4935 struct clx2_queue *q = &txq->q;
4936
4937 hw_tail = ipw_read32(priv, q->reg_r);
4938 if (hw_tail >= q->n_bd) {
4939 IPW_ERROR
4940 ("Read index for DMA queue (%d) is out of range [0-%d)\n",
4941 hw_tail, q->n_bd);
4942 goto done;
4943 }
4944 for (; q->last_used != hw_tail;
4945 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
4946 ipw_queue_tx_free_tfd(priv, txq);
4947 priv->tx_packets++;
4948 }
4949 done:
4950 if ((ipw_queue_space(q) > q->low_mark) &&
4951 (qindex >= 0) &&
4952 (priv->status & STATUS_ASSOCIATED) && netif_running(priv->net_dev))
4953 netif_wake_queue(priv->net_dev);
4954 used = q->first_empty - q->last_used;
4955 if (used < 0)
4956 used += q->n_bd;
4957
4958 return used;
4959 }
4960
4961 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
4962 int len, int sync)
4963 {
4964 struct clx2_tx_queue *txq = &priv->txq_cmd;
4965 struct clx2_queue *q = &txq->q;
4966 struct tfd_frame *tfd;
4967
4968 if (ipw_queue_space(q) < (sync ? 1 : 2)) {
4969 IPW_ERROR("No space for Tx\n");
4970 return -EBUSY;
4971 }
4972
4973 tfd = &txq->bd[q->first_empty];
4974 txq->txb[q->first_empty] = NULL;
4975
4976 memset(tfd, 0, sizeof(*tfd));
4977 tfd->control_flags.message_type = TX_HOST_COMMAND_TYPE;
4978 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
4979 priv->hcmd_seq++;
4980 tfd->u.cmd.index = hcmd;
4981 tfd->u.cmd.length = len;
4982 memcpy(tfd->u.cmd.payload, buf, len);
4983 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
4984 ipw_write32(priv, q->reg_w, q->first_empty);
4985 _ipw_read32(priv, 0x90);
4986
4987 return 0;
4988 }
4989
4990 /*
4991 * Rx theory of operation
4992 *
4993 * The host allocates 32 DMA target addresses and passes the host address
4994 * to the firmware at register IPW_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
4995 * 0 to 31
4996 *
4997 * Rx Queue Indexes
4998 * The host/firmware share two index registers for managing the Rx buffers.
4999 *
5000 * The READ index maps to the first position that the firmware may be writing
5001 * to -- the driver can read up to (but not including) this position and get
5002 * good data.
5003 * The READ index is managed by the firmware once the card is enabled.
5004 *
5005 * The WRITE index maps to the last position the driver has read from -- the
5006 * position preceding WRITE is the last slot the firmware can place a packet.
5007 *
5008 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
5009 * WRITE = READ.
5010 *
5011 * During initialization the host sets up the READ queue position to the first
5012 * INDEX position, and WRITE to the last (READ - 1 wrapped)
5013 *
5014 * When the firmware places a packet in a buffer it will advance the READ index
5015 * and fire the RX interrupt. The driver can then query the READ index and
5016 * process as many packets as possible, moving the WRITE index forward as it
5017 * resets the Rx queue buffers with new memory.
5018 *
5019 * The management in the driver is as follows:
5020 * + A list of pre-allocated SKBs is stored in ipw->rxq->rx_free. When
5021 * ipw->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
 * to replenish the ipw->rxq->rx_free.
5023 * + In ipw_rx_queue_replenish (scheduled) if 'processed' != 'read' then the
5024 * ipw->rxq is replenished and the READ INDEX is updated (updating the
5025 * 'processed' and 'read' driver indexes as well)
5026 * + A received packet is processed and handed to the kernel network stack,
5027 * detached from the ipw->rxq. The driver 'processed' index is updated.
5028 * + The Host/Firmware ipw->rxq is replenished at tasklet time from the rx_free
5029 * list. If there are no allocated buffers in ipw->rxq->rx_free, the READ
5030 * INDEX is not incremented and ipw->status(RX_STALLED) is set. If there
5031 * were enough free buffers and RX_STALLED is set it is cleared.
5032 *
5033 *
5034 * Driver sequence:
5035 *
5036 * ipw_rx_queue_alloc() Allocates rx_free
5037 * ipw_rx_queue_replenish() Replenishes rx_free list from rx_used, and calls
5038 * ipw_rx_queue_restock
5039 * ipw_rx_queue_restock() Moves available buffers from rx_free into Rx
5040 * queue, updates firmware pointers, and updates
5041 * the WRITE index. If insufficient rx_free buffers
5042 * are available, schedules ipw_rx_queue_replenish
5043 *
5044 * -- enable interrupts --
5045 * ISR - ipw_rx() Detach ipw_rx_mem_buffers from pool up to the
5046 * READ INDEX, detaching the SKB from the pool.
5047 * Moves the packet buffer from queue to rx_used.
5048 * Calls ipw_rx_queue_restock to refill any empty
5049 * slots.
5050 * ...
5051 *
5052 */
5053
5054 /*
5055 * If there are slots in the RX queue that need to be restocked,
5056 * and we have free pre-allocated buffers, fill the ranks as much
5057 * as we can pulling from rx_free.
5058 *
5059 * This moves the 'write' index forward to catch up with 'processed', and
5060 * also updates the memory address in the firmware to reference the new
5061 * target buffer.
5062 */
5063 static void ipw_rx_queue_restock(struct ipw_priv *priv)
5064 {
5065 struct ipw_rx_queue *rxq = priv->rxq;
5066 struct list_head *element;
5067 struct ipw_rx_mem_buffer *rxb;
5068 unsigned long flags;
5069 int write;
5070
5071 spin_lock_irqsave(&rxq->lock, flags);
5072 write = rxq->write;
5073 while ((rxq->write != rxq->processed) && (rxq->free_count)) {
5074 element = rxq->rx_free.next;
5075 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
5076 list_del(element);
5077
5078 ipw_write32(priv, IPW_RFDS_TABLE_LOWER + rxq->write * RFD_SIZE,
5079 rxb->dma_addr);
5080 rxq->queue[rxq->write] = rxb;
5081 rxq->write = (rxq->write + 1) % RX_QUEUE_SIZE;
5082 rxq->free_count--;
5083 }
5084 spin_unlock_irqrestore(&rxq->lock, flags);
5085
5086 /* If the pre-allocated buffer pool is dropping low, schedule to
5087 * refill it */
5088 if (rxq->free_count <= RX_LOW_WATERMARK)
5089 queue_work(priv->workqueue, &priv->rx_replenish);
5090
5091 /* If we've added more space for the firmware to place data, tell it */
5092 if (write != rxq->write)
5093 ipw_write32(priv, IPW_RX_WRITE_INDEX, rxq->write);
5094 }
5095
5096 /*
5097 * Move all used packet from rx_used to rx_free, allocating a new SKB for each.
5098 * Also restock the Rx queue via ipw_rx_queue_restock.
5099 *
 * This is called as a scheduled work item (except for during initialization)
5101 */
5102 static void ipw_rx_queue_replenish(void *data)
5103 {
5104 struct ipw_priv *priv = data;
5105 struct ipw_rx_queue *rxq = priv->rxq;
5106 struct list_head *element;
5107 struct ipw_rx_mem_buffer *rxb;
5108 unsigned long flags;
5109
5110 spin_lock_irqsave(&rxq->lock, flags);
5111 while (!list_empty(&rxq->rx_used)) {
5112 element = rxq->rx_used.next;
5113 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
5114 rxb->skb = alloc_skb(IPW_RX_BUF_SIZE, GFP_ATOMIC);
5115 if (!rxb->skb) {
5116 printk(KERN_CRIT "%s: Can not allocate SKB buffers.\n",
5117 priv->net_dev->name);
5118 /* We don't reschedule replenish work here -- we will
5119 * call the restock method and if it still needs
5120 * more buffers it will schedule replenish */
5121 break;
5122 }
5123 list_del(element);
5124
5125 rxb->dma_addr =
5126 pci_map_single(priv->pci_dev, rxb->skb->data,
5127 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
5128
5129 list_add_tail(&rxb->list, &rxq->rx_free);
5130 rxq->free_count++;
5131 }
5132 spin_unlock_irqrestore(&rxq->lock, flags);
5133
5134 ipw_rx_queue_restock(priv);
5135 }
5136
5137 static void ipw_bg_rx_queue_replenish(struct work_struct *work)
5138 {
5139 struct ipw_priv *priv =
5140 container_of(work, struct ipw_priv, rx_replenish);
5141 mutex_lock(&priv->mutex);
5142 ipw_rx_queue_replenish(priv);
5143 mutex_unlock(&priv->mutex);
5144 }
5145
5146 /* Assumes that the skb field of the buffers in 'pool' is kept accurate.
5147 * If an SKB has been detached, the POOL needs to have its SKB set to NULL
5148 * This free routine walks the list of POOL entries and if SKB is set to
5149 * non NULL it is unmapped and freed
5150 */
5151 static void ipw_rx_queue_free(struct ipw_priv *priv, struct ipw_rx_queue *rxq)
5152 {
5153 int i;
5154
5155 if (!rxq)
5156 return;
5157
5158 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
5159 if (rxq->pool[i].skb != NULL) {
5160 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
5161 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
5162 dev_kfree_skb(rxq->pool[i].skb);
5163 }
5164 }
5165
5166 kfree(rxq);
5167 }
5168
5169 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *priv)
5170 {
5171 struct ipw_rx_queue *rxq;
5172 int i;
5173
5174 rxq = kzalloc(sizeof(*rxq), GFP_KERNEL);
5175 if (unlikely(!rxq)) {
5176 IPW_ERROR("memory allocation failed\n");
5177 return NULL;
5178 }
5179 spin_lock_init(&rxq->lock);
5180 INIT_LIST_HEAD(&rxq->rx_free);
5181 INIT_LIST_HEAD(&rxq->rx_used);
5182
5183 /* Fill the rx_used queue with _all_ of the Rx buffers */
5184 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
5185 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
5186
5187 /* Set us so that we have processed and used all buffers, but have
5188 * not restocked the Rx queue with fresh buffers */
5189 rxq->read = rxq->write = 0;
5190 rxq->processed = RX_QUEUE_SIZE - 1;
5191 rxq->free_count = 0;
5192
5193 return rxq;
5194 }
5195
5196 static int ipw_is_rate_in_mask(struct ipw_priv *priv, int ieee_mode, u8 rate)
5197 {
5198 rate &= ~IEEE80211_BASIC_RATE_MASK;
5199 if (ieee_mode == IEEE_A) {
5200 switch (rate) {
5201 case IEEE80211_OFDM_RATE_6MB:
5202 return priv->rates_mask & IEEE80211_OFDM_RATE_6MB_MASK ?
5203 1 : 0;
5204 case IEEE80211_OFDM_RATE_9MB:
5205 return priv->rates_mask & IEEE80211_OFDM_RATE_9MB_MASK ?
5206 1 : 0;
5207 case IEEE80211_OFDM_RATE_12MB:
5208 return priv->
5209 rates_mask & IEEE80211_OFDM_RATE_12MB_MASK ? 1 : 0;
5210 case IEEE80211_OFDM_RATE_18MB:
5211 return priv->
5212 rates_mask & IEEE80211_OFDM_RATE_18MB_MASK ? 1 : 0;
5213 case IEEE80211_OFDM_RATE_24MB:
5214 return priv->
5215 rates_mask & IEEE80211_OFDM_RATE_24MB_MASK ? 1 : 0;
5216 case IEEE80211_OFDM_RATE_36MB:
5217 return priv->
5218 rates_mask & IEEE80211_OFDM_RATE_36MB_MASK ? 1 : 0;
5219 case IEEE80211_OFDM_RATE_48MB:
5220 return priv->
5221 rates_mask & IEEE80211_OFDM_RATE_48MB_MASK ? 1 : 0;
5222 case IEEE80211_OFDM_RATE_54MB:
5223 return priv->
5224 rates_mask & IEEE80211_OFDM_RATE_54MB_MASK ? 1 : 0;
5225 default:
5226 return 0;
5227 }
5228 }
5229
5230 /* B and G mixed */
5231 switch (rate) {
5232 case IEEE80211_CCK_RATE_1MB:
5233 return priv->rates_mask & IEEE80211_CCK_RATE_1MB_MASK ? 1 : 0;
5234 case IEEE80211_CCK_RATE_2MB:
5235 return priv->rates_mask & IEEE80211_CCK_RATE_2MB_MASK ? 1 : 0;
5236 case IEEE80211_CCK_RATE_5MB:
5237 return priv->rates_mask & IEEE80211_CCK_RATE_5MB_MASK ? 1 : 0;
5238 case IEEE80211_CCK_RATE_11MB:
5239 return priv->rates_mask & IEEE80211_CCK_RATE_11MB_MASK ? 1 : 0;
5240 }
5241
5242 /* If we are limited to B modulations, bail at this point */
5243 if (ieee_mode == IEEE_B)
5244 return 0;
5245
5246 /* G */
5247 switch (rate) {
5248 case IEEE80211_OFDM_RATE_6MB:
5249 return priv->rates_mask & IEEE80211_OFDM_RATE_6MB_MASK ? 1 : 0;
5250 case IEEE80211_OFDM_RATE_9MB:
5251 return priv->rates_mask & IEEE80211_OFDM_RATE_9MB_MASK ? 1 : 0;
5252 case IEEE80211_OFDM_RATE_12MB:
5253 return priv->rates_mask & IEEE80211_OFDM_RATE_12MB_MASK ? 1 : 0;
5254 case IEEE80211_OFDM_RATE_18MB:
5255 return priv->rates_mask & IEEE80211_OFDM_RATE_18MB_MASK ? 1 : 0;
5256 case IEEE80211_OFDM_RATE_24MB:
5257 return priv->rates_mask & IEEE80211_OFDM_RATE_24MB_MASK ? 1 : 0;
5258 case IEEE80211_OFDM_RATE_36MB:
5259 return priv->rates_mask & IEEE80211_OFDM_RATE_36MB_MASK ? 1 : 0;
5260 case IEEE80211_OFDM_RATE_48MB:
5261 return priv->rates_mask & IEEE80211_OFDM_RATE_48MB_MASK ? 1 : 0;
5262 case IEEE80211_OFDM_RATE_54MB:
5263 return priv->rates_mask & IEEE80211_OFDM_RATE_54MB_MASK ? 1 : 0;
5264 }
5265
5266 return 0;
5267 }
5268
5269 static int ipw_compatible_rates(struct ipw_priv *priv,
5270 const struct ieee80211_network *network,
5271 struct ipw_supported_rates *rates)
5272 {
5273 int num_rates, i;
5274
5275 memset(rates, 0, sizeof(*rates));
5276 num_rates = min(network->rates_len, (u8) IPW_MAX_RATES);
5277 rates->num_rates = 0;
5278 for (i = 0; i < num_rates; i++) {
5279 if (!ipw_is_rate_in_mask(priv, network->mode,
5280 network->rates[i])) {
5281
5282 if (network->rates[i] & IEEE80211_BASIC_RATE_MASK) {
5283 IPW_DEBUG_SCAN("Adding masked mandatory "
5284 "rate %02X\n",
5285 network->rates[i]);
5286 rates->supported_rates[rates->num_rates++] =
5287 network->rates[i];
5288 continue;
5289 }
5290
5291 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5292 network->rates[i], priv->rates_mask);
5293 continue;
5294 }
5295
5296 rates->supported_rates[rates->num_rates++] = network->rates[i];
5297 }
5298
5299 num_rates = min(network->rates_ex_len,
5300 (u8) (IPW_MAX_RATES - num_rates));
5301 for (i = 0; i < num_rates; i++) {
5302 if (!ipw_is_rate_in_mask(priv, network->mode,
5303 network->rates_ex[i])) {
5304 if (network->rates_ex[i] & IEEE80211_BASIC_RATE_MASK) {
5305 IPW_DEBUG_SCAN("Adding masked mandatory "
5306 "rate %02X\n",
5307 network->rates_ex[i]);
5308 rates->supported_rates[rates->num_rates++] =
5309 network->rates[i];
5310 continue;
5311 }
5312
5313 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5314 network->rates_ex[i], priv->rates_mask);
5315 continue;
5316 }
5317
5318 rates->supported_rates[rates->num_rates++] =
5319 network->rates_ex[i];
5320 }
5321
5322 return 1;
5323 }
5324
5325 static void ipw_copy_rates(struct ipw_supported_rates *dest,
5326 const struct ipw_supported_rates *src)
5327 {
5328 u8 i;
5329 for (i = 0; i < src->num_rates; i++)
5330 dest->supported_rates[i] = src->supported_rates[i];
5331 dest->num_rates = src->num_rates;
5332 }
5333
/* TODO: Look at sniffed packets in the air to determine if the basic rate
 * mask should ever be used -- right now all callers to add the scan rates are
 * set with the modulation = CCK, so BASIC_RATE_MASK is never set... */
/* Append to @rates the CCK rates (1/2/5.5/11 Mbps) enabled in @rate_mask.
 * 1 and 2 Mbps are always flagged as basic; 5.5 and 11 Mbps are flagged
 * basic only under OFDM modulation (per basic_mask below). */
static void ipw_add_cck_scan_rates(struct ipw_supported_rates *rates,
				   u8 modulation, u32 rate_mask)
{
	/* Basic-rate flag applied to 5.5/11 Mbps when using OFDM. */
	u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ?
	    IEEE80211_BASIC_RATE_MASK : 0;

	if (rate_mask & IEEE80211_CCK_RATE_1MB_MASK)
		rates->supported_rates[rates->num_rates++] =
		    IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB;

	if (rate_mask & IEEE80211_CCK_RATE_2MB_MASK)
		rates->supported_rates[rates->num_rates++] =
		    IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB;

	if (rate_mask & IEEE80211_CCK_RATE_5MB_MASK)
		rates->supported_rates[rates->num_rates++] = basic_mask |
		    IEEE80211_CCK_RATE_5MB;

	if (rate_mask & IEEE80211_CCK_RATE_11MB_MASK)
		rates->supported_rates[rates->num_rates++] = basic_mask |
		    IEEE80211_CCK_RATE_11MB;
}
5359
5360 static void ipw_add_ofdm_scan_rates(struct ipw_supported_rates *rates,
5361 u8 modulation, u32 rate_mask)
5362 {
5363 u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ?
5364 IEEE80211_BASIC_RATE_MASK : 0;
5365
5366 if (rate_mask & IEEE80211_OFDM_RATE_6MB_MASK)
5367 rates->supported_rates[rates->num_rates++] = basic_mask |
5368 IEEE80211_OFDM_RATE_6MB;
5369
5370 if (rate_mask & IEEE80211_OFDM_RATE_9MB_MASK)
5371 rates->supported_rates[rates->num_rates++] =
5372 IEEE80211_OFDM_RATE_9MB;
5373
5374 if (rate_mask & IEEE80211_OFDM_RATE_12MB_MASK)
5375 rates->supported_rates[rates->num_rates++] = basic_mask |
5376 IEEE80211_OFDM_RATE_12MB;
5377
5378 if (rate_mask & IEEE80211_OFDM_RATE_18MB_MASK)
5379 rates->supported_rates[rates->num_rates++] =
5380 IEEE80211_OFDM_RATE_18MB;
5381
5382 if (rate_mask & IEEE80211_OFDM_RATE_24MB_MASK)
5383 rates->supported_rates[rates->num_rates++] = basic_mask |
5384 IEEE80211_OFDM_RATE_24MB;
5385
5386 if (rate_mask & IEEE80211_OFDM_RATE_36MB_MASK)
5387 rates->supported_rates[rates->num_rates++] =
5388 IEEE80211_OFDM_RATE_36MB;
5389
5390 if (rate_mask & IEEE80211_OFDM_RATE_48MB_MASK)
5391 rates->supported_rates[rates->num_rates++] =
5392 IEEE80211_OFDM_RATE_48MB;
5393
5394 if (rate_mask & IEEE80211_OFDM_RATE_54MB_MASK)
5395 rates->supported_rates[rates->num_rates++] =
5396 IEEE80211_OFDM_RATE_54MB;
5397 }
5398
/* Result of a scan-list search: the best candidate network found so far
 * and the subset of its rates compatible with our configuration. */
struct ipw_network_match {
	struct ieee80211_network *network;	/* best candidate so far */
	struct ipw_supported_rates rates;	/* compatible rates for it */
};
5403
/*
 * Decide whether @network is a viable ad-hoc (IBSS) merge candidate and,
 * if so, record it in @match.
 *
 * Runs a chain of exclusion filters (capability, hidden/mismatched ESSID,
 * TSF age, scan age, channel, privacy, same-BSSID, mode, rates); the
 * first failing filter logs the reason and returns 0.  On success the
 * compatible rates and network pointer are copied into @match and 1 is
 * returned.
 *
 * @roaming: non-zero when re-evaluating networks for a roam; in that case
 * only networks sharing the current ESSID are considered.
 */
static int ipw_find_adhoc_network(struct ipw_priv *priv,
				  struct ipw_network_match *match,
				  struct ieee80211_network *network,
				  int roaming)
{
	struct ipw_supported_rates rates;
	DECLARE_MAC_BUF(mac);
	DECLARE_MAC_BUF(mac2);

	/* Verify that this network's capability is compatible with the
	 * current mode (AdHoc or Infrastructure) */
	if ((priv->ieee->iw_mode == IW_MODE_ADHOC &&
	     !(network->capability & WLAN_CAPABILITY_IBSS))) {
		IPW_DEBUG_MERGE("Network '%s (%s)' excluded due to "
				"capability mismatch.\n",
				escape_essid(network->ssid, network->ssid_len),
				print_mac(mac, network->bssid));
		return 0;
	}

	/* If we do not have an ESSID for this AP, we can not associate with
	 * it */
	if (network->flags & NETWORK_EMPTY_ESSID) {
		IPW_DEBUG_MERGE("Network '%s (%s)' excluded "
				"because of hidden ESSID.\n",
				escape_essid(network->ssid, network->ssid_len),
				print_mac(mac, network->bssid));
		return 0;
	}

	if (unlikely(roaming)) {
		/* If we are roaming, then ensure check if this is a valid
		 * network to try and roam to */
		if ((network->ssid_len != match->network->ssid_len) ||
		    memcmp(network->ssid, match->network->ssid,
			   network->ssid_len)) {
			IPW_DEBUG_MERGE("Network '%s (%s)' excluded "
					"because of non-network ESSID.\n",
					escape_essid(network->ssid,
						     network->ssid_len),
					print_mac(mac, network->bssid));
			return 0;
		}
	} else {
		/* If an ESSID has been configured then compare the broadcast
		 * ESSID to ours */
		if ((priv->config & CFG_STATIC_ESSID) &&
		    ((network->ssid_len != priv->essid_len) ||
		     memcmp(network->ssid, priv->essid,
			    min(network->ssid_len, priv->essid_len)))) {
			char escaped[IW_ESSID_MAX_SIZE * 2 + 1];

			strncpy(escaped,
				escape_essid(network->ssid, network->ssid_len),
				sizeof(escaped));
			IPW_DEBUG_MERGE("Network '%s (%s)' excluded "
					"because of ESSID mismatch: '%s'.\n",
					escaped, print_mac(mac, network->bssid),
					escape_essid(priv->essid,
						     priv->essid_len));
			return 0;
		}
	}

	/* If the old network rate is better than this one, don't bother
	 * testing everything else. */

	/* IBSS merge rule: prefer the network with the older (larger-uptime)
	 * TSF timestamp; a newer timestamp means we should not merge. */
	if (network->time_stamp[0] < match->network->time_stamp[0]) {
		IPW_DEBUG_MERGE("Network '%s excluded because newer than "
				"current network.\n",
				escape_essid(match->network->ssid,
					     match->network->ssid_len));
		return 0;
	} else if (network->time_stamp[1] < match->network->time_stamp[1]) {
		IPW_DEBUG_MERGE("Network '%s excluded because newer than "
				"current network.\n",
				escape_essid(match->network->ssid,
					     match->network->ssid_len));
		return 0;
	}

	/* Now go through and see if the requested network is valid... */
	if (priv->ieee->scan_age != 0 &&
	    time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
		IPW_DEBUG_MERGE("Network '%s (%s)' excluded "
				"because of age: %ums.\n",
				escape_essid(network->ssid, network->ssid_len),
				print_mac(mac, network->bssid),
				jiffies_to_msecs(jiffies -
						 network->last_scanned));
		return 0;
	}

	if ((priv->config & CFG_STATIC_CHANNEL) &&
	    (network->channel != priv->channel)) {
		IPW_DEBUG_MERGE("Network '%s (%s)' excluded "
				"because of channel mismatch: %d != %d.\n",
				escape_essid(network->ssid, network->ssid_len),
				print_mac(mac, network->bssid),
				network->channel, priv->channel);
		return 0;
	}

	/* Verify privacy compatability */
	if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
	    ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
		IPW_DEBUG_MERGE("Network '%s (%s)' excluded "
				"because of privacy mismatch: %s != %s.\n",
				escape_essid(network->ssid, network->ssid_len),
				print_mac(mac, network->bssid),
				priv->
				capability & CAP_PRIVACY_ON ? "on" : "off",
				network->
				capability & WLAN_CAPABILITY_PRIVACY ? "on" :
				"off");
		return 0;
	}

	/* Already part of this IBSS (same BSSID) -- nothing to merge to. */
	if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
		IPW_DEBUG_MERGE("Network '%s (%s)' excluded "
				"because of the same BSSID match: %s"
				".\n", escape_essid(network->ssid,
						    network->ssid_len),
				print_mac(mac, network->bssid),
				print_mac(mac2, priv->bssid));
		return 0;
	}

	/* Filter out any incompatible freq / mode combinations */
	if (!ieee80211_is_valid_mode(priv->ieee, network->mode)) {
		IPW_DEBUG_MERGE("Network '%s (%s)' excluded "
				"because of invalid frequency/mode "
				"combination.\n",
				escape_essid(network->ssid, network->ssid_len),
				print_mac(mac, network->bssid));
		return 0;
	}

	/* Ensure that the rates supported by the driver are compatible with
	 * this AP, including verification of basic rates (mandatory) */
	if (!ipw_compatible_rates(priv, network, &rates)) {
		IPW_DEBUG_MERGE("Network '%s (%s)' excluded "
				"because configured rate mask excludes "
				"AP mandatory rate.\n",
				escape_essid(network->ssid, network->ssid_len),
				print_mac(mac, network->bssid));
		return 0;
	}

	if (rates.num_rates == 0) {
		IPW_DEBUG_MERGE("Network '%s (%s)' excluded "
				"because of no compatible rates.\n",
				escape_essid(network->ssid, network->ssid_len),
				print_mac(mac, network->bssid));
		return 0;
	}

	/* TODO: Perform any further minimal comparititive tests.  We do not
	 * want to put too much policy logic here; intelligent scan selection
	 * should occur within a generic IEEE 802.11 user space tool.  */

	/* Set up 'new' AP to this network */
	ipw_copy_rates(&match->rates, &rates);
	match->network = network;
	IPW_DEBUG_MERGE("Network '%s (%s)' is a viable match.\n",
			escape_essid(network->ssid, network->ssid_len),
			print_mac(mac, network->bssid));

	return 1;
}
5574
5575 static void ipw_merge_adhoc_network(struct work_struct *work)
5576 {
5577 struct ipw_priv *priv =
5578 container_of(work, struct ipw_priv, merge_networks);
5579 struct ieee80211_network *network = NULL;
5580 struct ipw_network_match match = {
5581 .network = priv->assoc_network
5582 };
5583
5584 if ((priv->status & STATUS_ASSOCIATED) &&
5585 (priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5586 /* First pass through ROAM process -- look for a better
5587 * network */
5588 unsigned long flags;
5589
5590 spin_lock_irqsave(&priv->ieee->lock, flags);
5591 list_for_each_entry(network, &priv->ieee->network_list, list) {
5592 if (network != priv->assoc_network)
5593 ipw_find_adhoc_network(priv, &match, network,
5594 1);
5595 }
5596 spin_unlock_irqrestore(&priv->ieee->lock, flags);
5597
5598 if (match.network == priv->assoc_network) {
5599 IPW_DEBUG_MERGE("No better ADHOC in this network to "
5600 "merge to.\n");
5601 return;
5602 }
5603
5604 mutex_lock(&priv->mutex);
5605 if ((priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5606 IPW_DEBUG_MERGE("remove network %s\n",
5607 escape_essid(priv->essid,
5608 priv->essid_len));
5609 ipw_remove_current_network(priv);
5610 }
5611
5612 ipw_disassociate(priv);
5613 priv->assoc_network = match.network;
5614 mutex_unlock(&priv->mutex);
5615 return;
5616 }
5617 }
5618
/*
 * Decide whether @network is a better association candidate than the one
 * currently held in @match and, if so, record it in @match.
 *
 * Like ipw_find_adhoc_network() but for normal association: runs a chain
 * of exclusion filters (capability vs. iw_mode, hidden/mismatched ESSID,
 * weaker RSSI than the current best, recent failed attempt, scan age,
 * channel, privacy, static BSSID, mode, GEO channel validity, rates).
 * The first failing filter logs the reason and returns 0; on success the
 * compatible rates and network pointer are stored in @match and 1 is
 * returned.
 *
 * @roaming: non-zero when looking for a roam target; only networks with
 * the current ESSID are then considered.
 */
static int ipw_best_network(struct ipw_priv *priv,
			    struct ipw_network_match *match,
			    struct ieee80211_network *network, int roaming)
{
	struct ipw_supported_rates rates;
	DECLARE_MAC_BUF(mac);

	/* Verify that this network's capability is compatible with the
	 * current mode (AdHoc or Infrastructure) */
	if ((priv->ieee->iw_mode == IW_MODE_INFRA &&
	     !(network->capability & WLAN_CAPABILITY_ESS)) ||
	    (priv->ieee->iw_mode == IW_MODE_ADHOC &&
	     !(network->capability & WLAN_CAPABILITY_IBSS))) {
		IPW_DEBUG_ASSOC("Network '%s (%s)' excluded due to "
				"capability mismatch.\n",
				escape_essid(network->ssid, network->ssid_len),
				print_mac(mac, network->bssid));
		return 0;
	}

	/* If we do not have an ESSID for this AP, we can not associate with
	 * it */
	if (network->flags & NETWORK_EMPTY_ESSID) {
		IPW_DEBUG_ASSOC("Network '%s (%s)' excluded "
				"because of hidden ESSID.\n",
				escape_essid(network->ssid, network->ssid_len),
				print_mac(mac, network->bssid));
		return 0;
	}

	if (unlikely(roaming)) {
		/* If we are roaming, then ensure check if this is a valid
		 * network to try and roam to */
		if ((network->ssid_len != match->network->ssid_len) ||
		    memcmp(network->ssid, match->network->ssid,
			   network->ssid_len)) {
			IPW_DEBUG_ASSOC("Network '%s (%s)' excluded "
					"because of non-network ESSID.\n",
					escape_essid(network->ssid,
						     network->ssid_len),
					print_mac(mac, network->bssid));
			return 0;
		}
	} else {
		/* If an ESSID has been configured then compare the broadcast
		 * ESSID to ours */
		if ((priv->config & CFG_STATIC_ESSID) &&
		    ((network->ssid_len != priv->essid_len) ||
		     memcmp(network->ssid, priv->essid,
			    min(network->ssid_len, priv->essid_len)))) {
			char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
			strncpy(escaped,
				escape_essid(network->ssid, network->ssid_len),
				sizeof(escaped));
			IPW_DEBUG_ASSOC("Network '%s (%s)' excluded "
					"because of ESSID mismatch: '%s'.\n",
					escaped, print_mac(mac, network->bssid),
					escape_essid(priv->essid,
						     priv->essid_len));
			return 0;
		}
	}

	/* If the old network rate is better than this one, don't bother
	 * testing everything else. */
	if (match->network && match->network->stats.rssi > network->stats.rssi) {
		char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
		strncpy(escaped,
			escape_essid(network->ssid, network->ssid_len),
			sizeof(escaped));
		IPW_DEBUG_ASSOC("Network '%s (%s)' excluded because "
				"'%s (%s)' has a stronger signal.\n",
				escaped, print_mac(mac, network->bssid),
				escape_essid(match->network->ssid,
					     match->network->ssid_len),
				print_mac(mac, match->network->bssid));
		return 0;
	}

	/* If this network has already had an association attempt within the
	 * last 3 seconds, do not try and associate again... */
	if (network->last_associate &&
	    time_after(network->last_associate + (HZ * 3UL), jiffies)) {
		IPW_DEBUG_ASSOC("Network '%s (%s)' excluded "
				"because of storming (%ums since last "
				"assoc attempt).\n",
				escape_essid(network->ssid, network->ssid_len),
				print_mac(mac, network->bssid),
				jiffies_to_msecs(jiffies -
						 network->last_associate));
		return 0;
	}

	/* Now go through and see if the requested network is valid... */
	if (priv->ieee->scan_age != 0 &&
	    time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
		IPW_DEBUG_ASSOC("Network '%s (%s)' excluded "
				"because of age: %ums.\n",
				escape_essid(network->ssid, network->ssid_len),
				print_mac(mac, network->bssid),
				jiffies_to_msecs(jiffies -
						 network->last_scanned));
		return 0;
	}

	if ((priv->config & CFG_STATIC_CHANNEL) &&
	    (network->channel != priv->channel)) {
		IPW_DEBUG_ASSOC("Network '%s (%s)' excluded "
				"because of channel mismatch: %d != %d.\n",
				escape_essid(network->ssid, network->ssid_len),
				print_mac(mac, network->bssid),
				network->channel, priv->channel);
		return 0;
	}

	/* Verify privacy compatability */
	if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
	    ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
		IPW_DEBUG_ASSOC("Network '%s (%s)' excluded "
				"because of privacy mismatch: %s != %s.\n",
				escape_essid(network->ssid, network->ssid_len),
				print_mac(mac, network->bssid),
				priv->capability & CAP_PRIVACY_ON ? "on" :
				"off",
				network->capability &
				WLAN_CAPABILITY_PRIVACY ? "on" : "off");
		return 0;
	}

	if ((priv->config & CFG_STATIC_BSSID) &&
	    memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
		IPW_DEBUG_ASSOC("Network '%s (%s)' excluded "
				"because of BSSID mismatch: %s.\n",
				escape_essid(network->ssid, network->ssid_len),
				print_mac(mac, network->bssid), print_mac(mac, priv->bssid));
		return 0;
	}

	/* Filter out any incompatible freq / mode combinations */
	if (!ieee80211_is_valid_mode(priv->ieee, network->mode)) {
		IPW_DEBUG_ASSOC("Network '%s (%s)' excluded "
				"because of invalid frequency/mode "
				"combination.\n",
				escape_essid(network->ssid, network->ssid_len),
				print_mac(mac, network->bssid));
		return 0;
	}

	/* Filter out invalid channel in current GEO */
	if (!ieee80211_is_valid_channel(priv->ieee, network->channel)) {
		IPW_DEBUG_ASSOC("Network '%s (%s)' excluded "
				"because of invalid channel in current GEO\n",
				escape_essid(network->ssid, network->ssid_len),
				print_mac(mac, network->bssid));
		return 0;
	}

	/* Ensure that the rates supported by the driver are compatible with
	 * this AP, including verification of basic rates (mandatory) */
	if (!ipw_compatible_rates(priv, network, &rates)) {
		IPW_DEBUG_ASSOC("Network '%s (%s)' excluded "
				"because configured rate mask excludes "
				"AP mandatory rate.\n",
				escape_essid(network->ssid, network->ssid_len),
				print_mac(mac, network->bssid));
		return 0;
	}

	if (rates.num_rates == 0) {
		IPW_DEBUG_ASSOC("Network '%s (%s)' excluded "
				"because of no compatible rates.\n",
				escape_essid(network->ssid, network->ssid_len),
				print_mac(mac, network->bssid));
		return 0;
	}

	/* TODO: Perform any further minimal comparititive tests.  We do not
	 * want to put too much policy logic here; intelligent scan selection
	 * should occur within a generic IEEE 802.11 user space tool.  */

	/* Set up 'new' AP to this network */
	ipw_copy_rates(&match->rates, &rates);
	match->network = network;

	IPW_DEBUG_ASSOC("Network '%s (%s)' is a viable match.\n",
			escape_essid(network->ssid, network->ssid_len),
			print_mac(mac, network->bssid));

	return 1;
}
5809
/*
 * Populate @network with the parameters for a brand-new ad-hoc (IBSS)
 * network we are about to create: band/mode, channel, generated BSSID,
 * our ESSID, capability bits, and our supported rate set, with the
 * remaining fields reset to sane defaults.
 */
static void ipw_adhoc_create(struct ipw_priv *priv,
			     struct ieee80211_network *network)
{
	const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
	int i;

	/*
	 * For the purposes of scanning, we can set our wireless mode
	 * to trigger scans across combinations of bands, but when it
	 * comes to creating a new ad-hoc network, we have tell the FW
	 * exactly which band to use.
	 *
	 * We also have the possibility of an invalid channel for the
	 * chossen band.  Attempting to create a new ad-hoc network
	 * with an invalid channel for wireless mode will trigger a
	 * FW fatal error.
	 *
	 */
	switch (ieee80211_is_valid_channel(priv->ieee, priv->channel)) {
	case IEEE80211_52GHZ_BAND:
		network->mode = IEEE_A;
		i = ieee80211_channel_to_index(priv->ieee, priv->channel);
		BUG_ON(i == -1);
		/* Passive-only channels cannot host a new IBSS; fall back
		 * to the first channel of the band. */
		if (geo->a[i].flags & IEEE80211_CH_PASSIVE_ONLY) {
			IPW_WARNING("Overriding invalid channel\n");
			priv->channel = geo->a[0].channel;
		}
		break;

	case IEEE80211_24GHZ_BAND:
		if (priv->ieee->mode & IEEE_G)
			network->mode = IEEE_G;
		else
			network->mode = IEEE_B;
		i = ieee80211_channel_to_index(priv->ieee, priv->channel);
		BUG_ON(i == -1);
		if (geo->bg[i].flags & IEEE80211_CH_PASSIVE_ONLY) {
			IPW_WARNING("Overriding invalid channel\n");
			priv->channel = geo->bg[0].channel;
		}
		break;

	default:
		/* Channel not valid in any band: pick the best supported
		 * mode and that band's first channel. */
		IPW_WARNING("Overriding invalid channel\n");
		if (priv->ieee->mode & IEEE_A) {
			network->mode = IEEE_A;
			priv->channel = geo->a[0].channel;
		} else if (priv->ieee->mode & IEEE_G) {
			network->mode = IEEE_G;
			priv->channel = geo->bg[0].channel;
		} else {
			network->mode = IEEE_B;
			priv->channel = geo->bg[0].channel;
		}
		break;
	}

	network->channel = priv->channel;
	priv->config |= CFG_ADHOC_PERSIST;
	ipw_create_bssid(priv, network->bssid);
	network->ssid_len = priv->essid_len;
	memcpy(network->ssid, priv->essid, priv->essid_len);
	memset(&network->stats, 0, sizeof(network->stats));
	network->capability = WLAN_CAPABILITY_IBSS;
	if (!(priv->config & CFG_PREAMBLE_LONG))
		network->capability |= WLAN_CAPABILITY_SHORT_PREAMBLE;
	if (priv->capability & CAP_PRIVACY_ON)
		network->capability |= WLAN_CAPABILITY_PRIVACY;
	/* Split our rate list into the basic set and the extended set. */
	network->rates_len = min(priv->rates.num_rates, MAX_RATES_LENGTH);
	memcpy(network->rates, priv->rates.supported_rates, network->rates_len);
	network->rates_ex_len = priv->rates.num_rates - network->rates_len;
	memcpy(network->rates_ex,
	       &priv->rates.supported_rates[network->rates_len],
	       network->rates_ex_len);
	network->last_scanned = 0;
	network->flags = 0;
	network->last_associate = 0;
	network->time_stamp[0] = 0;
	network->time_stamp[1] = 0;
	network->beacon_interval = 100;	/* Default */
	network->listen_interval = 10;	/* Default */
	network->atim_window = 0;	/* Default */
	network->wpa_ie_len = 0;
	network->rsn_ie_len = 0;
}
5895
/* Send the TGi (TKIP/CCMP) transmit key at @index to the firmware with
 * security @type.  No-op if no key material is present for that index. */
static void ipw_send_tgi_tx_key(struct ipw_priv *priv, int type, int index)
{
	struct ipw_tgi_tx_key key;

	/* sec.flags bit (1 << index) marks a valid key at that slot. */
	if (!(priv->ieee->sec.flags & (1 << index)))
		return;

	key.key_id = index;
	memcpy(key.key, priv->ieee->sec.keys[index], SCM_TEMPORAL_KEY_LENGTH);
	key.security_type = type;
	key.station_index = 0;	/* always 0 for BSS */
	key.flags = 0;
	/* 0 for new key; previous value of counter (after fatal error) */
	key.tx_counter[0] = cpu_to_le32(0);
	key.tx_counter[1] = cpu_to_le32(0);

	ipw_send_cmd_pdu(priv, IPW_CMD_TGI_TX_KEY, sizeof(key), &key);
}
5914
/* Push all four WEP key slots that hold valid key material to the
 * firmware, tagging each with security @type; empty slots are skipped
 * (no command is sent for them). */
static void ipw_send_wep_keys(struct ipw_priv *priv, int type)
{
	struct ipw_wep_key key;
	int i;

	key.cmd_id = DINO_CMD_WEP_KEY;
	key.seq_num = 0;

	/* Note: AES keys cannot be set for multiple times.
	 * Only set it at the first time. */
	for (i = 0; i < 4; i++) {
		key.key_index = i | type;
		if (!(priv->ieee->sec.flags & (1 << i))) {
			key.key_size = 0;
			continue;
		}

		key.key_size = priv->ieee->sec.key_sizes[i];
		memcpy(key.key, priv->ieee->sec.keys[i], key.key_size);

		ipw_send_cmd_pdu(priv, IPW_CMD_WEP_KEY, sizeof(key), &key);
	}
}
5938
5939 static void ipw_set_hw_decrypt_unicast(struct ipw_priv *priv, int level)
5940 {
5941 if (priv->ieee->host_encrypt)
5942 return;
5943
5944 switch (level) {
5945 case SEC_LEVEL_3:
5946 priv->sys_config.disable_unicast_decryption = 0;
5947 priv->ieee->host_decrypt = 0;
5948 break;
5949 case SEC_LEVEL_2:
5950 priv->sys_config.disable_unicast_decryption = 1;
5951 priv->ieee->host_decrypt = 1;
5952 break;
5953 case SEC_LEVEL_1:
5954 priv->sys_config.disable_unicast_decryption = 0;
5955 priv->ieee->host_decrypt = 0;
5956 break;
5957 case SEC_LEVEL_0:
5958 priv->sys_config.disable_unicast_decryption = 1;
5959 break;
5960 default:
5961 break;
5962 }
5963 }
5964
5965 static void ipw_set_hw_decrypt_multicast(struct ipw_priv *priv, int level)
5966 {
5967 if (priv->ieee->host_encrypt)
5968 return;
5969
5970 switch (level) {
5971 case SEC_LEVEL_3:
5972 priv->sys_config.disable_multicast_decryption = 0;
5973 break;
5974 case SEC_LEVEL_2:
5975 priv->sys_config.disable_multicast_decryption = 1;
5976 break;
5977 case SEC_LEVEL_1:
5978 priv->sys_config.disable_multicast_decryption = 0;
5979 break;
5980 case SEC_LEVEL_0:
5981 priv->sys_config.disable_multicast_decryption = 1;
5982 break;
5983 default:
5984 break;
5985 }
5986 }
5987
/* Program the firmware with the keys appropriate for the currently
 * configured security level: CCMP (level 3), TKIP (level 2) or WEP
 * (level 1); level 0 means no hardware crypto keys at all. */
static void ipw_set_hwcrypto_keys(struct ipw_priv *priv)
{
	switch (priv->ieee->sec.level) {
	case SEC_LEVEL_3:
		if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
			ipw_send_tgi_tx_key(priv,
					    DCT_FLAG_EXT_SECURITY_CCM,
					    priv->ieee->sec.active_key);

		/* Group keys are only sent when hardware handles
		 * multicast decryption. */
		if (!priv->ieee->host_mc_decrypt)
			ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_CCM);
		break;
	case SEC_LEVEL_2:
		if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
			ipw_send_tgi_tx_key(priv,
					    DCT_FLAG_EXT_SECURITY_TKIP,
					    priv->ieee->sec.active_key);
		break;
	case SEC_LEVEL_1:
		ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
		ipw_set_hw_decrypt_unicast(priv, priv->ieee->sec.level);
		ipw_set_hw_decrypt_multicast(priv, priv->ieee->sec.level);
		break;
	case SEC_LEVEL_0:
	default:
		break;
	}
}
6016
/* Periodic ad-hoc liveness check: if too many consecutive beacons have
 * been missed (and the IBSS is not marked persistent), tear down the
 * association; otherwise re-arm the check for one beacon interval. */
static void ipw_adhoc_check(void *data)
{
	struct ipw_priv *priv = data;

	if (priv->missed_adhoc_beacons++ > priv->disassociate_threshold &&
	    !(priv->config & CFG_ADHOC_PERSIST)) {
		IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
			  IPW_DL_STATE | IPW_DL_ASSOC,
			  "Missed beacon: %d - disassociate\n",
			  priv->missed_adhoc_beacons);
		ipw_remove_current_network(priv);
		ipw_disassociate(priv);
		return;
	}

	/* Re-check after one beacon interval (in jiffies per the delayed
	 * work API usage here). */
	queue_delayed_work(priv->workqueue, &priv->adhoc_check,
			   le16_to_cpu(priv->assoc_request.beacon_interval));
}
6035
6036 static void ipw_bg_adhoc_check(struct work_struct *work)
6037 {
6038 struct ipw_priv *priv =
6039 container_of(work, struct ipw_priv, adhoc_check.work);
6040 mutex_lock(&priv->mutex);
6041 ipw_adhoc_check(priv);
6042 mutex_unlock(&priv->mutex);
6043 }
6044
/* Dump the current static configuration (channel/ESSID/BSSID locks,
 * privacy, rate mask) to the debug log after a scan found no match. */
static void ipw_debug_config(struct ipw_priv *priv)
{
	DECLARE_MAC_BUF(mac);
	IPW_DEBUG_INFO("Scan completed, no valid APs matched "
		       "[CFG 0x%08X]\n", priv->config);
	if (priv->config & CFG_STATIC_CHANNEL)
		IPW_DEBUG_INFO("Channel locked to %d\n", priv->channel);
	else
		IPW_DEBUG_INFO("Channel unlocked.\n");
	if (priv->config & CFG_STATIC_ESSID)
		IPW_DEBUG_INFO("ESSID locked to '%s'\n",
			       escape_essid(priv->essid, priv->essid_len));
	else
		IPW_DEBUG_INFO("ESSID unlocked.\n");
	if (priv->config & CFG_STATIC_BSSID)
		IPW_DEBUG_INFO("BSSID locked to %s\n",
			       print_mac(mac, priv->bssid));
	else
		IPW_DEBUG_INFO("BSSID unlocked.\n");
	if (priv->capability & CAP_PRIVACY_ON)
		IPW_DEBUG_INFO("PRIVACY on\n");
	else
		IPW_DEBUG_INFO("PRIVACY off\n");
	IPW_DEBUG_INFO("RATE MASK: 0x%08X\n", priv->rates_mask);
}
6070
/* Translate the user's fixed-rate mask (priv->rates_mask) into the
 * band-relative bit layout the firmware expects and write it to the
 * fixed-rate override location in device memory.  An invalid mask for
 * the current band clears the override (tx_rates = 0). */
static void ipw_set_fixed_rate(struct ipw_priv *priv, int mode)
{
	/* TODO: Verify that this works... */
	struct ipw_fixed_rate fr = {
		.tx_rates = priv->rates_mask
	};
	u32 reg;
	u16 mask = 0;

	/* Identify 'current FW band' and match it with the fixed
	 * Tx rates */

	switch (priv->ieee->freq_band) {
	case IEEE80211_52GHZ_BAND:	/* A only */
		/* IEEE_A */
		if (priv->rates_mask & ~IEEE80211_OFDM_RATES_MASK) {
			/* Invalid fixed rate mask */
			IPW_DEBUG_WX
			    ("invalid fixed rate mask in ipw_set_fixed_rate\n");
			fr.tx_rates = 0;
			break;
		}

		/* 5GHz firmware expects OFDM bits shifted down to bit 0. */
		fr.tx_rates >>= IEEE80211_OFDM_SHIFT_MASK_A;
		break;

	default:		/* 2.4Ghz or Mixed */
		/* IEEE_B */
		if (mode == IEEE_B) {
			if (fr.tx_rates & ~IEEE80211_CCK_RATES_MASK) {
				/* Invalid fixed rate mask */
				IPW_DEBUG_WX
				    ("invalid fixed rate mask in ipw_set_fixed_rate\n");
				fr.tx_rates = 0;
			}
			break;
		}

		/* IEEE_G */
		if (fr.tx_rates & ~(IEEE80211_CCK_RATES_MASK |
				    IEEE80211_OFDM_RATES_MASK)) {
			/* Invalid fixed rate mask */
			IPW_DEBUG_WX
			    ("invalid fixed rate mask in ipw_set_fixed_rate\n");
			fr.tx_rates = 0;
			break;
		}

		/* In mixed G mode the firmware uses a layout where the
		 * 6/9/12 Mbps OFDM bits sit one position lower; move each
		 * of those bits down by one. */
		if (IEEE80211_OFDM_RATE_6MB_MASK & fr.tx_rates) {
			mask |= (IEEE80211_OFDM_RATE_6MB_MASK >> 1);
			fr.tx_rates &= ~IEEE80211_OFDM_RATE_6MB_MASK;
		}

		if (IEEE80211_OFDM_RATE_9MB_MASK & fr.tx_rates) {
			mask |= (IEEE80211_OFDM_RATE_9MB_MASK >> 1);
			fr.tx_rates &= ~IEEE80211_OFDM_RATE_9MB_MASK;
		}

		if (IEEE80211_OFDM_RATE_12MB_MASK & fr.tx_rates) {
			mask |= (IEEE80211_OFDM_RATE_12MB_MASK >> 1);
			fr.tx_rates &= ~IEEE80211_OFDM_RATE_12MB_MASK;
		}

		fr.tx_rates |= mask;
		break;
	}

	/* The override value is written at an address read from device
	 * memory.  NOTE(review): the *(u32 *)&fr cast type-puns the struct
	 * as a u32 (strict-aliasing hazard in principle; assumes the
	 * struct is exactly 32 bits -- confirm against ipw2200.h). */
	reg = ipw_read32(priv, IPW_MEM_FIXED_OVERRIDE);
	ipw_write_reg32(priv, reg, *(u32 *) & fr);
}
6141
6142 static void ipw_abort_scan(struct ipw_priv *priv)
6143 {
6144 int err;
6145
6146 if (priv->status & STATUS_SCAN_ABORTING) {
6147 IPW_DEBUG_HC("Ignoring concurrent scan abort request.\n");
6148 return;
6149 }
6150 priv->status |= STATUS_SCAN_ABORTING;
6151
6152 err = ipw_send_scan_abort(priv);
6153 if (err)
6154 IPW_DEBUG_HC("Request to abort scan failed.\n");
6155 }
6156
/*
 * Fill @scan's channel list for the enabled band(s), marking each entry
 * with @scan_type (or passive-full-dwell for passive-only channels).
 *
 * Channel-list encoding: each band's run is preceded by a header byte,
 * (band_mode << 6) | count, written into the slot reserved at the run's
 * start -- hence channel_index is incremented BEFORE each channel is
 * stored, leaving slot 'start' free for the header.  The channel we are
 * currently associated on is skipped.
 */
static void ipw_add_scan_channels(struct ipw_priv *priv,
				  struct ipw_scan_request_ext *scan,
				  int scan_type)
{
	int channel_index = 0;
	const struct ieee80211_geo *geo;
	int i;

	geo = ieee80211_get_geo(priv->ieee);

	if (priv->ieee->freq_band & IEEE80211_52GHZ_BAND) {
		int start = channel_index;
		for (i = 0; i < geo->a_channels; i++) {
			if ((priv->status & STATUS_ASSOCIATED) &&
			    geo->a[i].channel == priv->channel)
				continue;
			channel_index++;
			scan->channels_list[channel_index] = geo->a[i].channel;
			ipw_set_scan_type(scan, channel_index,
					  geo->a[i].
					  flags & IEEE80211_CH_PASSIVE_ONLY ?
					  IPW_SCAN_PASSIVE_FULL_DWELL_SCAN :
					  scan_type);
		}

		/* Write the band header only if any channel was added. */
		if (start != channel_index) {
			scan->channels_list[start] = (u8) (IPW_A_MODE << 6) |
			    (channel_index - start);
			channel_index++;
		}
	}

	if (priv->ieee->freq_band & IEEE80211_24GHZ_BAND) {
		int start = channel_index;
		if (priv->config & CFG_SPEED_SCAN) {
			int index;
			u8 channels[IEEE80211_24GHZ_CHANNELS] = {
				/* nop out the list */
				[0] = 0
			};

			u8 channel;
			while (channel_index < IPW_SCAN_CHANNELS) {
				channel =
				    priv->speed_scan[priv->speed_scan_pos];
				/* End of list: wrap to the beginning. */
				if (channel == 0) {
					priv->speed_scan_pos = 0;
					channel = priv->speed_scan[0];
				}
				/* NOTE(review): if speed_scan[0] is also 0,
				 * channel stays 0 and channels[channel - 1]
				 * below indexes out of bounds -- confirm
				 * callers guarantee a non-empty list. */
				if ((priv->status & STATUS_ASSOCIATED) &&
				    channel == priv->channel) {
					priv->speed_scan_pos++;
					continue;
				}

				/* If this channel has already been
				 * added in scan, break from loop
				 * and this will be the first channel
				 * in the next scan.
				 */
				if (channels[channel - 1] != 0)
					break;

				channels[channel - 1] = 1;
				priv->speed_scan_pos++;
				channel_index++;
				scan->channels_list[channel_index] = channel;
				index =
				    ieee80211_channel_to_index(priv->ieee, channel);
				ipw_set_scan_type(scan, channel_index,
						  geo->bg[index].
						  flags &
						  IEEE80211_CH_PASSIVE_ONLY ?
						  IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
						  : scan_type);
			}
		} else {
			for (i = 0; i < geo->bg_channels; i++) {
				if ((priv->status & STATUS_ASSOCIATED) &&
				    geo->bg[i].channel == priv->channel)
					continue;
				channel_index++;
				scan->channels_list[channel_index] =
				    geo->bg[i].channel;
				ipw_set_scan_type(scan, channel_index,
						  geo->bg[i].
						  flags &
						  IEEE80211_CH_PASSIVE_ONLY ?
						  IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
						  : scan_type);
			}
		}

		if (start != channel_index) {
			scan->channels_list[start] = (u8) (IPW_B_MODE << 6) |
			    (channel_index - start);
		}
	}
}
6256
/*
 * Build and issue a scan request of the given @type (IW_SCAN_TYPE_ACTIVE
 * or IW_SCAN_TYPE_PASSIVE) to the firmware.
 *
 * If a scan is already running, an abort is pending (unless forced), or
 * RF-kill is active, the request is deferred by setting
 * STATUS_SCAN_PENDING instead.  On success, STATUS_SCANNING is set and a
 * watchdog is armed to catch scans that never complete.
 *
 * Returns 0 on success or deferral, or the error from sending the SSID
 * or scan command.  Runs under priv->mutex.
 */
static int ipw_request_scan_helper(struct ipw_priv *priv, int type)
{
	struct ipw_scan_request_ext scan;
	int err = 0, scan_type;

	if (!(priv->status & STATUS_INIT) ||
	    (priv->status & STATUS_EXIT_PENDING))
		return 0;

	mutex_lock(&priv->mutex);

	if (priv->status & STATUS_SCANNING) {
		IPW_DEBUG_HC("Concurrent scan requested.  Ignoring.\n");
		priv->status |= STATUS_SCAN_PENDING;
		goto done;
	}

	if (!(priv->status & STATUS_SCAN_FORCED) &&
	    priv->status & STATUS_SCAN_ABORTING) {
		IPW_DEBUG_HC("Scan request while abort pending.  Queuing.\n");
		priv->status |= STATUS_SCAN_PENDING;
		goto done;
	}

	if (priv->status & STATUS_RF_KILL_MASK) {
		IPW_DEBUG_HC("Aborting scan due to RF Kill activation\n");
		priv->status |= STATUS_SCAN_PENDING;
		goto done;
	}

	memset(&scan, 0, sizeof(scan));
	scan.full_scan_index = cpu_to_le32(ieee80211_get_scans(priv->ieee));

	if (type == IW_SCAN_TYPE_PASSIVE) {
		IPW_DEBUG_WX("use passive scanning\n");
		scan_type = IPW_SCAN_PASSIVE_FULL_DWELL_SCAN;
		scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
		    cpu_to_le16(120);
		ipw_add_scan_channels(priv, &scan, scan_type);
		goto send_request;
	}

	/* Use active scan by default.  */
	/* Dwell times are in the units the firmware expects; shorter for
	 * speed-scan so channel hopping stays fast. */
	if (priv->config & CFG_SPEED_SCAN)
		scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
		    cpu_to_le16(30);
	else
		scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
		    cpu_to_le16(20);

	scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] =
	    cpu_to_le16(20);

	scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = cpu_to_le16(120);

#ifdef CONFIG_IPW2200_MONITOR
	if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
		u8 channel;
		u8 band = 0;

		/* Monitor mode: passively sit on a single channel; encode
		 * it as a one-entry band run (header | count 1). */
		switch (ieee80211_is_valid_channel(priv->ieee, priv->channel)) {
		case IEEE80211_52GHZ_BAND:
			band = (u8) (IPW_A_MODE << 6) | 1;
			channel = priv->channel;
			break;

		case IEEE80211_24GHZ_BAND:
			band = (u8) (IPW_B_MODE << 6) | 1;
			channel = priv->channel;
			break;

		default:
			band = (u8) (IPW_B_MODE << 6) | 1;
			channel = 9;
			break;
		}

		scan.channels_list[0] = band;
		scan.channels_list[1] = channel;
		ipw_set_scan_type(&scan, 1, IPW_SCAN_PASSIVE_FULL_DWELL_SCAN);

		/* NOTE: The card will sit on this channel for this time
		 * period.  Scan aborts are timing sensitive and frequently
		 * result in firmware restarts.  As such, it is best to
		 * set a small dwell_time here and just keep re-issuing
		 * scans.  Otherwise fast channel hopping will not actually
		 * hop channels.
		 *
		 * TODO: Move SPEED SCAN support to all modes and bands */
		scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
		    cpu_to_le16(2000);
	} else {
#endif				/* CONFIG_IPW2200_MONITOR */
		/* If we are roaming, then make this a directed scan for the
		 * current network.  Otherwise, ensure that every other scan
		 * is a fast channel hop scan */
		if ((priv->status & STATUS_ROAMING)
		    || (!(priv->status & STATUS_ASSOCIATED)
			&& (priv->config & CFG_STATIC_ESSID)
			&& (le32_to_cpu(scan.full_scan_index) % 2))) {
			err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
			if (err) {
				IPW_DEBUG_HC("Attempt to send SSID command "
					     "failed.\n");
				goto done;
			}

			scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
		} else
			scan_type = IPW_SCAN_ACTIVE_BROADCAST_SCAN;

		ipw_add_scan_channels(priv, &scan, scan_type);
#ifdef CONFIG_IPW2200_MONITOR
	}
#endif

      send_request:
	err = ipw_send_scan_request_ext(priv, &scan);
	if (err) {
		IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
		goto done;
	}

	priv->status |= STATUS_SCANNING;
	priv->status &= ~STATUS_SCAN_PENDING;
	/* Watchdog: recover if the firmware never reports scan completion. */
	queue_delayed_work(priv->workqueue, &priv->scan_check,
			   IPW_SCAN_CHECK_WATCHDOG);
      done:
	mutex_unlock(&priv->mutex);
	return err;
}
6388
6389 static void ipw_request_passive_scan(struct work_struct *work)
6390 {
6391 struct ipw_priv *priv =
6392 container_of(work, struct ipw_priv, request_passive_scan);
6393 ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE);
6394 }
6395
6396 static void ipw_request_scan(struct work_struct *work)
6397 {
6398 struct ipw_priv *priv =
6399 container_of(work, struct ipw_priv, request_scan.work);
6400 ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE);
6401 }
6402
6403 static void ipw_bg_abort_scan(struct work_struct *work)
6404 {
6405 struct ipw_priv *priv =
6406 container_of(work, struct ipw_priv, abort_scan);
6407 mutex_lock(&priv->mutex);
6408 ipw_abort_scan(priv);
6409 mutex_unlock(&priv->mutex);
6410 }
6411
6412 static int ipw_wpa_enable(struct ipw_priv *priv, int value)
6413 {
6414 /* This is called when wpa_supplicant loads and closes the driver
6415 * interface. */
6416 priv->ieee->wpa_enabled = value;
6417 return 0;
6418 }
6419
6420 static int ipw_wpa_set_auth_algs(struct ipw_priv *priv, int value)
6421 {
6422 struct ieee80211_device *ieee = priv->ieee;
6423 struct ieee80211_security sec = {
6424 .flags = SEC_AUTH_MODE,
6425 };
6426 int ret = 0;
6427
6428 if (value & IW_AUTH_ALG_SHARED_KEY) {
6429 sec.auth_mode = WLAN_AUTH_SHARED_KEY;
6430 ieee->open_wep = 0;
6431 } else if (value & IW_AUTH_ALG_OPEN_SYSTEM) {
6432 sec.auth_mode = WLAN_AUTH_OPEN;
6433 ieee->open_wep = 1;
6434 } else if (value & IW_AUTH_ALG_LEAP) {
6435 sec.auth_mode = WLAN_AUTH_LEAP;
6436 ieee->open_wep = 1;
6437 } else
6438 return -EINVAL;
6439
6440 if (ieee->set_security)
6441 ieee->set_security(ieee->dev, &sec);
6442 else
6443 ret = -EOPNOTSUPP;
6444
6445 return ret;
6446 }
6447
static void ipw_wpa_assoc_frame(struct ipw_priv *priv, char *wpa_ie,
				int wpa_ie_len)
{
	/* The IE itself is already stored in priv->ieee by the caller;
	 * here we only make sure WPA is flagged as enabled. */
	ipw_wpa_enable(priv, 1);
}
6454
6455 static int ipw_set_rsn_capa(struct ipw_priv *priv,
6456 char *capabilities, int length)
6457 {
6458 IPW_DEBUG_HC("HOST_CMD_RSN_CAPABILITIES\n");
6459
6460 return ipw_send_cmd_pdu(priv, IPW_CMD_RSN_CAPABILITIES, length,
6461 capabilities);
6462 }
6463
6464 /*
6465 * WE-18 support
6466 */
6467
6468 /* SIOCSIWGENIE */
6469 static int ipw_wx_set_genie(struct net_device *dev,
6470 struct iw_request_info *info,
6471 union iwreq_data *wrqu, char *extra)
6472 {
6473 struct ipw_priv *priv = ieee80211_priv(dev);
6474 struct ieee80211_device *ieee = priv->ieee;
6475 u8 *buf;
6476 int err = 0;
6477
6478 if (wrqu->data.length > MAX_WPA_IE_LEN ||
6479 (wrqu->data.length && extra == NULL))
6480 return -EINVAL;
6481
6482 if (wrqu->data.length) {
6483 buf = kmalloc(wrqu->data.length, GFP_KERNEL);
6484 if (buf == NULL) {
6485 err = -ENOMEM;
6486 goto out;
6487 }
6488
6489 memcpy(buf, extra, wrqu->data.length);
6490 kfree(ieee->wpa_ie);
6491 ieee->wpa_ie = buf;
6492 ieee->wpa_ie_len = wrqu->data.length;
6493 } else {
6494 kfree(ieee->wpa_ie);
6495 ieee->wpa_ie = NULL;
6496 ieee->wpa_ie_len = 0;
6497 }
6498
6499 ipw_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len);
6500 out:
6501 return err;
6502 }
6503
6504 /* SIOCGIWGENIE */
6505 static int ipw_wx_get_genie(struct net_device *dev,
6506 struct iw_request_info *info,
6507 union iwreq_data *wrqu, char *extra)
6508 {
6509 struct ipw_priv *priv = ieee80211_priv(dev);
6510 struct ieee80211_device *ieee = priv->ieee;
6511 int err = 0;
6512
6513 if (ieee->wpa_ie_len == 0 || ieee->wpa_ie == NULL) {
6514 wrqu->data.length = 0;
6515 goto out;
6516 }
6517
6518 if (wrqu->data.length < ieee->wpa_ie_len) {
6519 err = -E2BIG;
6520 goto out;
6521 }
6522
6523 wrqu->data.length = ieee->wpa_ie_len;
6524 memcpy(extra, ieee->wpa_ie, ieee->wpa_ie_len);
6525
6526 out:
6527 return err;
6528 }
6529
6530 static int wext_cipher2level(int cipher)
6531 {
6532 switch (cipher) {
6533 case IW_AUTH_CIPHER_NONE:
6534 return SEC_LEVEL_0;
6535 case IW_AUTH_CIPHER_WEP40:
6536 case IW_AUTH_CIPHER_WEP104:
6537 return SEC_LEVEL_1;
6538 case IW_AUTH_CIPHER_TKIP:
6539 return SEC_LEVEL_2;
6540 case IW_AUTH_CIPHER_CCMP:
6541 return SEC_LEVEL_3;
6542 default:
6543 return -1;
6544 }
6545 }
6546
/* SIOCSIWAUTH */
/*
 * Handle the WE-18 set-authentication-parameter ioctl.  Each IW_AUTH_*
 * index either programs the hardware/ieee80211 layer or is deliberately
 * ignored because ipw2200 does not use it.  Returns 0, a sub-call's
 * error code, or -EOPNOTSUPP for an unknown parameter.
 */
static int ipw_wx_set_auth(struct net_device *dev,
			   struct iw_request_info *info,
			   union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	struct ieee80211_device *ieee = priv->ieee;
	struct iw_param *param = &wrqu->param;
	struct ieee80211_crypt_data *crypt;
	unsigned long flags;
	int ret = 0;

	switch (param->flags & IW_AUTH_INDEX) {
	case IW_AUTH_WPA_VERSION:
		/* Tracked by wpa_supplicant itself; nothing to do. */
		break;
	case IW_AUTH_CIPHER_PAIRWISE:
		/* Program the HW decrypt level for unicast frames. */
		ipw_set_hw_decrypt_unicast(priv,
					   wext_cipher2level(param->value));
		break;
	case IW_AUTH_CIPHER_GROUP:
		/* Program the HW decrypt level for multicast frames. */
		ipw_set_hw_decrypt_multicast(priv,
					     wext_cipher2level(param->value));
		break;
	case IW_AUTH_KEY_MGMT:
		/*
		 * ipw2200 does not use these parameters
		 */
		break;

	case IW_AUTH_TKIP_COUNTERMEASURES:
		/* Toggle the TKIP countermeasures flag on the active TX
		 * key's crypto ops, when the ops support flag access. */
		crypt = priv->ieee->crypt[priv->ieee->tx_keyidx];
		if (!crypt || !crypt->ops->set_flags || !crypt->ops->get_flags)
			break;

		flags = crypt->ops->get_flags(crypt->priv);

		if (param->value)
			flags |= IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
		else
			flags &= ~IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;

		crypt->ops->set_flags(flags, crypt->priv);

		break;

	case IW_AUTH_DROP_UNENCRYPTED:{
			/* HACK:
			 *
			 * wpa_supplicant calls set_wpa_enabled when the driver
			 * is loaded and unloaded, regardless of if WPA is being
			 * used.  No other calls are made which can be used to
			 * determine if encryption will be used or not prior to
			 * association being expected.  If encryption is not being
			 * used, drop_unencrypted is set to false, else true -- we
			 * can use this to determine if the CAP_PRIVACY_ON bit should
			 * be set.
			 */
			struct ieee80211_security sec = {
				.flags = SEC_ENABLED,
				.enabled = param->value,
			};
			priv->ieee->drop_unencrypted = param->value;
			/* We only change SEC_LEVEL for open mode. Others
			 * are set by ipw_wpa_set_encryption.
			 */
			if (!param->value) {
				sec.flags |= SEC_LEVEL;
				sec.level = SEC_LEVEL_0;
			} else {
				sec.flags |= SEC_LEVEL;
				sec.level = SEC_LEVEL_1;
			}
			if (priv->ieee->set_security)
				priv->ieee->set_security(priv->ieee->dev, &sec);
			break;
		}

	case IW_AUTH_80211_AUTH_ALG:
		ret = ipw_wpa_set_auth_algs(priv, param->value);
		break;

	case IW_AUTH_WPA_ENABLED:
		/* Toggling WPA forces a fresh association. */
		ret = ipw_wpa_enable(priv, param->value);
		ipw_disassociate(priv);
		break;

	case IW_AUTH_RX_UNENCRYPTED_EAPOL:
		ieee->ieee802_1x = param->value;
		break;

	case IW_AUTH_PRIVACY_INVOKED:
		ieee->privacy_invoked = param->value;
		break;

	default:
		return -EOPNOTSUPP;
	}
	return ret;
}
6646
6647 /* SIOCGIWAUTH */
6648 static int ipw_wx_get_auth(struct net_device *dev,
6649 struct iw_request_info *info,
6650 union iwreq_data *wrqu, char *extra)
6651 {
6652 struct ipw_priv *priv = ieee80211_priv(dev);
6653 struct ieee80211_device *ieee = priv->ieee;
6654 struct ieee80211_crypt_data *crypt;
6655 struct iw_param *param = &wrqu->param;
6656 int ret = 0;
6657
6658 switch (param->flags & IW_AUTH_INDEX) {
6659 case IW_AUTH_WPA_VERSION:
6660 case IW_AUTH_CIPHER_PAIRWISE:
6661 case IW_AUTH_CIPHER_GROUP:
6662 case IW_AUTH_KEY_MGMT:
6663 /*
6664 * wpa_supplicant will control these internally
6665 */
6666 ret = -EOPNOTSUPP;
6667 break;
6668
6669 case IW_AUTH_TKIP_COUNTERMEASURES:
6670 crypt = priv->ieee->crypt[priv->ieee->tx_keyidx];
6671 if (!crypt || !crypt->ops->get_flags)
6672 break;
6673
6674 param->value = (crypt->ops->get_flags(crypt->priv) &
6675 IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) ? 1 : 0;
6676
6677 break;
6678
6679 case IW_AUTH_DROP_UNENCRYPTED:
6680 param->value = ieee->drop_unencrypted;
6681 break;
6682
6683 case IW_AUTH_80211_AUTH_ALG:
6684 param->value = ieee->sec.auth_mode;
6685 break;
6686
6687 case IW_AUTH_WPA_ENABLED:
6688 param->value = ieee->wpa_enabled;
6689 break;
6690
6691 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6692 param->value = ieee->ieee802_1x;
6693 break;
6694
6695 case IW_AUTH_ROAMING_CONTROL:
6696 case IW_AUTH_PRIVACY_INVOKED:
6697 param->value = ieee->privacy_invoked;
6698 break;
6699
6700 default:
6701 return -EOPNOTSUPP;
6702 }
6703 return 0;
6704 }
6705
6706 /* SIOCSIWENCODEEXT */
6707 static int ipw_wx_set_encodeext(struct net_device *dev,
6708 struct iw_request_info *info,
6709 union iwreq_data *wrqu, char *extra)
6710 {
6711 struct ipw_priv *priv = ieee80211_priv(dev);
6712 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
6713
6714 if (hwcrypto) {
6715 if (ext->alg == IW_ENCODE_ALG_TKIP) {
6716 /* IPW HW can't build TKIP MIC,
6717 host decryption still needed */
6718 if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY)
6719 priv->ieee->host_mc_decrypt = 1;
6720 else {
6721 priv->ieee->host_encrypt = 0;
6722 priv->ieee->host_encrypt_msdu = 1;
6723 priv->ieee->host_decrypt = 1;
6724 }
6725 } else {
6726 priv->ieee->host_encrypt = 0;
6727 priv->ieee->host_encrypt_msdu = 0;
6728 priv->ieee->host_decrypt = 0;
6729 priv->ieee->host_mc_decrypt = 0;
6730 }
6731 }
6732
6733 return ieee80211_wx_set_encodeext(priv->ieee, info, wrqu, extra);
6734 }
6735
6736 /* SIOCGIWENCODEEXT */
6737 static int ipw_wx_get_encodeext(struct net_device *dev,
6738 struct iw_request_info *info,
6739 union iwreq_data *wrqu, char *extra)
6740 {
6741 struct ipw_priv *priv = ieee80211_priv(dev);
6742 return ieee80211_wx_get_encodeext(priv->ieee, info, wrqu, extra);
6743 }
6744
6745 /* SIOCSIWMLME */
6746 static int ipw_wx_set_mlme(struct net_device *dev,
6747 struct iw_request_info *info,
6748 union iwreq_data *wrqu, char *extra)
6749 {
6750 struct ipw_priv *priv = ieee80211_priv(dev);
6751 struct iw_mlme *mlme = (struct iw_mlme *)extra;
6752 __le16 reason;
6753
6754 reason = cpu_to_le16(mlme->reason_code);
6755
6756 switch (mlme->cmd) {
6757 case IW_MLME_DEAUTH:
6758 /* silently ignore */
6759 break;
6760
6761 case IW_MLME_DISASSOC:
6762 ipw_disassociate(priv);
6763 break;
6764
6765 default:
6766 return -EOPNOTSUPP;
6767 }
6768 return 0;
6769 }
6770
6771 #ifdef CONFIG_IPW2200_QOS
6772
6773 /* QoS */
6774 /*
6775 * get the modulation type of the current network or
6776 * the card current mode
6777 */
6778 static u8 ipw_qos_current_mode(struct ipw_priv * priv)
6779 {
6780 u8 mode = 0;
6781
6782 if (priv->status & STATUS_ASSOCIATED) {
6783 unsigned long flags;
6784
6785 spin_lock_irqsave(&priv->ieee->lock, flags);
6786 mode = priv->assoc_network->mode;
6787 spin_unlock_irqrestore(&priv->ieee->lock, flags);
6788 } else {
6789 mode = priv->ieee->mode;
6790 }
6791 IPW_DEBUG_QOS("QoS network/card mode %d \n", mode);
6792 return mode;
6793 }
6794
/*
 * Handle management frame beacon and probe response
 */
/*
 * Reconcile the cached QoS state of @network with what the latest
 * beacon/probe response advertised.  @active_network is non-zero when
 * @network is the network we are currently associated with.  A changed
 * QoS parameter set is pushed to the firmware via the qos_activate
 * work item.  Always returns 0.
 */
static int ipw_qos_handle_probe_response(struct ipw_priv *priv,
					 int active_network,
					 struct ieee80211_network *network)
{
	u32 size = sizeof(struct ieee80211_qos_parameters);

	/* For IBSS networks, "active" simply follows "supported". */
	if (network->capability & WLAN_CAPABILITY_IBSS)
		network->qos_data.active = network->qos_data.supported;

	if (network->flags & NETWORK_HAS_QOS_MASK) {
		if (active_network &&
		    (network->flags & NETWORK_HAS_QOS_PARAMETERS))
			network->qos_data.active = network->qos_data.supported;

		/* Reprogram the firmware only when the advertised
		 * parameter set count has changed since last time. */
		if ((network->qos_data.active == 1) && (active_network == 1) &&
		    (network->flags & NETWORK_HAS_QOS_PARAMETERS) &&
		    (network->qos_data.old_param_count !=
		     network->qos_data.param_count)) {
			network->qos_data.old_param_count =
			    network->qos_data.param_count;
			schedule_work(&priv->qos_activate);
			IPW_DEBUG_QOS("QoS parameters change call "
				      "qos_activate\n");
		}
	} else {
		/* No QoS IE present: fall back to the default tables. */
		if ((priv->ieee->mode == IEEE_B) || (network->mode == IEEE_B))
			memcpy(&network->qos_data.parameters,
			       &def_parameters_CCK, size);
		else
			memcpy(&network->qos_data.parameters,
			       &def_parameters_OFDM, size);

		/* QoS just got switched off for the active network. */
		if ((network->qos_data.active == 1) && (active_network == 1)) {
			IPW_DEBUG_QOS("QoS was disabled call qos_activate \n");
			schedule_work(&priv->qos_activate);
		}

		network->qos_data.active = 0;
		network->qos_data.supported = 0;
	}
	/* In IBSS: if a different cell advertising our ESSID shows up,
	 * queue a cell-merge attempt. */
	if ((priv->status & STATUS_ASSOCIATED) &&
	    (priv->ieee->iw_mode == IW_MODE_ADHOC) && (active_network == 0)) {
		if (memcmp(network->bssid, priv->bssid, ETH_ALEN))
			if ((network->capability & WLAN_CAPABILITY_IBSS) &&
			    !(network->flags & NETWORK_EMPTY_ESSID))
				if ((network->ssid_len ==
				     priv->assoc_network->ssid_len) &&
				    !memcmp(network->ssid,
					    priv->assoc_network->ssid,
					    network->ssid_len)) {
					queue_work(priv->workqueue,
						   &priv->merge_networks);
				}
	}

	return 0;
}
6855
/*
 * This function set up the firmware to support QoS. It sends
 * IPW_CMD_QOS_PARAMETERS and IPW_CMD_WME_INFO
 */
/*
 * Assemble the full set of QoS parameter tables (CCK default, OFDM
 * default, and the active set) and send them to the firmware.  The
 * active set comes from @qos_network_data when given, otherwise from
 * the defaults for the current mode; when QoS is not actually in use
 * the TXOP limits are overridden with the burst duration.
 * Returns the ipw_send_qos_params_command() result.
 */
static int ipw_qos_activate(struct ipw_priv *priv,
			    struct ieee80211_qos_data *qos_network_data)
{
	int err;
	struct ieee80211_qos_parameters qos_parameters[QOS_QOS_SETS];
	struct ieee80211_qos_parameters *active_one = NULL;
	u32 size = sizeof(struct ieee80211_qos_parameters);
	u32 burst_duration;
	int i;
	u8 type;

	type = ipw_qos_current_mode(priv);

	/* Default CCK and OFDM sets always come from priv->qos_data. */
	active_one = &(qos_parameters[QOS_PARAM_SET_DEF_CCK]);
	memcpy(active_one, priv->qos_data.def_qos_parm_CCK, size);
	active_one = &(qos_parameters[QOS_PARAM_SET_DEF_OFDM]);
	memcpy(active_one, priv->qos_data.def_qos_parm_OFDM, size);

	if (qos_network_data == NULL) {
		/* No network supplied: use the mode's defaults and apply
		 * the burst duration to every queue's TXOP limit. */
		if (type == IEEE_B) {
			IPW_DEBUG_QOS("QoS activate network mode %d\n", type);
			active_one = &def_parameters_CCK;
		} else
			active_one = &def_parameters_OFDM;

		memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
		burst_duration = ipw_qos_get_burst_duration(priv);
		for (i = 0; i < QOS_QUEUE_NUM; i++)
			qos_parameters[QOS_PARAM_SET_ACTIVE].tx_op_limit[i] =
			    cpu_to_le16(burst_duration);
	} else if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
		/* IBSS: pick defaults vs. configured tables depending on
		 * whether QoS is enabled. */
		if (type == IEEE_B) {
			IPW_DEBUG_QOS("QoS activate IBSS nework mode %d\n",
				      type);
			if (priv->qos_data.qos_enable == 0)
				active_one = &def_parameters_CCK;
			else
				active_one = priv->qos_data.def_qos_parm_CCK;
		} else {
			if (priv->qos_data.qos_enable == 0)
				active_one = &def_parameters_OFDM;
			else
				active_one = priv->qos_data.def_qos_parm_OFDM;
		}
		memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
	} else {
		unsigned long flags;
		int active;

		/* Infrastructure: copy the network's advertised
		 * parameters under the ieee80211 lock. */
		spin_lock_irqsave(&priv->ieee->lock, flags);
		active_one = &(qos_network_data->parameters);
		qos_network_data->old_param_count =
		    qos_network_data->param_count;
		memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
		active = qos_network_data->supported;
		spin_unlock_irqrestore(&priv->ieee->lock, flags);

		/* AP does not support QoS: apply burst TXOP override. */
		if (active == 0) {
			burst_duration = ipw_qos_get_burst_duration(priv);
			for (i = 0; i < QOS_QUEUE_NUM; i++)
				qos_parameters[QOS_PARAM_SET_ACTIVE].
				    tx_op_limit[i] = cpu_to_le16(burst_duration);
		}
	}

	IPW_DEBUG_QOS("QoS sending IPW_CMD_QOS_PARAMETERS\n");
	err = ipw_send_qos_params_command(priv,
					  (struct ieee80211_qos_parameters *)
					  &(qos_parameters[0]));
	if (err)
		IPW_DEBUG_QOS("QoS IPW_CMD_QOS_PARAMETERS failed\n");

	return err;
}
6934
6935 /*
6936 * send IPW_CMD_WME_INFO to the firmware
6937 */
6938 static int ipw_qos_set_info_element(struct ipw_priv *priv)
6939 {
6940 int ret = 0;
6941 struct ieee80211_qos_information_element qos_info;
6942
6943 if (priv == NULL)
6944 return -1;
6945
6946 qos_info.elementID = QOS_ELEMENT_ID;
6947 qos_info.length = sizeof(struct ieee80211_qos_information_element) - 2;
6948
6949 qos_info.version = QOS_VERSION_1;
6950 qos_info.ac_info = 0;
6951
6952 memcpy(qos_info.qui, qos_oui, QOS_OUI_LEN);
6953 qos_info.qui_type = QOS_OUI_TYPE;
6954 qos_info.qui_subtype = QOS_OUI_INFO_SUB_TYPE;
6955
6956 ret = ipw_send_qos_info_command(priv, &qos_info);
6957 if (ret != 0) {
6958 IPW_DEBUG_QOS("QoS error calling ipw_send_qos_info_command\n");
6959 }
6960 return ret;
6961 }
6962
6963 /*
6964 * Set the QoS parameter with the association request structure
6965 */
6966 static int ipw_qos_association(struct ipw_priv *priv,
6967 struct ieee80211_network *network)
6968 {
6969 int err = 0;
6970 struct ieee80211_qos_data *qos_data = NULL;
6971 struct ieee80211_qos_data ibss_data = {
6972 .supported = 1,
6973 .active = 1,
6974 };
6975
6976 switch (priv->ieee->iw_mode) {
6977 case IW_MODE_ADHOC:
6978 BUG_ON(!(network->capability & WLAN_CAPABILITY_IBSS));
6979
6980 qos_data = &ibss_data;
6981 break;
6982
6983 case IW_MODE_INFRA:
6984 qos_data = &network->qos_data;
6985 break;
6986
6987 default:
6988 BUG();
6989 break;
6990 }
6991
6992 err = ipw_qos_activate(priv, qos_data);
6993 if (err) {
6994 priv->assoc_request.policy_support &= ~HC_QOS_SUPPORT_ASSOC;
6995 return err;
6996 }
6997
6998 if (priv->qos_data.qos_enable && qos_data->supported) {
6999 IPW_DEBUG_QOS("QoS will be enabled for this association\n");
7000 priv->assoc_request.policy_support |= HC_QOS_SUPPORT_ASSOC;
7001 return ipw_qos_set_info_element(priv);
7002 }
7003
7004 return 0;
7005 }
7006
/*
 * handling the beaconing responses. if we get different QoS setting
 * off the network from the associated setting, adjust the QoS
 * setting
 */
/*
 * Copy the (possibly changed) QoS settings of @network into the cached
 * assoc_network state and, when the parameter set changed, schedule the
 * qos_activate work to reprogram the firmware.  Only meaningful while
 * associated in infrastructure mode.  Always returns 0.
 */
static int ipw_qos_association_resp(struct ipw_priv *priv,
				    struct ieee80211_network *network)
{
	int ret = 0;
	unsigned long flags;
	u32 size = sizeof(struct ieee80211_qos_parameters);
	int set_qos_param = 0;

	if ((priv == NULL) || (network == NULL) ||
	    (priv->assoc_network == NULL))
		return ret;

	if (!(priv->status & STATUS_ASSOCIATED))
		return ret;

	if ((priv->ieee->iw_mode != IW_MODE_INFRA))
		return ret;

	/* The copies below touch assoc_network, guarded by ieee->lock. */
	spin_lock_irqsave(&priv->ieee->lock, flags);
	if (network->flags & NETWORK_HAS_QOS_PARAMETERS) {
		memcpy(&priv->assoc_network->qos_data, &network->qos_data,
		       sizeof(struct ieee80211_qos_data));
		priv->assoc_network->qos_data.active = 1;
		/* Reprogram only when the parameter count moved on. */
		if ((network->qos_data.old_param_count !=
		     network->qos_data.param_count)) {
			set_qos_param = 1;
			network->qos_data.old_param_count =
			    network->qos_data.param_count;
		}

	} else {
		/* AP stopped advertising QoS: revert to defaults. */
		if ((network->mode == IEEE_B) || (priv->ieee->mode == IEEE_B))
			memcpy(&priv->assoc_network->qos_data.parameters,
			       &def_parameters_CCK, size);
		else
			memcpy(&priv->assoc_network->qos_data.parameters,
			       &def_parameters_OFDM, size);
		priv->assoc_network->qos_data.active = 0;
		priv->assoc_network->qos_data.supported = 0;
		set_qos_param = 1;
	}

	spin_unlock_irqrestore(&priv->ieee->lock, flags);

	/* Work is scheduled outside the spinlock on purpose. */
	if (set_qos_param == 1)
		schedule_work(&priv->qos_activate);

	return ret;
}
7061
7062 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv)
7063 {
7064 u32 ret = 0;
7065
7066 if ((priv == NULL))
7067 return 0;
7068
7069 if (!(priv->ieee->modulation & IEEE80211_OFDM_MODULATION))
7070 ret = priv->qos_data.burst_duration_CCK;
7071 else
7072 ret = priv->qos_data.burst_duration_OFDM;
7073
7074 return ret;
7075 }
7076
7077 /*
7078 * Initialize the setting of QoS global
7079 */
7080 static void ipw_qos_init(struct ipw_priv *priv, int enable,
7081 int burst_enable, u32 burst_duration_CCK,
7082 u32 burst_duration_OFDM)
7083 {
7084 priv->qos_data.qos_enable = enable;
7085
7086 if (priv->qos_data.qos_enable) {
7087 priv->qos_data.def_qos_parm_CCK = &def_qos_parameters_CCK;
7088 priv->qos_data.def_qos_parm_OFDM = &def_qos_parameters_OFDM;
7089 IPW_DEBUG_QOS("QoS is enabled\n");
7090 } else {
7091 priv->qos_data.def_qos_parm_CCK = &def_parameters_CCK;
7092 priv->qos_data.def_qos_parm_OFDM = &def_parameters_OFDM;
7093 IPW_DEBUG_QOS("QoS is not enabled\n");
7094 }
7095
7096 priv->qos_data.burst_enable = burst_enable;
7097
7098 if (burst_enable) {
7099 priv->qos_data.burst_duration_CCK = burst_duration_CCK;
7100 priv->qos_data.burst_duration_OFDM = burst_duration_OFDM;
7101 } else {
7102 priv->qos_data.burst_duration_CCK = 0;
7103 priv->qos_data.burst_duration_OFDM = 0;
7104 }
7105 }
7106
7107 /*
7108 * map the packet priority to the right TX Queue
7109 */
7110 static int ipw_get_tx_queue_number(struct ipw_priv *priv, u16 priority)
7111 {
7112 if (priority > 7 || !priv->qos_data.qos_enable)
7113 priority = 0;
7114
7115 return from_priority_to_tx_queue[priority] - 1;
7116 }
7117
7118 static int ipw_is_qos_active(struct net_device *dev,
7119 struct sk_buff *skb)
7120 {
7121 struct ipw_priv *priv = ieee80211_priv(dev);
7122 struct ieee80211_qos_data *qos_data = NULL;
7123 int active, supported;
7124 u8 *daddr = skb->data + ETH_ALEN;
7125 int unicast = !is_multicast_ether_addr(daddr);
7126
7127 if (!(priv->status & STATUS_ASSOCIATED))
7128 return 0;
7129
7130 qos_data = &priv->assoc_network->qos_data;
7131
7132 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7133 if (unicast == 0)
7134 qos_data->active = 0;
7135 else
7136 qos_data->active = qos_data->supported;
7137 }
7138 active = qos_data->active;
7139 supported = qos_data->supported;
7140 IPW_DEBUG_QOS("QoS %d network is QoS active %d supported %d "
7141 "unicast %d\n",
7142 priv->qos_data.qos_enable, active, supported, unicast);
7143 if (active && priv->qos_data.qos_enable)
7144 return 1;
7145
7146 return 0;
7147
7148 }
/*
 * add QoS parameter to the TX command
 */
/*
 * Mark the TX frame descriptor as QoS-enabled and, when the queue is in
 * the no-ACK mask, strip the ACK-required flag and set the no-ack bit
 * in the QoS control field.  Always returns 0.
 *
 * NOTE(review): unlike ipw_get_tx_queue_number(), @priority is used to
 * index from_priority_to_tx_queue[] without a range check here —
 * presumably callers guarantee priority <= 7; verify at call sites.
 */
static int ipw_qos_set_tx_queue_command(struct ipw_priv *priv,
					u16 priority,
					struct tfd_data *tfd)
{
	int tx_queue_id = 0;


	tx_queue_id = from_priority_to_tx_queue[priority] - 1;
	tfd->tx_flags_ext |= DCT_FLAG_EXT_QOS_ENABLED;

	if (priv->qos_data.qos_no_ack_mask & (1UL << tx_queue_id)) {
		tfd->tx_flags &= ~DCT_FLAG_ACK_REQD;
		tfd->tfd.tfd_26.mchdr.qos_ctrl |= cpu_to_le16(CTRL_QOS_NO_ACK);
	}
	return 0;
}
7168
7169 /*
7170 * background support to run QoS activate functionality
7171 */
7172 static void ipw_bg_qos_activate(struct work_struct *work)
7173 {
7174 struct ipw_priv *priv =
7175 container_of(work, struct ipw_priv, qos_activate);
7176
7177 if (priv == NULL)
7178 return;
7179
7180 mutex_lock(&priv->mutex);
7181
7182 if (priv->status & STATUS_ASSOCIATED)
7183 ipw_qos_activate(priv, &(priv->assoc_network->qos_data));
7184
7185 mutex_unlock(&priv->mutex);
7186 }
7187
7188 static int ipw_handle_probe_response(struct net_device *dev,
7189 struct ieee80211_probe_response *resp,
7190 struct ieee80211_network *network)
7191 {
7192 struct ipw_priv *priv = ieee80211_priv(dev);
7193 int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7194 (network == priv->assoc_network));
7195
7196 ipw_qos_handle_probe_response(priv, active_network, network);
7197
7198 return 0;
7199 }
7200
7201 static int ipw_handle_beacon(struct net_device *dev,
7202 struct ieee80211_beacon *resp,
7203 struct ieee80211_network *network)
7204 {
7205 struct ipw_priv *priv = ieee80211_priv(dev);
7206 int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7207 (network == priv->assoc_network));
7208
7209 ipw_qos_handle_probe_response(priv, active_network, network);
7210
7211 return 0;
7212 }
7213
/* ieee80211 callback: reconcile QoS state after an association
 * response. */
static int ipw_handle_assoc_response(struct net_device *dev,
				     struct ieee80211_assoc_response *resp,
				     struct ieee80211_network *network)
{
	struct ipw_priv *priv = ieee80211_priv(dev);

	ipw_qos_association_resp(priv, network);
	return 0;
}
7222
7223 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct ieee80211_qos_parameters
7224 *qos_param)
7225 {
7226 return ipw_send_cmd_pdu(priv, IPW_CMD_QOS_PARAMETERS,
7227 sizeof(*qos_param) * 3, qos_param);
7228 }
7229
7230 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct ieee80211_qos_information_element
7231 *qos_param)
7232 {
7233 return ipw_send_cmd_pdu(priv, IPW_CMD_WME_INFO, sizeof(*qos_param),
7234 qos_param);
7235 }
7236
7237 #endif /* CONFIG_IPW2200_QOS */
7238
/*
 * Build and send the full association (or IBSS-start / reassociation)
 * command sequence for @network: SSID, supported rates, system config,
 * sensitivity, and finally the associate command itself.  @roaming
 * selects HC_REASSOCIATE.  Returns 0 on success or the first failing
 * sub-command's error code.  The exact ordering of firmware commands
 * and the state updates before ipw_send_associate() are significant.
 */
static int ipw_associate_network(struct ipw_priv *priv,
				 struct ieee80211_network *network,
				 struct ipw_supported_rates *rates, int roaming)
{
	int err;
	DECLARE_MAC_BUF(mac);

	if (priv->config & CFG_FIXED_RATE)
		ipw_set_fixed_rate(priv, network->mode);

	/* Without a user-pinned ESSID, adopt the target network's. */
	if (!(priv->config & CFG_STATIC_ESSID)) {
		priv->essid_len = min(network->ssid_len,
				      (u8) IW_ESSID_MAX_SIZE);
		memcpy(priv->essid, network->ssid, priv->essid_len);
	}

	network->last_associate = jiffies;

	memset(&priv->assoc_request, 0, sizeof(priv->assoc_request));
	priv->assoc_request.channel = network->channel;
	priv->assoc_request.auth_key = 0;

	/* Pick the authentication type; shared-key WEP also pushes the
	 * WEP keys to the firmware at SEC_LEVEL_1. */
	if ((priv->capability & CAP_PRIVACY_ON) &&
	    (priv->ieee->sec.auth_mode == WLAN_AUTH_SHARED_KEY)) {
		priv->assoc_request.auth_type = AUTH_SHARED_KEY;
		priv->assoc_request.auth_key = priv->ieee->sec.active_key;

		if (priv->ieee->sec.level == SEC_LEVEL_1)
			ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);

	} else if ((priv->capability & CAP_PRIVACY_ON) &&
		   (priv->ieee->sec.auth_mode == WLAN_AUTH_LEAP))
		priv->assoc_request.auth_type = AUTH_LEAP;
	else
		priv->assoc_request.auth_type = AUTH_OPEN;

	if (priv->ieee->wpa_ie_len) {
		priv->assoc_request.policy_support = cpu_to_le16(0x02);	/* RSN active */
		ipw_set_rsn_capa(priv, priv->ieee->wpa_ie,
				 priv->ieee->wpa_ie_len);
	}

	/*
	 * It is valid for our ieee device to support multiple modes, but
	 * when it comes to associating to a given network we have to choose
	 * just one mode.
	 */
	if (network->mode & priv->ieee->mode & IEEE_A)
		priv->assoc_request.ieee_mode = IPW_A_MODE;
	else if (network->mode & priv->ieee->mode & IEEE_G)
		priv->assoc_request.ieee_mode = IPW_G_MODE;
	else if (network->mode & priv->ieee->mode & IEEE_B)
		priv->assoc_request.ieee_mode = IPW_B_MODE;

	priv->assoc_request.capability = cpu_to_le16(network->capability);
	if ((network->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
	    && !(priv->config & CFG_PREAMBLE_LONG)) {
		priv->assoc_request.preamble_length = DCT_FLAG_SHORT_PREAMBLE;
	} else {
		priv->assoc_request.preamble_length = DCT_FLAG_LONG_PREAMBLE;

		/* Clear the short preamble if we won't be supporting it */
		priv->assoc_request.capability &=
		    ~cpu_to_le16(WLAN_CAPABILITY_SHORT_PREAMBLE);
	}

	/* Clear capability bits that aren't used in Ad Hoc */
	if (priv->ieee->iw_mode == IW_MODE_ADHOC)
		priv->assoc_request.capability &=
		    ~cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT_TIME);

	IPW_DEBUG_ASSOC("%sssocation attempt: '%s', channel %d, "
			"802.11%c [%d], %s[:%s], enc=%s%s%s%c%c\n",
			roaming ? "Rea" : "A",
			escape_essid(priv->essid, priv->essid_len),
			network->channel,
			ipw_modes[priv->assoc_request.ieee_mode],
			rates->num_rates,
			(priv->assoc_request.preamble_length ==
			 DCT_FLAG_LONG_PREAMBLE) ? "long" : "short",
			network->capability &
			WLAN_CAPABILITY_SHORT_PREAMBLE ? "short" : "long",
			priv->capability & CAP_PRIVACY_ON ? "on " : "off",
			priv->capability & CAP_PRIVACY_ON ?
			(priv->capability & CAP_SHARED_KEY ? "(shared)" :
			 "(open)") : "",
			priv->capability & CAP_PRIVACY_ON ? " key=" : "",
			priv->capability & CAP_PRIVACY_ON ?
			'1' + priv->ieee->sec.active_key : '.',
			priv->capability & CAP_PRIVACY_ON ? '.' : ' ');

	priv->assoc_request.beacon_interval = cpu_to_le16(network->beacon_interval);
	/* An IBSS network with a zero timestamp has not been started
	 * yet, so we start it ourselves. */
	if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
	    (network->time_stamp[0] == 0) && (network->time_stamp[1] == 0)) {
		priv->assoc_request.assoc_type = HC_IBSS_START;
		priv->assoc_request.assoc_tsf_msw = 0;
		priv->assoc_request.assoc_tsf_lsw = 0;
	} else {
		if (unlikely(roaming))
			priv->assoc_request.assoc_type = HC_REASSOCIATE;
		else
			priv->assoc_request.assoc_type = HC_ASSOCIATE;
		priv->assoc_request.assoc_tsf_msw = cpu_to_le32(network->time_stamp[1]);
		priv->assoc_request.assoc_tsf_lsw = cpu_to_le32(network->time_stamp[0]);
	}

	memcpy(priv->assoc_request.bssid, network->bssid, ETH_ALEN);

	if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
		memset(&priv->assoc_request.dest, 0xFF, ETH_ALEN);
		priv->assoc_request.atim_window = cpu_to_le16(network->atim_window);
	} else {
		memcpy(priv->assoc_request.dest, network->bssid, ETH_ALEN);
		priv->assoc_request.atim_window = 0;
	}

	priv->assoc_request.listen_interval = cpu_to_le16(network->listen_interval);

	err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
	if (err) {
		IPW_DEBUG_HC("Attempt to send SSID command failed.\n");
		return err;
	}

	rates->ieee_mode = priv->assoc_request.ieee_mode;
	rates->purpose = IPW_RATE_CONNECT;
	ipw_send_supported_rates(priv, rates);

	if (priv->assoc_request.ieee_mode == IPW_G_MODE)
		priv->sys_config.dot11g_auto_detection = 1;
	else
		priv->sys_config.dot11g_auto_detection = 0;

	if (priv->ieee->iw_mode == IW_MODE_ADHOC)
		priv->sys_config.answer_broadcast_ssid_probe = 1;
	else
		priv->sys_config.answer_broadcast_ssid_probe = 0;

	err = ipw_send_system_config(priv);
	if (err) {
		IPW_DEBUG_HC("Attempt to send sys config command failed.\n");
		return err;
	}

	IPW_DEBUG_ASSOC("Association sensitivity: %d\n", network->stats.rssi);
	err = ipw_set_sensitivity(priv, network->stats.rssi + IPW_RSSI_TO_DBM);
	if (err) {
		IPW_DEBUG_HC("Attempt to send associate command failed.\n");
		return err;
	}

	/*
	 * If preemption is enabled, it is possible for the association
	 * to complete before we return from ipw_send_associate.  Therefore
	 * we have to be sure and update our private data first.
	 */
	priv->channel = network->channel;
	memcpy(priv->bssid, network->bssid, ETH_ALEN);
	priv->status |= STATUS_ASSOCIATING;
	priv->status &= ~STATUS_SECURITY_UPDATED;

	priv->assoc_network = network;

#ifdef CONFIG_IPW2200_QOS
	ipw_qos_association(priv, network);
#endif

	err = ipw_send_associate(priv, &priv->assoc_request);
	if (err) {
		IPW_DEBUG_HC("Attempt to send associate command failed.\n");
		return err;
	}

	IPW_DEBUG(IPW_DL_STATE, "associating: '%s' %s \n",
		  escape_essid(priv->essid, priv->essid_len),
		  print_mac(mac, priv->bssid));

	return 0;
}
7418
/* Two-phase roaming state machine, driven twice via the ROAM work item.
 * Phase 1 (still associated): pick a better AP and disassociate.
 * Phase 2 (after disassociation completes): associate to the chosen AP. */
static void ipw_roam(void *data)
{
	struct ipw_priv *priv = data;
	struct ieee80211_network *network = NULL;
	/* Seed the match with the current AP so candidates must beat it */
	struct ipw_network_match match = {
		.network = priv->assoc_network
	};

	/* The roaming process is as follows:
	 *
	 * 1.  Missed beacon threshold triggers the roaming process by
	 *     setting the status ROAM bit and requesting a scan.
	 * 2.  When the scan completes, it schedules the ROAM work
	 * 3.  The ROAM work looks at all of the known networks for one that
	 *     is a better network than the currently associated.  If none
	 *     found, the ROAM process is over (ROAM bit cleared)
	 * 4.  If a better network is found, a disassociation request is
	 *     sent.
	 * 5.  When the disassociation completes, the roam work is again
	 *     scheduled.  The second time through, the driver is no longer
	 *     associated, and the newly selected network is sent an
	 *     association request.
	 * 6.  At this point ,the roaming process is complete and the ROAM
	 *     status bit is cleared.
	 */

	/* If we are no longer associated, and the roaming bit is no longer
	 * set, then we are not actively roaming, so just return */
	if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ROAMING)))
		return;

	if (priv->status & STATUS_ASSOCIATED) {
		/* First pass through ROAM process -- look for a better
		 * network */
		unsigned long flags;
		u8 rssi = priv->assoc_network->stats.rssi;
		/* Temporarily force the current AP's rssi to the worst
		 * possible value so any real candidate compares better;
		 * restored after the scan of the list below. */
		priv->assoc_network->stats.rssi = -128;
		spin_lock_irqsave(&priv->ieee->lock, flags);
		list_for_each_entry(network, &priv->ieee->network_list, list) {
			if (network != priv->assoc_network)
				ipw_best_network(priv, &match, network, 1);
		}
		spin_unlock_irqrestore(&priv->ieee->lock, flags);
		priv->assoc_network->stats.rssi = rssi;

		if (match.network == priv->assoc_network) {
			/* Nothing beat the current AP -- roam is over */
			IPW_DEBUG_ASSOC("No better APs in this network to "
					"roam to.\n");
			priv->status &= ~STATUS_ROAMING;
			ipw_debug_config(priv);
			return;
		}

		/* Leave the old AP; disassociation completion reschedules
		 * this work for the second pass. */
		ipw_send_disassociate(priv, 1);
		priv->assoc_network = match.network;

		return;
	}

	/* Second pass through ROAM process -- request association */
	ipw_compatible_rates(priv, priv->assoc_network, &match.rates);
	ipw_associate_network(priv, priv->assoc_network, &match.rates, 1);
	priv->status &= ~STATUS_ROAMING;
}
7483
/* Workqueue entry point: run the roam state machine under the driver mutex. */
static void ipw_bg_roam(struct work_struct *work)
{
	struct ipw_priv *priv =
		container_of(work, struct ipw_priv, roam);
	mutex_lock(&priv->mutex);
	ipw_roam(priv);
	mutex_unlock(&priv->mutex);
}
7492
/* Attempt to associate to the best known network (or create an ad-hoc
 * network if configured to and none was found).  Returns 1 if an
 * association attempt was started, 0 otherwise. */
static int ipw_associate(void *data)
{
	struct ipw_priv *priv = data;

	struct ieee80211_network *network = NULL;
	struct ipw_network_match match = {
		.network = NULL
	};
	struct ipw_supported_rates *rates;
	struct list_head *element;
	unsigned long flags;

	/* Guard clauses: bail out in modes/states where association is
	 * meaningless or already in flight. */
	if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
		IPW_DEBUG_ASSOC("Not attempting association (monitor mode)\n");
		return 0;
	}

	if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
		IPW_DEBUG_ASSOC("Not attempting association (already in "
				"progress)\n");
		return 0;
	}

	if (priv->status & STATUS_DISASSOCIATING) {
		/* Re-queue ourselves; retry once disassociation finishes */
		IPW_DEBUG_ASSOC("Not attempting association (in "
				"disassociating)\n ");
		queue_work(priv->workqueue, &priv->associate);
		return 0;
	}

	if (!ipw_is_init(priv) || (priv->status & STATUS_SCANNING)) {
		IPW_DEBUG_ASSOC("Not attempting association (scanning or not "
				"initialized)\n");
		return 0;
	}

	if (!(priv->config & CFG_ASSOCIATE) &&
	    !(priv->config & (CFG_STATIC_ESSID |
			      CFG_STATIC_CHANNEL | CFG_STATIC_BSSID))) {
		IPW_DEBUG_ASSOC("Not attempting association (associate=0)\n");
		return 0;
	}

	/* Protect our use of the network_list */
	spin_lock_irqsave(&priv->ieee->lock, flags);
	list_for_each_entry(network, &priv->ieee->network_list, list)
		ipw_best_network(priv, &match, network, 0);

	network = match.network;
	rates = &match.rates;

	/* No match found: optionally create our own ad-hoc network when a
	 * static ESSID and channel are configured and a free list entry
	 * is available. */
	if (network == NULL &&
	    priv->ieee->iw_mode == IW_MODE_ADHOC &&
	    priv->config & CFG_ADHOC_CREATE &&
	    priv->config & CFG_STATIC_ESSID &&
	    priv->config & CFG_STATIC_CHANNEL &&
	    !list_empty(&priv->ieee->network_free_list)) {
		element = priv->ieee->network_free_list.next;
		network = list_entry(element, struct ieee80211_network, list);
		ipw_adhoc_create(priv, network);
		rates = &priv->rates;
		/* Move the entry from the free list onto the active list */
		list_del(element);
		list_add_tail(&network->list, &priv->ieee->network_list);
	}
	spin_unlock_irqrestore(&priv->ieee->lock, flags);

	/* If we reached the end of the list, then we don't have any valid
	 * matching APs */
	if (!network) {
		ipw_debug_config(priv);

		/* Kick off another scan so we can retry later */
		if (!(priv->status & STATUS_SCANNING)) {
			if (!(priv->config & CFG_SPEED_SCAN))
				queue_delayed_work(priv->workqueue,
						   &priv->request_scan,
						   SCAN_INTERVAL);
			else
				queue_delayed_work(priv->workqueue,
						   &priv->request_scan, 0);
		}

		return 0;
	}

	ipw_associate_network(priv, network, rates, 0);

	return 1;
}
7581
/* Workqueue entry point: attempt association under the driver mutex. */
static void ipw_bg_associate(struct work_struct *work)
{
	struct ipw_priv *priv =
		container_of(work, struct ipw_priv, associate);
	mutex_lock(&priv->mutex);
	ipw_associate(priv);
	mutex_unlock(&priv->mutex);
}
7590
7591 static void ipw_rebuild_decrypted_skb(struct ipw_priv *priv,
7592 struct sk_buff *skb)
7593 {
7594 struct ieee80211_hdr *hdr;
7595 u16 fc;
7596
7597 hdr = (struct ieee80211_hdr *)skb->data;
7598 fc = le16_to_cpu(hdr->frame_ctl);
7599 if (!(fc & IEEE80211_FCTL_PROTECTED))
7600 return;
7601
7602 fc &= ~IEEE80211_FCTL_PROTECTED;
7603 hdr->frame_ctl = cpu_to_le16(fc);
7604 switch (priv->ieee->sec.level) {
7605 case SEC_LEVEL_3:
7606 /* Remove CCMP HDR */
7607 memmove(skb->data + IEEE80211_3ADDR_LEN,
7608 skb->data + IEEE80211_3ADDR_LEN + 8,
7609 skb->len - IEEE80211_3ADDR_LEN - 8);
7610 skb_trim(skb, skb->len - 16); /* CCMP_HDR_LEN + CCMP_MIC_LEN */
7611 break;
7612 case SEC_LEVEL_2:
7613 break;
7614 case SEC_LEVEL_1:
7615 /* Remove IV */
7616 memmove(skb->data + IEEE80211_3ADDR_LEN,
7617 skb->data + IEEE80211_3ADDR_LEN + 4,
7618 skb->len - IEEE80211_3ADDR_LEN - 4);
7619 skb_trim(skb, skb->len - 8); /* IV + ICV */
7620 break;
7621 case SEC_LEVEL_0:
7622 break;
7623 default:
7624 printk(KERN_ERR "Unknow security level %d\n",
7625 priv->ieee->sec.level);
7626 break;
7627 }
7628 }
7629
/* Hand a received 802.11 data frame from the hardware rx buffer up to the
 * ieee80211 stack.  On success the stack takes ownership of rxb->skb
 * (rxb->skb is set to NULL so the caller allocates a fresh one). */
static void ipw_handle_data_packet(struct ipw_priv *priv,
				   struct ipw_rx_mem_buffer *rxb,
				   struct ieee80211_rx_stats *stats)
{
	struct ieee80211_hdr_4addr *hdr;
	struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;

	/* We received data from the HW, so stop the watchdog */
	priv->net_dev->trans_start = jiffies;

	/* We only process data packets if the
	 * interface is open */
	/* Sanity check: the reported frame length must fit in the buffer */
	if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
		     skb_tailroom(rxb->skb))) {
		priv->ieee->stats.rx_errors++;
		priv->wstats.discard.misc++;
		IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
		return;
	} else if (unlikely(!netif_running(priv->net_dev))) {
		priv->ieee->stats.rx_dropped++;
		priv->wstats.discard.misc++;
		IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
		return;
	}

	/* Advance skb->data to the start of the actual payload */
	skb_reserve(rxb->skb, offsetof(struct ipw_rx_packet, u.frame.data));

	/* Set the size of the skb to the size of the frame */
	skb_put(rxb->skb, le16_to_cpu(pkt->u.frame.length));

	IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);

	/* HW decrypt will not clear the WEP bit, MIC, PN, etc. */
	hdr = (struct ieee80211_hdr_4addr *)rxb->skb->data;
	if (priv->ieee->iw_mode != IW_MODE_MONITOR &&
	    (is_multicast_ether_addr(hdr->addr1) ?
	     !priv->ieee->host_mc_decrypt : !priv->ieee->host_decrypt))
		ipw_rebuild_decrypted_skb(priv, rxb->skb);

	if (!ieee80211_rx(priv->ieee, rxb->skb, stats))
		priv->ieee->stats.rx_errors++;
	else {			/* ieee80211_rx succeeded, so it now owns the SKB */
		rxb->skb = NULL;
		__ipw_led_activity_on(priv);
	}
}
7677
7678 #ifdef CONFIG_IPW2200_RADIOTAP
/* Monitor-mode rx: rewrite the hardware rx frame in place, replacing the
 * ipw hardware header with a radiotap header, then hand it to the
 * ieee80211 stack.  On success the stack owns rxb->skb. */
static void ipw_handle_data_packet_monitor(struct ipw_priv *priv,
					   struct ipw_rx_mem_buffer *rxb,
					   struct ieee80211_rx_stats *stats)
{
	struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
	struct ipw_rx_frame *frame = &pkt->u.frame;

	/* initial pull of some data */
	/* Cache hardware header fields before memmove() below clobbers them */
	u16 received_channel = frame->received_channel;
	u8 antennaAndPhy = frame->antennaAndPhy;
	s8 antsignal = frame->rssi_dbm - IPW_RSSI_TO_DBM;	/* call it signed anyhow */
	u16 pktrate = frame->rate;

	/* Magic struct that slots into the radiotap header -- no reason
	 * to build this manually element by element, we can write it much
	 * more efficiently than we can parse it. ORDER MATTERS HERE */
	struct ipw_rt_hdr *ipw_rt;

	short len = le16_to_cpu(pkt->u.frame.length);

	/* We received data from the HW, so stop the watchdog */
	priv->net_dev->trans_start = jiffies;

	/* We only process data packets if the
	 * interface is open */
	if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
		     skb_tailroom(rxb->skb))) {
		priv->ieee->stats.rx_errors++;
		priv->wstats.discard.misc++;
		IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
		return;
	} else if (unlikely(!netif_running(priv->net_dev))) {
		priv->ieee->stats.rx_dropped++;
		priv->wstats.discard.misc++;
		IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
		return;
	}

	/* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
	 * that now */
	if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
		/* FIXME: Should alloc bigger skb instead */
		priv->ieee->stats.rx_dropped++;
		priv->wstats.discard.misc++;
		IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
		return;
	}

	/* copy the frame itself */
	/* Slide the 802.11 frame so it lands right after where the
	 * radiotap header will be written (regions may overlap) */
	memmove(rxb->skb->data + sizeof(struct ipw_rt_hdr),
		rxb->skb->data + IPW_RX_FRAME_SIZE, len);

	/* Zero the radiotap static buffer  ...  We only need to zero the bytes NOT
	 * part of our real header, saves a little time.
	 *
	 * No longer necessary since we fill in all our data.  Purge before merging
	 * patch officially.
	 * memset(rxb->skb->data + sizeof(struct ipw_rt_hdr), 0,
	 *        IEEE80211_RADIOTAP_HDRLEN - sizeof(struct ipw_rt_hdr));
	 */

	ipw_rt = (struct ipw_rt_hdr *)rxb->skb->data;

	ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
	ipw_rt->rt_hdr.it_pad = 0;	/* always good to zero */
	ipw_rt->rt_hdr.it_len = cpu_to_le16(sizeof(struct ipw_rt_hdr));	/* total header+data */

	/* Big bitfield of all the fields we provide in radiotap */
	ipw_rt->rt_hdr.it_present = cpu_to_le32(
	     (1 << IEEE80211_RADIOTAP_TSFT) |
	     (1 << IEEE80211_RADIOTAP_FLAGS) |
	     (1 << IEEE80211_RADIOTAP_RATE) |
	     (1 << IEEE80211_RADIOTAP_CHANNEL) |
	     (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
	     (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
	     (1 << IEEE80211_RADIOTAP_ANTENNA));

	/* Zero the flags, we'll add to them as we go */
	ipw_rt->rt_flags = 0;
	/* NOTE(review): only parent_tsf[0..3] are used, so at most the low
	 * 32 bits of the TSF reach the radiotap header -- confirm intended */
	ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 |
			       frame->parent_tsf[2] << 16 |
			       frame->parent_tsf[1] << 8  |
			       frame->parent_tsf[0]);

	/* Convert signal to DBM */
	ipw_rt->rt_dbmsignal = antsignal;
	ipw_rt->rt_dbmnoise = frame->noise;

	/* Convert the channel data and set the flags */
	ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(received_channel));
	if (received_channel > 14) {	/* 802.11a */
		ipw_rt->rt_chbitmask =
		    cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
	} else if (antennaAndPhy & 32) {	/* 802.11b */
		ipw_rt->rt_chbitmask =
		    cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
	} else {		/* 802.11g */
		ipw_rt->rt_chbitmask =
		    cpu_to_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
	}

	/* set the rate in multiples of 500k/s */
	switch (pktrate) {
	case IPW_TX_RATE_1MB:
		ipw_rt->rt_rate = 2;
		break;
	case IPW_TX_RATE_2MB:
		ipw_rt->rt_rate = 4;
		break;
	case IPW_TX_RATE_5MB:
		ipw_rt->rt_rate = 10;
		break;
	case IPW_TX_RATE_6MB:
		ipw_rt->rt_rate = 12;
		break;
	case IPW_TX_RATE_9MB:
		ipw_rt->rt_rate = 18;
		break;
	case IPW_TX_RATE_11MB:
		ipw_rt->rt_rate = 22;
		break;
	case IPW_TX_RATE_12MB:
		ipw_rt->rt_rate = 24;
		break;
	case IPW_TX_RATE_18MB:
		ipw_rt->rt_rate = 36;
		break;
	case IPW_TX_RATE_24MB:
		ipw_rt->rt_rate = 48;
		break;
	case IPW_TX_RATE_36MB:
		ipw_rt->rt_rate = 72;
		break;
	case IPW_TX_RATE_48MB:
		ipw_rt->rt_rate = 96;
		break;
	case IPW_TX_RATE_54MB:
		ipw_rt->rt_rate = 108;
		break;
	default:
		ipw_rt->rt_rate = 0;
		break;
	}

	/* antenna number */
	ipw_rt->rt_antenna = (antennaAndPhy & 3);	/* Is this right? */

	/* set the preamble flag if we have it */
	if ((antennaAndPhy & 64))
		ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;

	/* Set the size of the skb to the size of the frame */
	skb_put(rxb->skb, len + sizeof(struct ipw_rt_hdr));

	IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);

	if (!ieee80211_rx(priv->ieee, rxb->skb, stats))
		priv->ieee->stats.rx_errors++;
	else {			/* ieee80211_rx succeeded, so it now owns the SKB */
		rxb->skb = NULL;
		/* no LED during capture */
	}
}
7842 #endif
7843
7844 #ifdef CONFIG_IPW2200_PROMISCUOUS
/* Frame-control classification helpers for the promiscuous rx path.
 * 'fc' is the host-order 802.11 frame_ctl field. */
#define ieee80211_is_probe_response(fc) \
   ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT && \
    (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP )

#define ieee80211_is_management(fc) \
   ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)

#define ieee80211_is_control(fc) \
   ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL)

#define ieee80211_is_data(fc) \
   ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)

/* Subtype checks: these test only STYPE; caller must already know the
 * frame is a management frame. */
#define ieee80211_is_assoc_request(fc) \
   ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ)

#define ieee80211_is_reassoc_request(fc) \
   ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ)
7863
7864 static void ipw_handle_promiscuous_rx(struct ipw_priv *priv,
7865 struct ipw_rx_mem_buffer *rxb,
7866 struct ieee80211_rx_stats *stats)
7867 {
7868 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7869 struct ipw_rx_frame *frame = &pkt->u.frame;
7870 struct ipw_rt_hdr *ipw_rt;
7871
7872 /* First cache any information we need before we overwrite
7873 * the information provided in the skb from the hardware */
7874 struct ieee80211_hdr *hdr;
7875 u16 channel = frame->received_channel;
7876 u8 phy_flags = frame->antennaAndPhy;
7877 s8 signal = frame->rssi_dbm - IPW_RSSI_TO_DBM;
7878 s8 noise = frame->noise;
7879 u8 rate = frame->rate;
7880 short len = le16_to_cpu(pkt->u.frame.length);
7881 struct sk_buff *skb;
7882 int hdr_only = 0;
7883 u16 filter = priv->prom_priv->filter;
7884
7885 /* If the filter is set to not include Rx frames then return */
7886 if (filter & IPW_PROM_NO_RX)
7887 return;
7888
7889 /* We received data from the HW, so stop the watchdog */
7890 priv->prom_net_dev->trans_start = jiffies;
7891
7892 if (unlikely((len + IPW_RX_FRAME_SIZE) > skb_tailroom(rxb->skb))) {
7893 priv->prom_priv->ieee->stats.rx_errors++;
7894 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7895 return;
7896 }
7897
7898 /* We only process data packets if the interface is open */
7899 if (unlikely(!netif_running(priv->prom_net_dev))) {
7900 priv->prom_priv->ieee->stats.rx_dropped++;
7901 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7902 return;
7903 }
7904
7905 /* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
7906 * that now */
7907 if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
7908 /* FIXME: Should alloc bigger skb instead */
7909 priv->prom_priv->ieee->stats.rx_dropped++;
7910 IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
7911 return;
7912 }
7913
7914 hdr = (void *)rxb->skb->data + IPW_RX_FRAME_SIZE;
7915 if (ieee80211_is_management(le16_to_cpu(hdr->frame_ctl))) {
7916 if (filter & IPW_PROM_NO_MGMT)
7917 return;
7918 if (filter & IPW_PROM_MGMT_HEADER_ONLY)
7919 hdr_only = 1;
7920 } else if (ieee80211_is_control(le16_to_cpu(hdr->frame_ctl))) {
7921 if (filter & IPW_PROM_NO_CTL)
7922 return;
7923 if (filter & IPW_PROM_CTL_HEADER_ONLY)
7924 hdr_only = 1;
7925 } else if (ieee80211_is_data(le16_to_cpu(hdr->frame_ctl))) {
7926 if (filter & IPW_PROM_NO_DATA)
7927 return;
7928 if (filter & IPW_PROM_DATA_HEADER_ONLY)
7929 hdr_only = 1;
7930 }
7931
7932 /* Copy the SKB since this is for the promiscuous side */
7933 skb = skb_copy(rxb->skb, GFP_ATOMIC);
7934 if (skb == NULL) {
7935 IPW_ERROR("skb_clone failed for promiscuous copy.\n");
7936 return;
7937 }
7938
7939 /* copy the frame data to write after where the radiotap header goes */
7940 ipw_rt = (void *)skb->data;
7941
7942 if (hdr_only)
7943 len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
7944
7945 memcpy(ipw_rt->payload, hdr, len);
7946
7947 /* Zero the radiotap static buffer ... We only need to zero the bytes
7948 * NOT part of our real header, saves a little time.
7949 *
7950 * No longer necessary since we fill in all our data. Purge before
7951 * merging patch officially.
7952 * memset(rxb->skb->data + sizeof(struct ipw_rt_hdr), 0,
7953 * IEEE80211_RADIOTAP_HDRLEN - sizeof(struct ipw_rt_hdr));
7954 */
7955
7956 ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
7957 ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */
7958 ipw_rt->rt_hdr.it_len = cpu_to_le16(sizeof(*ipw_rt)); /* total header+data */
7959
7960 /* Set the size of the skb to the size of the frame */
7961 skb_put(skb, sizeof(*ipw_rt) + len);
7962
7963 /* Big bitfield of all the fields we provide in radiotap */
7964 ipw_rt->rt_hdr.it_present = cpu_to_le32(
7965 (1 << IEEE80211_RADIOTAP_TSFT) |
7966 (1 << IEEE80211_RADIOTAP_FLAGS) |
7967 (1 << IEEE80211_RADIOTAP_RATE) |
7968 (1 << IEEE80211_RADIOTAP_CHANNEL) |
7969 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
7970 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
7971 (1 << IEEE80211_RADIOTAP_ANTENNA));
7972
7973 /* Zero the flags, we'll add to them as we go */
7974 ipw_rt->rt_flags = 0;
7975 ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 |
7976 frame->parent_tsf[2] << 16 |
7977 frame->parent_tsf[1] << 8 |
7978 frame->parent_tsf[0]);
7979
7980 /* Convert to DBM */
7981 ipw_rt->rt_dbmsignal = signal;
7982 ipw_rt->rt_dbmnoise = noise;
7983
7984 /* Convert the channel data and set the flags */
7985 ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(channel));
7986 if (channel > 14) { /* 802.11a */
7987 ipw_rt->rt_chbitmask =
7988 cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
7989 } else if (phy_flags & (1 << 5)) { /* 802.11b */
7990 ipw_rt->rt_chbitmask =
7991 cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
7992 } else { /* 802.11g */
7993 ipw_rt->rt_chbitmask =
7994 cpu_to_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
7995 }
7996
7997 /* set the rate in multiples of 500k/s */
7998 switch (rate) {
7999 case IPW_TX_RATE_1MB:
8000 ipw_rt->rt_rate = 2;
8001 break;
8002 case IPW_TX_RATE_2MB:
8003 ipw_rt->rt_rate = 4;
8004 break;
8005 case IPW_TX_RATE_5MB:
8006 ipw_rt->rt_rate = 10;
8007 break;
8008 case IPW_TX_RATE_6MB:
8009 ipw_rt->rt_rate = 12;
8010 break;
8011 case IPW_TX_RATE_9MB:
8012 ipw_rt->rt_rate = 18;
8013 break;
8014 case IPW_TX_RATE_11MB:
8015 ipw_rt->rt_rate = 22;
8016 break;
8017 case IPW_TX_RATE_12MB:
8018 ipw_rt->rt_rate = 24;
8019 break;
8020 case IPW_TX_RATE_18MB:
8021 ipw_rt->rt_rate = 36;
8022 break;
8023 case IPW_TX_RATE_24MB:
8024 ipw_rt->rt_rate = 48;
8025 break;
8026 case IPW_TX_RATE_36MB:
8027 ipw_rt->rt_rate = 72;
8028 break;
8029 case IPW_TX_RATE_48MB:
8030 ipw_rt->rt_rate = 96;
8031 break;
8032 case IPW_TX_RATE_54MB:
8033 ipw_rt->rt_rate = 108;
8034 break;
8035 default:
8036 ipw_rt->rt_rate = 0;
8037 break;
8038 }
8039
8040 /* antenna number */
8041 ipw_rt->rt_antenna = (phy_flags & 3);
8042
8043 /* set the preamble flag if we have it */
8044 if (phy_flags & (1 << 6))
8045 ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
8046
8047 IPW_DEBUG_RX("Rx packet of %d bytes.\n", skb->len);
8048
8049 if (!ieee80211_rx(priv->prom_priv->ieee, skb, stats)) {
8050 priv->prom_priv->ieee->stats.rx_errors++;
8051 dev_kfree_skb_any(skb);
8052 }
8053 }
8054 #endif
8055
8056 static int is_network_packet(struct ipw_priv *priv,
8057 struct ieee80211_hdr_4addr *header)
8058 {
8059 /* Filter incoming packets to determine if they are targetted toward
8060 * this network, discarding packets coming from ourselves */
8061 switch (priv->ieee->iw_mode) {
8062 case IW_MODE_ADHOC: /* Header: Dest. | Source | BSSID */
8063 /* packets from our adapter are dropped (echo) */
8064 if (!memcmp(header->addr2, priv->net_dev->dev_addr, ETH_ALEN))
8065 return 0;
8066
8067 /* {broad,multi}cast packets to our BSSID go through */
8068 if (is_multicast_ether_addr(header->addr1))
8069 return !memcmp(header->addr3, priv->bssid, ETH_ALEN);
8070
8071 /* packets to our adapter go through */
8072 return !memcmp(header->addr1, priv->net_dev->dev_addr,
8073 ETH_ALEN);
8074
8075 case IW_MODE_INFRA: /* Header: Dest. | BSSID | Source */
8076 /* packets from our adapter are dropped (echo) */
8077 if (!memcmp(header->addr3, priv->net_dev->dev_addr, ETH_ALEN))
8078 return 0;
8079
8080 /* {broad,multi}cast packets to our BSS go through */
8081 if (is_multicast_ether_addr(header->addr1))
8082 return !memcmp(header->addr2, priv->bssid, ETH_ALEN);
8083
8084 /* packets to our adapter go through */
8085 return !memcmp(header->addr1, priv->net_dev->dev_addr,
8086 ETH_ALEN);
8087 }
8088
8089 return 1;
8090 }
8091
/* Window within which a repeated sequence number counts as a retry */
#define IPW_PACKET_RETRY_TIME HZ

/* Detect retransmitted/duplicate frames using the 802.11 sequence control
 * field.  In IBSS mode the last seq/frag is tracked per peer MAC in a
 * small hash of lists; in infrastructure mode a single per-device record
 * suffices (only the AP sends to us).  Returns 1 to drop the frame. */
static int is_duplicate_packet(struct ipw_priv *priv,
			       struct ieee80211_hdr_4addr *header)
{
	u16 sc = le16_to_cpu(header->seq_ctl);
	u16 seq = WLAN_GET_SEQ_SEQ(sc);
	u16 frag = WLAN_GET_SEQ_FRAG(sc);
	u16 *last_seq, *last_frag;
	unsigned long *last_time;

	switch (priv->ieee->iw_mode) {
	case IW_MODE_ADHOC:
		{
			struct list_head *p;
			struct ipw_ibss_seq *entry = NULL;
			u8 *mac = header->addr2;
			/* Hash on the low byte of the sender's MAC */
			int index = mac[5] % IPW_IBSS_MAC_HASH_SIZE;

			__list_for_each(p, &priv->ibss_mac_hash[index]) {
				entry =
				    list_entry(p, struct ipw_ibss_seq, list);
				if (!memcmp(entry->mac, mac, ETH_ALEN))
					break;
			}
			if (p == &priv->ibss_mac_hash[index]) {
				/* First frame from this peer: record it and
				 * accept (cannot be a duplicate) */
				entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
				if (!entry) {
					IPW_ERROR
					    ("Cannot malloc new mac entry\n");
					return 0;
				}
				memcpy(entry->mac, mac, ETH_ALEN);
				entry->seq_num = seq;
				entry->frag_num = frag;
				entry->packet_time = jiffies;
				list_add(&entry->list,
					 &priv->ibss_mac_hash[index]);
				return 0;
			}
			last_seq = &entry->seq_num;
			last_frag = &entry->frag_num;
			last_time = &entry->packet_time;
			break;
		}
	case IW_MODE_INFRA:
		last_seq = &priv->last_seq_num;
		last_frag = &priv->last_frag_num;
		last_time = &priv->last_packet_time;
		break;
	default:
		return 0;
	}
	/* Same sequence number seen recently: either an exact duplicate
	 * fragment or an out-of-order fragment -- both are dropped */
	if ((*last_seq == seq) &&
	    time_after(*last_time + IPW_PACKET_RETRY_TIME, jiffies)) {
		if (*last_frag == frag)
			goto drop;
		if (*last_frag + 1 != frag)
			/* out-of-order fragment */
			goto drop;
	} else
		*last_seq = seq;

	*last_frag = frag;
	*last_time = jiffies;
	return 0;

      drop:
	/* Comment this line now since we observed the card receives
	 * duplicate packets but the FCTL_RETRY bit is not set in the
	 * IBSS mode with fragmentation enabled.
	 BUG_ON(!(le16_to_cpu(header->frame_ctl) & IEEE80211_FCTL_RETRY)); */
	return 1;
}
8166
/* Process a received 802.11 management frame: pass it to the ieee80211
 * management handler, track IBSS stations from beacons/probe responses,
 * and optionally forward the raw frame for statistics gathering. */
static void ipw_handle_mgmt_packet(struct ipw_priv *priv,
				   struct ipw_rx_mem_buffer *rxb,
				   struct ieee80211_rx_stats *stats)
{
	struct sk_buff *skb = rxb->skb;
	struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)skb->data;
	struct ieee80211_hdr_4addr *header = (struct ieee80211_hdr_4addr *)
	    (skb->data + IPW_RX_FRAME_SIZE);

	ieee80211_rx_mgt(priv->ieee, header, stats);

	/* In ad-hoc mode, a beacon or probe response from our own BSSID
	 * identifies a peer station to add to the firmware station table */
	if (priv->ieee->iw_mode == IW_MODE_ADHOC &&
	    ((WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
	      IEEE80211_STYPE_PROBE_RESP) ||
	     (WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
	      IEEE80211_STYPE_BEACON))) {
		if (!memcmp(header->addr3, priv->bssid, ETH_ALEN))
			ipw_add_station(priv, header->addr2);
	}

	if (priv->config & CFG_NET_STATS) {
		IPW_DEBUG_HC("sending stat packet\n");

		/* Set the size of the skb to the size of the full
		 * ipw header and 802.11 frame */
		skb_put(skb, le16_to_cpu(pkt->u.frame.length) +
			IPW_RX_FRAME_SIZE);

		/* Advance past the ipw packet header to the 802.11 frame */
		skb_pull(skb, IPW_RX_FRAME_SIZE);

		/* Push the ieee80211_rx_stats before the 802.11 frame */
		memcpy(skb_push(skb, sizeof(*stats)), stats, sizeof(*stats));

		skb->dev = priv->ieee->dev;

		/* Point raw at the ieee80211_stats */
		skb_reset_mac_header(skb);

		skb->pkt_type = PACKET_OTHERHOST;
		skb->protocol = __constant_htons(ETH_P_80211_STATS);
		memset(skb->cb, 0, sizeof(rxb->skb->cb));
		netif_rx(skb);
		/* netif_rx() consumed the skb; force a fresh allocation */
		rxb->skb = NULL;
	}
}
8213
8214 /*
8215 * Main entry function for recieving a packet with 80211 headers. This
8216 * should be called when ever the FW has notified us that there is a new
8217 * skb in the recieve queue.
8218 */
static void ipw_rx(struct ipw_priv *priv)
{
	struct ipw_rx_mem_buffer *rxb;
	struct ipw_rx_packet *pkt;
	struct ieee80211_hdr_4addr *header;
	u32 r, w, i;
	u8 network_packet;
	DECLARE_MAC_BUF(mac);
	DECLARE_MAC_BUF(mac2);
	DECLARE_MAC_BUF(mac3);

	/* Walk the rx ring from the last processed entry up to the
	 * hardware read index */
	r = ipw_read32(priv, IPW_RX_READ_INDEX);
	w = ipw_read32(priv, IPW_RX_WRITE_INDEX);
	i = (priv->rxq->processed + 1) % RX_QUEUE_SIZE;

	while (i != r) {
		rxb = priv->rxq->queue[i];
		if (unlikely(rxb == NULL)) {
			printk(KERN_CRIT "Queue not allocated!\n");
			break;
		}
		priv->rxq->queue[i] = NULL;

		/* Make the DMA-written buffer visible to the CPU before
		 * reading it */
		pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
					    IPW_RX_BUF_SIZE,
					    PCI_DMA_FROMDEVICE);

		pkt = (struct ipw_rx_packet *)rxb->skb->data;
		IPW_DEBUG_RX("Packet: type=%02X seq=%02X bits=%02X\n",
			     pkt->header.message_type,
			     pkt->header.rx_seq_num, pkt->header.control_bits);

		switch (pkt->header.message_type) {
		case RX_FRAME_TYPE:	/* 802.11 frame */  {
				/* Translate the hardware rx metadata into
				 * ieee80211 rx stats */
				struct ieee80211_rx_stats stats = {
					.rssi = pkt->u.frame.rssi_dbm -
					    IPW_RSSI_TO_DBM,
					.signal =
					    le16_to_cpu(pkt->u.frame.rssi_dbm) -
					    IPW_RSSI_TO_DBM + 0x100,
					.noise =
					    le16_to_cpu(pkt->u.frame.noise),
					.rate = pkt->u.frame.rate,
					.mac_time = jiffies,
					.received_channel =
					    pkt->u.frame.received_channel,
					.freq =
					    (pkt->u.frame.
					     control & (1 << 0)) ?
					    IEEE80211_24GHZ_BAND :
					    IEEE80211_52GHZ_BAND,
					.len = le16_to_cpu(pkt->u.frame.length),
				};

				if (stats.rssi != 0)
					stats.mask |= IEEE80211_STATMASK_RSSI;
				if (stats.signal != 0)
					stats.mask |= IEEE80211_STATMASK_SIGNAL;
				if (stats.noise != 0)
					stats.mask |= IEEE80211_STATMASK_NOISE;
				if (stats.rate != 0)
					stats.mask |= IEEE80211_STATMASK_RATE;

				priv->rx_packets++;

#ifdef CONFIG_IPW2200_PROMISCUOUS
	/* Promiscuous path works on a copy; the original skb continues
	 * through the normal handlers below */
	if (priv->prom_net_dev && netif_running(priv->prom_net_dev))
		ipw_handle_promiscuous_rx(priv, rxb, &stats);
#endif

#ifdef CONFIG_IPW2200_MONITOR
				if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
#ifdef CONFIG_IPW2200_RADIOTAP

                ipw_handle_data_packet_monitor(priv,
					       rxb,
					       &stats);
#else
					ipw_handle_data_packet(priv, rxb,
							       &stats);
#endif
					break;
				}
#endif

				header =
				    (struct ieee80211_hdr_4addr *)(rxb->skb->
								   data +
								   IPW_RX_FRAME_SIZE);
				/* TODO: Check Ad-Hoc dest/source and make sure
				 * that we are actually parsing these packets
				 * correctly -- we should probably use the
				 * frame control of the packet and disregard
				 * the current iw_mode */

				network_packet =
				    is_network_packet(priv, header);
				/* Feed the rssi of frames from our network
				 * into the roaming average */
				if (network_packet && priv->assoc_network) {
					priv->assoc_network->stats.rssi =
					    stats.rssi;
					priv->exp_avg_rssi =
					    exponential_average(priv->exp_avg_rssi,
					    stats.rssi, DEPTH_RSSI);
				}

				IPW_DEBUG_RX("Frame: len=%u\n",
					     le16_to_cpu(pkt->u.frame.length));

				/* Frames shorter than their own 802.11
				 * header are corrupt */
				if (le16_to_cpu(pkt->u.frame.length) <
				    ieee80211_get_hdrlen(le16_to_cpu(
						    header->frame_ctl))) {
					IPW_DEBUG_DROP
					    ("Received packet is too small. "
					     "Dropping.\n");
					priv->ieee->stats.rx_errors++;
					priv->wstats.discard.misc++;
					break;
				}

				switch (WLAN_FC_GET_TYPE
					(le16_to_cpu(header->frame_ctl))) {

				case IEEE80211_FTYPE_MGMT:
					ipw_handle_mgmt_packet(priv, rxb,
							       &stats);
					break;

				case IEEE80211_FTYPE_CTL:
					break;

				case IEEE80211_FTYPE_DATA:
					if (unlikely(!network_packet ||
						     is_duplicate_packet(priv,
								     header)))
					{
						IPW_DEBUG_DROP("Dropping: "
							       "%s, "
							       "%s, "
							       "%s\n",
							       print_mac(mac,
									 header->
									 addr1),
							       print_mac(mac2,
									 header->
									 addr2),
							       print_mac(mac3,
									 header->
									 addr3));
						break;
					}

					ipw_handle_data_packet(priv, rxb,
							       &stats);

					break;
				}
				break;
			}

		case RX_HOST_NOTIFICATION_TYPE:{
				IPW_DEBUG_RX
				    ("Notification: subtype=%02X flags=%02X size=%d\n",
				     pkt->u.notification.subtype,
				     pkt->u.notification.flags,
				     le16_to_cpu(pkt->u.notification.size));
				ipw_rx_notification(priv, &pkt->u.notification);
				break;
			}

		default:
			IPW_DEBUG_RX("Bad Rx packet of type %d\n",
				     pkt->header.message_type);
			break;
		}

		/* For now we just don't re-use anything.  We can tweak this
		 * later to try and re-use notification packets and SKBs that
		 * fail to Rx correctly */
		/* A handler that kept the skb set rxb->skb = NULL; otherwise
		 * free it here so the buffer can be re-stocked */
		if (rxb->skb != NULL) {
			dev_kfree_skb_any(rxb->skb);
			rxb->skb = NULL;
		}

		pci_unmap_single(priv->pci_dev, rxb->dma_addr,
				 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
		list_add_tail(&rxb->list, &priv->rxq->rx_used);

		i = (i + 1) % RX_QUEUE_SIZE;
	}

	/* Backtrack one entry */
	priv->rxq->processed = (i ? i : RX_QUEUE_SIZE) - 1;

	ipw_rx_queue_restock(priv);
}
8414
8415 #define DEFAULT_RTS_THRESHOLD 2304U
8416 #define MIN_RTS_THRESHOLD 1U
8417 #define MAX_RTS_THRESHOLD 2304U
8418 #define DEFAULT_BEACON_INTERVAL 100U
8419 #define DEFAULT_SHORT_RETRY_LIMIT 7U
8420 #define DEFAULT_LONG_RETRY_LIMIT 4U
8421
/**
 * ipw_sw_reset - reset driver configuration back to module-parameter defaults
 * @option: options to control different reset behaviour
 *	    0 = reset everything except the 'disable' module_param
 *	    1 = reset everything and print out driver info (for probe only)
 *	    2 = reset everything
 *
 * Re-derives priv->config, operating mode, band/modulation and threshold
 * defaults from the module parameters and the detected PCI device ID.
 * Returns non-zero when the iw_mode is unchanged by the reset.
 */
static int ipw_sw_reset(struct ipw_priv *priv, int option)
{
	int band, modulation;
	int old_mode = priv->ieee->iw_mode;

	/* Initialize module parameter values here */
	priv->config = 0;

	/* We default to disabling the LED code as right now it causes
	 * too many systems to lock up... */
	if (!led)
		priv->config |= CFG_NO_LED;

	if (associate)
		priv->config |= CFG_ASSOCIATE;
	else
		IPW_DEBUG_INFO("Auto associate disabled.\n");

	if (auto_create)
		priv->config |= CFG_ADHOC_CREATE;
	else
		IPW_DEBUG_INFO("Auto adhoc creation disabled.\n");

	/* A reset always clears any statically configured ESSID */
	priv->config &= ~CFG_STATIC_ESSID;
	priv->essid_len = 0;
	memset(priv->essid, 0, IW_ESSID_MAX_SIZE);

	/* The 'disable' module_param only applies for option != 0 */
	if (disable && option) {
		priv->status |= STATUS_RF_KILL_SW;
		IPW_DEBUG_INFO("Radio disabled.\n");
	}

	if (channel != 0) {
		priv->config |= CFG_STATIC_CHANNEL;
		priv->channel = channel;
		IPW_DEBUG_INFO("Bind to static channel %d\n", channel);
		/* TODO: Validate that provided channel is in range */
	}
#ifdef CONFIG_IPW2200_QOS
	ipw_qos_init(priv, qos_enable, qos_burst_enable,
		     burst_duration_CCK, burst_duration_OFDM);
#endif				/* CONFIG_IPW2200_QOS */

	/* Map the 'mode' module_param onto iw_mode and the netdev ARP type */
	switch (mode) {
	case 1:
		priv->ieee->iw_mode = IW_MODE_ADHOC;
		priv->net_dev->type = ARPHRD_ETHER;

		break;
#ifdef CONFIG_IPW2200_MONITOR
	case 2:
		priv->ieee->iw_mode = IW_MODE_MONITOR;
#ifdef CONFIG_IPW2200_RADIOTAP
		priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
#else
		priv->net_dev->type = ARPHRD_IEEE80211;
#endif
		break;
#endif
	default:
	case 0:
		priv->net_dev->type = ARPHRD_ETHER;
		priv->ieee->iw_mode = IW_MODE_INFRA;
		break;
	}

	/* With hardware crypto, all host-side crypto paths are turned off */
	if (hwcrypto) {
		priv->ieee->host_encrypt = 0;
		priv->ieee->host_encrypt_msdu = 0;
		priv->ieee->host_decrypt = 0;
		priv->ieee->host_mc_decrypt = 0;
	}
	IPW_DEBUG_INFO("Hardware crypto [%s]\n", hwcrypto ? "on" : "off");

	/* IPW2200/2915 is able to do hardware fragmentation. */
	priv->ieee->host_open_frag = 0;

	/* PCI device IDs 0x4223/0x4224 are the dual-band 2915ABG parts;
	 * everything else handled by this driver is the 2.4GHz-only 2200BG */
	if ((priv->pci_dev->device == 0x4223) ||
	    (priv->pci_dev->device == 0x4224)) {
		if (option == 1)
			printk(KERN_INFO DRV_NAME
			       ": Detected Intel PRO/Wireless 2915ABG Network "
			       "Connection\n");
		priv->ieee->abg_true = 1;
		band = IEEE80211_52GHZ_BAND | IEEE80211_24GHZ_BAND;
		modulation = IEEE80211_OFDM_MODULATION |
		    IEEE80211_CCK_MODULATION;
		priv->adapter = IPW_2915ABG;
		priv->ieee->mode = IEEE_A | IEEE_G | IEEE_B;
	} else {
		if (option == 1)
			printk(KERN_INFO DRV_NAME
			       ": Detected Intel PRO/Wireless 2200BG Network "
			       "Connection\n");

		priv->ieee->abg_true = 0;
		band = IEEE80211_24GHZ_BAND;
		modulation = IEEE80211_OFDM_MODULATION |
		    IEEE80211_CCK_MODULATION;
		priv->adapter = IPW_2200BG;
		priv->ieee->mode = IEEE_G | IEEE_B;
	}

	priv->ieee->freq_band = band;
	priv->ieee->modulation = modulation;

	priv->rates_mask = IEEE80211_DEFAULT_RATES_MASK;

	priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
	priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;

	priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
	priv->short_retry_limit = DEFAULT_SHORT_RETRY_LIMIT;
	priv->long_retry_limit = DEFAULT_LONG_RETRY_LIMIT;

	/* If power management is turned on, default to AC mode */
	priv->power_mode = IPW_POWER_AC;
	priv->tx_power = IPW_TX_POWER_DEFAULT;

	return old_mode == priv->ieee->iw_mode;
}
8550
8551 /*
8552 * This file defines the Wireless Extension handlers. It does not
8553 * define any methods of hardware manipulation and relies on the
8554 * functions defined in ipw_main to provide the HW interaction.
8555 *
8556 * The exception to this is the use of the ipw_get_ordinal()
 * function used to poll the hardware vs. making unnecessary calls.
8558 *
8559 */
8560
8561 static int ipw_wx_get_name(struct net_device *dev,
8562 struct iw_request_info *info,
8563 union iwreq_data *wrqu, char *extra)
8564 {
8565 struct ipw_priv *priv = ieee80211_priv(dev);
8566 mutex_lock(&priv->mutex);
8567 if (priv->status & STATUS_RF_KILL_MASK)
8568 strcpy(wrqu->name, "radio off");
8569 else if (!(priv->status & STATUS_ASSOCIATED))
8570 strcpy(wrqu->name, "unassociated");
8571 else
8572 snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11%c",
8573 ipw_modes[priv->assoc_request.ieee_mode]);
8574 IPW_DEBUG_WX("Name: %s\n", wrqu->name);
8575 mutex_unlock(&priv->mutex);
8576 return 0;
8577 }
8578
/* Bind the interface to a fixed channel, or to ANY when channel == 0.
 * Called with priv->mutex held (see ipw_wx_set_freq).  Always returns 0. */
static int ipw_set_channel(struct ipw_priv *priv, u8 channel)
{
	if (channel == 0) {
		IPW_DEBUG_INFO("Setting channel to ANY (0)\n");
		priv->config &= ~CFG_STATIC_CHANNEL;
		IPW_DEBUG_ASSOC("Attempting to associate with new "
				"parameters.\n");
		ipw_associate(priv);
		return 0;
	}

	priv->config |= CFG_STATIC_CHANNEL;

	if (priv->channel == channel) {
		IPW_DEBUG_INFO("Request to set channel to current value (%d)\n",
			       channel);
		return 0;
	}

	IPW_DEBUG_INFO("Setting channel to %i\n", (int)channel);
	priv->channel = channel;

#ifdef CONFIG_IPW2200_MONITOR
	/* In monitor mode there is no association to redo; just abort any
	 * scan in progress and busy-wait (up to 1000 * 10us) for it to stop. */
	if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
		int i;
		if (priv->status & STATUS_SCANNING) {
			IPW_DEBUG_SCAN("Scan abort triggered due to "
				       "channel change.\n");
			ipw_abort_scan(priv);
		}

		for (i = 1000; i && (priv->status & STATUS_SCANNING); i--)
			udelay(10);

		if (priv->status & STATUS_SCANNING)
			IPW_DEBUG_SCAN("Still scanning...\n");
		else
			/* NOTE(review): each iteration is 10us, so the "ms"
			 * label below overstates the elapsed time by 100x —
			 * confirm intended units before relying on this log. */
			IPW_DEBUG_SCAN("Took %dms to abort current scan\n",
				       1000 - i);

		return 0;
	}
#endif				/* CONFIG_IPW2200_MONITOR */

	/* Network configuration changed -- force [re]association */
	IPW_DEBUG_ASSOC("[re]association triggered due to channel change.\n");
	if (!ipw_disassociate(priv))
		ipw_associate(priv);

	return 0;
}
8630
/* SIOCSIWFREQ handler: accept either a channel number or a frequency
 * (exponent e == 1), validate it against the regulatory geography, and
 * hand off to ipw_set_channel() under the driver mutex. */
static int ipw_wx_set_freq(struct net_device *dev,
			   struct iw_request_info *info,
			   union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
	struct iw_freq *fwrq = &wrqu->freq;
	int ret = 0, i;
	u8 channel, flags;
	int band;

	/* m == 0 means "any channel" */
	if (fwrq->m == 0) {
		IPW_DEBUG_WX("SET Freq/Channel -> any\n");
		mutex_lock(&priv->mutex);
		ret = ipw_set_channel(priv, 0);
		mutex_unlock(&priv->mutex);
		return ret;
	}
	/* if setting by freq convert to channel */
	if (fwrq->e == 1) {
		channel = ieee80211_freq_to_channel(priv->ieee, fwrq->m);
		if (channel == 0)
			return -EINVAL;
	} else
		channel = fwrq->m;

	/* ieee80211_is_valid_channel() returns the band on success, 0 on
	 * failure, so 'band' doubles as the validity check result */
	if (!(band = ieee80211_is_valid_channel(priv->ieee, channel)))
		return -EINVAL;

	/* Ad-hoc networks may not be started on passive-scan-only channels */
	if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
		i = ieee80211_channel_to_index(priv->ieee, channel);
		if (i == -1)
			return -EINVAL;

		flags = (band == IEEE80211_24GHZ_BAND) ?
		    geo->bg[i].flags : geo->a[i].flags;
		if (flags & IEEE80211_CH_PASSIVE_ONLY) {
			IPW_DEBUG_WX("Invalid Ad-Hoc channel for 802.11a\n");
			return -EINVAL;
		}
	}

	IPW_DEBUG_WX("SET Freq/Channel -> %d \n", fwrq->m);
	mutex_lock(&priv->mutex);
	ret = ipw_set_channel(priv, channel);
	mutex_unlock(&priv->mutex);
	return ret;
}
8679
/* SIOCGIWFREQ handler: report the current channel's frequency, or 0 ("ANY")
 * when no channel is pinned and no association is in progress. */
static int ipw_wx_get_freq(struct net_device *dev,
			   struct iw_request_info *info,
			   union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = ieee80211_priv(dev);

	wrqu->freq.e = 0;

	/* If we are associated, trying to associate, or have a statically
	 * configured CHANNEL then return that; otherwise return ANY */
	mutex_lock(&priv->mutex);
	if (priv->config & CFG_STATIC_CHANNEL ||
	    priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED)) {
		int i;

		i = ieee80211_channel_to_index(priv->ieee, priv->channel);
		/* priv->channel is driver-maintained, so a failed lookup
		 * indicates internal state corruption */
		BUG_ON(i == -1);
		wrqu->freq.e = 1;

		/* geo freq table entries scaled by 100000 with e == 1;
		 * presumably the table holds MHz so this yields Hz — confirm
		 * against the ieee80211 geo definition */
		switch (ieee80211_is_valid_channel(priv->ieee, priv->channel)) {
		case IEEE80211_52GHZ_BAND:
			wrqu->freq.m = priv->ieee->geo.a[i].freq * 100000;
			break;

		case IEEE80211_24GHZ_BAND:
			wrqu->freq.m = priv->ieee->geo.bg[i].freq * 100000;
			break;

		default:
			BUG();
		}
	} else
		wrqu->freq.m = 0;

	mutex_unlock(&priv->mutex);
	IPW_DEBUG_WX("GET Freq/Channel -> %d \n", priv->channel);
	return 0;
}
8718
/* SIOCSIWMODE handler: validate the requested mode, reset the driver
 * configuration, adjust the netdev ARP type for monitor mode, and schedule
 * an adapter restart so the matching firmware gets loaded. */
static int ipw_wx_set_mode(struct net_device *dev,
			   struct iw_request_info *info,
			   union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	int err = 0;

	IPW_DEBUG_WX("Set MODE: %d\n", wrqu->mode);

	switch (wrqu->mode) {
#ifdef CONFIG_IPW2200_MONITOR
	case IW_MODE_MONITOR:
#endif
	case IW_MODE_ADHOC:
	case IW_MODE_INFRA:
		break;
	case IW_MODE_AUTO:
		/* AUTO is treated as infrastructure mode */
		wrqu->mode = IW_MODE_INFRA;
		break;
	default:
		return -EINVAL;
	}
	/* No work needed if we are already in the requested mode */
	if (wrqu->mode == priv->ieee->iw_mode)
		return 0;

	mutex_lock(&priv->mutex);

	ipw_sw_reset(priv, 0);

#ifdef CONFIG_IPW2200_MONITOR
	/* Leaving monitor mode restores the normal Ethernet ARP type;
	 * entering it switches to the 802.11 (radiotap) type */
	if (priv->ieee->iw_mode == IW_MODE_MONITOR)
		priv->net_dev->type = ARPHRD_ETHER;

	if (wrqu->mode == IW_MODE_MONITOR)
#ifdef CONFIG_IPW2200_RADIOTAP
		priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
#else
		priv->net_dev->type = ARPHRD_IEEE80211;
#endif
#endif				/* CONFIG_IPW2200_MONITOR */

	/* Free the existing firmware and reset the fw_loaded
	 * flag so ipw_load() will bring in the new firmware */
	free_firmware();

	priv->ieee->iw_mode = wrqu->mode;

	queue_work(priv->workqueue, &priv->adapter_restart);
	mutex_unlock(&priv->mutex);
	return err;
}
8770
8771 static int ipw_wx_get_mode(struct net_device *dev,
8772 struct iw_request_info *info,
8773 union iwreq_data *wrqu, char *extra)
8774 {
8775 struct ipw_priv *priv = ieee80211_priv(dev);
8776 mutex_lock(&priv->mutex);
8777 wrqu->mode = priv->ieee->iw_mode;
8778 IPW_DEBUG_WX("Get MODE -> %d\n", wrqu->mode);
8779 mutex_unlock(&priv->mutex);
8780 return 0;
8781 }
8782
/* Values are in microseconds.  Presumably indexed by the IPW power level
 * used by the power-management code elsewhere in this file — confirm
 * against the callers before changing the table lengths. */
static const s32 timeout_duration[] = {
	350000,
	250000,
	75000,
	37000,
	25000,
};

static const s32 period_duration[] = {
	400000,
	700000,
	1000000,
	1000000,
	1000000
};
8799
/* SIOCGIWRANGE handler: fill in the iw_range structure describing the
 * hardware's capabilities (bitrates, thresholds, WEP key sizes, supported
 * channels per band, WE event/encryption/scan capabilities). */
static int ipw_wx_get_range(struct net_device *dev,
			    struct iw_request_info *info,
			    union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	struct iw_range *range = (struct iw_range *)extra;
	const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
	int i = 0, j;

	wrqu->data.length = sizeof(*range);
	memset(range, 0, sizeof(*range));

	/* 54Mbs == ~27 Mb/s real (802.11g) */
	range->throughput = 27 * 1000 * 1000;

	range->max_qual.qual = 100;
	/* TODO: Find real max RSSI and stick here */
	range->max_qual.level = 0;
	range->max_qual.noise = 0;
	range->max_qual.updated = 7;	/* Updated all three */

	range->avg_qual.qual = 70;
	/* TODO: Find real 'good' to 'bad' threshold value for RSSI */
	range->avg_qual.level = 0;	/* FIXME to real average level */
	range->avg_qual.noise = 0;
	range->avg_qual.updated = 7;	/* Updated all three */
	mutex_lock(&priv->mutex);
	range->num_bitrates = min(priv->rates.num_rates, (u8) IW_MAX_BITRATES);

	/* supported_rates entries are in 500kb/s units; bit 0x80 (the
	 * "basic rate" flag) is masked off before scaling to b/s */
	for (i = 0; i < range->num_bitrates; i++)
		range->bitrate[i] = (priv->rates.supported_rates[i] & 0x7F) *
		    500000;

	range->max_rts = DEFAULT_RTS_THRESHOLD;
	range->min_frag = MIN_FRAG_THRESHOLD;
	range->max_frag = MAX_FRAG_THRESHOLD;

	/* WEP: 40-bit (5 byte) and 104-bit (13 byte) keys */
	range->encoding_size[0] = 5;
	range->encoding_size[1] = 13;
	range->num_encoding_sizes = 2;
	range->max_encoding_tokens = WEP_KEYS;

	/* Set the Wireless Extension versions */
	range->we_version_compiled = WIRELESS_EXT;
	range->we_version_source = 18;

	/* Collect the channel list; in ad-hoc mode passive-scan-only
	 * channels are omitted since we cannot start a network there */
	i = 0;
	if (priv->ieee->mode & (IEEE_B | IEEE_G)) {
		for (j = 0; j < geo->bg_channels && i < IW_MAX_FREQUENCIES; j++) {
			if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
			    (geo->bg[j].flags & IEEE80211_CH_PASSIVE_ONLY))
				continue;

			range->freq[i].i = geo->bg[j].channel;
			range->freq[i].m = geo->bg[j].freq * 100000;
			range->freq[i].e = 1;
			i++;
		}
	}

	if (priv->ieee->mode & IEEE_A) {
		for (j = 0; j < geo->a_channels && i < IW_MAX_FREQUENCIES; j++) {
			if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
			    (geo->a[j].flags & IEEE80211_CH_PASSIVE_ONLY))
				continue;

			range->freq[i].i = geo->a[j].channel;
			range->freq[i].m = geo->a[j].freq * 100000;
			range->freq[i].e = 1;
			i++;
		}
	}

	range->num_channels = i;
	range->num_frequency = i;

	mutex_unlock(&priv->mutex);

	/* Event capability (kernel + driver) */
	range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
				IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) |
				IW_EVENT_CAPA_MASK(SIOCGIWAP) |
				IW_EVENT_CAPA_MASK(SIOCGIWSCAN));
	range->event_capa[1] = IW_EVENT_CAPA_K_1;

	range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
		IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;

	range->scan_capa = IW_SCAN_CAPA_ESSID | IW_SCAN_CAPA_TYPE;

	IPW_DEBUG_WX("GET Range\n");
	return 0;
}
8893
/* SIOCSIWAP handler: pin the association to a specific BSSID, or clear the
 * static BSSID when the broadcast or all-zero address is given. */
static int ipw_wx_set_wap(struct net_device *dev,
			  struct iw_request_info *info,
			  union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	DECLARE_MAC_BUF(mac);

	/* ff:ff:ff:ff:ff:ff and 00:00:00:00:00:00 both mean "any AP" */
	static const unsigned char any[] = {
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff
	};
	static const unsigned char off[] = {
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00
	};

	if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
		return -EINVAL;
	mutex_lock(&priv->mutex);
	if (!memcmp(any, wrqu->ap_addr.sa_data, ETH_ALEN) ||
	    !memcmp(off, wrqu->ap_addr.sa_data, ETH_ALEN)) {
		/* we disable mandatory BSSID association */
		IPW_DEBUG_WX("Setting AP BSSID to ANY\n");
		priv->config &= ~CFG_STATIC_BSSID;
		IPW_DEBUG_ASSOC("Attempting to associate with new "
				"parameters.\n");
		ipw_associate(priv);
		mutex_unlock(&priv->mutex);
		return 0;
	}

	priv->config |= CFG_STATIC_BSSID;
	/* Already locked to this BSSID: nothing to do */
	if (!memcmp(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN)) {
		IPW_DEBUG_WX("BSSID set to current BSSID.\n");
		mutex_unlock(&priv->mutex);
		return 0;
	}

	IPW_DEBUG_WX("Setting mandatory BSSID to %s\n",
		     print_mac(mac, wrqu->ap_addr.sa_data));

	memcpy(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN);

	/* Network configuration changed -- force [re]association */
	IPW_DEBUG_ASSOC("[re]association triggered due to BSSID change.\n");
	if (!ipw_disassociate(priv))
		ipw_associate(priv);

	mutex_unlock(&priv->mutex);
	return 0;
}
8943
8944 static int ipw_wx_get_wap(struct net_device *dev,
8945 struct iw_request_info *info,
8946 union iwreq_data *wrqu, char *extra)
8947 {
8948 struct ipw_priv *priv = ieee80211_priv(dev);
8949 DECLARE_MAC_BUF(mac);
8950
8951 /* If we are associated, trying to associate, or have a statically
8952 * configured BSSID then return that; otherwise return ANY */
8953 mutex_lock(&priv->mutex);
8954 if (priv->config & CFG_STATIC_BSSID ||
8955 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
8956 wrqu->ap_addr.sa_family = ARPHRD_ETHER;
8957 memcpy(wrqu->ap_addr.sa_data, priv->bssid, ETH_ALEN);
8958 } else
8959 memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
8960
8961 IPW_DEBUG_WX("Getting WAP BSSID: %s\n",
8962 print_mac(mac, wrqu->ap_addr.sa_data));
8963 mutex_unlock(&priv->mutex);
8964 return 0;
8965 }
8966
/* SIOCSIWESSID handler: set a static ESSID (forcing reassociation if it
 * changed), or clear it ("ANY") when flags == 0. */
static int ipw_wx_set_essid(struct net_device *dev,
			    struct iw_request_info *info,
			    union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	int length;

	mutex_lock(&priv->mutex);

	/* flags == 0 means "ESSID off": drop the static ESSID and let the
	 * driver pick a network on its own */
	if (!wrqu->essid.flags)
	{
		IPW_DEBUG_WX("Setting ESSID to ANY\n");
		ipw_disassociate(priv);
		priv->config &= ~CFG_STATIC_ESSID;
		ipw_associate(priv);
		mutex_unlock(&priv->mutex);
		return 0;
	}

	length = min((int)wrqu->essid.length, IW_ESSID_MAX_SIZE);

	priv->config |= CFG_STATIC_ESSID;

	/* Nothing to do when the requested ESSID matches the current one
	 * and we are already associating/associated to it */
	if (priv->essid_len == length && !memcmp(priv->essid, extra, length)
	    && (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING))) {
		IPW_DEBUG_WX("ESSID set to current ESSID.\n");
		mutex_unlock(&priv->mutex);
		return 0;
	}

	IPW_DEBUG_WX("Setting ESSID: '%s' (%d)\n", escape_essid(extra, length),
		     length);

	priv->essid_len = length;
	memcpy(priv->essid, extra, priv->essid_len);

	/* Network configuration changed -- force [re]association */
	IPW_DEBUG_ASSOC("[re]association triggered due to ESSID change.\n");
	if (!ipw_disassociate(priv))
		ipw_associate(priv);

	mutex_unlock(&priv->mutex);
	return 0;
}
9011
9012 static int ipw_wx_get_essid(struct net_device *dev,
9013 struct iw_request_info *info,
9014 union iwreq_data *wrqu, char *extra)
9015 {
9016 struct ipw_priv *priv = ieee80211_priv(dev);
9017
9018 /* If we are associated, trying to associate, or have a statically
9019 * configured ESSID then return that; otherwise return ANY */
9020 mutex_lock(&priv->mutex);
9021 if (priv->config & CFG_STATIC_ESSID ||
9022 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
9023 IPW_DEBUG_WX("Getting essid: '%s'\n",
9024 escape_essid(priv->essid, priv->essid_len));
9025 memcpy(extra, priv->essid, priv->essid_len);
9026 wrqu->essid.length = priv->essid_len;
9027 wrqu->essid.flags = 1; /* active */
9028 } else {
9029 IPW_DEBUG_WX("Getting essid: ANY\n");
9030 wrqu->essid.length = 0;
9031 wrqu->essid.flags = 0; /* active */
9032 }
9033 mutex_unlock(&priv->mutex);
9034 return 0;
9035 }
9036
9037 static int ipw_wx_set_nick(struct net_device *dev,
9038 struct iw_request_info *info,
9039 union iwreq_data *wrqu, char *extra)
9040 {
9041 struct ipw_priv *priv = ieee80211_priv(dev);
9042
9043 IPW_DEBUG_WX("Setting nick to '%s'\n", extra);
9044 if (wrqu->data.length > IW_ESSID_MAX_SIZE)
9045 return -E2BIG;
9046 mutex_lock(&priv->mutex);
9047 wrqu->data.length = min((size_t) wrqu->data.length, sizeof(priv->nick));
9048 memset(priv->nick, 0, sizeof(priv->nick));
9049 memcpy(priv->nick, extra, wrqu->data.length);
9050 IPW_DEBUG_TRACE("<<\n");
9051 mutex_unlock(&priv->mutex);
9052 return 0;
9053
9054 }
9055
9056 static int ipw_wx_get_nick(struct net_device *dev,
9057 struct iw_request_info *info,
9058 union iwreq_data *wrqu, char *extra)
9059 {
9060 struct ipw_priv *priv = ieee80211_priv(dev);
9061 IPW_DEBUG_WX("Getting nick\n");
9062 mutex_lock(&priv->mutex);
9063 wrqu->data.length = strlen(priv->nick);
9064 memcpy(extra, priv->nick, wrqu->data.length);
9065 wrqu->data.flags = 1; /* active */
9066 mutex_unlock(&priv->mutex);
9067 return 0;
9068 }
9069
9070 static int ipw_wx_set_sens(struct net_device *dev,
9071 struct iw_request_info *info,
9072 union iwreq_data *wrqu, char *extra)
9073 {
9074 struct ipw_priv *priv = ieee80211_priv(dev);
9075 int err = 0;
9076
9077 IPW_DEBUG_WX("Setting roaming threshold to %d\n", wrqu->sens.value);
9078 IPW_DEBUG_WX("Setting disassociate threshold to %d\n", 3*wrqu->sens.value);
9079 mutex_lock(&priv->mutex);
9080
9081 if (wrqu->sens.fixed == 0)
9082 {
9083 priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
9084 priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
9085 goto out;
9086 }
9087 if ((wrqu->sens.value > IPW_MB_ROAMING_THRESHOLD_MAX) ||
9088 (wrqu->sens.value < IPW_MB_ROAMING_THRESHOLD_MIN)) {
9089 err = -EINVAL;
9090 goto out;
9091 }
9092
9093 priv->roaming_threshold = wrqu->sens.value;
9094 priv->disassociate_threshold = 3*wrqu->sens.value;
9095 out:
9096 mutex_unlock(&priv->mutex);
9097 return err;
9098 }
9099
9100 static int ipw_wx_get_sens(struct net_device *dev,
9101 struct iw_request_info *info,
9102 union iwreq_data *wrqu, char *extra)
9103 {
9104 struct ipw_priv *priv = ieee80211_priv(dev);
9105 mutex_lock(&priv->mutex);
9106 wrqu->sens.fixed = 1;
9107 wrqu->sens.value = priv->roaming_threshold;
9108 mutex_unlock(&priv->mutex);
9109
9110 IPW_DEBUG_WX("GET roaming threshold -> %s %d \n",
9111 wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
9112
9113 return 0;
9114 }
9115
9116 static int ipw_wx_set_rate(struct net_device *dev,
9117 struct iw_request_info *info,
9118 union iwreq_data *wrqu, char *extra)
9119 {
9120 /* TODO: We should use semaphores or locks for access to priv */
9121 struct ipw_priv *priv = ieee80211_priv(dev);
9122 u32 target_rate = wrqu->bitrate.value;
9123 u32 fixed, mask;
9124
9125 /* value = -1, fixed = 0 means auto only, so we should use all rates offered by AP */
9126 /* value = X, fixed = 1 means only rate X */
9127 /* value = X, fixed = 0 means all rates lower equal X */
9128
9129 if (target_rate == -1) {
9130 fixed = 0;
9131 mask = IEEE80211_DEFAULT_RATES_MASK;
9132 /* Now we should reassociate */
9133 goto apply;
9134 }
9135
9136 mask = 0;
9137 fixed = wrqu->bitrate.fixed;
9138
9139 if (target_rate == 1000000 || !fixed)
9140 mask |= IEEE80211_CCK_RATE_1MB_MASK;
9141 if (target_rate == 1000000)
9142 goto apply;
9143
9144 if (target_rate == 2000000 || !fixed)
9145 mask |= IEEE80211_CCK_RATE_2MB_MASK;
9146 if (target_rate == 2000000)
9147 goto apply;
9148
9149 if (target_rate == 5500000 || !fixed)
9150 mask |= IEEE80211_CCK_RATE_5MB_MASK;
9151 if (target_rate == 5500000)
9152 goto apply;
9153
9154 if (target_rate == 6000000 || !fixed)
9155 mask |= IEEE80211_OFDM_RATE_6MB_MASK;
9156 if (target_rate == 6000000)
9157 goto apply;
9158
9159 if (target_rate == 9000000 || !fixed)
9160 mask |= IEEE80211_OFDM_RATE_9MB_MASK;
9161 if (target_rate == 9000000)
9162 goto apply;
9163
9164 if (target_rate == 11000000 || !fixed)
9165 mask |= IEEE80211_CCK_RATE_11MB_MASK;
9166 if (target_rate == 11000000)
9167 goto apply;
9168
9169 if (target_rate == 12000000 || !fixed)
9170 mask |= IEEE80211_OFDM_RATE_12MB_MASK;
9171 if (target_rate == 12000000)
9172 goto apply;
9173
9174 if (target_rate == 18000000 || !fixed)
9175 mask |= IEEE80211_OFDM_RATE_18MB_MASK;
9176 if (target_rate == 18000000)
9177 goto apply;
9178
9179 if (target_rate == 24000000 || !fixed)
9180 mask |= IEEE80211_OFDM_RATE_24MB_MASK;
9181 if (target_rate == 24000000)
9182 goto apply;
9183
9184 if (target_rate == 36000000 || !fixed)
9185 mask |= IEEE80211_OFDM_RATE_36MB_MASK;
9186 if (target_rate == 36000000)
9187 goto apply;
9188
9189 if (target_rate == 48000000 || !fixed)
9190 mask |= IEEE80211_OFDM_RATE_48MB_MASK;
9191 if (target_rate == 48000000)
9192 goto apply;
9193
9194 if (target_rate == 54000000 || !fixed)
9195 mask |= IEEE80211_OFDM_RATE_54MB_MASK;
9196 if (target_rate == 54000000)
9197 goto apply;
9198
9199 IPW_DEBUG_WX("invalid rate specified, returning error\n");
9200 return -EINVAL;
9201
9202 apply:
9203 IPW_DEBUG_WX("Setting rate mask to 0x%08X [%s]\n",
9204 mask, fixed ? "fixed" : "sub-rates");
9205 mutex_lock(&priv->mutex);
9206 if (mask == IEEE80211_DEFAULT_RATES_MASK) {
9207 priv->config &= ~CFG_FIXED_RATE;
9208 ipw_set_fixed_rate(priv, priv->ieee->mode);
9209 } else
9210 priv->config |= CFG_FIXED_RATE;
9211
9212 if (priv->rates_mask == mask) {
9213 IPW_DEBUG_WX("Mask set to current mask.\n");
9214 mutex_unlock(&priv->mutex);
9215 return 0;
9216 }
9217
9218 priv->rates_mask = mask;
9219
9220 /* Network configuration changed -- force [re]association */
9221 IPW_DEBUG_ASSOC("[re]association triggered due to rates change.\n");
9222 if (!ipw_disassociate(priv))
9223 ipw_associate(priv);
9224
9225 mutex_unlock(&priv->mutex);
9226 return 0;
9227 }
9228
9229 static int ipw_wx_get_rate(struct net_device *dev,
9230 struct iw_request_info *info,
9231 union iwreq_data *wrqu, char *extra)
9232 {
9233 struct ipw_priv *priv = ieee80211_priv(dev);
9234 mutex_lock(&priv->mutex);
9235 wrqu->bitrate.value = priv->last_rate;
9236 wrqu->bitrate.fixed = (priv->config & CFG_FIXED_RATE) ? 1 : 0;
9237 mutex_unlock(&priv->mutex);
9238 IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value);
9239 return 0;
9240 }
9241
9242 static int ipw_wx_set_rts(struct net_device *dev,
9243 struct iw_request_info *info,
9244 union iwreq_data *wrqu, char *extra)
9245 {
9246 struct ipw_priv *priv = ieee80211_priv(dev);
9247 mutex_lock(&priv->mutex);
9248 if (wrqu->rts.disabled || !wrqu->rts.fixed)
9249 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
9250 else {
9251 if (wrqu->rts.value < MIN_RTS_THRESHOLD ||
9252 wrqu->rts.value > MAX_RTS_THRESHOLD) {
9253 mutex_unlock(&priv->mutex);
9254 return -EINVAL;
9255 }
9256 priv->rts_threshold = wrqu->rts.value;
9257 }
9258
9259 ipw_send_rts_threshold(priv, priv->rts_threshold);
9260 mutex_unlock(&priv->mutex);
9261 IPW_DEBUG_WX("SET RTS Threshold -> %d \n", priv->rts_threshold);
9262 return 0;
9263 }
9264
9265 static int ipw_wx_get_rts(struct net_device *dev,
9266 struct iw_request_info *info,
9267 union iwreq_data *wrqu, char *extra)
9268 {
9269 struct ipw_priv *priv = ieee80211_priv(dev);
9270 mutex_lock(&priv->mutex);
9271 wrqu->rts.value = priv->rts_threshold;
9272 wrqu->rts.fixed = 0; /* no auto select */
9273 wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD);
9274 mutex_unlock(&priv->mutex);
9275 IPW_DEBUG_WX("GET RTS Threshold -> %d \n", wrqu->rts.value);
9276 return 0;
9277 }
9278
9279 static int ipw_wx_set_txpow(struct net_device *dev,
9280 struct iw_request_info *info,
9281 union iwreq_data *wrqu, char *extra)
9282 {
9283 struct ipw_priv *priv = ieee80211_priv(dev);
9284 int err = 0;
9285
9286 mutex_lock(&priv->mutex);
9287 if (ipw_radio_kill_sw(priv, wrqu->power.disabled)) {
9288 err = -EINPROGRESS;
9289 goto out;
9290 }
9291
9292 if (!wrqu->power.fixed)
9293 wrqu->power.value = IPW_TX_POWER_DEFAULT;
9294
9295 if (wrqu->power.flags != IW_TXPOW_DBM) {
9296 err = -EINVAL;
9297 goto out;
9298 }
9299
9300 if ((wrqu->power.value > IPW_TX_POWER_MAX) ||
9301 (wrqu->power.value < IPW_TX_POWER_MIN)) {
9302 err = -EINVAL;
9303 goto out;
9304 }
9305
9306 priv->tx_power = wrqu->power.value;
9307 err = ipw_set_tx_power(priv);
9308 out:
9309 mutex_unlock(&priv->mutex);
9310 return err;
9311 }
9312
9313 static int ipw_wx_get_txpow(struct net_device *dev,
9314 struct iw_request_info *info,
9315 union iwreq_data *wrqu, char *extra)
9316 {
9317 struct ipw_priv *priv = ieee80211_priv(dev);
9318 mutex_lock(&priv->mutex);
9319 wrqu->power.value = priv->tx_power;
9320 wrqu->power.fixed = 1;
9321 wrqu->power.flags = IW_TXPOW_DBM;
9322 wrqu->power.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0;
9323 mutex_unlock(&priv->mutex);
9324
9325 IPW_DEBUG_WX("GET TX Power -> %s %d \n",
9326 wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
9327
9328 return 0;
9329 }
9330
9331 static int ipw_wx_set_frag(struct net_device *dev,
9332 struct iw_request_info *info,
9333 union iwreq_data *wrqu, char *extra)
9334 {
9335 struct ipw_priv *priv = ieee80211_priv(dev);
9336 mutex_lock(&priv->mutex);
9337 if (wrqu->frag.disabled || !wrqu->frag.fixed)
9338 priv->ieee->fts = DEFAULT_FTS;
9339 else {
9340 if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
9341 wrqu->frag.value > MAX_FRAG_THRESHOLD) {
9342 mutex_unlock(&priv->mutex);
9343 return -EINVAL;
9344 }
9345
9346 priv->ieee->fts = wrqu->frag.value & ~0x1;
9347 }
9348
9349 ipw_send_frag_threshold(priv, wrqu->frag.value);
9350 mutex_unlock(&priv->mutex);
9351 IPW_DEBUG_WX("SET Frag Threshold -> %d \n", wrqu->frag.value);
9352 return 0;
9353 }
9354
9355 static int ipw_wx_get_frag(struct net_device *dev,
9356 struct iw_request_info *info,
9357 union iwreq_data *wrqu, char *extra)
9358 {
9359 struct ipw_priv *priv = ieee80211_priv(dev);
9360 mutex_lock(&priv->mutex);
9361 wrqu->frag.value = priv->ieee->fts;
9362 wrqu->frag.fixed = 0; /* no auto select */
9363 wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FTS);
9364 mutex_unlock(&priv->mutex);
9365 IPW_DEBUG_WX("GET Frag Threshold -> %d \n", wrqu->frag.value);
9366
9367 return 0;
9368 }
9369
9370 static int ipw_wx_set_retry(struct net_device *dev,
9371 struct iw_request_info *info,
9372 union iwreq_data *wrqu, char *extra)
9373 {
9374 struct ipw_priv *priv = ieee80211_priv(dev);
9375
9376 if (wrqu->retry.flags & IW_RETRY_LIFETIME || wrqu->retry.disabled)
9377 return -EINVAL;
9378
9379 if (!(wrqu->retry.flags & IW_RETRY_LIMIT))
9380 return 0;
9381
9382 if (wrqu->retry.value < 0 || wrqu->retry.value >= 255)
9383 return -EINVAL;
9384
9385 mutex_lock(&priv->mutex);
9386 if (wrqu->retry.flags & IW_RETRY_SHORT)
9387 priv->short_retry_limit = (u8) wrqu->retry.value;
9388 else if (wrqu->retry.flags & IW_RETRY_LONG)
9389 priv->long_retry_limit = (u8) wrqu->retry.value;
9390 else {
9391 priv->short_retry_limit = (u8) wrqu->retry.value;
9392 priv->long_retry_limit = (u8) wrqu->retry.value;
9393 }
9394
9395 ipw_send_retry_limit(priv, priv->short_retry_limit,
9396 priv->long_retry_limit);
9397 mutex_unlock(&priv->mutex);
9398 IPW_DEBUG_WX("SET retry limit -> short:%d long:%d\n",
9399 priv->short_retry_limit, priv->long_retry_limit);
9400 return 0;
9401 }
9402
9403 static int ipw_wx_get_retry(struct net_device *dev,
9404 struct iw_request_info *info,
9405 union iwreq_data *wrqu, char *extra)
9406 {
9407 struct ipw_priv *priv = ieee80211_priv(dev);
9408
9409 mutex_lock(&priv->mutex);
9410 wrqu->retry.disabled = 0;
9411
9412 if ((wrqu->retry.flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
9413 mutex_unlock(&priv->mutex);
9414 return -EINVAL;
9415 }
9416
9417 if (wrqu->retry.flags & IW_RETRY_LONG) {
9418 wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_LONG;
9419 wrqu->retry.value = priv->long_retry_limit;
9420 } else if (wrqu->retry.flags & IW_RETRY_SHORT) {
9421 wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_SHORT;
9422 wrqu->retry.value = priv->short_retry_limit;
9423 } else {
9424 wrqu->retry.flags = IW_RETRY_LIMIT;
9425 wrqu->retry.value = priv->short_retry_limit;
9426 }
9427 mutex_unlock(&priv->mutex);
9428
9429 IPW_DEBUG_WX("GET retry -> %d \n", wrqu->retry.value);
9430
9431 return 0;
9432 }
9433
/* Issue an active scan directed at a specific SSID.  Defers the scan when
 * RF-kill is active, and returns -EAGAIN (without sleeping) when a scan
 * is already running.  Returns 0 on success or when the driver is not
 * fully initialized. */
static int ipw_request_direct_scan(struct ipw_priv *priv, char *essid,
				   int essid_len)
{
	struct ipw_scan_request_ext scan;
	int err = 0, scan_type;

	/* Silently succeed if the driver is not up yet or is going down */
	if (!(priv->status & STATUS_INIT) ||
	    (priv->status & STATUS_EXIT_PENDING))
		return 0;

	mutex_lock(&priv->mutex);

	if (priv->status & STATUS_RF_KILL_MASK) {
		IPW_DEBUG_HC("Aborting scan due to RF kill activation\n");
		/* Defer: the pending flag retries the scan once the radio
		 * comes back */
		priv->status |= STATUS_SCAN_PENDING;
		goto done;
	}

	IPW_DEBUG_HC("starting request direct scan!\n");

	if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) {
		/* We should not sleep here; otherwise we will block most
		 * of the system (for instance, we hold rtnl_lock when we
		 * get here).
		 */
		err = -EAGAIN;
		goto done;
	}
	memset(&scan, 0, sizeof(scan));

	/* Shorter broadcast dwell when speed-scan is configured; dwell
	 * time units defined by the firmware interface (see ipw2200.h) */
	if (priv->config & CFG_SPEED_SCAN)
		scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
		    cpu_to_le16(30);
	else
		scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
		    cpu_to_le16(20);

	scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] =
	    cpu_to_le16(20);
	scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = cpu_to_le16(120);
	scan.dwell_time[IPW_SCAN_ACTIVE_DIRECT_SCAN] = cpu_to_le16(20);

	scan.full_scan_index = cpu_to_le32(ieee80211_get_scans(priv->ieee));

	/* Program the target SSID before issuing the scan command */
	err = ipw_send_ssid(priv, essid, essid_len);
	if (err) {
		IPW_DEBUG_HC("Attempt to send SSID command failed\n");
		goto done;
	}
	scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;

	ipw_add_scan_channels(priv, &scan, scan_type);

	err = ipw_send_scan_request_ext(priv, &scan);
	if (err) {
		IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
		goto done;
	}

	priv->status |= STATUS_SCANNING;

      done:
	mutex_unlock(&priv->mutex);
	return err;
}
9499
9500 static int ipw_wx_set_scan(struct net_device *dev,
9501 struct iw_request_info *info,
9502 union iwreq_data *wrqu, char *extra)
9503 {
9504 struct ipw_priv *priv = ieee80211_priv(dev);
9505 struct iw_scan_req *req = (struct iw_scan_req *)extra;
9506
9507 mutex_lock(&priv->mutex);
9508 priv->user_requested_scan = 1;
9509 mutex_unlock(&priv->mutex);
9510
9511 if (wrqu->data.length == sizeof(struct iw_scan_req)) {
9512 if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
9513 ipw_request_direct_scan(priv, req->essid,
9514 req->essid_len);
9515 return 0;
9516 }
9517 if (req->scan_type == IW_SCAN_TYPE_PASSIVE) {
9518 queue_work(priv->workqueue,
9519 &priv->request_passive_scan);
9520 return 0;
9521 }
9522 }
9523
9524 IPW_DEBUG_WX("Start scan\n");
9525
9526 queue_delayed_work(priv->workqueue, &priv->request_scan, 0);
9527
9528 return 0;
9529 }
9530
/* WE handler for SIOCGIWSCAN: scan results are kept by the ieee80211
 * layer, so simply delegate. */
static int ipw_wx_get_scan(struct net_device *dev,
			   struct iw_request_info *info,
			   union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	return ieee80211_wx_get_scan(priv->ieee, info, wrqu, extra);
}
9538
/* WE handler for SIOCSIWENCODE: delegate key handling to the ieee80211
 * layer, then, if the privacy capability changed while associated in
 * IBSS mode, disassociate so the beacon's capability field gets
 * refreshed on the next association. */
static int ipw_wx_set_encode(struct net_device *dev,
			     struct iw_request_info *info,
			     union iwreq_data *wrqu, char *key)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	int ret;
	/* Snapshot the capability so a change made by the ieee80211
	 * layer below can be detected. */
	u32 cap = priv->capability;

	mutex_lock(&priv->mutex);
	ret = ieee80211_wx_set_encode(priv->ieee, info, wrqu, key);

	/* In IBSS mode, we need to notify the firmware to update
	 * the beacon info after we changed the capability. */
	if (cap != priv->capability &&
	    priv->ieee->iw_mode == IW_MODE_ADHOC &&
	    priv->status & STATUS_ASSOCIATED)
		ipw_disassociate(priv);

	mutex_unlock(&priv->mutex);
	return ret;
}
9560
/* WE handler for SIOCGIWENCODE: key state lives in the ieee80211
 * layer, so simply delegate. */
static int ipw_wx_get_encode(struct net_device *dev,
			     struct iw_request_info *info,
			     union iwreq_data *wrqu, char *key)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	return ieee80211_wx_get_encode(priv->ieee, info, wrqu, key);
}
9568
9569 static int ipw_wx_set_power(struct net_device *dev,
9570 struct iw_request_info *info,
9571 union iwreq_data *wrqu, char *extra)
9572 {
9573 struct ipw_priv *priv = ieee80211_priv(dev);
9574 int err;
9575 mutex_lock(&priv->mutex);
9576 if (wrqu->power.disabled) {
9577 priv->power_mode = IPW_POWER_LEVEL(priv->power_mode);
9578 err = ipw_send_power_mode(priv, IPW_POWER_MODE_CAM);
9579 if (err) {
9580 IPW_DEBUG_WX("failed setting power mode.\n");
9581 mutex_unlock(&priv->mutex);
9582 return err;
9583 }
9584 IPW_DEBUG_WX("SET Power Management Mode -> off\n");
9585 mutex_unlock(&priv->mutex);
9586 return 0;
9587 }
9588
9589 switch (wrqu->power.flags & IW_POWER_MODE) {
9590 case IW_POWER_ON: /* If not specified */
9591 case IW_POWER_MODE: /* If set all mask */
9592 case IW_POWER_ALL_R: /* If explicitly state all */
9593 break;
9594 default: /* Otherwise we don't support it */
9595 IPW_DEBUG_WX("SET PM Mode: %X not supported.\n",
9596 wrqu->power.flags);
9597 mutex_unlock(&priv->mutex);
9598 return -EOPNOTSUPP;
9599 }
9600
9601 /* If the user hasn't specified a power management mode yet, default
9602 * to BATTERY */
9603 if (IPW_POWER_LEVEL(priv->power_mode) == IPW_POWER_AC)
9604 priv->power_mode = IPW_POWER_ENABLED | IPW_POWER_BATTERY;
9605 else
9606 priv->power_mode = IPW_POWER_ENABLED | priv->power_mode;
9607
9608 err = ipw_send_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode));
9609 if (err) {
9610 IPW_DEBUG_WX("failed setting power mode.\n");
9611 mutex_unlock(&priv->mutex);
9612 return err;
9613 }
9614
9615 IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", priv->power_mode);
9616 mutex_unlock(&priv->mutex);
9617 return 0;
9618 }
9619
9620 static int ipw_wx_get_power(struct net_device *dev,
9621 struct iw_request_info *info,
9622 union iwreq_data *wrqu, char *extra)
9623 {
9624 struct ipw_priv *priv = ieee80211_priv(dev);
9625 mutex_lock(&priv->mutex);
9626 if (!(priv->power_mode & IPW_POWER_ENABLED))
9627 wrqu->power.disabled = 1;
9628 else
9629 wrqu->power.disabled = 0;
9630
9631 mutex_unlock(&priv->mutex);
9632 IPW_DEBUG_WX("GET Power Management Mode -> %02X\n", priv->power_mode);
9633
9634 return 0;
9635 }
9636
9637 static int ipw_wx_set_powermode(struct net_device *dev,
9638 struct iw_request_info *info,
9639 union iwreq_data *wrqu, char *extra)
9640 {
9641 struct ipw_priv *priv = ieee80211_priv(dev);
9642 int mode = *(int *)extra;
9643 int err;
9644
9645 mutex_lock(&priv->mutex);
9646 if ((mode < 1) || (mode > IPW_POWER_LIMIT))
9647 mode = IPW_POWER_AC;
9648
9649 if (IPW_POWER_LEVEL(priv->power_mode) != mode) {
9650 err = ipw_send_power_mode(priv, mode);
9651 if (err) {
9652 IPW_DEBUG_WX("failed setting power mode.\n");
9653 mutex_unlock(&priv->mutex);
9654 return err;
9655 }
9656 priv->power_mode = IPW_POWER_ENABLED | mode;
9657 }
9658 mutex_unlock(&priv->mutex);
9659 return 0;
9660 }
9661
9662 #define MAX_WX_STRING 80
9663 static int ipw_wx_get_powermode(struct net_device *dev,
9664 struct iw_request_info *info,
9665 union iwreq_data *wrqu, char *extra)
9666 {
9667 struct ipw_priv *priv = ieee80211_priv(dev);
9668 int level = IPW_POWER_LEVEL(priv->power_mode);
9669 char *p = extra;
9670
9671 p += snprintf(p, MAX_WX_STRING, "Power save level: %d ", level);
9672
9673 switch (level) {
9674 case IPW_POWER_AC:
9675 p += snprintf(p, MAX_WX_STRING - (p - extra), "(AC)");
9676 break;
9677 case IPW_POWER_BATTERY:
9678 p += snprintf(p, MAX_WX_STRING - (p - extra), "(BATTERY)");
9679 break;
9680 default:
9681 p += snprintf(p, MAX_WX_STRING - (p - extra),
9682 "(Timeout %dms, Period %dms)",
9683 timeout_duration[level - 1] / 1000,
9684 period_duration[level - 1] / 1000);
9685 }
9686
9687 if (!(priv->power_mode & IPW_POWER_ENABLED))
9688 p += snprintf(p, MAX_WX_STRING - (p - extra), " OFF");
9689
9690 wrqu->data.length = p - extra + 1;
9691
9692 return 0;
9693 }
9694
/* Private ioctl "set_mode": select the 802.11 band/modulation mix
 * (bitmask of IEEE_A/IEEE_B/IEEE_G).  Rejects 802.11a on 2200BG
 * hardware, reprograms the supported-rate set, and forces a
 * reassociation to apply the change. */
static int ipw_wx_set_wireless_mode(struct net_device *dev,
				    struct iw_request_info *info,
				    union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	int mode = *(int *)extra;
	u8 band = 0, modulation = 0;

	/* Reject an empty mask or any bit outside IEEE_MODE_MASK. */
	if (mode == 0 || mode & ~IEEE_MODE_MASK) {
		IPW_WARNING("Attempt to set invalid wireless mode: %d\n", mode);
		return -EINVAL;
	}
	mutex_lock(&priv->mutex);
	if (priv->adapter == IPW_2915ABG) {
		/* ABG hardware: only truly a/b/g when IEEE_A is included. */
		priv->ieee->abg_true = 1;
		if (mode & IEEE_A) {
			band |= IEEE80211_52GHZ_BAND;
			modulation |= IEEE80211_OFDM_MODULATION;
		} else
			priv->ieee->abg_true = 0;
	} else {
		/* 2200BG hardware has no 5GHz radio. */
		if (mode & IEEE_A) {
			IPW_WARNING("Attempt to set 2200BG into "
				    "802.11a mode\n");
			mutex_unlock(&priv->mutex);
			return -EINVAL;
		}

		priv->ieee->abg_true = 0;
	}

	/* NOTE(review): the else-branches below clear abg_true whenever
	 * B or G is missing from the mask — i.e. abg_true survives only
	 * for the full A|B|G combination.  Looks intentional but the
	 * repetition is odd; confirm against the abg_true consumers. */
	if (mode & IEEE_B) {
		band |= IEEE80211_24GHZ_BAND;
		modulation |= IEEE80211_CCK_MODULATION;
	} else
		priv->ieee->abg_true = 0;

	if (mode & IEEE_G) {
		band |= IEEE80211_24GHZ_BAND;
		modulation |= IEEE80211_OFDM_MODULATION;
	} else
		priv->ieee->abg_true = 0;

	priv->ieee->mode = mode;
	priv->ieee->freq_band = band;
	priv->ieee->modulation = modulation;
	init_supported_rates(priv, &priv->rates);

	/* Network configuration changed -- force [re]association */
	IPW_DEBUG_ASSOC("[re]association triggered due to mode change.\n");
	if (!ipw_disassociate(priv)) {
		ipw_send_supported_rates(priv, &priv->rates);
		ipw_associate(priv);
	}

	/* Update the band LEDs */
	ipw_led_band_on(priv);

	IPW_DEBUG_WX("PRIV SET MODE: %c%c%c\n",
		     mode & IEEE_A ? 'a' : '.',
		     mode & IEEE_B ? 'b' : '.', mode & IEEE_G ? 'g' : '.');
	mutex_unlock(&priv->mutex);
	return 0;
}
9759
9760 static int ipw_wx_get_wireless_mode(struct net_device *dev,
9761 struct iw_request_info *info,
9762 union iwreq_data *wrqu, char *extra)
9763 {
9764 struct ipw_priv *priv = ieee80211_priv(dev);
9765 mutex_lock(&priv->mutex);
9766 switch (priv->ieee->mode) {
9767 case IEEE_A:
9768 strncpy(extra, "802.11a (1)", MAX_WX_STRING);
9769 break;
9770 case IEEE_B:
9771 strncpy(extra, "802.11b (2)", MAX_WX_STRING);
9772 break;
9773 case IEEE_A | IEEE_B:
9774 strncpy(extra, "802.11ab (3)", MAX_WX_STRING);
9775 break;
9776 case IEEE_G:
9777 strncpy(extra, "802.11g (4)", MAX_WX_STRING);
9778 break;
9779 case IEEE_A | IEEE_G:
9780 strncpy(extra, "802.11ag (5)", MAX_WX_STRING);
9781 break;
9782 case IEEE_B | IEEE_G:
9783 strncpy(extra, "802.11bg (6)", MAX_WX_STRING);
9784 break;
9785 case IEEE_A | IEEE_B | IEEE_G:
9786 strncpy(extra, "802.11abg (7)", MAX_WX_STRING);
9787 break;
9788 default:
9789 strncpy(extra, "unknown", MAX_WX_STRING);
9790 break;
9791 }
9792
9793 IPW_DEBUG_WX("PRIV GET MODE: %s\n", extra);
9794
9795 wrqu->data.length = strlen(extra) + 1;
9796 mutex_unlock(&priv->mutex);
9797
9798 return 0;
9799 }
9800
9801 static int ipw_wx_set_preamble(struct net_device *dev,
9802 struct iw_request_info *info,
9803 union iwreq_data *wrqu, char *extra)
9804 {
9805 struct ipw_priv *priv = ieee80211_priv(dev);
9806 int mode = *(int *)extra;
9807 mutex_lock(&priv->mutex);
9808 /* Switching from SHORT -> LONG requires a disassociation */
9809 if (mode == 1) {
9810 if (!(priv->config & CFG_PREAMBLE_LONG)) {
9811 priv->config |= CFG_PREAMBLE_LONG;
9812
9813 /* Network configuration changed -- force [re]association */
9814 IPW_DEBUG_ASSOC
9815 ("[re]association triggered due to preamble change.\n");
9816 if (!ipw_disassociate(priv))
9817 ipw_associate(priv);
9818 }
9819 goto done;
9820 }
9821
9822 if (mode == 0) {
9823 priv->config &= ~CFG_PREAMBLE_LONG;
9824 goto done;
9825 }
9826 mutex_unlock(&priv->mutex);
9827 return -EINVAL;
9828
9829 done:
9830 mutex_unlock(&priv->mutex);
9831 return 0;
9832 }
9833
9834 static int ipw_wx_get_preamble(struct net_device *dev,
9835 struct iw_request_info *info,
9836 union iwreq_data *wrqu, char *extra)
9837 {
9838 struct ipw_priv *priv = ieee80211_priv(dev);
9839 mutex_lock(&priv->mutex);
9840 if (priv->config & CFG_PREAMBLE_LONG)
9841 snprintf(wrqu->name, IFNAMSIZ, "long (1)");
9842 else
9843 snprintf(wrqu->name, IFNAMSIZ, "auto (0)");
9844 mutex_unlock(&priv->mutex);
9845 return 0;
9846 }
9847
9848 #ifdef CONFIG_IPW2200_MONITOR
/* Private ioctl "monitor": parms[0] > 0 enables monitor mode on the
 * channel in parms[1]; otherwise monitor mode is disabled.  Changing
 * the link type requires a full adapter restart, which is queued to
 * the driver workqueue rather than done inline. */
static int ipw_wx_set_monitor(struct net_device *dev,
			      struct iw_request_info *info,
			      union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	int *parms = (int *)extra;
	int enable = (parms[0] > 0);
	mutex_lock(&priv->mutex);
	IPW_DEBUG_WX("SET MONITOR: %d %d\n", enable, parms[1]);
	if (enable) {
		if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
			/* Link type depends on whether radiotap headers
			 * are compiled in. */
#ifdef CONFIG_IPW2200_RADIOTAP
			priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
#else
			priv->net_dev->type = ARPHRD_IEEE80211;
#endif
			queue_work(priv->workqueue, &priv->adapter_restart);
		}

		/* Tune to the requested channel in any case. */
		ipw_set_channel(priv, parms[1]);
	} else {
		/* Disabling while not in monitor mode is a no-op. */
		if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
			mutex_unlock(&priv->mutex);
			return 0;
		}
		priv->net_dev->type = ARPHRD_ETHER;
		queue_work(priv->workqueue, &priv->adapter_restart);
	}
	mutex_unlock(&priv->mutex);
	return 0;
}
9880
9881 #endif /* CONFIG_IPW2200_MONITOR */
9882
/* Private ioctl "reset": queue a full adapter restart.  Asynchronous —
 * returns before the restart actually happens. */
static int ipw_wx_reset(struct net_device *dev,
			struct iw_request_info *info,
			union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	IPW_DEBUG_WX("RESET\n");
	queue_work(priv->workqueue, &priv->adapter_restart);
	return 0;
}
9892
/* Private ioctl "sw_reset": reset driver software state to defaults,
 * release the cached firmware, restart the adapter, disable
 * encryption, and reassociate.  Always returns 0. */
static int ipw_wx_sw_reset(struct net_device *dev,
			   struct iw_request_info *info,
			   union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	/* Request used below to turn encryption off via the ieee80211
	 * layer as part of the reset. */
	union iwreq_data wrqu_sec = {
		.encoding = {
			     .flags = IW_ENCODE_DISABLED,
			     },
	};
	int ret;

	IPW_DEBUG_WX("SW_RESET\n");

	mutex_lock(&priv->mutex);

	ret = ipw_sw_reset(priv, 2);
	if (!ret) {
		free_firmware();
		ipw_adapter_restart(priv);
	}

	/* The SW reset bit might have been toggled on by the 'disable'
	 * module parameter, so take appropriate action */
	ipw_radio_kill_sw(priv, priv->status & STATUS_RF_KILL_SW);

	/* Drop the mutex around the ieee80211 call below; presumably it
	 * can re-enter driver code that takes priv->mutex — confirm
	 * before changing this locking pattern. */
	mutex_unlock(&priv->mutex);
	ieee80211_wx_set_encode(priv->ieee, info, &wrqu_sec, NULL);
	mutex_lock(&priv->mutex);

	if (!(priv->status & STATUS_RF_KILL_MASK)) {
		/* Configuration likely changed -- force [re]association */
		IPW_DEBUG_ASSOC("[re]association triggered due to sw "
				"reset.\n");
		if (!ipw_disassociate(priv))
			ipw_associate(priv);
	}

	mutex_unlock(&priv->mutex);

	return 0;
}
9935
/* Rebase the WE IOCTLs to zero for the handler array */
#define IW_IOCTL(x) [(x)-SIOCSIWCOMMIT]
/* Standard Wireless Extensions handlers, indexed by ioctl number
 * relative to SIOCSIWCOMMIT.  Gaps (unsupported ioctls) are NULL. */
static iw_handler ipw_wx_handlers[] = {
	IW_IOCTL(SIOCGIWNAME) = ipw_wx_get_name,
	IW_IOCTL(SIOCSIWFREQ) = ipw_wx_set_freq,
	IW_IOCTL(SIOCGIWFREQ) = ipw_wx_get_freq,
	IW_IOCTL(SIOCSIWMODE) = ipw_wx_set_mode,
	IW_IOCTL(SIOCGIWMODE) = ipw_wx_get_mode,
	IW_IOCTL(SIOCSIWSENS) = ipw_wx_set_sens,
	IW_IOCTL(SIOCGIWSENS) = ipw_wx_get_sens,
	IW_IOCTL(SIOCGIWRANGE) = ipw_wx_get_range,
	IW_IOCTL(SIOCSIWAP) = ipw_wx_set_wap,
	IW_IOCTL(SIOCGIWAP) = ipw_wx_get_wap,
	IW_IOCTL(SIOCSIWSCAN) = ipw_wx_set_scan,
	IW_IOCTL(SIOCGIWSCAN) = ipw_wx_get_scan,
	IW_IOCTL(SIOCSIWESSID) = ipw_wx_set_essid,
	IW_IOCTL(SIOCGIWESSID) = ipw_wx_get_essid,
	IW_IOCTL(SIOCSIWNICKN) = ipw_wx_set_nick,
	IW_IOCTL(SIOCGIWNICKN) = ipw_wx_get_nick,
	IW_IOCTL(SIOCSIWRATE) = ipw_wx_set_rate,
	IW_IOCTL(SIOCGIWRATE) = ipw_wx_get_rate,
	IW_IOCTL(SIOCSIWRTS) = ipw_wx_set_rts,
	IW_IOCTL(SIOCGIWRTS) = ipw_wx_get_rts,
	IW_IOCTL(SIOCSIWFRAG) = ipw_wx_set_frag,
	IW_IOCTL(SIOCGIWFRAG) = ipw_wx_get_frag,
	IW_IOCTL(SIOCSIWTXPOW) = ipw_wx_set_txpow,
	IW_IOCTL(SIOCGIWTXPOW) = ipw_wx_get_txpow,
	IW_IOCTL(SIOCSIWRETRY) = ipw_wx_set_retry,
	IW_IOCTL(SIOCGIWRETRY) = ipw_wx_get_retry,
	IW_IOCTL(SIOCSIWENCODE) = ipw_wx_set_encode,
	IW_IOCTL(SIOCGIWENCODE) = ipw_wx_get_encode,
	IW_IOCTL(SIOCSIWPOWER) = ipw_wx_set_power,
	IW_IOCTL(SIOCGIWPOWER) = ipw_wx_get_power,
	IW_IOCTL(SIOCSIWSPY) = iw_handler_set_spy,
	IW_IOCTL(SIOCGIWSPY) = iw_handler_get_spy,
	IW_IOCTL(SIOCSIWTHRSPY) = iw_handler_set_thrspy,
	IW_IOCTL(SIOCGIWTHRSPY) = iw_handler_get_thrspy,
	IW_IOCTL(SIOCSIWGENIE) = ipw_wx_set_genie,
	IW_IOCTL(SIOCGIWGENIE) = ipw_wx_get_genie,
	IW_IOCTL(SIOCSIWMLME) = ipw_wx_set_mlme,
	IW_IOCTL(SIOCSIWAUTH) = ipw_wx_set_auth,
	IW_IOCTL(SIOCGIWAUTH) = ipw_wx_get_auth,
	IW_IOCTL(SIOCSIWENCODEEXT) = ipw_wx_set_encodeext,
	IW_IOCTL(SIOCGIWENCODEEXT) = ipw_wx_get_encodeext,
};

/* Private ioctl numbers, starting at SIOCIWFIRSTPRIV.  The order here
 * must match ipw_priv_handler[] below (handler index = cmd value
 * minus SIOCIWFIRSTPRIV). */
enum {
	IPW_PRIV_SET_POWER = SIOCIWFIRSTPRIV,
	IPW_PRIV_GET_POWER,
	IPW_PRIV_SET_MODE,
	IPW_PRIV_GET_MODE,
	IPW_PRIV_SET_PREAMBLE,
	IPW_PRIV_GET_PREAMBLE,
	IPW_PRIV_RESET,
	IPW_PRIV_SW_RESET,
#ifdef CONFIG_IPW2200_MONITOR
	IPW_PRIV_SET_MONITOR,
#endif
};

/* Argument descriptions for the private ioctls above (consumed by
 * iwpriv in userspace). */
static struct iw_priv_args ipw_priv_args[] = {
	{
	 .cmd = IPW_PRIV_SET_POWER,
	 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
	 .name = "set_power"},
	{
	 .cmd = IPW_PRIV_GET_POWER,
	 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
	 .name = "get_power"},
	{
	 .cmd = IPW_PRIV_SET_MODE,
	 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
	 .name = "set_mode"},
	{
	 .cmd = IPW_PRIV_GET_MODE,
	 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
	 .name = "get_mode"},
	{
	 .cmd = IPW_PRIV_SET_PREAMBLE,
	 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
	 .name = "set_preamble"},
	{
	 .cmd = IPW_PRIV_GET_PREAMBLE,
	 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | IFNAMSIZ,
	 .name = "get_preamble"},
	{
	 IPW_PRIV_RESET,
	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "reset"},
	{
	 IPW_PRIV_SW_RESET,
	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "sw_reset"},
#ifdef CONFIG_IPW2200_MONITOR
	{
	 IPW_PRIV_SET_MONITOR,
	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "monitor"},
#endif				/* CONFIG_IPW2200_MONITOR */
};

/* Private ioctl handlers — order must match the IPW_PRIV_* enum. */
static iw_handler ipw_priv_handler[] = {
	ipw_wx_set_powermode,
	ipw_wx_get_powermode,
	ipw_wx_set_wireless_mode,
	ipw_wx_get_wireless_mode,
	ipw_wx_set_preamble,
	ipw_wx_get_preamble,
	ipw_wx_reset,
	ipw_wx_sw_reset,
#ifdef CONFIG_IPW2200_MONITOR
	ipw_wx_set_monitor,
#endif
};

/* Top-level Wireless Extensions descriptor registered with the core. */
static struct iw_handler_def ipw_wx_handler_def = {
	.standard = ipw_wx_handlers,
	.num_standard = ARRAY_SIZE(ipw_wx_handlers),
	.num_private = ARRAY_SIZE(ipw_priv_handler),
	.num_private_args = ARRAY_SIZE(ipw_priv_args),
	.private = ipw_priv_handler,
	.private_args = ipw_priv_args,
	.get_wireless_stats = ipw_get_wireless_stats,
};
10057
/*
 * Get wireless statistics.
 * Called by /proc/net/wireless
 * Also called by SIOCGIWSTATS
 */
static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	struct iw_statistics *wstats;

	wstats = &priv->wstats;

	/* if hw is disabled, then ipw_get_ordinal() can't be called.
	 * netdev->get_wireless_stats seems to be called before fw is
	 * initialized.  STATUS_ASSOCIATED will only be set if the hw is up
	 * and associated; if not associated, the values are all meaningless
	 * anyway, so set them all to NULL and INVALID */
	if (!(priv->status & STATUS_ASSOCIATED)) {
		wstats->miss.beacon = 0;
		wstats->discard.retries = 0;
		wstats->qual.qual = 0;
		wstats->qual.level = 0;
		wstats->qual.noise = 0;
		wstats->qual.updated = 7;
		wstats->qual.updated |= IW_QUAL_NOISE_INVALID |
		    IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID;
		return wstats;
	}

	/* Associated: report live link quality from the running
	 * exponential averages. */
	wstats->qual.qual = priv->quality;
	wstats->qual.level = priv->exp_avg_rssi;
	wstats->qual.noise = priv->exp_avg_noise;
	wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED |
	    IW_QUAL_NOISE_UPDATED | IW_QUAL_DBM;

	wstats->miss.beacon = average_value(&priv->average_missed_beacons);
	wstats->discard.retries = priv->last_tx_failures;
	wstats->discard.code = priv->ieee->ieee_stats.rx_discards_undecryptable;

/*	if (ipw_get_ordinal(priv, IPW_ORD_STAT_TX_RETRY, &tx_retry, &len))
	goto fail_get_ordinal;
	wstats->discard.retries += tx_retry; */

	return wstats;
}
10103
10104 /* net device stuff */
10105
10106 static void init_sys_config(struct ipw_sys_config *sys_config)
10107 {
10108 memset(sys_config, 0, sizeof(struct ipw_sys_config));
10109 sys_config->bt_coexistence = 0;
10110 sys_config->answer_broadcast_ssid_probe = 0;
10111 sys_config->accept_all_data_frames = 0;
10112 sys_config->accept_non_directed_frames = 1;
10113 sys_config->exclude_unicast_unencrypted = 0;
10114 sys_config->disable_unicast_decryption = 1;
10115 sys_config->exclude_multicast_unencrypted = 0;
10116 sys_config->disable_multicast_decryption = 1;
10117 if (antenna < CFG_SYS_ANTENNA_BOTH || antenna > CFG_SYS_ANTENNA_B)
10118 antenna = CFG_SYS_ANTENNA_BOTH;
10119 sys_config->antenna_diversity = antenna;
10120 sys_config->pass_crc_to_host = 0; /* TODO: See if 1 gives us FCS */
10121 sys_config->dot11g_auto_detection = 0;
10122 sys_config->enable_cts_to_self = 0;
10123 sys_config->bt_coexist_collision_thr = 0;
10124 sys_config->pass_noise_stats_to_host = 1; /* 1 -- fix for 256 */
10125 sys_config->silence_threshold = 0x1e;
10126 }
10127
10128 static int ipw_net_open(struct net_device *dev)
10129 {
10130 struct ipw_priv *priv = ieee80211_priv(dev);
10131 IPW_DEBUG_INFO("dev->open\n");
10132 /* we should be verifying the device is ready to be opened */
10133 mutex_lock(&priv->mutex);
10134 if (!(priv->status & STATUS_RF_KILL_MASK) &&
10135 (priv->status & STATUS_ASSOCIATED))
10136 netif_start_queue(dev);
10137 mutex_unlock(&priv->mutex);
10138 return 0;
10139 }
10140
/* net_device stop callback: just halt the TX queue; the hardware is
 * torn down elsewhere. */
static int ipw_net_stop(struct net_device *dev)
{
	IPW_DEBUG_INFO("dev->close\n");
	netif_stop_queue(dev);
	return 0;
}
10147
10148 /*
10149 todo:
10150
10151 modify to send one tfd per fragment instead of using chunking. otherwise
10152 we need to heavily modify the ieee80211_skb_to_txb.
10153 */
10154
/* Build and queue one TX frame descriptor (TFD) for the fragments in
 * 'txb' and kick the DMA engine.  Caller holds priv->lock (see
 * ipw_net_hard_start_xmit).  Returns NETDEV_TX_OK in all cases —
 * frames that cannot be sent are silently dropped. */
static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb,
		      int pri)
{
	struct ieee80211_hdr_3addrqos *hdr = (struct ieee80211_hdr_3addrqos *)
	    txb->fragments[0]->data;
	int i = 0;
	struct tfd_frame *tfd;
#ifdef CONFIG_IPW2200_QOS
	/* With QoS, the TX queue is chosen by priority. */
	int tx_id = ipw_get_tx_queue_number(priv, pri);
	struct clx2_tx_queue *txq = &priv->txq[tx_id];
#else
	struct clx2_tx_queue *txq = &priv->txq[0];
#endif
	struct clx2_queue *q = &txq->q;
	u8 id, hdr_len, unicast;
	/* NOTE(review): this outer remaining_bytes is assigned once below
	 * and never read — it is shadowed by a local of the same name in
	 * the reallocation block near the end.  Dead store; candidate for
	 * removal. */
	u16 remaining_bytes;
	int fc;
	DECLARE_MAC_BUF(mac);

	hdr_len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
	/* Pick the destination station id; in IBSS mode unknown peers
	 * are added on the fly. */
	switch (priv->ieee->iw_mode) {
	case IW_MODE_ADHOC:
		unicast = !is_multicast_ether_addr(hdr->addr1);
		id = ipw_find_station(priv, hdr->addr1);
		if (id == IPW_INVALID_STATION) {
			id = ipw_add_station(priv, hdr->addr1);
			if (id == IPW_INVALID_STATION) {
				IPW_WARNING("Attempt to send data to "
					    "invalid cell: %s\n",
					    print_mac(mac, hdr->addr1));
				goto drop;
			}
		}
		break;

	case IW_MODE_INFRA:
	default:
		/* Infrastructure: everything goes through the AP (id 0). */
		unicast = !is_multicast_ether_addr(hdr->addr3);
		id = 0;
		break;
	}

	/* Claim the next free TFD slot and remember the txb so it can be
	 * freed on TX completion. */
	tfd = &txq->bd[q->first_empty];
	txq->txb[q->first_empty] = txb;
	memset(tfd, 0, sizeof(*tfd));
	tfd->u.data.station_number = id;

	tfd->control_flags.message_type = TX_FRAME_TYPE;
	tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;

	tfd->u.data.cmd_id = DINO_CMD_TX;
	tfd->u.data.len = cpu_to_le16(txb->payload_size);
	remaining_bytes = txb->payload_size;

	/* Modulation per the current association mode. */
	if (priv->assoc_request.ieee_mode == IPW_B_MODE)
		tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_CCK;
	else
		tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_OFDM;

	if (priv->assoc_request.preamble_length == DCT_FLAG_SHORT_PREAMBLE)
		tfd->u.data.tx_flags |= DCT_FLAG_SHORT_PREAMBLE;

	/* Fragmentation is handled via TFD chunks, so clear the
	 * more-fragments bit in the 802.11 header. */
	fc = le16_to_cpu(hdr->frame_ctl);
	hdr->frame_ctl = cpu_to_le16(fc & ~IEEE80211_FCTL_MOREFRAGS);

	memcpy(&tfd->u.data.tfd.tfd_24.mchdr, hdr, hdr_len);

	if (likely(unicast))
		tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;

	/* Hardware encryption setup, keyed by the negotiated security
	 * level (CCMP / TKIP / WEP / none). */
	if (txb->encrypted && !priv->ieee->host_encrypt) {
		switch (priv->ieee->sec.level) {
		case SEC_LEVEL_3:
			tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
			    cpu_to_le16(IEEE80211_FCTL_PROTECTED);
			/* XXX: ACK flag must be set for CCMP even if it
			 * is a multicast/broadcast packet, because CCMP
			 * group communication encrypted by GTK is
			 * actually done by the AP. */
			if (!unicast)
				tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;

			tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
			tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_CCM;
			tfd->u.data.key_index = 0;
			tfd->u.data.key_index |= DCT_WEP_INDEX_USE_IMMEDIATE;
			break;
		case SEC_LEVEL_2:
			tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
			    cpu_to_le16(IEEE80211_FCTL_PROTECTED);
			tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
			tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_TKIP;
			tfd->u.data.key_index = DCT_WEP_INDEX_USE_IMMEDIATE;
			break;
		case SEC_LEVEL_1:
			tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
			    cpu_to_le16(IEEE80211_FCTL_PROTECTED);
			tfd->u.data.key_index = priv->ieee->tx_keyidx;
			/* WEP key length selects 64- vs 128-bit mode. */
			if (priv->ieee->sec.key_sizes[priv->ieee->tx_keyidx] <=
			    40)
				tfd->u.data.key_index |= DCT_WEP_KEY_64Bit;
			else
				tfd->u.data.key_index |= DCT_WEP_KEY_128Bit;
			break;
		case SEC_LEVEL_0:
			break;
		default:
			printk(KERN_ERR "Unknow security level %d\n",
			       priv->ieee->sec.level);
			break;
		}
	} else
		/* No hardware encryption */
		tfd->u.data.tx_flags |= DCT_FLAG_NO_WEP;

#ifdef CONFIG_IPW2200_QOS
	if (fc & IEEE80211_STYPE_QOS_DATA)
		ipw_qos_set_tx_queue_command(priv, pri, &(tfd->u.data));
#endif				/* CONFIG_IPW2200_QOS */

	/* payload: map up to NUM_TFD_CHUNKS - 2 fragments directly as
	 * DMA chunks (headers were copied into the TFD above). */
	tfd->u.data.num_chunks = cpu_to_le32(min((u8) (NUM_TFD_CHUNKS - 2),
						 txb->nr_frags));
	IPW_DEBUG_FRAG("%i fragments being sent as %i chunks.\n",
		       txb->nr_frags, le32_to_cpu(tfd->u.data.num_chunks));
	for (i = 0; i < le32_to_cpu(tfd->u.data.num_chunks); i++) {
		IPW_DEBUG_FRAG("Adding fragment %i of %i (%d bytes).\n",
			       i, le32_to_cpu(tfd->u.data.num_chunks),
			       txb->fragments[i]->len - hdr_len);
		IPW_DEBUG_TX("Dumping TX packet frag %i of %i (%d bytes):\n",
			     i, tfd->u.data.num_chunks,
			     txb->fragments[i]->len - hdr_len);
		printk_buf(IPW_DL_TX, txb->fragments[i]->data + hdr_len,
			   txb->fragments[i]->len - hdr_len);

		tfd->u.data.chunk_ptr[i] =
		    cpu_to_le32(pci_map_single
				(priv->pci_dev,
				 txb->fragments[i]->data + hdr_len,
				 txb->fragments[i]->len - hdr_len,
				 PCI_DMA_TODEVICE));
		tfd->u.data.chunk_len[i] =
		    cpu_to_le16(txb->fragments[i]->len - hdr_len);
	}

	/* More fragments than chunk slots: coalesce the remainder into
	 * a single freshly allocated skb occupying the last chunk. */
	if (i != txb->nr_frags) {
		struct sk_buff *skb;
		u16 remaining_bytes = 0;
		int j;

		for (j = i; j < txb->nr_frags; j++)
			remaining_bytes += txb->fragments[j]->len - hdr_len;

		printk(KERN_INFO "Trying to reallocate for %d bytes\n",
		       remaining_bytes);
		skb = alloc_skb(remaining_bytes, GFP_ATOMIC);
		/* On allocation failure the extra fragments are simply
		 * not sent; the frame goes out truncated. */
		if (skb != NULL) {
			tfd->u.data.chunk_len[i] = cpu_to_le16(remaining_bytes);
			for (j = i; j < txb->nr_frags; j++) {
				int size = txb->fragments[j]->len - hdr_len;

				printk(KERN_INFO "Adding frag %d %d...\n",
				       j, size);
				memcpy(skb_put(skb, size),
				       txb->fragments[j]->data + hdr_len, size);
			}
			/* Replace fragment i with the coalesced skb so it
			 * is freed with the txb on completion. */
			dev_kfree_skb_any(txb->fragments[i]);
			txb->fragments[i] = skb;
			tfd->u.data.chunk_ptr[i] =
			    cpu_to_le32(pci_map_single
					(priv->pci_dev, skb->data,
					 remaining_bytes,
					 PCI_DMA_TODEVICE));

			tfd->u.data.num_chunks =
			    cpu_to_le32(le32_to_cpu(tfd->u.data.num_chunks) +
					1);
		}
	}

	/* kick DMA */
	q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
	ipw_write32(priv, q->reg_w, q->first_empty);

	/* Throttle the stack when the queue is nearly full. */
	if (ipw_queue_space(q) < q->high_mark)
		netif_stop_queue(priv->net_dev);

	return NETDEV_TX_OK;

      drop:
	IPW_DEBUG_DROP("Silently dropping Tx packet.\n");
	ieee80211_txb_free(txb);
	return NETDEV_TX_OK;
}
10349
10350 static int ipw_net_is_queue_full(struct net_device *dev, int pri)
10351 {
10352 struct ipw_priv *priv = ieee80211_priv(dev);
10353 #ifdef CONFIG_IPW2200_QOS
10354 int tx_id = ipw_get_tx_queue_number(priv, pri);
10355 struct clx2_tx_queue *txq = &priv->txq[tx_id];
10356 #else
10357 struct clx2_tx_queue *txq = &priv->txq[0];
10358 #endif /* CONFIG_IPW2200_QOS */
10359
10360 if (ipw_queue_space(&txq->q) < txq->q.high_mark)
10361 return 1;
10362
10363 return 0;
10364 }
10365
10366 #ifdef CONFIG_IPW2200_PROMISCUOUS
/* Mirror an outgoing frame to the promiscuous (rtap) interface.
 * Each fragment is copied into a new skb with a radiotap header
 * (channel field only) prepended and injected into the rtap device's
 * ieee80211 RX path.  The prom_priv->filter bits select which frame
 * classes are mirrored and whether only headers are kept. */
static void ipw_handle_promiscuous_tx(struct ipw_priv *priv,
				      struct ieee80211_txb *txb)
{
	struct ieee80211_rx_stats dummystats;
	struct ieee80211_hdr *hdr;
	u8 n;
	u16 filter = priv->prom_priv->filter;
	int hdr_only = 0;

	if (filter & IPW_PROM_NO_TX)
		return;

	memset(&dummystats, 0, sizeof(dummystats));

	/* Filtering of fragment chains is done against the first fragment */
	hdr = (void *)txb->fragments[0]->data;
	if (ieee80211_is_management(le16_to_cpu(hdr->frame_ctl))) {
		if (filter & IPW_PROM_NO_MGMT)
			return;
		if (filter & IPW_PROM_MGMT_HEADER_ONLY)
			hdr_only = 1;
	} else if (ieee80211_is_control(le16_to_cpu(hdr->frame_ctl))) {
		if (filter & IPW_PROM_NO_CTL)
			return;
		if (filter & IPW_PROM_CTL_HEADER_ONLY)
			hdr_only = 1;
	} else if (ieee80211_is_data(le16_to_cpu(hdr->frame_ctl))) {
		if (filter & IPW_PROM_NO_DATA)
			return;
		if (filter & IPW_PROM_DATA_HEADER_ONLY)
			hdr_only = 1;
	}

	for(n=0; n<txb->nr_frags; ++n) {
		struct sk_buff *src = txb->fragments[n];
		struct sk_buff *dst;
		struct ieee80211_radiotap_header *rt_hdr;
		int len;

		/* In header-only mode, copy just the 802.11 header of
		 * each fragment. */
		if (hdr_only) {
			hdr = (void *)src->data;
			len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
		} else
			len = src->len;

		dst = alloc_skb(
			len + IEEE80211_RADIOTAP_HDRLEN, GFP_ATOMIC);
		if (!dst) continue;

		/* Build a minimal radiotap header advertising only the
		 * CHANNEL field. */
		rt_hdr = (void *)skb_put(dst, sizeof(*rt_hdr));

		rt_hdr->it_version = PKTHDR_RADIOTAP_VERSION;
		rt_hdr->it_pad = 0;
		rt_hdr->it_present = 0; /* after all, it's just an idea */
		rt_hdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_CHANNEL);

		/* Channel frequency in MHz, then the channel flags word
		 * chosen by band/modulation. */
		*(__le16*)skb_put(dst, sizeof(u16)) = cpu_to_le16(
			ieee80211chan2mhz(priv->channel));
		if (priv->channel > 14) 	/* 802.11a */
			*(__le16*)skb_put(dst, sizeof(u16)) =
				cpu_to_le16(IEEE80211_CHAN_OFDM |
					     IEEE80211_CHAN_5GHZ);
		else if (priv->ieee->mode == IEEE_B) /* 802.11b */
			*(__le16*)skb_put(dst, sizeof(u16)) =
				cpu_to_le16(IEEE80211_CHAN_CCK |
					     IEEE80211_CHAN_2GHZ);
		else 		/* 802.11g */
			*(__le16*)skb_put(dst, sizeof(u16)) =
				cpu_to_le16(IEEE80211_CHAN_OFDM |
					     IEEE80211_CHAN_2GHZ);

		rt_hdr->it_len = cpu_to_le16(dst->len);

		skb_copy_from_linear_data(src, skb_put(dst, len), len);

		/* ieee80211_rx() takes ownership on success; free our
		 * copy only when it refuses the skb. */
		if (!ieee80211_rx(priv->prom_priv->ieee, dst, &dummystats))
			dev_kfree_skb_any(dst);
	}
}
10446 #endif
10447
/* TX entry point called by the ieee80211 layer with a fragmented
 * frame.  Runs under priv->lock with IRQs disabled.  Returns
 * NETDEV_TX_OK on success/drop, or 1 (busy) when not associated so
 * the stack requeues the frame. */
static int ipw_net_hard_start_xmit(struct ieee80211_txb *txb,
				   struct net_device *dev, int pri)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	unsigned long flags;
	int ret;

	IPW_DEBUG_TX("dev->xmit(%d bytes)\n", txb->payload_size);
	spin_lock_irqsave(&priv->lock, flags);

	/* No association -> no link; stop the queue and report busy. */
	if (!(priv->status & STATUS_ASSOCIATED)) {
		IPW_DEBUG_INFO("Tx attempt while not associated.\n");
		priv->ieee->stats.tx_carrier_errors++;
		netif_stop_queue(dev);
		goto fail_unlock;
	}

#ifdef CONFIG_IPW2200_PROMISCUOUS
	/* Mirror the frame to the rtap interface when it is up. */
	if (rtap_iface && netif_running(priv->prom_net_dev))
		ipw_handle_promiscuous_tx(priv, txb);
#endif

	ret = ipw_tx_skb(priv, txb, pri);
	if (ret == NETDEV_TX_OK)
		__ipw_led_activity_on(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	return ret;

      fail_unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
	return 1;
}
10481
10482 static struct net_device_stats *ipw_net_get_stats(struct net_device *dev)
10483 {
10484 struct ipw_priv *priv = ieee80211_priv(dev);
10485
10486 priv->ieee->stats.tx_packets = priv->tx_packets;
10487 priv->ieee->stats.rx_packets = priv->rx_packets;
10488 return &priv->ieee->stats;
10489 }
10490
/* net_device set_multicast_list hook.  Intentionally empty: this
 * driver does no host-side multicast filtering. */
static void ipw_net_set_multicast_list(struct net_device *dev)
{

}
10495
10496 static int ipw_net_set_mac_address(struct net_device *dev, void *p)
10497 {
10498 struct ipw_priv *priv = ieee80211_priv(dev);
10499 struct sockaddr *addr = p;
10500 DECLARE_MAC_BUF(mac);
10501
10502 if (!is_valid_ether_addr(addr->sa_data))
10503 return -EADDRNOTAVAIL;
10504 mutex_lock(&priv->mutex);
10505 priv->config |= CFG_CUSTOM_MAC;
10506 memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
10507 printk(KERN_INFO "%s: Setting MAC to %s\n",
10508 priv->net_dev->name, print_mac(mac, priv->mac_addr));
10509 queue_work(priv->workqueue, &priv->adapter_restart);
10510 mutex_unlock(&priv->mutex);
10511 return 0;
10512 }
10513
10514 static void ipw_ethtool_get_drvinfo(struct net_device *dev,
10515 struct ethtool_drvinfo *info)
10516 {
10517 struct ipw_priv *p = ieee80211_priv(dev);
10518 char vers[64];
10519 char date[32];
10520 u32 len;
10521
10522 strcpy(info->driver, DRV_NAME);
10523 strcpy(info->version, DRV_VERSION);
10524
10525 len = sizeof(vers);
10526 ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len);
10527 len = sizeof(date);
10528 ipw_get_ordinal(p, IPW_ORD_STAT_FW_DATE, date, &len);
10529
10530 snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)",
10531 vers, date);
10532 strcpy(info->bus_info, pci_name(p->pci_dev));
10533 info->eedump_len = IPW_EEPROM_IMAGE_SIZE;
10534 }
10535
10536 static u32 ipw_ethtool_get_link(struct net_device *dev)
10537 {
10538 struct ipw_priv *priv = ieee80211_priv(dev);
10539 return (priv->status & STATUS_ASSOCIATED) != 0;
10540 }
10541
/* ethtool get_eeprom_len hook: the adapter EEPROM image is fixed-size. */
static int ipw_ethtool_get_eeprom_len(struct net_device *dev)
{
	return IPW_EEPROM_IMAGE_SIZE;
}
10546
10547 static int ipw_ethtool_get_eeprom(struct net_device *dev,
10548 struct ethtool_eeprom *eeprom, u8 * bytes)
10549 {
10550 struct ipw_priv *p = ieee80211_priv(dev);
10551
10552 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
10553 return -EINVAL;
10554 mutex_lock(&p->mutex);
10555 memcpy(bytes, &p->eeprom[eeprom->offset], eeprom->len);
10556 mutex_unlock(&p->mutex);
10557 return 0;
10558 }
10559
10560 static int ipw_ethtool_set_eeprom(struct net_device *dev,
10561 struct ethtool_eeprom *eeprom, u8 * bytes)
10562 {
10563 struct ipw_priv *p = ieee80211_priv(dev);
10564 int i;
10565
10566 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
10567 return -EINVAL;
10568 mutex_lock(&p->mutex);
10569 memcpy(&p->eeprom[eeprom->offset], bytes, eeprom->len);
10570 for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
10571 ipw_write8(p, i + IPW_EEPROM_DATA, p->eeprom[i]);
10572 mutex_unlock(&p->mutex);
10573 return 0;
10574 }
10575
/* ethtool entry points registered for this net_device. */
static const struct ethtool_ops ipw_ethtool_ops = {
	.get_link = ipw_ethtool_get_link,
	.get_drvinfo = ipw_ethtool_get_drvinfo,
	.get_eeprom_len = ipw_ethtool_get_eeprom_len,
	.get_eeprom = ipw_ethtool_get_eeprom,
	.set_eeprom = ipw_ethtool_set_eeprom,
};
10583
/*
 * Top-half interrupt handler.  Reads the pending interrupt bits,
 * masks further firmware interrupts, acks what we took, and defers
 * all processing to the irq tasklet.  Only the dedicated irq_lock is
 * taken here; returns IRQ_NONE for spurious/shared interrupts.
 */
static irqreturn_t ipw_isr(int irq, void *data)
{
	struct ipw_priv *priv = data;
	u32 inta, inta_mask;

	if (!priv)
		return IRQ_NONE;

	spin_lock(&priv->irq_lock);

	if (!(priv->status & STATUS_INT_ENABLED)) {
		/* IRQ is disabled */
		goto none;
	}

	inta = ipw_read32(priv, IPW_INTA_RW);
	inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);

	if (inta == 0xFFFFFFFF) {
		/* Hardware disappeared (all-ones read from dead device) */
		IPW_WARNING("IRQ INTA == 0xFFFFFFFF\n");
		goto none;
	}

	if (!(inta & (IPW_INTA_MASK_ALL & inta_mask))) {
		/* Shared interrupt: none of the pending bits are ours */
		goto none;
	}

	/* tell the device to stop sending interrupts */
	__ipw_disable_interrupts(priv);

	/* ack current interrupts */
	inta &= (IPW_INTA_MASK_ALL & inta_mask);
	ipw_write32(priv, IPW_INTA_RW, inta);

	/* Cache INTA value for our tasklet */
	priv->isr_inta = inta;

	tasklet_schedule(&priv->irq_tasklet);

	spin_unlock(&priv->irq_lock);

	return IRQ_HANDLED;
      none:
	spin_unlock(&priv->irq_lock);
	return IRQ_NONE;
}
10632
10633 static void ipw_rf_kill(void *adapter)
10634 {
10635 struct ipw_priv *priv = adapter;
10636 unsigned long flags;
10637
10638 spin_lock_irqsave(&priv->lock, flags);
10639
10640 if (rf_kill_active(priv)) {
10641 IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
10642 if (priv->workqueue)
10643 queue_delayed_work(priv->workqueue,
10644 &priv->rf_kill, 2 * HZ);
10645 goto exit_unlock;
10646 }
10647
10648 /* RF Kill is now disabled, so bring the device back up */
10649
10650 if (!(priv->status & STATUS_RF_KILL_MASK)) {
10651 IPW_DEBUG_RF_KILL("HW RF Kill no longer active, restarting "
10652 "device\n");
10653
10654 /* we can not do an adapter restart while inside an irq lock */
10655 queue_work(priv->workqueue, &priv->adapter_restart);
10656 } else
10657 IPW_DEBUG_RF_KILL("HW RF Kill deactivated. SW RF Kill still "
10658 "enabled\n");
10659
10660 exit_unlock:
10661 spin_unlock_irqrestore(&priv->lock, flags);
10662 }
10663
/* Workqueue wrapper: run ipw_rf_kill() under the driver mutex. */
static void ipw_bg_rf_kill(struct work_struct *work)
{
	struct ipw_priv *priv =
		container_of(work, struct ipw_priv, rf_kill.work);
	mutex_lock(&priv->mutex);
	ipw_rf_kill(priv);
	mutex_unlock(&priv->mutex);
}
10672
/*
 * Bring the netdev side up after a successful association: reset
 * duplicate-frame tracking, enable the TX queue, restart statistics,
 * and notify userspace.  Called under priv->mutex via ipw_bg_link_up.
 */
static void ipw_link_up(struct ipw_priv *priv)
{
	/* Forget fragment/sequence history from any previous BSS so
	 * duplicate detection starts clean. */
	priv->last_seq_num = -1;
	priv->last_frag_num = -1;
	priv->last_packet_time = 0;

	netif_carrier_on(priv->net_dev);
	if (netif_queue_stopped(priv->net_dev)) {
		IPW_DEBUG_NOTIF("waking queue\n");
		netif_wake_queue(priv->net_dev);
	} else {
		IPW_DEBUG_NOTIF("starting queue\n");
		netif_start_queue(priv->net_dev);
	}

	/* No need to keep scanning now that we are associated. */
	cancel_delayed_work(&priv->request_scan);
	cancel_delayed_work(&priv->scan_event);
	ipw_reset_stats(priv);
	/* Ensure the rate is updated immediately */
	priv->last_rate = ipw_get_current_rate(priv);
	ipw_gather_stats(priv);
	ipw_led_link_up(priv);
	notify_wx_assoc_event(priv);

	/* Optional periodic background scan while associated. */
	if (priv->config & CFG_BACKGROUND_SCAN)
		queue_delayed_work(priv->workqueue, &priv->request_scan, HZ);
}
10700
/* Workqueue wrapper: run ipw_link_up() under the driver mutex. */
static void ipw_bg_link_up(struct work_struct *work)
{
	struct ipw_priv *priv =
		container_of(work, struct ipw_priv, link_up);
	mutex_lock(&priv->mutex);
	ipw_link_up(priv);
	mutex_unlock(&priv->mutex);
}
10709
/*
 * Take the netdev side down on disassociation: stop the queue, notify
 * userspace, cancel association-related deferred work, and (unless we
 * are unloading) queue a new scan to find a network to rejoin.
 */
static void ipw_link_down(struct ipw_priv *priv)
{
	ipw_led_link_down(priv);
	netif_carrier_off(priv->net_dev);
	netif_stop_queue(priv->net_dev);
	notify_wx_assoc_event(priv);

	/* Cancel any queued work ... */
	cancel_delayed_work(&priv->request_scan);
	cancel_delayed_work(&priv->adhoc_check);
	cancel_delayed_work(&priv->gather_stats);

	ipw_reset_stats(priv);

	if (!(priv->status & STATUS_EXIT_PENDING)) {
		/* Queue up another scan... */
		queue_delayed_work(priv->workqueue, &priv->request_scan, 0);
	} else
		/* Unloading: make sure no scan-age event fires afterwards. */
		cancel_delayed_work(&priv->scan_event);
}
10730
/* Workqueue wrapper: run ipw_link_down() under the driver mutex. */
static void ipw_bg_link_down(struct work_struct *work)
{
	struct ipw_priv *priv =
		container_of(work, struct ipw_priv, link_down);
	mutex_lock(&priv->mutex);
	ipw_link_down(priv);
	mutex_unlock(&priv->mutex);
}
10739
10740 static int __devinit ipw_setup_deferred_work(struct ipw_priv *priv)
10741 {
10742 int ret = 0;
10743
10744 priv->workqueue = create_workqueue(DRV_NAME);
10745 init_waitqueue_head(&priv->wait_command_queue);
10746 init_waitqueue_head(&priv->wait_state);
10747
10748 INIT_DELAYED_WORK(&priv->adhoc_check, ipw_bg_adhoc_check);
10749 INIT_WORK(&priv->associate, ipw_bg_associate);
10750 INIT_WORK(&priv->disassociate, ipw_bg_disassociate);
10751 INIT_WORK(&priv->system_config, ipw_system_config);
10752 INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish);
10753 INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart);
10754 INIT_DELAYED_WORK(&priv->rf_kill, ipw_bg_rf_kill);
10755 INIT_WORK(&priv->up, ipw_bg_up);
10756 INIT_WORK(&priv->down, ipw_bg_down);
10757 INIT_DELAYED_WORK(&priv->request_scan, ipw_request_scan);
10758 INIT_DELAYED_WORK(&priv->scan_event, ipw_scan_event);
10759 INIT_WORK(&priv->request_passive_scan, ipw_request_passive_scan);
10760 INIT_DELAYED_WORK(&priv->gather_stats, ipw_bg_gather_stats);
10761 INIT_WORK(&priv->abort_scan, ipw_bg_abort_scan);
10762 INIT_WORK(&priv->roam, ipw_bg_roam);
10763 INIT_DELAYED_WORK(&priv->scan_check, ipw_bg_scan_check);
10764 INIT_WORK(&priv->link_up, ipw_bg_link_up);
10765 INIT_WORK(&priv->link_down, ipw_bg_link_down);
10766 INIT_DELAYED_WORK(&priv->led_link_on, ipw_bg_led_link_on);
10767 INIT_DELAYED_WORK(&priv->led_link_off, ipw_bg_led_link_off);
10768 INIT_DELAYED_WORK(&priv->led_act_off, ipw_bg_led_activity_off);
10769 INIT_WORK(&priv->merge_networks, ipw_merge_adhoc_network);
10770
10771 #ifdef CONFIG_IPW2200_QOS
10772 INIT_WORK(&priv->qos_activate, ipw_bg_qos_activate);
10773 #endif /* CONFIG_IPW2200_QOS */
10774
10775 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
10776 ipw_irq_tasklet, (unsigned long)priv);
10777
10778 return ret;
10779 }
10780
/*
 * ieee80211 set_security callback.  Copies the requested security
 * settings (per-slot WEP keys, active key index, auth mode, privacy
 * enable, encryption level) into our ieee80211 state, mirroring the
 * relevant bits into priv->capability, and marks the firmware
 * security state stale via STATUS_SECURITY_UPDATED.
 */
static void shim__set_security(struct net_device *dev,
			       struct ieee80211_security *sec)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	int i;
	/* Bits 0-3 of sec->flags select which key slots are updated. */
	for (i = 0; i < 4; i++) {
		if (sec->flags & (1 << i)) {
			priv->ieee->sec.encode_alg[i] = sec->encode_alg[i];
			priv->ieee->sec.key_sizes[i] = sec->key_sizes[i];
			if (sec->key_sizes[i] == 0)
				/* Zero-length key means "clear this slot". */
				priv->ieee->sec.flags &= ~(1 << i);
			else {
				memcpy(priv->ieee->sec.keys[i], sec->keys[i],
				       sec->key_sizes[i]);
				priv->ieee->sec.flags |= (1 << i);
			}
			priv->status |= STATUS_SECURITY_UPDATED;
		} else if (sec->level != SEC_LEVEL_1)
			priv->ieee->sec.flags &= ~(1 << i);
	}

	if (sec->flags & SEC_ACTIVE_KEY) {
		if (sec->active_key <= 3) {
			priv->ieee->sec.active_key = sec->active_key;
			priv->ieee->sec.flags |= SEC_ACTIVE_KEY;
		} else
			priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
		priv->status |= STATUS_SECURITY_UPDATED;
	} else
		priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;

	if ((sec->flags & SEC_AUTH_MODE) &&
	    (priv->ieee->sec.auth_mode != sec->auth_mode)) {
		priv->ieee->sec.auth_mode = sec->auth_mode;
		priv->ieee->sec.flags |= SEC_AUTH_MODE;
		/* Shared-key auth is advertised through priv->capability. */
		if (sec->auth_mode == WLAN_AUTH_SHARED_KEY)
			priv->capability |= CAP_SHARED_KEY;
		else
			priv->capability &= ~CAP_SHARED_KEY;
		priv->status |= STATUS_SECURITY_UPDATED;
	}

	if (sec->flags & SEC_ENABLED && priv->ieee->sec.enabled != sec->enabled) {
		priv->ieee->sec.flags |= SEC_ENABLED;
		priv->ieee->sec.enabled = sec->enabled;
		priv->status |= STATUS_SECURITY_UPDATED;
		if (sec->enabled)
			priv->capability |= CAP_PRIVACY_ON;
		else
			priv->capability &= ~CAP_PRIVACY_ON;
	}

	if (sec->flags & SEC_ENCRYPT)
		priv->ieee->sec.encrypt = sec->encrypt;

	if (sec->flags & SEC_LEVEL && priv->ieee->sec.level != sec->level) {
		priv->ieee->sec.level = sec->level;
		priv->ieee->sec.flags |= SEC_LEVEL;
		priv->status |= STATUS_SECURITY_UPDATED;
	}

	/* Push keys to hardware when the firmware does the crypto. */
	if (!priv->ieee->host_encrypt && (sec->flags & SEC_ENCRYPT))
		ipw_set_hwcrypto_keys(priv);

	/* To match current functionality of ipw2100 (which works well w/
	 * various supplicants, we don't force a disassociate if the
	 * privacy capability changes ... */
#if 0
	if ((priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) &&
	    (((priv->assoc_request.capability &
	       cpu_to_le16(WLAN_CAPABILITY_PRIVACY)) && !sec->enabled) ||
	     (!(priv->assoc_request.capability &
		cpu_to_le16(WLAN_CAPABILITY_PRIVACY)) && sec->enabled))) {
		IPW_DEBUG_ASSOC("Disassociating due to capability "
				"change.\n");
		ipw_disassociate(priv);
	}
#endif
}
10860
10861 static int init_supported_rates(struct ipw_priv *priv,
10862 struct ipw_supported_rates *rates)
10863 {
10864 /* TODO: Mask out rates based on priv->rates_mask */
10865
10866 memset(rates, 0, sizeof(*rates));
10867 /* configure supported rates */
10868 switch (priv->ieee->freq_band) {
10869 case IEEE80211_52GHZ_BAND:
10870 rates->ieee_mode = IPW_A_MODE;
10871 rates->purpose = IPW_RATE_CAPABILITIES;
10872 ipw_add_ofdm_scan_rates(rates, IEEE80211_CCK_MODULATION,
10873 IEEE80211_OFDM_DEFAULT_RATES_MASK);
10874 break;
10875
10876 default: /* Mixed or 2.4Ghz */
10877 rates->ieee_mode = IPW_G_MODE;
10878 rates->purpose = IPW_RATE_CAPABILITIES;
10879 ipw_add_cck_scan_rates(rates, IEEE80211_CCK_MODULATION,
10880 IEEE80211_CCK_DEFAULT_RATES_MASK);
10881 if (priv->ieee->modulation & IEEE80211_OFDM_MODULATION) {
10882 ipw_add_ofdm_scan_rates(rates, IEEE80211_CCK_MODULATION,
10883 IEEE80211_OFDM_DEFAULT_RATES_MASK);
10884 }
10885 break;
10886 }
10887
10888 return 0;
10889 }
10890
/*
 * Configure the device after a firmware load (called from ipw_up):
 * TX power, MAC address, system config, supported rates, RTS
 * threshold and QoS, ending with HOST_COMPLETE which moves the
 * firmware to its RUN state.  Returns 0 on success, -EIO if any
 * host command fails.
 */
static int ipw_config(struct ipw_priv *priv)
{
	/* This is only called from ipw_up, which resets/reloads the firmware
	   so, we don't need to first disable the card before we configure
	   it */
	if (ipw_set_tx_power(priv))
		goto error;

	/* initialize adapter address */
	if (ipw_send_adapter_address(priv, priv->net_dev->dev_addr))
		goto error;

	/* set basic system config settings */
	init_sys_config(&priv->sys_config);

	/* Support Bluetooth if we have BT h/w on board, and user wants to.
	 * Does not support BT priority yet (don't abort or defer our Tx) */
	if (bt_coexist) {
		unsigned char bt_caps = priv->eeprom[EEPROM_SKU_CAPABILITY];

		if (bt_caps & EEPROM_SKU_CAP_BT_CHANNEL_SIG)
			priv->sys_config.bt_coexistence
			    |= CFG_BT_COEXISTENCE_SIGNAL_CHNL;
		if (bt_caps & EEPROM_SKU_CAP_BT_OOB)
			priv->sys_config.bt_coexistence
			    |= CFG_BT_COEXISTENCE_OOB;
	}

#ifdef CONFIG_IPW2200_PROMISCUOUS
	/* Keep wide-open RX filters if the radiotap interface is up. */
	if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
		priv->sys_config.accept_all_data_frames = 1;
		priv->sys_config.accept_non_directed_frames = 1;
		priv->sys_config.accept_all_mgmt_bcpr = 1;
		priv->sys_config.accept_all_mgmt_frames = 1;
	}
#endif

	/* Only ad-hoc nodes answer broadcast-SSID probe requests. */
	if (priv->ieee->iw_mode == IW_MODE_ADHOC)
		priv->sys_config.answer_broadcast_ssid_probe = 1;
	else
		priv->sys_config.answer_broadcast_ssid_probe = 0;

	if (ipw_send_system_config(priv))
		goto error;

	init_supported_rates(priv, &priv->rates);
	if (ipw_send_supported_rates(priv, &priv->rates))
		goto error;

	/* Set request-to-send threshold */
	if (priv->rts_threshold) {
		if (ipw_send_rts_threshold(priv, priv->rts_threshold))
			goto error;
	}
#ifdef CONFIG_IPW2200_QOS
	IPW_DEBUG_QOS("QoS: call ipw_qos_activate\n");
	ipw_qos_activate(priv, NULL);
#endif				/* CONFIG_IPW2200_QOS */

	if (ipw_set_random_seed(priv))
		goto error;

	/* final state transition to the RUN state */
	if (ipw_send_host_complete(priv))
		goto error;

	priv->status |= STATUS_INIT;

	ipw_led_init(priv);
	ipw_led_radio_on(priv);
	priv->notif_missed_beacons = 0;

	/* Set hardware WEP key if it is configured. */
	if ((priv->capability & CAP_PRIVACY_ON) &&
	    (priv->ieee->sec.level == SEC_LEVEL_1) &&
	    !(priv->ieee->host_encrypt || priv->ieee->host_decrypt))
		ipw_set_hwcrypto_keys(priv);

	return 0;

      error:
	return -EIO;
}
10974
10975 /*
10976 * NOTE:
10977 *
10978 * These tables have been tested in conjunction with the
10979 * Intel PRO/Wireless 2200BG and 2915ABG Network Connection Adapters.
10980 *
10981 * Altering this values, using it on other hardware, or in geographies
10982 * not intended for resale of the above mentioned Intel adapters has
10983 * not been tested.
10984 *
10985 * Remember to update the table in README.ipw2200 when changing this
10986 * table.
10987 *
10988 */
/*
 * Geography (regulatory domain) table.  The 3-character name is
 * compared byte-for-byte against the SKU/country code read from the
 * EEPROM in ipw_up(); entry 0 ("---", restricted) is the fallback.
 * Channel entries are { frequency in MHz, channel number, optional
 * IEEE80211_CH_* restriction flags }.
 */
static const struct ieee80211_geo ipw_geos[] = {
	{			/* Restricted */
	 "---",
	 .bg_channels = 11,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11}},
	 },

	{			/* Custom US/Canada */
	 "ZZF",
	 .bg_channels = 11,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11}},
	 .a_channels = 8,
	 .a = {{5180, 36},
	       {5200, 40},
	       {5220, 44},
	       {5240, 48},
	       {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
	       {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
	       {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
	       {5320, 64, IEEE80211_CH_PASSIVE_ONLY}},
	 },

	{			/* Rest of World */
	 "ZZD",
	 .bg_channels = 13,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11}, {2467, 12},
		{2472, 13}},
	 },

	{			/* Custom USA & Europe & High */
	 "ZZA",
	 .bg_channels = 11,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11}},
	 .a_channels = 13,
	 .a = {{5180, 36},
	       {5200, 40},
	       {5220, 44},
	       {5240, 48},
	       {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
	       {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
	       {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
	       {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
	       {5745, 149},
	       {5765, 153},
	       {5785, 157},
	       {5805, 161},
	       {5825, 165}},
	 },

	{			/* Custom NA & Europe */
	 "ZZB",
	 .bg_channels = 11,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11}},
	 .a_channels = 13,
	 .a = {{5180, 36},
	       {5200, 40},
	       {5220, 44},
	       {5240, 48},
	       {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
	       {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
	       {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
	       {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
	       {5745, 149, IEEE80211_CH_PASSIVE_ONLY},
	       {5765, 153, IEEE80211_CH_PASSIVE_ONLY},
	       {5785, 157, IEEE80211_CH_PASSIVE_ONLY},
	       {5805, 161, IEEE80211_CH_PASSIVE_ONLY},
	       {5825, 165, IEEE80211_CH_PASSIVE_ONLY}},
	 },

	{			/* Custom Japan */
	 "ZZC",
	 .bg_channels = 11,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11}},
	 .a_channels = 4,
	 .a = {{5170, 34}, {5190, 38},
	       {5210, 42}, {5230, 46}},
	 },

	{			/* Custom */
	 "ZZM",
	 .bg_channels = 11,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11}},
	 },

	{			/* Europe */
	 "ZZE",
	 .bg_channels = 13,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11}, {2467, 12},
		{2472, 13}},
	 .a_channels = 19,
	 .a = {{5180, 36},
	       {5200, 40},
	       {5220, 44},
	       {5240, 48},
	       {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
	       {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
	       {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
	       {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
	       {5500, 100, IEEE80211_CH_PASSIVE_ONLY},
	       {5520, 104, IEEE80211_CH_PASSIVE_ONLY},
	       {5540, 108, IEEE80211_CH_PASSIVE_ONLY},
	       {5560, 112, IEEE80211_CH_PASSIVE_ONLY},
	       {5580, 116, IEEE80211_CH_PASSIVE_ONLY},
	       {5600, 120, IEEE80211_CH_PASSIVE_ONLY},
	       {5620, 124, IEEE80211_CH_PASSIVE_ONLY},
	       {5640, 128, IEEE80211_CH_PASSIVE_ONLY},
	       {5660, 132, IEEE80211_CH_PASSIVE_ONLY},
	       {5680, 136, IEEE80211_CH_PASSIVE_ONLY},
	       {5700, 140, IEEE80211_CH_PASSIVE_ONLY}},
	 },

	{			/* Custom Japan */
	 "ZZJ",
	 .bg_channels = 14,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11}, {2467, 12},
		{2472, 13}, {2484, 14, IEEE80211_CH_B_ONLY}},
	 .a_channels = 4,
	 .a = {{5170, 34}, {5190, 38},
	       {5210, 42}, {5230, 46}},
	 },

	{			/* Rest of World */
	 "ZZR",
	 .bg_channels = 14,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11}, {2467, 12},
		{2472, 13}, {2484, 14, IEEE80211_CH_B_ONLY |
			     IEEE80211_CH_PASSIVE_ONLY}},
	 },

	{			/* High Band */
	 "ZZH",
	 .bg_channels = 13,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11},
		{2467, 12, IEEE80211_CH_PASSIVE_ONLY},
		{2472, 13, IEEE80211_CH_PASSIVE_ONLY}},
	 .a_channels = 4,
	 .a = {{5745, 149}, {5765, 153},
	       {5785, 157}, {5805, 161}},
	 },

	{			/* Custom Europe */
	 "ZZG",
	 .bg_channels = 13,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11},
		{2467, 12}, {2472, 13}},
	 .a_channels = 4,
	 .a = {{5180, 36}, {5200, 40},
	       {5220, 44}, {5240, 48}},
	 },

	{			/* Europe */
	 "ZZK",
	 .bg_channels = 13,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11},
		{2467, 12, IEEE80211_CH_PASSIVE_ONLY},
		{2472, 13, IEEE80211_CH_PASSIVE_ONLY}},
	 .a_channels = 24,
	 .a = {{5180, 36, IEEE80211_CH_PASSIVE_ONLY},
	       {5200, 40, IEEE80211_CH_PASSIVE_ONLY},
	       {5220, 44, IEEE80211_CH_PASSIVE_ONLY},
	       {5240, 48, IEEE80211_CH_PASSIVE_ONLY},
	       {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
	       {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
	       {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
	       {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
	       {5500, 100, IEEE80211_CH_PASSIVE_ONLY},
	       {5520, 104, IEEE80211_CH_PASSIVE_ONLY},
	       {5540, 108, IEEE80211_CH_PASSIVE_ONLY},
	       {5560, 112, IEEE80211_CH_PASSIVE_ONLY},
	       {5580, 116, IEEE80211_CH_PASSIVE_ONLY},
	       {5600, 120, IEEE80211_CH_PASSIVE_ONLY},
	       {5620, 124, IEEE80211_CH_PASSIVE_ONLY},
	       {5640, 128, IEEE80211_CH_PASSIVE_ONLY},
	       {5660, 132, IEEE80211_CH_PASSIVE_ONLY},
	       {5680, 136, IEEE80211_CH_PASSIVE_ONLY},
	       {5700, 140, IEEE80211_CH_PASSIVE_ONLY},
	       {5745, 149, IEEE80211_CH_PASSIVE_ONLY},
	       {5765, 153, IEEE80211_CH_PASSIVE_ONLY},
	       {5785, 157, IEEE80211_CH_PASSIVE_ONLY},
	       {5805, 161, IEEE80211_CH_PASSIVE_ONLY},
	       {5825, 165, IEEE80211_CH_PASSIVE_ONLY}},
	 },

	{			/* Europe */
	 "ZZL",
	 .bg_channels = 11,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11}},
	 .a_channels = 13,
	 .a = {{5180, 36, IEEE80211_CH_PASSIVE_ONLY},
	       {5200, 40, IEEE80211_CH_PASSIVE_ONLY},
	       {5220, 44, IEEE80211_CH_PASSIVE_ONLY},
	       {5240, 48, IEEE80211_CH_PASSIVE_ONLY},
	       {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
	       {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
	       {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
	       {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
	       {5745, 149, IEEE80211_CH_PASSIVE_ONLY},
	       {5765, 153, IEEE80211_CH_PASSIVE_ONLY},
	       {5785, 157, IEEE80211_CH_PASSIVE_ONLY},
	       {5805, 161, IEEE80211_CH_PASSIVE_ONLY},
	       {5825, 165, IEEE80211_CH_PASSIVE_ONLY}},
	 }
};
11234
#define MAX_HW_RESTARTS 5
/*
 * Bring the device up: allocate the command log (first call only),
 * then load the firmware, select the regulatory geography from the
 * EEPROM country code, and configure the device — retrying the whole
 * load/config cycle up to MAX_HW_RESTARTS times.  Returns 0 on
 * success (or when deliberately held off by RF-kill), negative errno
 * on failure.
 */
static int ipw_up(struct ipw_priv *priv)
{
	int rc, i, j;

	if (priv->status & STATUS_EXIT_PENDING)
		return -EIO;

	if (cmdlog && !priv->cmdlog) {
		priv->cmdlog = kcalloc(cmdlog, sizeof(*priv->cmdlog),
				       GFP_KERNEL);
		if (priv->cmdlog == NULL) {
			IPW_ERROR("Error allocating %d command log entries.\n",
				  cmdlog);
			return -ENOMEM;
		} else {
			priv->cmdlog_len = cmdlog;
		}
	}

	for (i = 0; i < MAX_HW_RESTARTS; i++) {
		/* Load the microcode, firmware, and eeprom.
		 * Also start the clocks. */
		rc = ipw_load(priv);
		if (rc) {
			IPW_ERROR("Unable to load firmware: %d\n", rc);
			return rc;
		}

		ipw_init_ordinals(priv);
		if (!(priv->config & CFG_CUSTOM_MAC))
			eeprom_parse_mac(priv, priv->mac_addr);
		memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN);

		/* Match the EEPROM SKU country code against ipw_geos[];
		 * unknown codes fall back to the restricted entry 0. */
		for (j = 0; j < ARRAY_SIZE(ipw_geos); j++) {
			if (!memcmp(&priv->eeprom[EEPROM_COUNTRY_CODE],
				    ipw_geos[j].name, 3))
				break;
		}
		if (j == ARRAY_SIZE(ipw_geos)) {
			IPW_WARNING("SKU [%c%c%c] not recognized.\n",
				    priv->eeprom[EEPROM_COUNTRY_CODE + 0],
				    priv->eeprom[EEPROM_COUNTRY_CODE + 1],
				    priv->eeprom[EEPROM_COUNTRY_CODE + 2]);
			j = 0;
		}
		if (ieee80211_set_geo(priv->ieee, &ipw_geos[j])) {
			IPW_WARNING("Could not set geography.");
			return 0;
		}

		if (priv->status & STATUS_RF_KILL_SW) {
			IPW_WARNING("Radio disabled by module parameter.\n");
			return 0;
		} else if (rf_kill_active(priv)) {
			IPW_WARNING("Radio Frequency Kill Switch is On:\n"
				    "Kill switch must be turned off for "
				    "wireless networking to work.\n");
			/* Poll the kill switch so the device comes up
			 * automatically once it is released. */
			queue_delayed_work(priv->workqueue, &priv->rf_kill,
					   2 * HZ);
			return 0;
		}

		rc = ipw_config(priv);
		if (!rc) {
			IPW_DEBUG_INFO("Configured device on count %i\n", i);

			/* If configure to try and auto-associate, kick
			 * off a scan. */
			queue_delayed_work(priv->workqueue,
					   &priv->request_scan, 0);

			return 0;
		}

		IPW_DEBUG_INFO("Device configuration failed: 0x%08X\n", rc);
		IPW_DEBUG_INFO("Failed to config device on retry %d of %d\n",
			       i, MAX_HW_RESTARTS);

		/* We had an error bringing up the hardware, so take it
		 * all the way back down so we can try again */
		ipw_down(priv);
	}

	/* tried to restart and config the device for as long as our
	 * patience could withstand */
	IPW_ERROR("Unable to initialize device after %d attempts.\n", i);

	return -EIO;
}
11325
/* Workqueue wrapper: run ipw_up() under the driver mutex. */
static void ipw_bg_up(struct work_struct *work)
{
	struct ipw_priv *priv =
		container_of(work, struct ipw_priv, up);
	mutex_lock(&priv->mutex);
	ipw_up(priv);
	mutex_unlock(&priv->mutex);
}
11334
/*
 * Gracefully stop the device: abort any scan, disassociate, briefly
 * poll for those state bits to clear, then send CARD_DISABLE and
 * drop STATUS_INIT.
 */
static void ipw_deinit(struct ipw_priv *priv)
{
	int i;

	if (priv->status & STATUS_SCANNING) {
		IPW_DEBUG_INFO("Aborting scan during shutdown.\n");
		ipw_abort_scan(priv);
	}

	if (priv->status & STATUS_ASSOCIATED) {
		IPW_DEBUG_INFO("Disassociating during shutdown.\n");
		ipw_disassociate(priv);
	}

	ipw_led_shutdown(priv);

	/* Busy-wait for status to change to not scanning and not
	 * associated (disassociation can take a while for a full 802.11
	 * exchange).  NOTE(review): 1000 iterations of udelay(10) is
	 * only ~10ms, not the 1s the original comment claimed. */
	for (i = 1000; i && (priv->status &
			     (STATUS_DISASSOCIATING |
			      STATUS_ASSOCIATED | STATUS_SCANNING)); i--)
		udelay(10);

	if (priv->status & (STATUS_DISASSOCIATING |
			    STATUS_ASSOCIATED | STATUS_SCANNING))
		IPW_DEBUG_INFO("Still associated or scanning...\n");
	else
		IPW_DEBUG_INFO("Took %dms to de-init\n", 1000 - i);

	/* Attempt to disable the card */
	ipw_send_card_disable(priv, 0);

	priv->status &= ~STATUS_INIT;
}
11370
/*
 * Take the device fully down.  STATUS_EXIT_PENDING is set for the
 * duration so deferred work knows not to restart anything; it only
 * stays set if it was already set on entry (module unload path).
 */
static void ipw_down(struct ipw_priv *priv)
{
	int exit_pending = priv->status & STATUS_EXIT_PENDING;

	priv->status |= STATUS_EXIT_PENDING;

	if (ipw_is_init(priv))
		ipw_deinit(priv);

	/* Wipe out the EXIT_PENDING status bit if we are not actually
	 * exiting the module */
	if (!exit_pending)
		priv->status &= ~STATUS_EXIT_PENDING;

	/* tell the device to stop sending interrupts */
	ipw_disable_interrupts(priv);

	/* Clear all bits but the RF Kill */
	priv->status &= STATUS_RF_KILL_MASK | STATUS_EXIT_PENDING;
	netif_carrier_off(priv->net_dev);
	netif_stop_queue(priv->net_dev);

	ipw_stop_nic(priv);

	ipw_led_radio_off(priv);
}
11397
/* Workqueue wrapper: run ipw_down() under the driver mutex. */
static void ipw_bg_down(struct work_struct *work)
{
	struct ipw_priv *priv =
		container_of(work, struct ipw_priv, down);
	mutex_lock(&priv->mutex);
	ipw_down(priv);
	mutex_unlock(&priv->mutex);
}
11406
11407 /* Called by register_netdev() */
11408 static int ipw_net_init(struct net_device *dev)
11409 {
11410 struct ipw_priv *priv = ieee80211_priv(dev);
11411 mutex_lock(&priv->mutex);
11412
11413 if (ipw_up(priv)) {
11414 mutex_unlock(&priv->mutex);
11415 return -EIO;
11416 }
11417
11418 mutex_unlock(&priv->mutex);
11419 return 0;
11420 }
11421
11422 /* PCI driver stuff */
/* PCI driver stuff */
/* PCI match table.  Fields: { vendor, device, subvendor, subdevice,
 * class, class_mask, driver_data }. */
static struct pci_device_id card_ids[] = {
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2701, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2702, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2711, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2712, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2721, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2722, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2731, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2732, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2741, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x103c, 0x2741, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2742, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2751, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2752, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2753, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2754, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2761, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2762, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x104f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x4220, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},	/* BG */
	{PCI_VENDOR_ID_INTEL, 0x4221, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},	/* BG */
	{PCI_VENDOR_ID_INTEL, 0x4223, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},	/* ABG */
	{PCI_VENDOR_ID_INTEL, 0x4224, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},	/* ABG */

	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, card_ids);
11452
/* Sysfs attributes exposed in the PCI device's directory (see
 * ipw_attribute_group below).  The array must stay NULL-terminated. */
static struct attribute *ipw_sysfs_entries[] = {
	&dev_attr_rf_kill.attr,
	&dev_attr_direct_dword.attr,
	&dev_attr_indirect_byte.attr,
	&dev_attr_indirect_dword.attr,
	&dev_attr_mem_gpio_reg.attr,
	&dev_attr_command_event_reg.attr,
	&dev_attr_nic_type.attr,
	&dev_attr_status.attr,
	&dev_attr_cfg.attr,
	&dev_attr_error.attr,
	&dev_attr_event_log.attr,
	&dev_attr_cmd_log.attr,
	&dev_attr_eeprom_delay.attr,
	&dev_attr_ucode_version.attr,
	&dev_attr_rtc.attr,
	&dev_attr_scan_age.attr,
	&dev_attr_led.attr,
	&dev_attr_speed_scan.attr,
	&dev_attr_net_stats.attr,
	&dev_attr_channels.attr,
#ifdef CONFIG_IPW2200_PROMISCUOUS
	/* Only present when the promiscuous rtap interface is compiled in. */
	&dev_attr_rtap_iface.attr,
	&dev_attr_rtap_filter.attr,
#endif
	NULL
};
11480
/* Group wrapper for the entries above; created in ipw_pci_probe() via
 * sysfs_create_group() and removed in ipw_pci_remove(). */
static struct attribute_group ipw_attribute_group = {
	.name = NULL,		/* put in device directory */
	.attrs = ipw_sysfs_entries,
};
11485
11486 #ifdef CONFIG_IPW2200_PROMISCUOUS
11487 static int ipw_prom_open(struct net_device *dev)
11488 {
11489 struct ipw_prom_priv *prom_priv = ieee80211_priv(dev);
11490 struct ipw_priv *priv = prom_priv->priv;
11491
11492 IPW_DEBUG_INFO("prom dev->open\n");
11493 netif_carrier_off(dev);
11494 netif_stop_queue(dev);
11495
11496 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
11497 priv->sys_config.accept_all_data_frames = 1;
11498 priv->sys_config.accept_non_directed_frames = 1;
11499 priv->sys_config.accept_all_mgmt_bcpr = 1;
11500 priv->sys_config.accept_all_mgmt_frames = 1;
11501
11502 ipw_send_system_config(priv);
11503 }
11504
11505 return 0;
11506 }
11507
11508 static int ipw_prom_stop(struct net_device *dev)
11509 {
11510 struct ipw_prom_priv *prom_priv = ieee80211_priv(dev);
11511 struct ipw_priv *priv = prom_priv->priv;
11512
11513 IPW_DEBUG_INFO("prom dev->stop\n");
11514
11515 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
11516 priv->sys_config.accept_all_data_frames = 0;
11517 priv->sys_config.accept_non_directed_frames = 0;
11518 priv->sys_config.accept_all_mgmt_bcpr = 0;
11519 priv->sys_config.accept_all_mgmt_frames = 0;
11520
11521 ipw_send_system_config(priv);
11522 }
11523
11524 return 0;
11525 }
11526
/* The rtap interface is receive-only: reject any transmit attempt and
 * stop the queue so the stack does not keep handing us packets. */
static int ipw_prom_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	IPW_DEBUG_INFO("prom dev->xmit\n");
	netif_stop_queue(dev);
	return -EOPNOTSUPP;
}
11533
/* Return the ieee80211 layer's statistics for the rtap interface. */
static struct net_device_stats *ipw_prom_get_stats(struct net_device *dev)
{
	struct ipw_prom_priv *prom_priv = ieee80211_priv(dev);
	return &prom_priv->ieee->stats;
}
11539
11540 static int ipw_prom_alloc(struct ipw_priv *priv)
11541 {
11542 int rc = 0;
11543
11544 if (priv->prom_net_dev)
11545 return -EPERM;
11546
11547 priv->prom_net_dev = alloc_ieee80211(sizeof(struct ipw_prom_priv));
11548 if (priv->prom_net_dev == NULL)
11549 return -ENOMEM;
11550
11551 priv->prom_priv = ieee80211_priv(priv->prom_net_dev);
11552 priv->prom_priv->ieee = netdev_priv(priv->prom_net_dev);
11553 priv->prom_priv->priv = priv;
11554
11555 strcpy(priv->prom_net_dev->name, "rtap%d");
11556
11557 priv->prom_net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
11558 priv->prom_net_dev->open = ipw_prom_open;
11559 priv->prom_net_dev->stop = ipw_prom_stop;
11560 priv->prom_net_dev->get_stats = ipw_prom_get_stats;
11561 priv->prom_net_dev->hard_start_xmit = ipw_prom_hard_start_xmit;
11562
11563 priv->prom_priv->ieee->iw_mode = IW_MODE_MONITOR;
11564
11565 rc = register_netdev(priv->prom_net_dev);
11566 if (rc) {
11567 free_ieee80211(priv->prom_net_dev);
11568 priv->prom_net_dev = NULL;
11569 return rc;
11570 }
11571
11572 return 0;
11573 }
11574
11575 static void ipw_prom_free(struct ipw_priv *priv)
11576 {
11577 if (!priv->prom_net_dev)
11578 return;
11579
11580 unregister_netdev(priv->prom_net_dev);
11581 free_ieee80211(priv->prom_net_dev);
11582
11583 priv->prom_net_dev = NULL;
11584 }
11585
11586 #endif
11587
11588
/* PCI probe: allocate the net_device + ipw_priv, enable and map the PCI
 * device, hook up interrupt and deferred work, populate the ieee80211 and
 * net_device callbacks, then publish sysfs attributes and register the
 * netdev.  On any failure, unwinds via the goto ladder at the bottom in
 * exact reverse order of acquisition.  Returns 0 or a negative errno. */
static int __devinit ipw_pci_probe(struct pci_dev *pdev,
				   const struct pci_device_id *ent)
{
	int err = 0;
	struct net_device *net_dev;
	void __iomem *base;
	u32 length, val;
	struct ipw_priv *priv;
	int i;

	net_dev = alloc_ieee80211(sizeof(struct ipw_priv));
	if (net_dev == NULL) {
		err = -ENOMEM;
		goto out;
	}

	priv = ieee80211_priv(net_dev);
	priv->ieee = netdev_priv(net_dev);

	priv->net_dev = net_dev;
	priv->pci_dev = pdev;
	/* Seed the global debug mask from the "debug" module parameter. */
	ipw_debug_level = debug;
	spin_lock_init(&priv->irq_lock);
	spin_lock_init(&priv->lock);
	for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++)
		INIT_LIST_HEAD(&priv->ibss_mac_hash[i]);

	mutex_init(&priv->mutex);
	if (pci_enable_device(pdev)) {
		err = -ENODEV;
		goto out_free_ieee80211;
	}

	pci_set_master(pdev);

	/* The hardware only does 32-bit DMA; both streaming and coherent
	 * masks must be accepted or we cannot drive this device. */
	err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
	if (err) {
		printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n");
		goto out_pci_disable_device;
	}

	pci_set_drvdata(pdev, priv);

	err = pci_request_regions(pdev, DRV_NAME);
	if (err)
		goto out_pci_disable_device;

	/* We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_read_config_dword(pdev, 0x40, &val);
	if ((val & 0x0000ff00) != 0)
		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);

	length = pci_resource_len(pdev, 0);
	priv->hw_len = length;

	/* Map BAR 0 (the device register window) uncached. */
	base = ioremap_nocache(pci_resource_start(pdev, 0), length);
	if (!base) {
		err = -ENODEV;
		goto out_pci_release_regions;
	}

	priv->hw_base = base;
	IPW_DEBUG_INFO("pci_resource_len = 0x%08x\n", length);
	IPW_DEBUG_INFO("pci_resource_base = %p\n", base);

	err = ipw_setup_deferred_work(priv);
	if (err) {
		IPW_ERROR("Unable to setup deferred work\n");
		goto out_iounmap;
	}

	ipw_sw_reset(priv, 1);

	err = request_irq(pdev->irq, ipw_isr, IRQF_SHARED, DRV_NAME, priv);
	if (err) {
		IPW_ERROR("Error allocating IRQ %d\n", pdev->irq);
		goto out_destroy_workqueue;
	}

	SET_NETDEV_DEV(net_dev, &pdev->dev);

	mutex_lock(&priv->mutex);

	priv->ieee->hard_start_xmit = ipw_net_hard_start_xmit;
	priv->ieee->set_security = shim__set_security;
	priv->ieee->is_queue_full = ipw_net_is_queue_full;

#ifdef CONFIG_IPW2200_QOS
	priv->ieee->is_qos_active = ipw_is_qos_active;
	/* NOTE(review): ipw_handle_beacon is assigned to handle_probe_response
	 * and ipw_handle_probe_response to handle_beacon -- this looks
	 * swapped; confirm against the two handlers' definitions before
	 * changing, as they may be interchangeable. */
	priv->ieee->handle_probe_response = ipw_handle_beacon;
	priv->ieee->handle_beacon = ipw_handle_probe_response;
	priv->ieee->handle_assoc_response = ipw_handle_assoc_response;
#endif				/* CONFIG_IPW2200_QOS */

	priv->ieee->perfect_rssi = -20;
	priv->ieee->worst_rssi = -85;

	net_dev->open = ipw_net_open;
	net_dev->stop = ipw_net_stop;
	net_dev->init = ipw_net_init;
	net_dev->get_stats = ipw_net_get_stats;
	net_dev->set_multicast_list = ipw_net_set_multicast_list;
	net_dev->set_mac_address = ipw_net_set_mac_address;
	priv->wireless_data.spy_data = &priv->ieee->spy_data;
	net_dev->wireless_data = &priv->wireless_data;
	net_dev->wireless_handlers = &ipw_wx_handler_def;
	net_dev->ethtool_ops = &ipw_ethtool_ops;
	net_dev->irq = pdev->irq;
	net_dev->base_addr = (unsigned long)priv->hw_base;
	net_dev->mem_start = pci_resource_start(pdev, 0);
	net_dev->mem_end = net_dev->mem_start + pci_resource_len(pdev, 0) - 1;

	err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group);
	if (err) {
		IPW_ERROR("failed to create sysfs device attributes\n");
		mutex_unlock(&priv->mutex);
		goto out_release_irq;
	}

	mutex_unlock(&priv->mutex);
	err = register_netdev(net_dev);
	if (err) {
		IPW_ERROR("failed to register network device\n");
		goto out_remove_sysfs;
	}

#ifdef CONFIG_IPW2200_PROMISCUOUS
	/* Optionally create the companion rtap capture interface. */
	if (rtap_iface) {
		err = ipw_prom_alloc(priv);
		if (err) {
			IPW_ERROR("Failed to register promiscuous network "
				  "device (error %d).\n", err);
			unregister_netdev(priv->net_dev);
			goto out_remove_sysfs;
		}
	}
#endif

	printk(KERN_INFO DRV_NAME ": Detected geography %s (%d 802.11bg "
	       "channels, %d 802.11a channels)\n",
	       priv->ieee->geo.name, priv->ieee->geo.bg_channels,
	       priv->ieee->geo.a_channels);

	return 0;

	/* Error unwinding: reverse order of the acquisitions above. */
      out_remove_sysfs:
	sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
      out_release_irq:
	free_irq(pdev->irq, priv);
      out_destroy_workqueue:
	destroy_workqueue(priv->workqueue);
	priv->workqueue = NULL;
      out_iounmap:
	iounmap(priv->hw_base);
      out_pci_release_regions:
	pci_release_regions(pdev);
      out_pci_disable_device:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
      out_free_ieee80211:
	free_ieee80211(priv->net_dev);
      out:
	return err;
}
11756
/* PCI remove: take the device down, then release everything probe
 * acquired -- sysfs group, netdev, rx/tx queues, command log, deferred
 * work, IBSS hash lists, IRQ, mappings and PCI resources. */
static void __devexit ipw_pci_remove(struct pci_dev *pdev)
{
	struct ipw_priv *priv = pci_get_drvdata(pdev);
	struct list_head *p, *q;
	int i;

	if (!priv)
		return;

	mutex_lock(&priv->mutex);

	/* Stop any code that checks for pending exit before powering down. */
	priv->status |= STATUS_EXIT_PENDING;
	ipw_down(priv);
	sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);

	mutex_unlock(&priv->mutex);

	unregister_netdev(priv->net_dev);

	if (priv->rxq) {
		ipw_rx_queue_free(priv, priv->rxq);
		priv->rxq = NULL;
	}
	ipw_tx_queue_free(priv);

	if (priv->cmdlog) {
		kfree(priv->cmdlog);
		priv->cmdlog = NULL;
	}
	/* ipw_down will ensure that there is no more pending work
	 * in the workqueue's, so we can safely remove them now. */
	cancel_delayed_work(&priv->adhoc_check);
	cancel_delayed_work(&priv->gather_stats);
	cancel_delayed_work(&priv->request_scan);
	cancel_delayed_work(&priv->scan_event);
	cancel_delayed_work(&priv->rf_kill);
	cancel_delayed_work(&priv->scan_check);
	destroy_workqueue(priv->workqueue);
	priv->workqueue = NULL;

	/* Free MAC hash list for ADHOC */
	for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) {
		list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) {
			list_del(p);
			kfree(list_entry(p, struct ipw_ibss_seq, list));
		}
	}

	kfree(priv->error);
	priv->error = NULL;

#ifdef CONFIG_IPW2200_PROMISCUOUS
	ipw_prom_free(priv);
#endif

	free_irq(pdev->irq, priv);
	iounmap(priv->hw_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	/* priv lives inside net_dev's allocation; this frees both. */
	free_ieee80211(priv->net_dev);
	free_firmware();
}
11820
11821 #ifdef CONFIG_PM
/* PM suspend hook: power the device down, detach it from the network
 * stack, then save PCI state and drop to the requested power state.
 * The sequence is order-sensitive: the device must be down before the
 * PCI device is disabled. */
static int ipw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct ipw_priv *priv = pci_get_drvdata(pdev);
	struct net_device *dev = priv->net_dev;

	printk(KERN_INFO "%s: Going into suspend...\n", dev->name);

	/* Take down the device; powers it off, etc. */
	ipw_down(priv);

	/* Remove the PRESENT state of the device */
	netif_device_detach(dev);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}
11841
/* PM resume hook: restore power and PCI state, re-apply the RETRY_TIMEOUT
 * workaround (config space was reset by suspend), reattach the netdev and
 * queue the asynchronous bring-up work.  Returns 0 or the
 * pci_enable_device() error. */
static int ipw_pci_resume(struct pci_dev *pdev)
{
	struct ipw_priv *priv = pci_get_drvdata(pdev);
	struct net_device *dev = priv->net_dev;
	int err;
	u32 val;

	printk(KERN_INFO "%s: Coming out of suspend...\n", dev->name);

	pci_set_power_state(pdev, PCI_D0);
	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR "%s: pci_enable_device failed on resume\n",
		       dev->name);
		return err;
	}
	pci_restore_state(pdev);

	/*
	 * Suspend/Resume resets the PCI configuration space, so we have to
	 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
	 * from interfering with C3 CPU state. pci_restore_state won't help
	 * here since it only restores the first 64 bytes pci config header.
	 */
	pci_read_config_dword(pdev, 0x40, &val);
	if ((val & 0x0000ff00) != 0)
		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);

	/* Set the device back into the PRESENT state; this will also wake
	 * the queue of needed */
	netif_device_attach(dev);

	/* Bring the device back up */
	queue_work(priv->workqueue, &priv->up);

	return 0;
}
11879 #endif
11880
/* PCI shutdown hook (reboot/halt): quiesce the hardware so it does not
 * keep DMAing or interrupting across the reboot. */
static void ipw_pci_shutdown(struct pci_dev *pdev)
{
	struct ipw_priv *priv = pci_get_drvdata(pdev);

	/* Take down the device; powers it off, etc. */
	ipw_down(priv);

	pci_disable_device(pdev);
}
11890
/* driver initialization stuff */
/* PCI driver descriptor tying the card ID table to the probe/remove and
 * power-management callbacks above. */
static struct pci_driver ipw_driver = {
	.name = DRV_NAME,
	.id_table = card_ids,
	.probe = ipw_pci_probe,
	.remove = __devexit_p(ipw_pci_remove),
#ifdef CONFIG_PM
	.suspend = ipw_pci_suspend,
	.resume = ipw_pci_resume,
#endif
	.shutdown = ipw_pci_shutdown,
};
11903
11904 static int __init ipw_init(void)
11905 {
11906 int ret;
11907
11908 printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
11909 printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");
11910
11911 ret = pci_register_driver(&ipw_driver);
11912 if (ret) {
11913 IPW_ERROR("Unable to initialize PCI module\n");
11914 return ret;
11915 }
11916
11917 ret = driver_create_file(&ipw_driver.driver, &driver_attr_debug_level);
11918 if (ret) {
11919 IPW_ERROR("Unable to create driver sysfs file\n");
11920 pci_unregister_driver(&ipw_driver);
11921 return ret;
11922 }
11923
11924 return ret;
11925 }
11926
/* Module exit point: remove the driver sysfs attribute before
 * unregistering the PCI driver (reverse of ipw_init()). */
static void __exit ipw_exit(void)
{
	driver_remove_file(&ipw_driver.driver, &driver_attr_debug_level);
	pci_unregister_driver(&ipw_driver);
}
11932
11933 module_param(disable, int, 0444);
11934 MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
11935
11936 module_param(associate, int, 0444);
11937 MODULE_PARM_DESC(associate, "auto associate when scanning (default on)");
11938
11939 module_param(auto_create, int, 0444);
11940 MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)");
11941
11942 module_param(led, int, 0444);
11943 MODULE_PARM_DESC(led, "enable led control on some systems (default 0 off)\n");
11944
11945 module_param(debug, int, 0444);
11946 MODULE_PARM_DESC(debug, "debug output mask");
11947
11948 module_param(channel, int, 0444);
11949 MODULE_PARM_DESC(channel, "channel to limit associate to (default 0 [ANY])");
11950
11951 #ifdef CONFIG_IPW2200_PROMISCUOUS
11952 module_param(rtap_iface, int, 0444);
11953 MODULE_PARM_DESC(rtap_iface, "create the rtap interface (1 - create, default 0)");
11954 #endif
11955
11956 #ifdef CONFIG_IPW2200_QOS
11957 module_param(qos_enable, int, 0444);
11958 MODULE_PARM_DESC(qos_enable, "enable all QoS functionalitis");
11959
11960 module_param(qos_burst_enable, int, 0444);
11961 MODULE_PARM_DESC(qos_burst_enable, "enable QoS burst mode");
11962
11963 module_param(qos_no_ack_mask, int, 0444);
11964 MODULE_PARM_DESC(qos_no_ack_mask, "mask Tx_Queue to no ack");
11965
11966 module_param(burst_duration_CCK, int, 0444);
11967 MODULE_PARM_DESC(burst_duration_CCK, "set CCK burst value");
11968
11969 module_param(burst_duration_OFDM, int, 0444);
11970 MODULE_PARM_DESC(burst_duration_OFDM, "set OFDM burst value");
11971 #endif /* CONFIG_IPW2200_QOS */
11972
11973 #ifdef CONFIG_IPW2200_MONITOR
11974 module_param(mode, int, 0444);
11975 MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS,2=Monitor)");
11976 #else
11977 module_param(mode, int, 0444);
11978 MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS)");
11979 #endif
11980
11981 module_param(bt_coexist, int, 0444);
11982 MODULE_PARM_DESC(bt_coexist, "enable bluetooth coexistence (default off)");
11983
11984 module_param(hwcrypto, int, 0444);
11985 MODULE_PARM_DESC(hwcrypto, "enable hardware crypto (default off)");
11986
11987 module_param(cmdlog, int, 0444);
11988 MODULE_PARM_DESC(cmdlog,
11989 "allocate a ring buffer for logging firmware commands");
11990
11991 module_param(roaming, int, 0444);
11992 MODULE_PARM_DESC(roaming, "enable roaming support (default on)");
11993
11994 module_param(antenna, int, 0444);
11995 MODULE_PARM_DESC(antenna, "select antenna 1=Main, 3=Aux, default 0 [both], 2=slow_diversity (choose the one with lower background noise)");
11996
11997 module_exit(ipw_exit);
11998 module_init(ipw_init);
This page took 0.413681 seconds and 5 git commands to generate.