[NET]: Introduce and use print_mac() and DECLARE_MAC_BUF()
drivers/net/wireless/ipw2200.c
1 /******************************************************************************
2
3 Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved.
4
5 802.11 status code portion of this file from ethereal-0.10.6:
6 Copyright 2000, Axis Communications AB
7 Ethereal - Network traffic analyzer
8 By Gerald Combs <gerald@ethereal.com>
9 Copyright 1998 Gerald Combs
10
11 This program is free software; you can redistribute it and/or modify it
12 under the terms of version 2 of the GNU General Public License as
13 published by the Free Software Foundation.
14
15 This program is distributed in the hope that it will be useful, but WITHOUT
16 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 more details.
19
20 You should have received a copy of the GNU General Public License along with
21 this program; if not, write to the Free Software Foundation, Inc., 59
22 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23
24 The full GNU General Public License is included in this distribution in the
25 file called LICENSE.
26
27 Contact Information:
28 James P. Ketrenos <ipw2100-admin@linux.intel.com>
29 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30
31 ******************************************************************************/
32
33 #include "ipw2200.h"
34 #include <linux/version.h>
35
36
37 #ifndef KBUILD_EXTMOD
38 #define VK "k"
39 #else
40 #define VK
41 #endif
42
43 #ifdef CONFIG_IPW2200_DEBUG
44 #define VD "d"
45 #else
46 #define VD
47 #endif
48
49 #ifdef CONFIG_IPW2200_MONITOR
50 #define VM "m"
51 #else
52 #define VM
53 #endif
54
55 #ifdef CONFIG_IPW2200_PROMISCUOUS
56 #define VP "p"
57 #else
58 #define VP
59 #endif
60
61 #ifdef CONFIG_IPW2200_RADIOTAP
62 #define VR "r"
63 #else
64 #define VR
65 #endif
66
67 #ifdef CONFIG_IPW2200_QOS
68 #define VQ "q"
69 #else
70 #define VQ
71 #endif
72
73 #define IPW2200_VERSION "1.2.2" VK VD VM VP VR VQ
74 #define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2200/2915 Network Driver"
75 #define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation"
76 #define DRV_VERSION IPW2200_VERSION
77
78 #define ETH_P_80211_STATS (ETH_P_80211_RAW + 1)
79
80 MODULE_DESCRIPTION(DRV_DESCRIPTION);
81 MODULE_VERSION(DRV_VERSION);
82 MODULE_AUTHOR(DRV_COPYRIGHT);
83 MODULE_LICENSE("GPL");
84
85 static int cmdlog = 0;
86 static int debug = 0;
87 static int channel = 0;
88 static int mode = 0;
89
90 static u32 ipw_debug_level;
91 static int associate = 1;
92 static int auto_create = 1;
93 static int led = 0;
94 static int disable = 0;
95 static int bt_coexist = 0;
96 static int hwcrypto = 0;
97 static int roaming = 1;
98 static const char ipw_modes[] = {
99 'a', 'b', 'g', '?'
100 };
101 static int antenna = CFG_SYS_ANTENNA_BOTH;
102
103 #ifdef CONFIG_IPW2200_PROMISCUOUS
104 static int rtap_iface = 0; /* def: 0 -- do not create rtap interface */
105 #endif
106
107
108 #ifdef CONFIG_IPW2200_QOS
109 static int qos_enable = 0;
110 static int qos_burst_enable = 0;
111 static int qos_no_ack_mask = 0;
112 static int burst_duration_CCK = 0;
113 static int burst_duration_OFDM = 0;
114
115 static struct ieee80211_qos_parameters def_qos_parameters_OFDM = {
116 {QOS_TX0_CW_MIN_OFDM, QOS_TX1_CW_MIN_OFDM, QOS_TX2_CW_MIN_OFDM,
117 QOS_TX3_CW_MIN_OFDM},
118 {QOS_TX0_CW_MAX_OFDM, QOS_TX1_CW_MAX_OFDM, QOS_TX2_CW_MAX_OFDM,
119 QOS_TX3_CW_MAX_OFDM},
120 {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
121 {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
122 {QOS_TX0_TXOP_LIMIT_OFDM, QOS_TX1_TXOP_LIMIT_OFDM,
123 QOS_TX2_TXOP_LIMIT_OFDM, QOS_TX3_TXOP_LIMIT_OFDM}
124 };
125
126 static struct ieee80211_qos_parameters def_qos_parameters_CCK = {
127 {QOS_TX0_CW_MIN_CCK, QOS_TX1_CW_MIN_CCK, QOS_TX2_CW_MIN_CCK,
128 QOS_TX3_CW_MIN_CCK},
129 {QOS_TX0_CW_MAX_CCK, QOS_TX1_CW_MAX_CCK, QOS_TX2_CW_MAX_CCK,
130 QOS_TX3_CW_MAX_CCK},
131 {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
132 {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
133 {QOS_TX0_TXOP_LIMIT_CCK, QOS_TX1_TXOP_LIMIT_CCK, QOS_TX2_TXOP_LIMIT_CCK,
134 QOS_TX3_TXOP_LIMIT_CCK}
135 };
136
137 static struct ieee80211_qos_parameters def_parameters_OFDM = {
138 {DEF_TX0_CW_MIN_OFDM, DEF_TX1_CW_MIN_OFDM, DEF_TX2_CW_MIN_OFDM,
139 DEF_TX3_CW_MIN_OFDM},
140 {DEF_TX0_CW_MAX_OFDM, DEF_TX1_CW_MAX_OFDM, DEF_TX2_CW_MAX_OFDM,
141 DEF_TX3_CW_MAX_OFDM},
142 {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
143 {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
144 {DEF_TX0_TXOP_LIMIT_OFDM, DEF_TX1_TXOP_LIMIT_OFDM,
145 DEF_TX2_TXOP_LIMIT_OFDM, DEF_TX3_TXOP_LIMIT_OFDM}
146 };
147
148 static struct ieee80211_qos_parameters def_parameters_CCK = {
149 {DEF_TX0_CW_MIN_CCK, DEF_TX1_CW_MIN_CCK, DEF_TX2_CW_MIN_CCK,
150 DEF_TX3_CW_MIN_CCK},
151 {DEF_TX0_CW_MAX_CCK, DEF_TX1_CW_MAX_CCK, DEF_TX2_CW_MAX_CCK,
152 DEF_TX3_CW_MAX_CCK},
153 {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
154 {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
155 {DEF_TX0_TXOP_LIMIT_CCK, DEF_TX1_TXOP_LIMIT_CCK, DEF_TX2_TXOP_LIMIT_CCK,
156 DEF_TX3_TXOP_LIMIT_CCK}
157 };
158
159 static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 };
160
161 static int from_priority_to_tx_queue[] = {
162 IPW_TX_QUEUE_1, IPW_TX_QUEUE_2, IPW_TX_QUEUE_2, IPW_TX_QUEUE_1,
163 IPW_TX_QUEUE_3, IPW_TX_QUEUE_3, IPW_TX_QUEUE_4, IPW_TX_QUEUE_4
164 };
165
166 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv);
167
168 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct ieee80211_qos_parameters
169 *qos_param);
170 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct ieee80211_qos_information_element
171 *qos_param);
172 #endif /* CONFIG_IPW2200_QOS */
173
174 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev);
175 static void ipw_remove_current_network(struct ipw_priv *priv);
176 static void ipw_rx(struct ipw_priv *priv);
177 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
178 struct clx2_tx_queue *txq, int qindex);
179 static int ipw_queue_reset(struct ipw_priv *priv);
180
181 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
182 int len, int sync);
183
184 static void ipw_tx_queue_free(struct ipw_priv *);
185
186 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *);
187 static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *);
188 static void ipw_rx_queue_replenish(void *);
189 static int ipw_up(struct ipw_priv *);
190 static void ipw_bg_up(struct work_struct *work);
191 static void ipw_down(struct ipw_priv *);
192 static void ipw_bg_down(struct work_struct *work);
193 static int ipw_config(struct ipw_priv *);
194 static int init_supported_rates(struct ipw_priv *priv,
195 struct ipw_supported_rates *prates);
196 static void ipw_set_hwcrypto_keys(struct ipw_priv *);
197 static void ipw_send_wep_keys(struct ipw_priv *, int);
198
199 static int snprint_line(char *buf, size_t count,
200 const u8 * data, u32 len, u32 ofs)
201 {
202 int out, i, j, l;
203 char c;
204
205 out = snprintf(buf, count, "%08X", ofs);
206
207 for (l = 0, i = 0; i < 2; i++) {
208 out += snprintf(buf + out, count - out, " ");
209 for (j = 0; j < 8 && l < len; j++, l++)
210 out += snprintf(buf + out, count - out, "%02X ",
211 data[(i * 8 + j)]);
212 for (; j < 8; j++)
213 out += snprintf(buf + out, count - out, " ");
214 }
215
216 out += snprintf(buf + out, count - out, " ");
217 for (l = 0, i = 0; i < 2; i++) {
218 out += snprintf(buf + out, count - out, " ");
219 for (j = 0; j < 8 && l < len; j++, l++) {
220 c = data[(i * 8 + j)];
221 if (!isascii(c) || !isprint(c))
222 c = '.';
223
224 out += snprintf(buf + out, count - out, "%c", c);
225 }
226
227 for (; j < 8; j++)
228 out += snprintf(buf + out, count - out, " ");
229 }
230
231 return out;
232 }
233
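/* Dump a buffer to the kernel log, 16 bytes per line, if the requested
 * debug level is enabled. */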
234 static void printk_buf(int level, const u8 * data, u32 len)
235 {
236 char line[81];
237 u32 ofs = 0;
238 if (!(ipw_debug_level & level))
239 return;
240
241 while (len) {
242 snprint_line(line, sizeof(line), &data[ofs],
243 min(len, 16U), ofs);
244 printk(KERN_DEBUG "%s\n", line);
245 ofs += 16;
246 len -= min(len, 16U);
247 }
248 }
249
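/* As printk_buf(), but format the hex dump into 'output' (at most 'size'
 * bytes) and return the total length written. */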
250 static int snprintk_buf(u8 * output, size_t size, const u8 * data, size_t len)
251 {
252 size_t out = size;
253 u32 ofs = 0;
254 int total = 0;
255
256 while (size && len) {
257 out = snprint_line(output, size, &data[ofs],
258 min_t(size_t, len, 16U), ofs);
259
260 ofs += 16;
261 output += out;
262 size -= out;
263 len -= min_t(size_t, len, 16U);
264 total += out;
265 }
266 return total;
267 }
268
269 /* alias for 32-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
270 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg);
271 #define ipw_read_reg32(a, b) _ipw_read_reg32(a, b)
272
273 /* alias for 8-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
274 static u8 _ipw_read_reg8(struct ipw_priv *ipw, u32 reg);
275 #define ipw_read_reg8(a, b) _ipw_read_reg8(a, b)
276
277 /* 8-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
278 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value);
279 static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c)
280 {
281 IPW_DEBUG_IO("%s %d: write_indirect8(0x%08X, 0x%08X)\n", __FILE__,
282 __LINE__, (u32) (b), (u32) (c));
283 _ipw_write_reg8(a, b, c);
284 }
285
286 /* 16-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
287 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value);
288 static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c)
289 {
290 IPW_DEBUG_IO("%s %d: write_indirect16(0x%08X, 0x%08X)\n", __FILE__,
291 __LINE__, (u32) (b), (u32) (c));
292 _ipw_write_reg16(a, b, c);
293 }
294
295 /* 32-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
296 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value);
297 static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c)
298 {
299 IPW_DEBUG_IO("%s %d: write_indirect32(0x%08X, 0x%08X)\n", __FILE__,
300 __LINE__, (u32) (b), (u32) (c));
301 _ipw_write_reg32(a, b, c);
302 }
303
304 /* 8-bit direct write (low 4K) */
305 #define _ipw_write8(ipw, ofs, val) writeb((val), (ipw)->hw_base + (ofs))
306
307 /* 8-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
308 #define ipw_write8(ipw, ofs, val) \
309 IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
310 _ipw_write8(ipw, ofs, val)
311
312 /* 16-bit direct write (low 4K) */
313 #define _ipw_write16(ipw, ofs, val) writew((val), (ipw)->hw_base + (ofs))
314
315 /* 16-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
316 #define ipw_write16(ipw, ofs, val) \
317 IPW_DEBUG_IO("%s %d: write_direct16(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
318 _ipw_write16(ipw, ofs, val)
319
320 /* 32-bit direct write (low 4K) */
321 #define _ipw_write32(ipw, ofs, val) writel((val), (ipw)->hw_base + (ofs))
322
323 /* 32-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
324 #define ipw_write32(ipw, ofs, val) \
325 IPW_DEBUG_IO("%s %d: write_direct32(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
326 _ipw_write32(ipw, ofs, val)
327
328 /* 8-bit direct read (low 4K) */
329 #define _ipw_read8(ipw, ofs) readb((ipw)->hw_base + (ofs))
330
331 /* 8-bit direct read (low 4K), with debug wrapper */
332 static inline u8 __ipw_read8(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
333 {
334 IPW_DEBUG_IO("%s %d: read_direct8(0x%08X)\n", f, l, (u32) (ofs));
335 return _ipw_read8(ipw, ofs);
336 }
337
338 /* alias to 8-bit direct read (low 4K of SRAM/regs), with debug wrapper */
339 #define ipw_read8(ipw, ofs) __ipw_read8(__FILE__, __LINE__, ipw, ofs)
340
341 /* 16-bit direct read (low 4K) */
342 #define _ipw_read16(ipw, ofs) readw((ipw)->hw_base + (ofs))
343
344 /* 16-bit direct read (low 4K), with debug wrapper */
345 static inline u16 __ipw_read16(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
346 {
347 IPW_DEBUG_IO("%s %d: read_direct16(0x%08X)\n", f, l, (u32) (ofs));
348 return _ipw_read16(ipw, ofs);
349 }
350
351 /* alias to 16-bit direct read (low 4K of SRAM/regs), with debug wrapper */
352 #define ipw_read16(ipw, ofs) __ipw_read16(__FILE__, __LINE__, ipw, ofs)
353
354 /* 32-bit direct read (low 4K) */
355 #define _ipw_read32(ipw, ofs) readl((ipw)->hw_base + (ofs))
356
357 /* 32-bit direct read (low 4K), with debug wrapper */
358 static inline u32 __ipw_read32(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
359 {
360 IPW_DEBUG_IO("%s %d: read_direct32(0x%08X)\n", f, l, (u32) (ofs));
361 return _ipw_read32(ipw, ofs);
362 }
363
364 /* alias to 32-bit direct read (low 4K of SRAM/regs), with debug wrapper */
365 #define ipw_read32(ipw, ofs) __ipw_read32(__FILE__, __LINE__, ipw, ofs)
366
367 /* multi-byte read (above 4K), with debug wrapper */
368 static void _ipw_read_indirect(struct ipw_priv *, u32, u8 *, int);
369 static inline void __ipw_read_indirect(const char *f, int l,
370 struct ipw_priv *a, u32 b, u8 * c, int d)
371 {
372 IPW_DEBUG_IO("%s %d: read_indirect(0x%08X) %d bytes\n", f, l, (u32) (b),
373 d);
374 _ipw_read_indirect(a, b, c, d);
375 }
376
377 /* alias to multi-byte read (SRAM/regs above 4K), with debug wrapper */
378 #define ipw_read_indirect(a, b, c, d) __ipw_read_indirect(__FILE__, __LINE__, a, b, c, d)
379
380 /* alias to multi-byte write (SRAM/regs above 4K), with debug wrapper */
381 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * data,
382 int num);
383 #define ipw_write_indirect(a, b, c, d) \
384 IPW_DEBUG_IO("%s %d: write_indirect(0x%08X) %d bytes\n", __FILE__, __LINE__, (u32)(b), d); \
385 _ipw_write_indirect(a, b, c, d)
386
387 /* 32-bit indirect write (above 4K) */
388 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value)
389 {
390 IPW_DEBUG_IO(" %p : reg = 0x%8X : value = 0x%8X\n", priv, reg, value);
391 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
392 _ipw_write32(priv, IPW_INDIRECT_DATA, value);
393 }
394
395 /* 8-bit indirect write (above 4K) */
396 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value)
397 {
398 u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */
399 u32 dif_len = reg - aligned_addr;
400
401 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
402 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
403 _ipw_write8(priv, IPW_INDIRECT_DATA + dif_len, value);
404 }
405
406 /* 16-bit indirect write (above 4K) */
407 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value)
408 {
409 u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */
410 u32 dif_len = (reg - aligned_addr) & (~0x1ul);
411
412 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
413 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
414 _ipw_write16(priv, IPW_INDIRECT_DATA + dif_len, value);
415 }
416
417 /* 8-bit indirect read (above 4K) */
418 static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg)
419 {
420 u32 word;
421 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg & IPW_INDIRECT_ADDR_MASK);
422 IPW_DEBUG_IO(" reg = 0x%8X : \n", reg);
423 word = _ipw_read32(priv, IPW_INDIRECT_DATA);
424 return (word >> ((reg & 0x3) * 8)) & 0xff;
425 }
426
427 /* 32-bit indirect read (above 4K) */
428 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg)
429 {
430 u32 value;
431
432 IPW_DEBUG_IO("%p : reg = 0x%08x\n", priv, reg);
433
434 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
435 value = _ipw_read32(priv, IPW_INDIRECT_DATA);
436 IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x \n", reg, value);
437 return value;
438 }
439
440 /* General purpose, no alignment requirement, iterative (multi-byte) read, */
441 /* for area above 1st 4K of SRAM/reg space */
442 static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
443 int num)
444 {
445 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */
446 u32 dif_len = addr - aligned_addr;
447 u32 i;
448
449 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
450
451 if (num <= 0) {
452 return;
453 }
454
455 /* Read the first dword (or portion) byte by byte */
456 if (unlikely(dif_len)) {
457 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
458 /* Start reading at aligned_addr + dif_len */
459 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--)
460 *buf++ = _ipw_read8(priv, IPW_INDIRECT_DATA + i);
461 aligned_addr += 4;
462 }
463
464 /* Read all of the middle dwords as dwords, with auto-increment */
465 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
466 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
467 *(u32 *) buf = _ipw_read32(priv, IPW_AUTOINC_DATA);
468
469 /* Read the last dword (or portion) byte by byte */
470 if (unlikely(num)) {
471 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
472 for (i = 0; num > 0; i++, num--)
473 *buf++ = ipw_read8(priv, IPW_INDIRECT_DATA + i);
474 }
475 }
476
477 /* General purpose, no alignment requirement, iterative (multi-byte) write, */
478 /* for area above 1st 4K of SRAM/reg space */
479 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
480 int num)
481 {
482 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */
483 u32 dif_len = addr - aligned_addr;
484 u32 i;
485
486 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
487
488 if (num <= 0) {
489 return;
490 }
491
492 /* Write the first dword (or portion) byte by byte */
493 if (unlikely(dif_len)) {
494 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
495 /* Start writing at aligned_addr + dif_len */
496 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--, buf++)
497 _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
498 aligned_addr += 4;
499 }
500
501 /* Write all of the middle dwords as dwords, with auto-increment */
502 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
503 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
504 _ipw_write32(priv, IPW_AUTOINC_DATA, *(u32 *) buf);
505
506 /* Write the last dword (or portion) byte by byte */
507 if (unlikely(num)) {
508 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
509 for (i = 0; num > 0; i++, num--, buf++)
510 _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
511 }
512 }
513
514 /* General purpose, no alignment requirement, iterative (multi-byte) write, */
515 /* for 1st 4K of SRAM/regs space */
516 static void ipw_write_direct(struct ipw_priv *priv, u32 addr, void *buf,
517 int num)
518 {
519 memcpy_toio((priv->hw_base + addr), buf, num);
520 }
521
522 /* Set bit(s) in low 4K of SRAM/regs */
523 static inline void ipw_set_bit(struct ipw_priv *priv, u32 reg, u32 mask)
524 {
525 ipw_write32(priv, reg, ipw_read32(priv, reg) | mask);
526 }
527
528 /* Clear bit(s) in low 4K of SRAM/regs */
529 static inline void ipw_clear_bit(struct ipw_priv *priv, u32 reg, u32 mask)
530 {
531 ipw_write32(priv, reg, ipw_read32(priv, reg) & ~mask);
532 }
533
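/* Enable/disable firmware interrupts via the INTA mask register.  The __
 * variants assume priv->irq_lock is already held; the wrappers below take it. */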
534 static inline void __ipw_enable_interrupts(struct ipw_priv *priv)
535 {
536 if (priv->status & STATUS_INT_ENABLED)
537 return;
538 priv->status |= STATUS_INT_ENABLED;
539 ipw_write32(priv, IPW_INTA_MASK_R, IPW_INTA_MASK_ALL);
540 }
541
542 static inline void __ipw_disable_interrupts(struct ipw_priv *priv)
543 {
544 if (!(priv->status & STATUS_INT_ENABLED))
545 return;
546 priv->status &= ~STATUS_INT_ENABLED;
547 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
548 }
549
550 static inline void ipw_enable_interrupts(struct ipw_priv *priv)
551 {
552 unsigned long flags;
553
554 spin_lock_irqsave(&priv->irq_lock, flags);
555 __ipw_enable_interrupts(priv);
556 spin_unlock_irqrestore(&priv->irq_lock, flags);
557 }
558
559 static inline void ipw_disable_interrupts(struct ipw_priv *priv)
560 {
561 unsigned long flags;
562
563 spin_lock_irqsave(&priv->irq_lock, flags);
564 __ipw_disable_interrupts(priv);
565 spin_unlock_irqrestore(&priv->irq_lock, flags);
566 }
567
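/* Translate a firmware error code into a human-readable string. */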
568 static char *ipw_error_desc(u32 val)
569 {
570 switch (val) {
571 case IPW_FW_ERROR_OK:
572 return "ERROR_OK";
573 case IPW_FW_ERROR_FAIL:
574 return "ERROR_FAIL";
575 case IPW_FW_ERROR_MEMORY_UNDERFLOW:
576 return "MEMORY_UNDERFLOW";
577 case IPW_FW_ERROR_MEMORY_OVERFLOW:
578 return "MEMORY_OVERFLOW";
579 case IPW_FW_ERROR_BAD_PARAM:
580 return "BAD_PARAM";
581 case IPW_FW_ERROR_BAD_CHECKSUM:
582 return "BAD_CHECKSUM";
583 case IPW_FW_ERROR_NMI_INTERRUPT:
584 return "NMI_INTERRUPT";
585 case IPW_FW_ERROR_BAD_DATABASE:
586 return "BAD_DATABASE";
587 case IPW_FW_ERROR_ALLOC_FAIL:
588 return "ALLOC_FAIL";
589 case IPW_FW_ERROR_DMA_UNDERRUN:
590 return "DMA_UNDERRUN";
591 case IPW_FW_ERROR_DMA_STATUS:
592 return "DMA_STATUS";
593 case IPW_FW_ERROR_DINO_ERROR:
594 return "DINO_ERROR";
595 case IPW_FW_ERROR_EEPROM_ERROR:
596 return "EEPROM_ERROR";
597 case IPW_FW_ERROR_SYSASSERT:
598 return "SYSASSERT";
599 case IPW_FW_ERROR_FATAL_ERROR:
600 return "FATAL_ERROR";
601 default:
602 return "UNKNOWN_ERROR";
603 }
604 }
605
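/* Dump a previously captured firmware error log (status, config, error
 * elements and event entries) through IPW_ERROR. */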
606 static void ipw_dump_error_log(struct ipw_priv *priv,
607 struct ipw_fw_error *error)
608 {
609 u32 i;
610
611 if (!error) {
612 IPW_ERROR("Error allocating and capturing error log. "
613 "Nothing to dump.\n");
614 return;
615 }
616
617 IPW_ERROR("Start IPW Error Log Dump:\n");
618 IPW_ERROR("Status: 0x%08X, Config: %08X\n",
619 error->status, error->config);
620
621 for (i = 0; i < error->elem_len; i++)
622 IPW_ERROR("%s %i 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
623 ipw_error_desc(error->elem[i].desc),
624 error->elem[i].time,
625 error->elem[i].blink1,
626 error->elem[i].blink2,
627 error->elem[i].link1,
628 error->elem[i].link2, error->elem[i].data);
629 for (i = 0; i < error->log_len; i++)
630 IPW_ERROR("%i\t0x%08x\t%i\n",
631 error->log[i].time,
632 error->log[i].data, error->log[i].event);
633 }
634
635 static inline int ipw_is_init(struct ipw_priv *priv)
636 {
637 return (priv->status & STATUS_INIT) ? 1 : 0;
638 }
639
640 static int ipw_get_ordinal(struct ipw_priv *priv, u32 ord, void *val, u32 * len)
641 {
642 u32 addr, field_info, field_len, field_count, total_len;
643
644 IPW_DEBUG_ORD("ordinal = %i\n", ord);
645
646 if (!priv || !val || !len) {
647 IPW_DEBUG_ORD("Invalid argument\n");
648 return -EINVAL;
649 }
650
651 /* verify device ordinal tables have been initialized */
652 if (!priv->table0_addr || !priv->table1_addr || !priv->table2_addr) {
653 IPW_DEBUG_ORD("Access ordinals before initialization\n");
654 return -EINVAL;
655 }
656
657 switch (IPW_ORD_TABLE_ID_MASK & ord) {
658 case IPW_ORD_TABLE_0_MASK:
659 /*
660 * TABLE 0: Direct access to a table of 32 bit values
661 *
662 * This is a very simple table with the data directly
663 * read from the table
664 */
665
666 /* remove the table id from the ordinal */
667 ord &= IPW_ORD_TABLE_VALUE_MASK;
668
669 /* boundary check */
670 if (ord > priv->table0_len) {
671 IPW_DEBUG_ORD("ordinal value (%i) longer then "
672 "max (%i)\n", ord, priv->table0_len);
673 return -EINVAL;
674 }
675
676 /* verify we have enough room to store the value */
677 if (*len < sizeof(u32)) {
678 IPW_DEBUG_ORD("ordinal buffer length too small, "
679 "need %zd\n", sizeof(u32));
680 return -EINVAL;
681 }
682
683 IPW_DEBUG_ORD("Reading TABLE0[%i] from offset 0x%08x\n",
684 ord, priv->table0_addr + (ord << 2));
685
686 *len = sizeof(u32);
687 ord <<= 2;
688 *((u32 *) val) = ipw_read32(priv, priv->table0_addr + ord);
689 break;
690
691 case IPW_ORD_TABLE_1_MASK:
692 /*
693 * TABLE 1: Indirect access to a table of 32 bit values
694 *
695 * This is a fairly large table of u32 values each
696 * representing starting addr for the data (which is
697 * also a u32)
698 */
699
700 /* remove the table id from the ordinal */
701 ord &= IPW_ORD_TABLE_VALUE_MASK;
702
703 /* boundary check */
704 if (ord > priv->table1_len) {
705 IPW_DEBUG_ORD("ordinal value too long\n");
706 return -EINVAL;
707 }
708
709 /* verify we have enough room to store the value */
710 if (*len < sizeof(u32)) {
711 IPW_DEBUG_ORD("ordinal buffer length too small, "
712 "need %zd\n", sizeof(u32));
713 return -EINVAL;
714 }
715
716 *((u32 *) val) =
717 ipw_read_reg32(priv, (priv->table1_addr + (ord << 2)));
718 *len = sizeof(u32);
719 break;
720
721 case IPW_ORD_TABLE_2_MASK:
722 /*
723 * TABLE 2: Indirect access to a table of variable sized values
724 *
725 * This table consists of six values, each containing
726 * - dword containing the starting offset of the data
727 * - dword containing the length in the first 16 bits
728 * and the count in the second 16 bits
729 */
730
731 /* remove the table id from the ordinal */
732 ord &= IPW_ORD_TABLE_VALUE_MASK;
733
734 /* boundary check */
735 if (ord > priv->table2_len) {
736 IPW_DEBUG_ORD("ordinal value too long\n");
737 return -EINVAL;
738 }
739
740 /* get the address of statistic */
741 addr = ipw_read_reg32(priv, priv->table2_addr + (ord << 3));
742
743 /* get the second DW of statistics;
744 * two 16-bit words - first is length, second is count */
745 field_info =
746 ipw_read_reg32(priv,
747 priv->table2_addr + (ord << 3) +
748 sizeof(u32));
749
750 /* get each entry length */
751 field_len = *((u16 *) & field_info);
752
753 /* get number of entries */
754 field_count = *(((u16 *) & field_info) + 1);
755
756 /* abort if there is not enough memory */
757 total_len = field_len * field_count;
758 if (total_len > *len) {
759 *len = total_len;
760 return -EINVAL;
761 }
762
763 *len = total_len;
764 if (!total_len)
765 return 0;
766
767 IPW_DEBUG_ORD("addr = 0x%08x, total_len = %i, "
768 "field_info = 0x%08x\n",
769 addr, total_len, field_info);
770 ipw_read_indirect(priv, addr, val, total_len);
771 break;
772
773 default:
774 IPW_DEBUG_ORD("Invalid ordinal!\n");
775 return -EINVAL;
776
777 }
778
779 return 0;
780 }
781
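/* Cache the addresses and lengths of the three ordinal tables so that
 * ipw_get_ordinal() can service lookups later. */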
782 static void ipw_init_ordinals(struct ipw_priv *priv)
783 {
784 priv->table0_addr = IPW_ORDINALS_TABLE_LOWER;
785 priv->table0_len = ipw_read32(priv, priv->table0_addr);
786
787 IPW_DEBUG_ORD("table 0 offset at 0x%08x, len = %i\n",
788 priv->table0_addr, priv->table0_len);
789
790 priv->table1_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_1);
791 priv->table1_len = ipw_read_reg32(priv, priv->table1_addr);
792
793 IPW_DEBUG_ORD("table 1 offset at 0x%08x, len = %i\n",
794 priv->table1_addr, priv->table1_len);
795
796 priv->table2_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_2);
797 priv->table2_len = ipw_read_reg32(priv, priv->table2_addr);
798 priv->table2_len &= 0x0000ffff; /* use first two bytes */
799
800 IPW_DEBUG_ORD("table 2 offset at 0x%08x, len = %i\n",
801 priv->table2_addr, priv->table2_len);
802
803 }
804
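/* Mask the standby and DMA gating bits out of an event-register value
 * before it is written back for LED control. */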
805 static u32 ipw_register_toggle(u32 reg)
806 {
807 reg &= ~IPW_START_STANDBY;
808 if (reg & IPW_GATE_ODMA)
809 reg &= ~IPW_GATE_ODMA;
810 if (reg & IPW_GATE_IDMA)
811 reg &= ~IPW_GATE_IDMA;
812 if (reg & IPW_GATE_ADMA)
813 reg &= ~IPW_GATE_ADMA;
814 return reg;
815 }
816
817 /*
818 * LED behavior:
819 * - On radio ON, turn on any LEDs that need to be on during startup
820 * - On initialization, start unassociated blink
821 * - On association, disable unassociated blink
822 * - On disassociation, start unassociated blink
823 * - On radio OFF, turn off any LEDs started during radio on
824 *
825 */
826 #define LD_TIME_LINK_ON msecs_to_jiffies(300)
827 #define LD_TIME_LINK_OFF msecs_to_jiffies(2700)
828 #define LD_TIME_ACT_ON msecs_to_jiffies(250)
829
830 static void ipw_led_link_on(struct ipw_priv *priv)
831 {
832 unsigned long flags;
833 u32 led;
834
835 /* If configured to not use LEDs, or nic_type is 1,
836 * then we don't toggle a LINK led */
837 if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
838 return;
839
840 spin_lock_irqsave(&priv->lock, flags);
841
842 if (!(priv->status & STATUS_RF_KILL_MASK) &&
843 !(priv->status & STATUS_LED_LINK_ON)) {
844 IPW_DEBUG_LED("Link LED On\n");
845 led = ipw_read_reg32(priv, IPW_EVENT_REG);
846 led |= priv->led_association_on;
847
848 led = ipw_register_toggle(led);
849
850 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
851 ipw_write_reg32(priv, IPW_EVENT_REG, led);
852
853 priv->status |= STATUS_LED_LINK_ON;
854
855 /* If we aren't associated, schedule turning the LED off */
856 if (!(priv->status & STATUS_ASSOCIATED))
857 queue_delayed_work(priv->workqueue,
858 &priv->led_link_off,
859 LD_TIME_LINK_ON);
860 }
861
862 spin_unlock_irqrestore(&priv->lock, flags);
863 }
864
865 static void ipw_bg_led_link_on(struct work_struct *work)
866 {
867 struct ipw_priv *priv =
868 container_of(work, struct ipw_priv, led_link_on.work);
869 mutex_lock(&priv->mutex);
870 ipw_led_link_on(priv);
871 mutex_unlock(&priv->mutex);
872 }
873
874 static void ipw_led_link_off(struct ipw_priv *priv)
875 {
876 unsigned long flags;
877 u32 led;
878
879 /* If configured not to use LEDs, or nic type is 1,
880 * then we don't toggle the LINK led. */
881 if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
882 return;
883
884 spin_lock_irqsave(&priv->lock, flags);
885
886 if (priv->status & STATUS_LED_LINK_ON) {
887 led = ipw_read_reg32(priv, IPW_EVENT_REG);
888 led &= priv->led_association_off;
889 led = ipw_register_toggle(led);
890
891 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
892 ipw_write_reg32(priv, IPW_EVENT_REG, led);
893
894 IPW_DEBUG_LED("Link LED Off\n");
895
896 priv->status &= ~STATUS_LED_LINK_ON;
897
898 /* If we aren't associated and the radio is on, schedule
899 * turning the LED on (blink while unassociated) */
900 if (!(priv->status & STATUS_RF_KILL_MASK) &&
901 !(priv->status & STATUS_ASSOCIATED))
902 queue_delayed_work(priv->workqueue, &priv->led_link_on,
903 LD_TIME_LINK_OFF);
904
905 }
906
907 spin_unlock_irqrestore(&priv->lock, flags);
908 }
909
910 static void ipw_bg_led_link_off(struct work_struct *work)
911 {
912 struct ipw_priv *priv =
913 container_of(work, struct ipw_priv, led_link_off.work);
914 mutex_lock(&priv->mutex);
915 ipw_led_link_off(priv);
916 mutex_unlock(&priv->mutex);
917 }
918
919 static void __ipw_led_activity_on(struct ipw_priv *priv)
920 {
921 u32 led;
922
923 if (priv->config & CFG_NO_LED)
924 return;
925
926 if (priv->status & STATUS_RF_KILL_MASK)
927 return;
928
929 if (!(priv->status & STATUS_LED_ACT_ON)) {
930 led = ipw_read_reg32(priv, IPW_EVENT_REG);
931 led |= priv->led_activity_on;
932
933 led = ipw_register_toggle(led);
934
935 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
936 ipw_write_reg32(priv, IPW_EVENT_REG, led);
937
938 IPW_DEBUG_LED("Activity LED On\n");
939
940 priv->status |= STATUS_LED_ACT_ON;
941
942 cancel_delayed_work(&priv->led_act_off);
943 queue_delayed_work(priv->workqueue, &priv->led_act_off,
944 LD_TIME_ACT_ON);
945 } else {
946 /* Reschedule LED off for full time period */
947 cancel_delayed_work(&priv->led_act_off);
948 queue_delayed_work(priv->workqueue, &priv->led_act_off,
949 LD_TIME_ACT_ON);
950 }
951 }
952
953 #if 0
954 void ipw_led_activity_on(struct ipw_priv *priv)
955 {
956 unsigned long flags;
957 spin_lock_irqsave(&priv->lock, flags);
958 __ipw_led_activity_on(priv);
959 spin_unlock_irqrestore(&priv->lock, flags);
960 }
961 #endif /* 0 */
962
963 static void ipw_led_activity_off(struct ipw_priv *priv)
964 {
965 unsigned long flags;
966 u32 led;
967
968 if (priv->config & CFG_NO_LED)
969 return;
970
971 spin_lock_irqsave(&priv->lock, flags);
972
973 if (priv->status & STATUS_LED_ACT_ON) {
974 led = ipw_read_reg32(priv, IPW_EVENT_REG);
975 led &= priv->led_activity_off;
976
977 led = ipw_register_toggle(led);
978
979 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
980 ipw_write_reg32(priv, IPW_EVENT_REG, led);
981
982 IPW_DEBUG_LED("Activity LED Off\n");
983
984 priv->status &= ~STATUS_LED_ACT_ON;
985 }
986
987 spin_unlock_irqrestore(&priv->lock, flags);
988 }
989
990 static void ipw_bg_led_activity_off(struct work_struct *work)
991 {
992 struct ipw_priv *priv =
993 container_of(work, struct ipw_priv, led_act_off.work);
994 mutex_lock(&priv->mutex);
995 ipw_led_activity_off(priv);
996 mutex_unlock(&priv->mutex);
997 }
998
999 static void ipw_led_band_on(struct ipw_priv *priv)
1000 {
1001 unsigned long flags;
1002 u32 led;
1003
1004 /* Only nic type 1 supports mode LEDs */
1005 if (priv->config & CFG_NO_LED ||
1006 priv->nic_type != EEPROM_NIC_TYPE_1 || !priv->assoc_network)
1007 return;
1008
1009 spin_lock_irqsave(&priv->lock, flags);
1010
1011 led = ipw_read_reg32(priv, IPW_EVENT_REG);
1012 if (priv->assoc_network->mode == IEEE_A) {
1013 led |= priv->led_ofdm_on;
1014 led &= priv->led_association_off;
1015 IPW_DEBUG_LED("Mode LED On: 802.11a\n");
1016 } else if (priv->assoc_network->mode == IEEE_G) {
1017 led |= priv->led_ofdm_on;
1018 led |= priv->led_association_on;
1019 IPW_DEBUG_LED("Mode LED On: 802.11g\n");
1020 } else {
1021 led &= priv->led_ofdm_off;
1022 led |= priv->led_association_on;
1023 IPW_DEBUG_LED("Mode LED On: 802.11b\n");
1024 }
1025
1026 led = ipw_register_toggle(led);
1027
1028 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1029 ipw_write_reg32(priv, IPW_EVENT_REG, led);
1030
1031 spin_unlock_irqrestore(&priv->lock, flags);
1032 }
1033
1034 static void ipw_led_band_off(struct ipw_priv *priv)
1035 {
1036 unsigned long flags;
1037 u32 led;
1038
1039 /* Only nic type 1 supports mode LEDs */
1040 if (priv->config & CFG_NO_LED || priv->nic_type != EEPROM_NIC_TYPE_1)
1041 return;
1042
1043 spin_lock_irqsave(&priv->lock, flags);
1044
1045 led = ipw_read_reg32(priv, IPW_EVENT_REG);
1046 led &= priv->led_ofdm_off;
1047 led &= priv->led_association_off;
1048
1049 led = ipw_register_toggle(led);
1050
1051 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1052 ipw_write_reg32(priv, IPW_EVENT_REG, led);
1053
1054 spin_unlock_irqrestore(&priv->lock, flags);
1055 }
1056
1057 static void ipw_led_radio_on(struct ipw_priv *priv)
1058 {
1059 ipw_led_link_on(priv);
1060 }
1061
1062 static void ipw_led_radio_off(struct ipw_priv *priv)
1063 {
1064 ipw_led_activity_off(priv);
1065 ipw_led_link_off(priv);
1066 }
1067
1068 static void ipw_led_link_up(struct ipw_priv *priv)
1069 {
1070 /* Set the Link Led on for all nic types */
1071 ipw_led_link_on(priv);
1072 }
1073
1074 static void ipw_led_link_down(struct ipw_priv *priv)
1075 {
1076 ipw_led_activity_off(priv);
1077 ipw_led_link_off(priv);
1078
1079 if (priv->status & STATUS_RF_KILL_MASK)
1080 ipw_led_radio_off(priv);
1081 }
1082
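/* Read the NIC type from the EEPROM and set up the LED pin masks; NIC type 1
 * has the link and activity LEDs swapped and uses the band (mode) LED
 * instead of link blinking. */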
1083 static void ipw_led_init(struct ipw_priv *priv)
1084 {
1085 priv->nic_type = priv->eeprom[EEPROM_NIC_TYPE];
1086
1087 /* Set the default PINs for the link and activity leds */
1088 priv->led_activity_on = IPW_ACTIVITY_LED;
1089 priv->led_activity_off = ~(IPW_ACTIVITY_LED);
1090
1091 priv->led_association_on = IPW_ASSOCIATED_LED;
1092 priv->led_association_off = ~(IPW_ASSOCIATED_LED);
1093
1094 /* Set the default PINs for the OFDM leds */
1095 priv->led_ofdm_on = IPW_OFDM_LED;
1096 priv->led_ofdm_off = ~(IPW_OFDM_LED);
1097
1098 switch (priv->nic_type) {
1099 case EEPROM_NIC_TYPE_1:
1100 /* In this NIC type, the LEDs are reversed.... */
1101 priv->led_activity_on = IPW_ASSOCIATED_LED;
1102 priv->led_activity_off = ~(IPW_ASSOCIATED_LED);
1103 priv->led_association_on = IPW_ACTIVITY_LED;
1104 priv->led_association_off = ~(IPW_ACTIVITY_LED);
1105
1106 if (!(priv->config & CFG_NO_LED))
1107 ipw_led_band_on(priv);
1108
1109 /* And we don't blink link LEDs for this nic, so
1110 * just return here */
1111 return;
1112
1113 case EEPROM_NIC_TYPE_3:
1114 case EEPROM_NIC_TYPE_2:
1115 case EEPROM_NIC_TYPE_4:
1116 case EEPROM_NIC_TYPE_0:
1117 break;
1118
1119 default:
1120 IPW_DEBUG_INFO("Unknown NIC type from EEPROM: %d\n",
1121 priv->nic_type);
1122 priv->nic_type = EEPROM_NIC_TYPE_0;
1123 break;
1124 }
1125
1126 if (!(priv->config & CFG_NO_LED)) {
1127 if (priv->status & STATUS_ASSOCIATED)
1128 ipw_led_link_on(priv);
1129 else
1130 ipw_led_link_off(priv);
1131 }
1132 }
1133
1134 static void ipw_led_shutdown(struct ipw_priv *priv)
1135 {
1136 ipw_led_activity_off(priv);
1137 ipw_led_link_off(priv);
1138 ipw_led_band_off(priv);
1139 cancel_delayed_work(&priv->led_link_on);
1140 cancel_delayed_work(&priv->led_link_off);
1141 cancel_delayed_work(&priv->led_act_off);
1142 }
1143
1144 /*
1145 * The following adds a new attribute to the sysfs representation
1146 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/ipw/)
1147 * used for controlling the debug level.
1148 *
1149 * See the level definitions in ipw for details.
1150 */
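/* Example usage (hypothetical path; the actual directory name comes from
 * DRV_NAME, e.g. /sys/bus/pci/drivers/ipw2200/):
 *   echo 0x1 > .../debug_level   sets the debug level
 *   cat .../debug_level          reads it back in hex
 */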
1151 static ssize_t show_debug_level(struct device_driver *d, char *buf)
1152 {
1153 return sprintf(buf, "0x%08X\n", ipw_debug_level);
1154 }
1155
1156 static ssize_t store_debug_level(struct device_driver *d, const char *buf,
1157 size_t count)
1158 {
1159 char *p = (char *)buf;
1160 u32 val;
1161
1162 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1163 p++;
1164 if (p[0] == 'x' || p[0] == 'X')
1165 p++;
1166 val = simple_strtoul(p, &p, 16);
1167 } else
1168 val = simple_strtoul(p, &p, 10);
1169 if (p == buf)
1170 printk(KERN_INFO DRV_NAME
1171 ": %s is not in hex or decimal form.\n", buf);
1172 else
1173 ipw_debug_level = val;
1174
1175 return strnlen(buf, count);
1176 }
1177
1178 static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO,
1179 show_debug_level, store_debug_level);
1180
1181 static inline u32 ipw_get_event_log_len(struct ipw_priv *priv)
1182 {
1183 /* length = 1st dword in log */
1184 return ipw_read_reg32(priv, ipw_read32(priv, IPW_EVENT_LOG));
1185 }
1186
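/* Copy 'log_len' event entries out of the device's event log into 'log'. */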
1187 static void ipw_capture_event_log(struct ipw_priv *priv,
1188 u32 log_len, struct ipw_event *log)
1189 {
1190 u32 base;
1191
1192 if (log_len) {
1193 base = ipw_read32(priv, IPW_EVENT_LOG);
1194 ipw_read_indirect(priv, base + sizeof(base) + sizeof(u32),
1195 (u8 *) log, sizeof(*log) * log_len);
1196 }
1197 }
1198
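/* Allocate (GFP_ATOMIC) and fill a firmware error record: the current
 * status/config plus the error elements and event log read from the device.
 * Returns NULL on allocation failure. */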
1199 static struct ipw_fw_error *ipw_alloc_error_log(struct ipw_priv *priv)
1200 {
1201 struct ipw_fw_error *error;
1202 u32 log_len = ipw_get_event_log_len(priv);
1203 u32 base = ipw_read32(priv, IPW_ERROR_LOG);
1204 u32 elem_len = ipw_read_reg32(priv, base);
1205
1206 error = kmalloc(sizeof(*error) +
1207 sizeof(*error->elem) * elem_len +
1208 sizeof(*error->log) * log_len, GFP_ATOMIC);
1209 if (!error) {
1210 IPW_ERROR("Memory allocation for firmware error log "
1211 "failed.\n");
1212 return NULL;
1213 }
1214 error->jiffies = jiffies;
1215 error->status = priv->status;
1216 error->config = priv->config;
1217 error->elem_len = elem_len;
1218 error->log_len = log_len;
1219 error->elem = (struct ipw_error_elem *)error->payload;
1220 error->log = (struct ipw_event *)(error->elem + elem_len);
1221
1222 ipw_capture_event_log(priv, log_len, error->log);
1223
1224 if (elem_len)
1225 ipw_read_indirect(priv, base + sizeof(base), (u8 *) error->elem,
1226 sizeof(*error->elem) * elem_len);
1227
1228 return error;
1229 }
1230
1231 static ssize_t show_event_log(struct device *d,
1232 struct device_attribute *attr, char *buf)
1233 {
1234 struct ipw_priv *priv = dev_get_drvdata(d);
1235 u32 log_len = ipw_get_event_log_len(priv);
1236 struct ipw_event log[log_len];
1237 u32 len = 0, i;
1238
1239 ipw_capture_event_log(priv, log_len, log);
1240
1241 len += snprintf(buf + len, PAGE_SIZE - len, "%08X", log_len);
1242 for (i = 0; i < log_len; i++)
1243 len += snprintf(buf + len, PAGE_SIZE - len,
1244 "\n%08X%08X%08X",
1245 log[i].time, log[i].event, log[i].data);
1246 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1247 return len;
1248 }
1249
1250 static DEVICE_ATTR(event_log, S_IRUGO, show_event_log, NULL);
1251
1252 static ssize_t show_error(struct device *d,
1253 struct device_attribute *attr, char *buf)
1254 {
1255 struct ipw_priv *priv = dev_get_drvdata(d);
1256 u32 len = 0, i;
1257 if (!priv->error)
1258 return 0;
1259 len += snprintf(buf + len, PAGE_SIZE - len,
1260 "%08lX%08X%08X%08X",
1261 priv->error->jiffies,
1262 priv->error->status,
1263 priv->error->config, priv->error->elem_len);
1264 for (i = 0; i < priv->error->elem_len; i++)
1265 len += snprintf(buf + len, PAGE_SIZE - len,
1266 "\n%08X%08X%08X%08X%08X%08X%08X",
1267 priv->error->elem[i].time,
1268 priv->error->elem[i].desc,
1269 priv->error->elem[i].blink1,
1270 priv->error->elem[i].blink2,
1271 priv->error->elem[i].link1,
1272 priv->error->elem[i].link2,
1273 priv->error->elem[i].data);
1274
1275 len += snprintf(buf + len, PAGE_SIZE - len,
1276 "\n%08X", priv->error->log_len);
1277 for (i = 0; i < priv->error->log_len; i++)
1278 len += snprintf(buf + len, PAGE_SIZE - len,
1279 "\n%08X%08X%08X",
1280 priv->error->log[i].time,
1281 priv->error->log[i].event,
1282 priv->error->log[i].data);
1283 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1284 return len;
1285 }
1286
1287 static ssize_t clear_error(struct device *d,
1288 struct device_attribute *attr,
1289 const char *buf, size_t count)
1290 {
1291 struct ipw_priv *priv = dev_get_drvdata(d);
1292
1293 kfree(priv->error);
1294 priv->error = NULL;
1295 return count;
1296 }
1297
1298 static DEVICE_ATTR(error, S_IRUGO | S_IWUSR, show_error, clear_error);
1299
1300 static ssize_t show_cmd_log(struct device *d,
1301 struct device_attribute *attr, char *buf)
1302 {
1303 struct ipw_priv *priv = dev_get_drvdata(d);
1304 u32 len = 0, i;
1305 if (!priv->cmdlog)
1306 return 0;
1307 for (i = (priv->cmdlog_pos + 1) % priv->cmdlog_len;
1308 (i != priv->cmdlog_pos) && (PAGE_SIZE - len);
1309 i = (i + 1) % priv->cmdlog_len) {
1310 len +=
1311 snprintf(buf + len, PAGE_SIZE - len,
1312 "\n%08lX%08X%08X%08X\n", priv->cmdlog[i].jiffies,
1313 priv->cmdlog[i].retcode, priv->cmdlog[i].cmd.cmd,
1314 priv->cmdlog[i].cmd.len);
1315 len +=
1316 snprintk_buf(buf + len, PAGE_SIZE - len,
1317 (u8 *) priv->cmdlog[i].cmd.param,
1318 priv->cmdlog[i].cmd.len);
1319 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1320 }
1321 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1322 return len;
1323 }
1324
1325 static DEVICE_ATTR(cmd_log, S_IRUGO, show_cmd_log, NULL);
1326
1327 #ifdef CONFIG_IPW2200_PROMISCUOUS
1328 static void ipw_prom_free(struct ipw_priv *priv);
1329 static int ipw_prom_alloc(struct ipw_priv *priv);
1330 static ssize_t store_rtap_iface(struct device *d,
1331 struct device_attribute *attr,
1332 const char *buf, size_t count)
1333 {
1334 struct ipw_priv *priv = dev_get_drvdata(d);
1335 int rc = 0;
1336
1337 if (count < 1)
1338 return -EINVAL;
1339
1340 switch (buf[0]) {
1341 case '0':
1342 if (!rtap_iface)
1343 return count;
1344
1345 if (netif_running(priv->prom_net_dev)) {
1346 IPW_WARNING("Interface is up. Cannot unregister.\n");
1347 return count;
1348 }
1349
1350 ipw_prom_free(priv);
1351 rtap_iface = 0;
1352 break;
1353
1354 case '1':
1355 if (rtap_iface)
1356 return count;
1357
1358 rc = ipw_prom_alloc(priv);
1359 if (!rc)
1360 rtap_iface = 1;
1361 break;
1362
1363 default:
1364 return -EINVAL;
1365 }
1366
1367 if (rc) {
1368 IPW_ERROR("Failed to register promiscuous network "
1369 "device (error %d).\n", rc);
1370 }
1371
1372 return count;
1373 }
1374
1375 static ssize_t show_rtap_iface(struct device *d,
1376 struct device_attribute *attr,
1377 char *buf)
1378 {
1379 struct ipw_priv *priv = dev_get_drvdata(d);
1380 if (rtap_iface)
1381 return sprintf(buf, "%s", priv->prom_net_dev->name);
1382 else {
1383 buf[0] = '-';
1384 buf[1] = '1';
1385 buf[2] = '\0';
1386 return 3;
1387 }
1388 }
1389
1390 static DEVICE_ATTR(rtap_iface, S_IWUSR | S_IRUSR, show_rtap_iface,
1391 store_rtap_iface);
1392
1393 static ssize_t store_rtap_filter(struct device *d,
1394 struct device_attribute *attr,
1395 const char *buf, size_t count)
1396 {
1397 struct ipw_priv *priv = dev_get_drvdata(d);
1398
1399 if (!priv->prom_priv) {
1400 IPW_ERROR("Attempting to set filter without "
1401 "rtap_iface enabled.\n");
1402 return -EPERM;
1403 }
1404
1405 priv->prom_priv->filter = simple_strtol(buf, NULL, 0);
1406
1407 IPW_DEBUG_INFO("Setting rtap filter to " BIT_FMT16 "\n",
1408 BIT_ARG16(priv->prom_priv->filter));
1409
1410 return count;
1411 }
1412
1413 static ssize_t show_rtap_filter(struct device *d,
1414 struct device_attribute *attr,
1415 char *buf)
1416 {
1417 struct ipw_priv *priv = dev_get_drvdata(d);
1418 return sprintf(buf, "0x%04X",
1419 priv->prom_priv ? priv->prom_priv->filter : 0);
1420 }
1421
1422 static DEVICE_ATTR(rtap_filter, S_IWUSR | S_IRUSR, show_rtap_filter,
1423 store_rtap_filter);
1424 #endif
1425
1426 static ssize_t show_scan_age(struct device *d, struct device_attribute *attr,
1427 char *buf)
1428 {
1429 struct ipw_priv *priv = dev_get_drvdata(d);
1430 return sprintf(buf, "%d\n", priv->ieee->scan_age);
1431 }
1432
1433 static ssize_t store_scan_age(struct device *d, struct device_attribute *attr,
1434 const char *buf, size_t count)
1435 {
1436 struct ipw_priv *priv = dev_get_drvdata(d);
1437 struct net_device *dev = priv->net_dev;
1438 char buffer[] = "00000000";
1439 unsigned long len =
1440 (sizeof(buffer) - 1) > count ? count : sizeof(buffer) - 1;
1441 unsigned long val;
1442 char *p = buffer;
1443
1444 IPW_DEBUG_INFO("enter\n");
1445
1446 strncpy(buffer, buf, len);
1447 buffer[len] = 0;
1448
1449 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1450 p++;
1451 if (p[0] == 'x' || p[0] == 'X')
1452 p++;
1453 val = simple_strtoul(p, &p, 16);
1454 } else
1455 val = simple_strtoul(p, &p, 10);
1456 if (p == buffer) {
1457 IPW_DEBUG_INFO("%s: user supplied invalid value.\n", dev->name);
1458 } else {
1459 priv->ieee->scan_age = val;
1460 IPW_DEBUG_INFO("set scan_age = %u\n", priv->ieee->scan_age);
1461 }
1462
1463 IPW_DEBUG_INFO("exit\n");
1464 return len;
1465 }
1466
1467 static DEVICE_ATTR(scan_age, S_IWUSR | S_IRUGO, show_scan_age, store_scan_age);
1468
1469 static ssize_t show_led(struct device *d, struct device_attribute *attr,
1470 char *buf)
1471 {
1472 struct ipw_priv *priv = dev_get_drvdata(d);
1473 return sprintf(buf, "%d\n", (priv->config & CFG_NO_LED) ? 0 : 1);
1474 }
1475
1476 static ssize_t store_led(struct device *d, struct device_attribute *attr,
1477 const char *buf, size_t count)
1478 {
1479 struct ipw_priv *priv = dev_get_drvdata(d);
1480
1481 IPW_DEBUG_INFO("enter\n");
1482
1483 if (count == 0)
1484 return 0;
1485
1486 if (*buf == 0) {
1487 IPW_DEBUG_LED("Disabling LED control.\n");
1488 priv->config |= CFG_NO_LED;
1489 ipw_led_shutdown(priv);
1490 } else {
1491 IPW_DEBUG_LED("Enabling LED control.\n");
1492 priv->config &= ~CFG_NO_LED;
1493 ipw_led_init(priv);
1494 }
1495
1496 IPW_DEBUG_INFO("exit\n");
1497 return count;
1498 }
1499
1500 static DEVICE_ATTR(led, S_IWUSR | S_IRUGO, show_led, store_led);
1501
1502 static ssize_t show_status(struct device *d,
1503 struct device_attribute *attr, char *buf)
1504 {
1505 struct ipw_priv *p = d->driver_data;
1506 return sprintf(buf, "0x%08x\n", (int)p->status);
1507 }
1508
1509 static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
1510
1511 static ssize_t show_cfg(struct device *d, struct device_attribute *attr,
1512 char *buf)
1513 {
1514 struct ipw_priv *p = d->driver_data;
1515 return sprintf(buf, "0x%08x\n", (int)p->config);
1516 }
1517
1518 static DEVICE_ATTR(cfg, S_IRUGO, show_cfg, NULL);
1519
1520 static ssize_t show_nic_type(struct device *d,
1521 struct device_attribute *attr, char *buf)
1522 {
1523 struct ipw_priv *priv = d->driver_data;
1524 return sprintf(buf, "TYPE: %d\n", priv->nic_type);
1525 }
1526
1527 static DEVICE_ATTR(nic_type, S_IRUGO, show_nic_type, NULL);
1528
1529 static ssize_t show_ucode_version(struct device *d,
1530 struct device_attribute *attr, char *buf)
1531 {
1532 u32 len = sizeof(u32), tmp = 0;
1533 struct ipw_priv *p = d->driver_data;
1534
1535 if (ipw_get_ordinal(p, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len))
1536 return 0;
1537
1538 return sprintf(buf, "0x%08x\n", tmp);
1539 }
1540
1541 static DEVICE_ATTR(ucode_version, S_IWUSR | S_IRUGO, show_ucode_version, NULL);
1542
1543 static ssize_t show_rtc(struct device *d, struct device_attribute *attr,
1544 char *buf)
1545 {
1546 u32 len = sizeof(u32), tmp = 0;
1547 struct ipw_priv *p = d->driver_data;
1548
1549 if (ipw_get_ordinal(p, IPW_ORD_STAT_RTC, &tmp, &len))
1550 return 0;
1551
1552 return sprintf(buf, "0x%08x\n", tmp);
1553 }
1554
1555 static DEVICE_ATTR(rtc, S_IWUSR | S_IRUGO, show_rtc, NULL);
1556
1557 /*
1558 * Add a device attribute to view/control the delay between eeprom
1559 * operations.
1560 */
1561 static ssize_t show_eeprom_delay(struct device *d,
1562 struct device_attribute *attr, char *buf)
1563 {
1564 int n = ((struct ipw_priv *)d->driver_data)->eeprom_delay;
1565 return sprintf(buf, "%i\n", n);
1566 }
1567 static ssize_t store_eeprom_delay(struct device *d,
1568 struct device_attribute *attr,
1569 const char *buf, size_t count)
1570 {
1571 struct ipw_priv *p = d->driver_data;
1572 sscanf(buf, "%i", &p->eeprom_delay);
1573 return strnlen(buf, count);
1574 }
1575
1576 static DEVICE_ATTR(eeprom_delay, S_IWUSR | S_IRUGO,
1577 show_eeprom_delay, store_eeprom_delay);
1578
1579 static ssize_t show_command_event_reg(struct device *d,
1580 struct device_attribute *attr, char *buf)
1581 {
1582 u32 reg = 0;
1583 struct ipw_priv *p = d->driver_data;
1584
1585 reg = ipw_read_reg32(p, IPW_INTERNAL_CMD_EVENT);
1586 return sprintf(buf, "0x%08x\n", reg);
1587 }
1588 static ssize_t store_command_event_reg(struct device *d,
1589 struct device_attribute *attr,
1590 const char *buf, size_t count)
1591 {
1592 u32 reg;
1593 struct ipw_priv *p = d->driver_data;
1594
1595 sscanf(buf, "%x", &reg);
1596 ipw_write_reg32(p, IPW_INTERNAL_CMD_EVENT, reg);
1597 return strnlen(buf, count);
1598 }
1599
1600 static DEVICE_ATTR(command_event_reg, S_IWUSR | S_IRUGO,
1601 show_command_event_reg, store_command_event_reg);
1602
1603 static ssize_t show_mem_gpio_reg(struct device *d,
1604 struct device_attribute *attr, char *buf)
1605 {
1606 u32 reg = 0;
1607 struct ipw_priv *p = d->driver_data;
1608
1609 reg = ipw_read_reg32(p, 0x301100);
1610 return sprintf(buf, "0x%08x\n", reg);
1611 }
1612 static ssize_t store_mem_gpio_reg(struct device *d,
1613 struct device_attribute *attr,
1614 const char *buf, size_t count)
1615 {
1616 u32 reg;
1617 struct ipw_priv *p = d->driver_data;
1618
1619 sscanf(buf, "%x", &reg);
1620 ipw_write_reg32(p, 0x301100, reg);
1621 return strnlen(buf, count);
1622 }
1623
1624 static DEVICE_ATTR(mem_gpio_reg, S_IWUSR | S_IRUGO,
1625 show_mem_gpio_reg, store_mem_gpio_reg);
1626
1627 static ssize_t show_indirect_dword(struct device *d,
1628 struct device_attribute *attr, char *buf)
1629 {
1630 u32 reg = 0;
1631 struct ipw_priv *priv = d->driver_data;
1632
1633 if (priv->status & STATUS_INDIRECT_DWORD)
1634 reg = ipw_read_reg32(priv, priv->indirect_dword);
1635 else
1636 reg = 0;
1637
1638 return sprintf(buf, "0x%08x\n", reg);
1639 }
1640 static ssize_t store_indirect_dword(struct device *d,
1641 struct device_attribute *attr,
1642 const char *buf, size_t count)
1643 {
1644 struct ipw_priv *priv = d->driver_data;
1645
1646 sscanf(buf, "%x", &priv->indirect_dword);
1647 priv->status |= STATUS_INDIRECT_DWORD;
1648 return strnlen(buf, count);
1649 }
1650
1651 static DEVICE_ATTR(indirect_dword, S_IWUSR | S_IRUGO,
1652 show_indirect_dword, store_indirect_dword);
1653
1654 static ssize_t show_indirect_byte(struct device *d,
1655 struct device_attribute *attr, char *buf)
1656 {
1657 u8 reg = 0;
1658 struct ipw_priv *priv = d->driver_data;
1659
1660 if (priv->status & STATUS_INDIRECT_BYTE)
1661 reg = ipw_read_reg8(priv, priv->indirect_byte);
1662 else
1663 reg = 0;
1664
1665 return sprintf(buf, "0x%02x\n", reg);
1666 }
1667 static ssize_t store_indirect_byte(struct device *d,
1668 struct device_attribute *attr,
1669 const char *buf, size_t count)
1670 {
1671 struct ipw_priv *priv = d->driver_data;
1672
1673 sscanf(buf, "%x", &priv->indirect_byte);
1674 priv->status |= STATUS_INDIRECT_BYTE;
1675 return strnlen(buf, count);
1676 }
1677
1678 static DEVICE_ATTR(indirect_byte, S_IWUSR | S_IRUGO,
1679 show_indirect_byte, store_indirect_byte);
1680
1681 static ssize_t show_direct_dword(struct device *d,
1682 struct device_attribute *attr, char *buf)
1683 {
1684 u32 reg = 0;
1685 struct ipw_priv *priv = d->driver_data;
1686
1687 if (priv->status & STATUS_DIRECT_DWORD)
1688 reg = ipw_read32(priv, priv->direct_dword);
1689 else
1690 reg = 0;
1691
1692 return sprintf(buf, "0x%08x\n", reg);
1693 }
1694 static ssize_t store_direct_dword(struct device *d,
1695 struct device_attribute *attr,
1696 const char *buf, size_t count)
1697 {
1698 struct ipw_priv *priv = d->driver_data;
1699
1700 sscanf(buf, "%x", &priv->direct_dword);
1701 priv->status |= STATUS_DIRECT_DWORD;
1702 return strnlen(buf, count);
1703 }
1704
1705 static DEVICE_ATTR(direct_dword, S_IWUSR | S_IRUGO,
1706 show_direct_dword, store_direct_dword);
1707
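/* Sample the hardware RF kill line (bit 0x10000 of register 0x30), update
 * STATUS_RF_KILL_HW and return 1 if the kill switch is active. */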
1708 static int rf_kill_active(struct ipw_priv *priv)
1709 {
1710 if (0 == (ipw_read32(priv, 0x30) & 0x10000))
1711 priv->status |= STATUS_RF_KILL_HW;
1712 else
1713 priv->status &= ~STATUS_RF_KILL_HW;
1714
1715 return (priv->status & STATUS_RF_KILL_HW) ? 1 : 0;
1716 }
1717
1718 static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr,
1719 char *buf)
1720 {
1721 /* 0 - RF kill not enabled
1722 1 - SW based RF kill active (sysfs)
1723 2 - HW based RF kill active
1724 3 - Both HW and SW based RF kill active */
1725 struct ipw_priv *priv = d->driver_data;
1726 int val = ((priv->status & STATUS_RF_KILL_SW) ? 0x1 : 0x0) |
1727 (rf_kill_active(priv) ? 0x2 : 0x0);
1728 return sprintf(buf, "%i\n", val);
1729 }
1730
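/* Change the software RF kill state.  Returns 0 if nothing changed, 1
 * otherwise; bringing the radio back up is deferred to the rf_kill poll
 * if the HW switch is still active. */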
1731 static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
1732 {
1733 if ((disable_radio ? 1 : 0) ==
1734 ((priv->status & STATUS_RF_KILL_SW) ? 1 : 0))
1735 return 0;
1736
1737 IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO %s\n",
1738 disable_radio ? "OFF" : "ON");
1739
1740 if (disable_radio) {
1741 priv->status |= STATUS_RF_KILL_SW;
1742
1743 if (priv->workqueue)
1744 cancel_delayed_work(&priv->request_scan);
1745 queue_work(priv->workqueue, &priv->down);
1746 } else {
1747 priv->status &= ~STATUS_RF_KILL_SW;
1748 if (rf_kill_active(priv)) {
1749 IPW_DEBUG_RF_KILL("Can not turn radio back on - "
1750 "disabled by HW switch\n");
1751 /* Make sure the RF_KILL check timer is running */
1752 cancel_delayed_work(&priv->rf_kill);
1753 queue_delayed_work(priv->workqueue, &priv->rf_kill,
1754 round_jiffies(2 * HZ));
1755 } else
1756 queue_work(priv->workqueue, &priv->up);
1757 }
1758
1759 return 1;
1760 }
1761
1762 static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr,
1763 const char *buf, size_t count)
1764 {
1765 struct ipw_priv *priv = d->driver_data;
1766
1767 ipw_radio_kill_sw(priv, buf[0] == '1');
1768
1769 return count;
1770 }
1771
1772 static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill);
1773
1774 static ssize_t show_speed_scan(struct device *d, struct device_attribute *attr,
1775 char *buf)
1776 {
1777 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1778 int pos = 0, len = 0;
1779 if (priv->config & CFG_SPEED_SCAN) {
1780 while (priv->speed_scan[pos] != 0)
1781 len += sprintf(&buf[len], "%d ",
1782 priv->speed_scan[pos++]);
1783 return len + sprintf(&buf[len], "\n");
1784 }
1785
1786 return sprintf(buf, "0\n");
1787 }
1788
1789 static ssize_t store_speed_scan(struct device *d, struct device_attribute *attr,
1790 const char *buf, size_t count)
1791 {
1792 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1793 int channel, pos = 0;
1794 const char *p = buf;
1795
1796 /* list of space separated channels to scan, optionally ending with 0 */
1797 while ((channel = simple_strtol(p, NULL, 0))) {
1798 if (pos == MAX_SPEED_SCAN - 1) {
1799 priv->speed_scan[pos] = 0;
1800 break;
1801 }
1802
1803 if (ieee80211_is_valid_channel(priv->ieee, channel))
1804 priv->speed_scan[pos++] = channel;
1805 else
1806 IPW_WARNING("Skipping invalid channel request: %d\n",
1807 channel);
1808 p = strchr(p, ' ');
1809 if (!p)
1810 break;
1811 while (*p == ' ' || *p == '\t')
1812 p++;
1813 }
1814
1815 if (pos == 0)
1816 priv->config &= ~CFG_SPEED_SCAN;
1817 else {
1818 priv->speed_scan_pos = 0;
1819 priv->config |= CFG_SPEED_SCAN;
1820 }
1821
1822 return count;
1823 }
1824
1825 static DEVICE_ATTR(speed_scan, S_IWUSR | S_IRUGO, show_speed_scan,
1826 store_speed_scan);
1827
1828 static ssize_t show_net_stats(struct device *d, struct device_attribute *attr,
1829 char *buf)
1830 {
1831 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1832 return sprintf(buf, "%c\n", (priv->config & CFG_NET_STATS) ? '1' : '0');
1833 }
1834
1835 static ssize_t store_net_stats(struct device *d, struct device_attribute *attr,
1836 const char *buf, size_t count)
1837 {
1838 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1839 if (buf[0] == '1')
1840 priv->config |= CFG_NET_STATS;
1841 else
1842 priv->config &= ~CFG_NET_STATS;
1843
1844 return count;
1845 }
1846
1847 static DEVICE_ATTR(net_stats, S_IWUSR | S_IRUGO,
1848 show_net_stats, store_net_stats);
1849
1850 static ssize_t show_channels(struct device *d,
1851 struct device_attribute *attr,
1852 char *buf)
1853 {
1854 struct ipw_priv *priv = dev_get_drvdata(d);
1855 const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
1856 int len = 0, i;
1857
1858 len = sprintf(&buf[len],
1859 "Displaying %d channels in 2.4GHz band "
1860 "(802.11bg):\n", geo->bg_channels);
1861
1862 for (i = 0; i < geo->bg_channels; i++) {
1863 len += sprintf(&buf[len], "%d: BSS%s%s, %s, Band %s.\n",
1864 geo->bg[i].channel,
1865 geo->bg[i].flags & IEEE80211_CH_RADAR_DETECT ?
1866 " (radar spectrum)" : "",
1867 ((geo->bg[i].flags & IEEE80211_CH_NO_IBSS) ||
1868 (geo->bg[i].flags & IEEE80211_CH_RADAR_DETECT))
1869 ? "" : ", IBSS",
1870 geo->bg[i].flags & IEEE80211_CH_PASSIVE_ONLY ?
1871 "passive only" : "active/passive",
1872 geo->bg[i].flags & IEEE80211_CH_B_ONLY ?
1873 "B" : "B/G");
1874 }
1875
1876 len += sprintf(&buf[len],
1877 "Displaying %d channels in 5.2GHz band "
1878 "(802.11a):\n", geo->a_channels);
1879 for (i = 0; i < geo->a_channels; i++) {
1880 len += sprintf(&buf[len], "%d: BSS%s%s, %s.\n",
1881 geo->a[i].channel,
1882 geo->a[i].flags & IEEE80211_CH_RADAR_DETECT ?
1883 " (radar spectrum)" : "",
1884 ((geo->a[i].flags & IEEE80211_CH_NO_IBSS) ||
1885 (geo->a[i].flags & IEEE80211_CH_RADAR_DETECT))
1886 ? "" : ", IBSS",
1887 geo->a[i].flags & IEEE80211_CH_PASSIVE_ONLY ?
1888 "passive only" : "active/passive");
1889 }
1890
1891 return len;
1892 }
1893
1894 static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL);
1895
1896 static void notify_wx_assoc_event(struct ipw_priv *priv)
1897 {
1898 union iwreq_data wrqu;
1899 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
1900 if (priv->status & STATUS_ASSOCIATED)
1901 memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN);
1902 else
1903 memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
1904 wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
1905 }
1906
1907 static void ipw_irq_tasklet(struct ipw_priv *priv)
1908 {
1909 u32 inta, inta_mask, handled = 0;
1910 unsigned long flags;
1911 int rc = 0;
1912
1913 spin_lock_irqsave(&priv->irq_lock, flags);
1914
1915 inta = ipw_read32(priv, IPW_INTA_RW);
1916 inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
1917 inta &= (IPW_INTA_MASK_ALL & inta_mask);
1918
1919 /* Add any cached INTA values that need to be handled */
1920 inta |= priv->isr_inta;
1921
1922 spin_unlock_irqrestore(&priv->irq_lock, flags);
1923
1924 spin_lock_irqsave(&priv->lock, flags);
1925
1926 /* handle all the possible causes of the interrupt */
1927 if (inta & IPW_INTA_BIT_RX_TRANSFER) {
1928 ipw_rx(priv);
1929 handled |= IPW_INTA_BIT_RX_TRANSFER;
1930 }
1931
1932 if (inta & IPW_INTA_BIT_TX_CMD_QUEUE) {
1933 IPW_DEBUG_HC("Command completed.\n");
1934 rc = ipw_queue_tx_reclaim(priv, &priv->txq_cmd, -1);
1935 priv->status &= ~STATUS_HCMD_ACTIVE;
1936 wake_up_interruptible(&priv->wait_command_queue);
1937 handled |= IPW_INTA_BIT_TX_CMD_QUEUE;
1938 }
1939
1940 if (inta & IPW_INTA_BIT_TX_QUEUE_1) {
1941 IPW_DEBUG_TX("TX_QUEUE_1\n");
1942 rc = ipw_queue_tx_reclaim(priv, &priv->txq[0], 0);
1943 handled |= IPW_INTA_BIT_TX_QUEUE_1;
1944 }
1945
1946 if (inta & IPW_INTA_BIT_TX_QUEUE_2) {
1947 IPW_DEBUG_TX("TX_QUEUE_2\n");
1948 rc = ipw_queue_tx_reclaim(priv, &priv->txq[1], 1);
1949 handled |= IPW_INTA_BIT_TX_QUEUE_2;
1950 }
1951
1952 if (inta & IPW_INTA_BIT_TX_QUEUE_3) {
1953 IPW_DEBUG_TX("TX_QUEUE_3\n");
1954 rc = ipw_queue_tx_reclaim(priv, &priv->txq[2], 2);
1955 handled |= IPW_INTA_BIT_TX_QUEUE_3;
1956 }
1957
1958 if (inta & IPW_INTA_BIT_TX_QUEUE_4) {
1959 IPW_DEBUG_TX("TX_QUEUE_4\n");
1960 rc = ipw_queue_tx_reclaim(priv, &priv->txq[3], 3);
1961 handled |= IPW_INTA_BIT_TX_QUEUE_4;
1962 }
1963
1964 if (inta & IPW_INTA_BIT_STATUS_CHANGE) {
1965 IPW_WARNING("STATUS_CHANGE\n");
1966 handled |= IPW_INTA_BIT_STATUS_CHANGE;
1967 }
1968
1969 if (inta & IPW_INTA_BIT_BEACON_PERIOD_EXPIRED) {
1970 IPW_WARNING("TX_PERIOD_EXPIRED\n");
1971 handled |= IPW_INTA_BIT_BEACON_PERIOD_EXPIRED;
1972 }
1973
1974 if (inta & IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE) {
1975 IPW_WARNING("HOST_CMD_DONE\n");
1976 handled |= IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE;
1977 }
1978
1979 if (inta & IPW_INTA_BIT_FW_INITIALIZATION_DONE) {
1980 IPW_WARNING("FW_INITIALIZATION_DONE\n");
1981 handled |= IPW_INTA_BIT_FW_INITIALIZATION_DONE;
1982 }
1983
1984 if (inta & IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE) {
1985 IPW_WARNING("PHY_OFF_DONE\n");
1986 handled |= IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE;
1987 }
1988
1989 if (inta & IPW_INTA_BIT_RF_KILL_DONE) {
1990 IPW_DEBUG_RF_KILL("RF_KILL_DONE\n");
1991 priv->status |= STATUS_RF_KILL_HW;
1992 wake_up_interruptible(&priv->wait_command_queue);
1993 priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING);
1994 cancel_delayed_work(&priv->request_scan);
1995 schedule_work(&priv->link_down);
1996 queue_delayed_work(priv->workqueue, &priv->rf_kill, 2 * HZ);
1997 handled |= IPW_INTA_BIT_RF_KILL_DONE;
1998 }
1999
2000 if (inta & IPW_INTA_BIT_FATAL_ERROR) {
2001 IPW_WARNING("Firmware error detected. Restarting.\n");
2002 if (priv->error) {
2003 IPW_DEBUG_FW("Sysfs 'error' log already exists.\n");
2004 if (ipw_debug_level & IPW_DL_FW_ERRORS) {
2005 struct ipw_fw_error *error =
2006 ipw_alloc_error_log(priv);
2007 ipw_dump_error_log(priv, error);
2008 kfree(error);
2009 }
2010 } else {
2011 priv->error = ipw_alloc_error_log(priv);
2012 if (priv->error)
2013 IPW_DEBUG_FW("Sysfs 'error' log captured.\n");
2014 else
2015 IPW_DEBUG_FW("Error allocating sysfs 'error' "
2016 "log.\n");
2017 if (ipw_debug_level & IPW_DL_FW_ERRORS)
2018 ipw_dump_error_log(priv, priv->error);
2019 }
2020
2021 /* XXX: If hardware encryption is for WPA/WPA2,
2022 * we have to notify the supplicant. */
2023 if (priv->ieee->sec.encrypt) {
2024 priv->status &= ~STATUS_ASSOCIATED;
2025 notify_wx_assoc_event(priv);
2026 }
2027
2028 /* Keep the restart process from trying to send host
2029 * commands by clearing the INIT status bit */
2030 priv->status &= ~STATUS_INIT;
2031
2032 /* Cancel currently queued command. */
2033 priv->status &= ~STATUS_HCMD_ACTIVE;
2034 wake_up_interruptible(&priv->wait_command_queue);
2035
2036 queue_work(priv->workqueue, &priv->adapter_restart);
2037 handled |= IPW_INTA_BIT_FATAL_ERROR;
2038 }
2039
2040 if (inta & IPW_INTA_BIT_PARITY_ERROR) {
2041 IPW_ERROR("Parity error\n");
2042 handled |= IPW_INTA_BIT_PARITY_ERROR;
2043 }
2044
2045 if (handled != inta) {
2046 IPW_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
2047 }
2048
2049 spin_unlock_irqrestore(&priv->lock, flags);
2050
2051 /* enable all interrupts */
2052 ipw_enable_interrupts(priv);
2053 }
2054
2055 #define IPW_CMD(x) case IPW_CMD_ ## x : return #x
2056 static char *get_cmd_string(u8 cmd)
2057 {
2058 switch (cmd) {
2059 IPW_CMD(HOST_COMPLETE);
2060 IPW_CMD(POWER_DOWN);
2061 IPW_CMD(SYSTEM_CONFIG);
2062 IPW_CMD(MULTICAST_ADDRESS);
2063 IPW_CMD(SSID);
2064 IPW_CMD(ADAPTER_ADDRESS);
2065 IPW_CMD(PORT_TYPE);
2066 IPW_CMD(RTS_THRESHOLD);
2067 IPW_CMD(FRAG_THRESHOLD);
2068 IPW_CMD(POWER_MODE);
2069 IPW_CMD(WEP_KEY);
2070 IPW_CMD(TGI_TX_KEY);
2071 IPW_CMD(SCAN_REQUEST);
2072 IPW_CMD(SCAN_REQUEST_EXT);
2073 IPW_CMD(ASSOCIATE);
2074 IPW_CMD(SUPPORTED_RATES);
2075 IPW_CMD(SCAN_ABORT);
2076 IPW_CMD(TX_FLUSH);
2077 IPW_CMD(QOS_PARAMETERS);
2078 IPW_CMD(DINO_CONFIG);
2079 IPW_CMD(RSN_CAPABILITIES);
2080 IPW_CMD(RX_KEY);
2081 IPW_CMD(CARD_DISABLE);
2082 IPW_CMD(SEED_NUMBER);
2083 IPW_CMD(TX_POWER);
2084 IPW_CMD(COUNTRY_INFO);
2085 IPW_CMD(AIRONET_INFO);
2086 IPW_CMD(AP_TX_POWER);
2087 IPW_CMD(CCKM_INFO);
2088 IPW_CMD(CCX_VER_INFO);
2089 IPW_CMD(SET_CALIBRATION);
2090 IPW_CMD(SENSITIVITY_CALIB);
2091 IPW_CMD(RETRY_LIMIT);
2092 IPW_CMD(IPW_PRE_POWER_DOWN);
2093 IPW_CMD(VAP_BEACON_TEMPLATE);
2094 IPW_CMD(VAP_DTIM_PERIOD);
2095 IPW_CMD(EXT_SUPPORTED_RATES);
2096 IPW_CMD(VAP_LOCAL_TX_PWR_CONSTRAINT);
2097 IPW_CMD(VAP_QUIET_INTERVALS);
2098 IPW_CMD(VAP_CHANNEL_SWITCH);
2099 IPW_CMD(VAP_MANDATORY_CHANNELS);
2100 IPW_CMD(VAP_CELL_PWR_LIMIT);
2101 IPW_CMD(VAP_CF_PARAM_SET);
2102 IPW_CMD(VAP_SET_BEACONING_STATE);
2103 IPW_CMD(MEASUREMENT);
2104 IPW_CMD(POWER_CAPABILITY);
2105 IPW_CMD(SUPPORTED_CHANNELS);
2106 IPW_CMD(TPC_REPORT);
2107 IPW_CMD(WME_INFO);
2108 IPW_CMD(PRODUCTION_COMMAND);
2109 default:
2110 return "UNKNOWN";
2111 }
2112 }
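
/*
 * Illustrative sketch (not part of the driver): the IPW_CMD() macro above
 * relies on two preprocessor features -- token pasting (##) to build the
 * case label and stringification (#) to build the returned string.  A
 * standalone version of the same pattern, with made-up MY_CMD_* constants,
 * looks like this:
 */
#define MY_CMD_HELLO 1
#define MY_CMD_WORLD 2

#define MY_CMD(x) case MY_CMD_ ## x: return #x

const char *my_cmd_string(int cmd)
{
	switch (cmd) {
		MY_CMD(HELLO);
		MY_CMD(WORLD);
	default:
		return "UNKNOWN";
	}
}

/* my_cmd_string(MY_CMD_HELLO) returns "HELLO"; unknown values return "UNKNOWN". */
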
2113
2114 #define HOST_COMPLETE_TIMEOUT HZ
2115
2116 static int __ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
2117 {
2118 int rc = 0;
2119 unsigned long flags;
2120
2121 spin_lock_irqsave(&priv->lock, flags);
2122 if (priv->status & STATUS_HCMD_ACTIVE) {
2123 IPW_ERROR("Failed to send %s: Already sending a command.\n",
2124 get_cmd_string(cmd->cmd));
2125 spin_unlock_irqrestore(&priv->lock, flags);
2126 return -EAGAIN;
2127 }
2128
2129 priv->status |= STATUS_HCMD_ACTIVE;
2130
2131 if (priv->cmdlog) {
2132 priv->cmdlog[priv->cmdlog_pos].jiffies = jiffies;
2133 priv->cmdlog[priv->cmdlog_pos].cmd.cmd = cmd->cmd;
2134 priv->cmdlog[priv->cmdlog_pos].cmd.len = cmd->len;
2135 memcpy(priv->cmdlog[priv->cmdlog_pos].cmd.param, cmd->param,
2136 cmd->len);
2137 priv->cmdlog[priv->cmdlog_pos].retcode = -1;
2138 }
2139
2140 IPW_DEBUG_HC("%s command (#%d) %d bytes: 0x%08X\n",
2141 get_cmd_string(cmd->cmd), cmd->cmd, cmd->len,
2142 priv->status);
2143
2144 #ifndef DEBUG_CMD_WEP_KEY
2145 if (cmd->cmd == IPW_CMD_WEP_KEY)
2146 IPW_DEBUG_HC("WEP_KEY command masked out for security.\n");
2147 else
2148 #endif
2149 printk_buf(IPW_DL_HOST_COMMAND, (u8 *) cmd->param, cmd->len);
2150
2151 rc = ipw_queue_tx_hcmd(priv, cmd->cmd, cmd->param, cmd->len, 0);
2152 if (rc) {
2153 priv->status &= ~STATUS_HCMD_ACTIVE;
2154 IPW_ERROR("Failed to send %s: Reason %d\n",
2155 get_cmd_string(cmd->cmd), rc);
2156 spin_unlock_irqrestore(&priv->lock, flags);
2157 goto exit;
2158 }
2159 spin_unlock_irqrestore(&priv->lock, flags);
2160
2161 rc = wait_event_interruptible_timeout(priv->wait_command_queue,
2162 !(priv->
2163 status & STATUS_HCMD_ACTIVE),
2164 HOST_COMPLETE_TIMEOUT);
2165 if (rc == 0) {
2166 spin_lock_irqsave(&priv->lock, flags);
2167 if (priv->status & STATUS_HCMD_ACTIVE) {
2168 IPW_ERROR("Failed to send %s: Command timed out.\n",
2169 get_cmd_string(cmd->cmd));
2170 priv->status &= ~STATUS_HCMD_ACTIVE;
2171 spin_unlock_irqrestore(&priv->lock, flags);
2172 rc = -EIO;
2173 goto exit;
2174 }
2175 spin_unlock_irqrestore(&priv->lock, flags);
2176 } else
2177 rc = 0;
2178
2179 if (priv->status & STATUS_RF_KILL_HW) {
2180 IPW_ERROR("Failed to send %s: Aborted due to RF kill switch.\n",
2181 get_cmd_string(cmd->cmd));
2182 rc = -EIO;
2183 goto exit;
2184 }
2185
2186 exit:
2187 if (priv->cmdlog) {
2188 priv->cmdlog[priv->cmdlog_pos++].retcode = rc;
2189 priv->cmdlog_pos %= priv->cmdlog_len;
2190 }
2191 return rc;
2192 }
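
/*
 * Illustrative sketch (not part of the driver): __ipw_send_cmd() above
 * records every host command in priv->cmdlog, a fixed-size array used as a
 * circular log (cmdlog_pos advances and wraps with a modulo, so the newest
 * entries overwrite the oldest).  The same bookkeeping in a standalone
 * form, with hypothetical types and an arbitrary size, looks like this:
 */
#include <stddef.h>

#define CMDLOG_LEN 32			/* arbitrary example size */

struct cmdlog_entry {
	unsigned long when;		/* e.g. jiffies in the driver */
	int cmd;
	int retcode;
};

struct cmdlog {
	struct cmdlog_entry entry[CMDLOG_LEN];
	size_t pos;			/* next slot to overwrite */
};

void cmdlog_record(struct cmdlog *log, unsigned long when, int cmd, int retcode)
{
	log->entry[log->pos].when = when;
	log->entry[log->pos].cmd = cmd;
	log->entry[log->pos].retcode = retcode;
	log->pos = (log->pos + 1) % CMDLOG_LEN;	/* wrap; oldest entry is dropped */
}
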
2193
2194 static int ipw_send_cmd_simple(struct ipw_priv *priv, u8 command)
2195 {
2196 struct host_cmd cmd = {
2197 .cmd = command,
2198 };
2199
2200 return __ipw_send_cmd(priv, &cmd);
2201 }
2202
2203 static int ipw_send_cmd_pdu(struct ipw_priv *priv, u8 command, u8 len,
2204 void *data)
2205 {
2206 struct host_cmd cmd = {
2207 .cmd = command,
2208 .len = len,
2209 .param = data,
2210 };
2211
2212 return __ipw_send_cmd(priv, &cmd);
2213 }
2214
2215 static int ipw_send_host_complete(struct ipw_priv *priv)
2216 {
2217 if (!priv) {
2218 IPW_ERROR("Invalid args\n");
2219 return -1;
2220 }
2221
2222 return ipw_send_cmd_simple(priv, IPW_CMD_HOST_COMPLETE);
2223 }
2224
2225 static int ipw_send_system_config(struct ipw_priv *priv)
2226 {
2227 return ipw_send_cmd_pdu(priv, IPW_CMD_SYSTEM_CONFIG,
2228 sizeof(priv->sys_config),
2229 &priv->sys_config);
2230 }
2231
2232 static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len)
2233 {
2234 if (!priv || !ssid) {
2235 IPW_ERROR("Invalid args\n");
2236 return -1;
2237 }
2238
2239 return ipw_send_cmd_pdu(priv, IPW_CMD_SSID, min(len, IW_ESSID_MAX_SIZE),
2240 ssid);
2241 }
2242
2243 static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac)
2244 {
2244 DECLARE_MAC_BUF(macbuf);
2245 if (!priv || !mac) {
2246 IPW_ERROR("Invalid args\n");
2247 return -1;
2248 }
2249
2250 IPW_DEBUG_INFO("%s: Setting MAC to %s\n",
2251 priv->net_dev->name, print_mac(macbuf, mac));
2252
2253 return ipw_send_cmd_pdu(priv, IPW_CMD_ADAPTER_ADDRESS, ETH_ALEN, mac);
2254 }
2255
2256 /*
2257 * NOTE: This must be executed from our workqueue as it results in udelay
2258 * being called, which may corrupt the keyboard if executed on the default
2259 * workqueue.
2260 */
2261 static void ipw_adapter_restart(void *adapter)
2262 {
2263 struct ipw_priv *priv = adapter;
2264
2265 if (priv->status & STATUS_RF_KILL_MASK)
2266 return;
2267
2268 ipw_down(priv);
2269
2270 if (priv->assoc_network &&
2271 (priv->assoc_network->capability & WLAN_CAPABILITY_IBSS))
2272 ipw_remove_current_network(priv);
2273
2274 if (ipw_up(priv)) {
2275 IPW_ERROR("Failed to up device\n");
2276 return;
2277 }
2278 }
2279
2280 static void ipw_bg_adapter_restart(struct work_struct *work)
2281 {
2282 struct ipw_priv *priv =
2283 container_of(work, struct ipw_priv, adapter_restart);
2284 mutex_lock(&priv->mutex);
2285 ipw_adapter_restart(priv);
2286 mutex_unlock(&priv->mutex);
2287 }
2288
2289 #define IPW_SCAN_CHECK_WATCHDOG (5 * HZ)
2290
2291 static void ipw_scan_check(void *data)
2292 {
2293 struct ipw_priv *priv = data;
2294 if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) {
2295 IPW_DEBUG_SCAN("Scan completion watchdog resetting "
2296 "adapter after (%dms).\n",
2297 jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
2298 queue_work(priv->workqueue, &priv->adapter_restart);
2299 }
2300 }
2301
2302 static void ipw_bg_scan_check(struct work_struct *work)
2303 {
2304 struct ipw_priv *priv =
2305 container_of(work, struct ipw_priv, scan_check.work);
2306 mutex_lock(&priv->mutex);
2307 ipw_scan_check(priv);
2308 mutex_unlock(&priv->mutex);
2309 }
2310
2311 static int ipw_send_scan_request_ext(struct ipw_priv *priv,
2312 struct ipw_scan_request_ext *request)
2313 {
2314 return ipw_send_cmd_pdu(priv, IPW_CMD_SCAN_REQUEST_EXT,
2315 sizeof(*request), request);
2316 }
2317
2318 static int ipw_send_scan_abort(struct ipw_priv *priv)
2319 {
2320 if (!priv) {
2321 IPW_ERROR("Invalid args\n");
2322 return -1;
2323 }
2324
2325 return ipw_send_cmd_simple(priv, IPW_CMD_SCAN_ABORT);
2326 }
2327
2328 static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens)
2329 {
2330 struct ipw_sensitivity_calib calib = {
2331 .beacon_rssi_raw = cpu_to_le16(sens),
2332 };
2333
2334 return ipw_send_cmd_pdu(priv, IPW_CMD_SENSITIVITY_CALIB, sizeof(calib),
2335 &calib);
2336 }
2337
2338 static int ipw_send_associate(struct ipw_priv *priv,
2339 struct ipw_associate *associate)
2340 {
2341 struct ipw_associate tmp_associate;
2342
2343 if (!priv || !associate) {
2344 IPW_ERROR("Invalid args\n");
2345 return -1;
2346 }
2347
2348 memcpy(&tmp_associate, associate, sizeof(*associate));
2349 tmp_associate.policy_support =
2350 cpu_to_le16(tmp_associate.policy_support);
2351 tmp_associate.assoc_tsf_msw = cpu_to_le32(tmp_associate.assoc_tsf_msw);
2352 tmp_associate.assoc_tsf_lsw = cpu_to_le32(tmp_associate.assoc_tsf_lsw);
2353 tmp_associate.capability = cpu_to_le16(tmp_associate.capability);
2354 tmp_associate.listen_interval =
2355 cpu_to_le16(tmp_associate.listen_interval);
2356 tmp_associate.beacon_interval =
2357 cpu_to_le16(tmp_associate.beacon_interval);
2358 tmp_associate.atim_window = cpu_to_le16(tmp_associate.atim_window);
2359
2360 return ipw_send_cmd_pdu(priv, IPW_CMD_ASSOCIATE, sizeof(tmp_associate),
2361 &tmp_associate);
2362 }
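
/*
 * Illustrative sketch (not part of the driver): ipw_send_associate() above
 * copies the caller's structure and converts every multi-byte field to
 * little-endian before handing it to the firmware, so the command layout is
 * the same regardless of host byte order.  A standalone analogue, using a
 * made-up structure and the glibc htole16()/htole32() helpers in place of
 * the kernel's cpu_to_le16()/cpu_to_le32(), looks like this:
 */
#include <endian.h>
#include <stdint.h>
#include <string.h>

struct example_assoc {
	uint16_t capability;
	uint16_t listen_interval;
	uint32_t assoc_tsf_msw;
	uint32_t assoc_tsf_lsw;
};

/* Fill 'wire' with a little-endian copy of 'host', ready to send. */
void example_assoc_to_le(struct example_assoc *wire,
			 const struct example_assoc *host)
{
	memcpy(wire, host, sizeof(*wire));
	wire->capability = htole16(host->capability);
	wire->listen_interval = htole16(host->listen_interval);
	wire->assoc_tsf_msw = htole32(host->assoc_tsf_msw);
	wire->assoc_tsf_lsw = htole32(host->assoc_tsf_lsw);
}

/* On little-endian hosts the conversions are no-ops; on big-endian hosts
 * they byte-swap, which is exactly why the driver converts a copy. */
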
2363
2364 static int ipw_send_supported_rates(struct ipw_priv *priv,
2365 struct ipw_supported_rates *rates)
2366 {
2367 if (!priv || !rates) {
2368 IPW_ERROR("Invalid args\n");
2369 return -1;
2370 }
2371
2372 return ipw_send_cmd_pdu(priv, IPW_CMD_SUPPORTED_RATES, sizeof(*rates),
2373 rates);
2374 }
2375
2376 static int ipw_set_random_seed(struct ipw_priv *priv)
2377 {
2378 u32 val;
2379
2380 if (!priv) {
2381 IPW_ERROR("Invalid args\n");
2382 return -1;
2383 }
2384
2385 get_random_bytes(&val, sizeof(val));
2386
2387 return ipw_send_cmd_pdu(priv, IPW_CMD_SEED_NUMBER, sizeof(val), &val);
2388 }
2389
2390 static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off)
2391 {
2392 if (!priv) {
2393 IPW_ERROR("Invalid args\n");
2394 return -1;
2395 }
2396
2397 phy_off = cpu_to_le32(phy_off);
2398 return ipw_send_cmd_pdu(priv, IPW_CMD_CARD_DISABLE, sizeof(phy_off),
2399 &phy_off);
2400 }
2401
2402 static int ipw_send_tx_power(struct ipw_priv *priv, struct ipw_tx_power *power)
2403 {
2404 if (!priv || !power) {
2405 IPW_ERROR("Invalid args\n");
2406 return -1;
2407 }
2408
2409 return ipw_send_cmd_pdu(priv, IPW_CMD_TX_POWER, sizeof(*power), power);
2410 }
2411
2412 static int ipw_set_tx_power(struct ipw_priv *priv)
2413 {
2414 const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
2415 struct ipw_tx_power tx_power;
2416 s8 max_power;
2417 int i;
2418
2419 memset(&tx_power, 0, sizeof(tx_power));
2420
2421 /* configure device for 'G' band */
2422 tx_power.ieee_mode = IPW_G_MODE;
2423 tx_power.num_channels = geo->bg_channels;
2424 for (i = 0; i < geo->bg_channels; i++) {
2425 max_power = geo->bg[i].max_power;
2426 tx_power.channels_tx_power[i].channel_number =
2427 geo->bg[i].channel;
2428 tx_power.channels_tx_power[i].tx_power = max_power ?
2429 min(max_power, priv->tx_power) : priv->tx_power;
2430 }
2431 if (ipw_send_tx_power(priv, &tx_power))
2432 return -EIO;
2433
2434 /* configure device to also handle 'B' band */
2435 tx_power.ieee_mode = IPW_B_MODE;
2436 if (ipw_send_tx_power(priv, &tx_power))
2437 return -EIO;
2438
2439 /* configure device to also handle 'A' band */
2440 if (priv->ieee->abg_true) {
2441 tx_power.ieee_mode = IPW_A_MODE;
2442 tx_power.num_channels = geo->a_channels;
2443 for (i = 0; i < tx_power.num_channels; i++) {
2444 max_power = geo->a[i].max_power;
2445 tx_power.channels_tx_power[i].channel_number =
2446 geo->a[i].channel;
2447 tx_power.channels_tx_power[i].tx_power = max_power ?
2448 min(max_power, priv->tx_power) : priv->tx_power;
2449 }
2450 if (ipw_send_tx_power(priv, &tx_power))
2451 return -EIO;
2452 }
2453 return 0;
2454 }
2455
2456 static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts)
2457 {
2458 struct ipw_rts_threshold rts_threshold = {
2459 .rts_threshold = cpu_to_le16(rts),
2460 };
2461
2462 if (!priv) {
2463 IPW_ERROR("Invalid args\n");
2464 return -1;
2465 }
2466
2467 return ipw_send_cmd_pdu(priv, IPW_CMD_RTS_THRESHOLD,
2468 sizeof(rts_threshold), &rts_threshold);
2469 }
2470
2471 static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag)
2472 {
2473 struct ipw_frag_threshold frag_threshold = {
2474 .frag_threshold = cpu_to_le16(frag),
2475 };
2476
2477 if (!priv) {
2478 IPW_ERROR("Invalid args\n");
2479 return -1;
2480 }
2481
2482 return ipw_send_cmd_pdu(priv, IPW_CMD_FRAG_THRESHOLD,
2483 sizeof(frag_threshold), &frag_threshold);
2484 }
2485
2486 static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode)
2487 {
2488 u32 param;
2489
2490 if (!priv) {
2491 IPW_ERROR("Invalid args\n");
2492 return -1;
2493 }
2494
2495 /* If on battery, set to power index 3; if on AC, set to CAM;
2496 * otherwise use the user-supplied level */
2497 switch (mode) {
2498 case IPW_POWER_BATTERY:
2499 param = IPW_POWER_INDEX_3;
2500 break;
2501 case IPW_POWER_AC:
2502 param = IPW_POWER_MODE_CAM;
2503 break;
2504 default:
2505 param = mode;
2506 break;
2507 }
2508
2509 param = cpu_to_le32(param);
2510 return ipw_send_cmd_pdu(priv, IPW_CMD_POWER_MODE, sizeof(param),
2511 &param);
2512 }
2513
2514 static int ipw_send_retry_limit(struct ipw_priv *priv, u8 slimit, u8 llimit)
2515 {
2516 struct ipw_retry_limit retry_limit = {
2517 .short_retry_limit = slimit,
2518 .long_retry_limit = llimit
2519 };
2520
2521 if (!priv) {
2522 IPW_ERROR("Invalid args\n");
2523 return -1;
2524 }
2525
2526 return ipw_send_cmd_pdu(priv, IPW_CMD_RETRY_LIMIT, sizeof(retry_limit),
2527 &retry_limit);
2528 }
2529
2530 /*
2531 * The IPW device contains a Microwire compatible EEPROM that stores
2532 * various data like the MAC address. Usually the firmware has exclusive
2533 * access to the eeprom, but during device initialization (before the
2534 * device driver has sent the HostComplete command to the firmware) the
2535 * device driver has read access to the EEPROM by way of indirect addressing
2536 * through a couple of memory mapped registers.
2537 *
2538 * The following is a simplified implementation for pulling data out of
2539 * the eeprom, along with some helper functions to find information in
2540 * the per-device private data's copy of the eeprom.
2541 *
2542 * NOTE: To better understand how these functions work (i.e. what is a chip
2543 * select and why do we have to keep driving the eeprom clock?), read
2544 * just about any data sheet for a Microwire compatible EEPROM.
2545 */
2546
2547 /* write a 32 bit value into the indirect accessor register */
2548 static inline void eeprom_write_reg(struct ipw_priv *p, u32 data)
2549 {
2550 ipw_write_reg32(p, FW_MEM_REG_EEPROM_ACCESS, data);
2551
2552 /* the eeprom requires some time to complete the operation */
2553 udelay(p->eeprom_delay);
2554
2555 return;
2556 }
2557
2558 /* perform a chip select operation */
2559 static void eeprom_cs(struct ipw_priv *priv)
2560 {
2561 eeprom_write_reg(priv, 0);
2562 eeprom_write_reg(priv, EEPROM_BIT_CS);
2563 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2564 eeprom_write_reg(priv, EEPROM_BIT_CS);
2565 }
2566
2567 /* release the chip select (deselect the eeprom) */
2568 static void eeprom_disable_cs(struct ipw_priv *priv)
2569 {
2570 eeprom_write_reg(priv, EEPROM_BIT_CS);
2571 eeprom_write_reg(priv, 0);
2572 eeprom_write_reg(priv, EEPROM_BIT_SK);
2573 }
2574
2575 /* push a single bit down to the eeprom */
2576 static inline void eeprom_write_bit(struct ipw_priv *p, u8 bit)
2577 {
2578 int d = (bit ? EEPROM_BIT_DI : 0);
2579 eeprom_write_reg(p, EEPROM_BIT_CS | d);
2580 eeprom_write_reg(p, EEPROM_BIT_CS | d | EEPROM_BIT_SK);
2581 }
2582
2583 /* push an opcode followed by an address down to the eeprom */
2584 static void eeprom_op(struct ipw_priv *priv, u8 op, u8 addr)
2585 {
2586 int i;
2587
2588 eeprom_cs(priv);
2589 eeprom_write_bit(priv, 1);
2590 eeprom_write_bit(priv, op & 2);
2591 eeprom_write_bit(priv, op & 1);
2592 for (i = 7; i >= 0; i--) {
2593 eeprom_write_bit(priv, addr & (1 << i));
2594 }
2595 }
2596
2597 /* pull 16 bits off the eeprom, one bit at a time */
2598 static u16 eeprom_read_u16(struct ipw_priv *priv, u8 addr)
2599 {
2600 int i;
2601 u16 r = 0;
2602
2603 /* Send READ Opcode */
2604 eeprom_op(priv, EEPROM_CMD_READ, addr);
2605
2606 /* Send dummy bit */
2607 eeprom_write_reg(priv, EEPROM_BIT_CS);
2608
2609 /* Read the 16-bit word off the eeprom one bit at a time */
2610 for (i = 0; i < 16; i++) {
2611 u32 data = 0;
2612 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2613 eeprom_write_reg(priv, EEPROM_BIT_CS);
2614 data = ipw_read_reg32(priv, FW_MEM_REG_EEPROM_ACCESS);
2615 r = (r << 1) | ((data & EEPROM_BIT_DO) ? 1 : 0);
2616 }
2617
2618 /* Send another dummy bit */
2619 eeprom_write_reg(priv, 0);
2620 eeprom_disable_cs(priv);
2621
2622 return r;
2623 }
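
/*
 * Illustrative sketch (not part of the driver): the Microwire transaction
 * implemented by eeprom_op()/eeprom_read_u16() above is just a start bit,
 * two opcode bits and eight address bits clocked out MSB first, followed by
 * sixteen data bits sampled MSB first from the DO line.  A standalone model
 * of that bit stream, with caller-supplied callbacks standing in for the
 * register accesses, looks like this (all names are made up):
 */
#include <stdint.h>

/* Emit the 11 command bits (start, opcode, address) MSB first. */
void microwire_command_bits(uint8_t op, uint8_t addr,
			    void (*emit_bit)(int bit, void *ctx), void *ctx)
{
	int i;

	emit_bit(1, ctx);		/* start bit */
	emit_bit((op >> 1) & 1, ctx);	/* opcode, high bit first */
	emit_bit(op & 1, ctx);
	for (i = 7; i >= 0; i--)	/* 8 address bits, MSB first */
		emit_bit((addr >> i) & 1, ctx);
}

/* Assemble a 16-bit word from bits sampled MSB first on the DO line. */
uint16_t microwire_collect_word(int (*sample_bit)(void *ctx), void *ctx)
{
	uint16_t word = 0;
	int i;

	for (i = 0; i < 16; i++)
		word = (word << 1) | (sample_bit(ctx) ? 1 : 0);
	return word;
}
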
2624
2625 /* helper function for pulling the mac address out of the private */
2626 /* data's copy of the eeprom data */
2627 static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac)
2628 {
2629 memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], 6);
2630 }
2631
2632 /*
2633 * Either the device driver (i.e. the host) or the firmware can
2634 * load eeprom data into the designated region in SRAM. If neither
2635 * happens then the FW will shutdown with a fatal error.
2636 *
2637 * In order to signal the FW to load the EEPROM, the EEPROM_LOAD_DISABLE
2638 * bit in the designated region of shared SRAM needs to be non-zero.
2639 */
2640 static void ipw_eeprom_init_sram(struct ipw_priv *priv)
2641 {
2642 int i;
2643 u16 *eeprom = (u16 *) priv->eeprom;
2644
2645 IPW_DEBUG_TRACE(">>\n");
2646
2647 /* read entire contents of eeprom into private buffer */
2648 for (i = 0; i < 128; i++)
2649 eeprom[i] = le16_to_cpu(eeprom_read_u16(priv, (u8) i));
2650
2651 /*
2652 If the data looks correct, then copy it to our private
2653 copy. Otherwise let the firmware know to perform the operation
2654 on its own.
2655 */
2656 if (priv->eeprom[EEPROM_VERSION] != 0) {
2657 IPW_DEBUG_INFO("Writing EEPROM data into SRAM\n");
2658
2659 /* write the eeprom data to sram */
2660 for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
2661 ipw_write8(priv, IPW_EEPROM_DATA + i, priv->eeprom[i]);
2662
2663 /* Do not load eeprom data on fatal error or suspend */
2664 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
2665 } else {
2666 IPW_DEBUG_INFO("Enabling FW initialization of SRAM\n");
2667
2668 /* Load eeprom data on fatal error or suspend */
2669 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 1);
2670 }
2671
2672 IPW_DEBUG_TRACE("<<\n");
2673 }
2674
2675 static void ipw_zero_memory(struct ipw_priv *priv, u32 start, u32 count)
2676 {
2677 count >>= 2;
2678 if (!count)
2679 return;
2680 _ipw_write32(priv, IPW_AUTOINC_ADDR, start);
2681 while (count--)
2682 _ipw_write32(priv, IPW_AUTOINC_DATA, 0);
2683 }
2684
2685 static inline void ipw_fw_dma_reset_command_blocks(struct ipw_priv *priv)
2686 {
2687 ipw_zero_memory(priv, IPW_SHARED_SRAM_DMA_CONTROL,
2688 CB_NUMBER_OF_ELEMENTS_SMALL *
2689 sizeof(struct command_block));
2690 }
2691
2692 static int ipw_fw_dma_enable(struct ipw_priv *priv)
2693 { /* start dma engine but no transfers yet */
2694
2695 IPW_DEBUG_FW(">> : \n");
2696
2697 /* Start the dma */
2698 ipw_fw_dma_reset_command_blocks(priv);
2699
2700 /* Write CB base address */
2701 ipw_write_reg32(priv, IPW_DMA_I_CB_BASE, IPW_SHARED_SRAM_DMA_CONTROL);
2702
2703 IPW_DEBUG_FW("<< : \n");
2704 return 0;
2705 }
2706
2707 static void ipw_fw_dma_abort(struct ipw_priv *priv)
2708 {
2709 u32 control = 0;
2710
2711 IPW_DEBUG_FW(">> :\n");
2712
2713 /* set the Stop and Abort bit */
2714 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_STOP_AND_ABORT;
2715 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2716 priv->sram_desc.last_cb_index = 0;
2717
2718 IPW_DEBUG_FW("<< \n");
2719 }
2720
2721 static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index,
2722 struct command_block *cb)
2723 {
2724 u32 address =
2725 IPW_SHARED_SRAM_DMA_CONTROL +
2726 (sizeof(struct command_block) * index);
2727 IPW_DEBUG_FW(">> :\n");
2728
2729 ipw_write_indirect(priv, address, (u8 *) cb,
2730 (int)sizeof(struct command_block));
2731
2732 IPW_DEBUG_FW("<< :\n");
2733 return 0;
2734
2735 }
2736
2737 static int ipw_fw_dma_kick(struct ipw_priv *priv)
2738 {
2739 u32 control = 0;
2740 u32 index = 0;
2741
2742 IPW_DEBUG_FW(">> :\n");
2743
2744 for (index = 0; index < priv->sram_desc.last_cb_index; index++)
2745 ipw_fw_dma_write_command_block(priv, index,
2746 &priv->sram_desc.cb_list[index]);
2747
2748 /* Enable the DMA in the CSR register */
2749 ipw_clear_bit(priv, IPW_RESET_REG,
2750 IPW_RESET_REG_MASTER_DISABLED |
2751 IPW_RESET_REG_STOP_MASTER);
2752
2753 /* Set the Start bit. */
2754 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_START;
2755 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2756
2757 IPW_DEBUG_FW("<< :\n");
2758 return 0;
2759 }
2760
2761 static void ipw_fw_dma_dump_command_block(struct ipw_priv *priv)
2762 {
2763 u32 address;
2764 u32 register_value = 0;
2765 u32 cb_fields_address = 0;
2766
2767 IPW_DEBUG_FW(">> :\n");
2768 address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2769 IPW_DEBUG_FW_INFO("Current CB is 0x%x \n", address);
2770
2771 /* Read the DMA Controller register */
2772 register_value = ipw_read_reg32(priv, IPW_DMA_I_DMA_CONTROL);
2773 IPW_DEBUG_FW_INFO("IPW_DMA_I_DMA_CONTROL is 0x%x \n", register_value);
2774
2775 /* Print the CB values */
2776 cb_fields_address = address;
2777 register_value = ipw_read_reg32(priv, cb_fields_address);
2778 IPW_DEBUG_FW_INFO("Current CB ControlField is 0x%x \n", register_value);
2779
2780 cb_fields_address += sizeof(u32);
2781 register_value = ipw_read_reg32(priv, cb_fields_address);
2782 IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x \n", register_value);
2783
2784 cb_fields_address += sizeof(u32);
2785 register_value = ipw_read_reg32(priv, cb_fields_address);
2786 IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x \n",
2787 register_value);
2788
2789 cb_fields_address += sizeof(u32);
2790 register_value = ipw_read_reg32(priv, cb_fields_address);
2791 IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x \n", register_value);
2792
2793 IPW_DEBUG_FW("<< :\n");
2794 }
2795
2796 static int ipw_fw_dma_command_block_index(struct ipw_priv *priv)
2797 {
2798 u32 current_cb_address = 0;
2799 u32 current_cb_index = 0;
2800
2801 IPW_DEBUG_FW(">> :\n");
2802 current_cb_address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2803
2804 current_cb_index = (current_cb_address - IPW_SHARED_SRAM_DMA_CONTROL) /
2805 sizeof(struct command_block);
2806
2807 IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X \n",
2808 current_cb_index, current_cb_address);
2809
2810 IPW_DEBUG_FW("<< :\n");
2811 return current_cb_index;
2812
2813 }
2814
2815 static int ipw_fw_dma_add_command_block(struct ipw_priv *priv,
2816 u32 src_address,
2817 u32 dest_address,
2818 u32 length,
2819 int interrupt_enabled, int is_last)
2820 {
2821
2822 u32 control = CB_VALID | CB_SRC_LE | CB_DEST_LE | CB_SRC_AUTOINC |
2823 CB_SRC_IO_GATED | CB_DEST_AUTOINC | CB_SRC_SIZE_LONG |
2824 CB_DEST_SIZE_LONG;
2825 struct command_block *cb;
2826 u32 last_cb_element = 0;
2827
2828 IPW_DEBUG_FW_INFO("src_address=0x%x dest_address=0x%x length=0x%x\n",
2829 src_address, dest_address, length);
2830
2831 if (priv->sram_desc.last_cb_index >= CB_NUMBER_OF_ELEMENTS_SMALL)
2832 return -1;
2833
2834 last_cb_element = priv->sram_desc.last_cb_index;
2835 cb = &priv->sram_desc.cb_list[last_cb_element];
2836 priv->sram_desc.last_cb_index++;
2837
2838 /* Calculate the new CB control word */
2839 if (interrupt_enabled)
2840 control |= CB_INT_ENABLED;
2841
2842 if (is_last)
2843 control |= CB_LAST_VALID;
2844
2845 control |= length;
2846
2847 /* Calculate the CB Element's checksum value */
2848 cb->status = control ^ src_address ^ dest_address;
2849
2850 /* Copy the Source and Destination addresses */
2851 cb->dest_addr = dest_address;
2852 cb->source_addr = src_address;
2853
2854 /* Copy the Control Word last */
2855 cb->control = control;
2856
2857 return 0;
2858 }
2859
2860 static int ipw_fw_dma_add_buffer(struct ipw_priv *priv,
2861 u32 src_phys, u32 dest_address, u32 length)
2862 {
2863 u32 bytes_left = length;
2864 u32 src_offset = 0;
2865 u32 dest_offset = 0;
2866 int status = 0;
2867 IPW_DEBUG_FW(">> \n");
2868 IPW_DEBUG_FW_INFO("src_phys=0x%x dest_address=0x%x length=0x%x\n",
2869 src_phys, dest_address, length);
2870 while (bytes_left > CB_MAX_LENGTH) {
2871 status = ipw_fw_dma_add_command_block(priv,
2872 src_phys + src_offset,
2873 dest_address +
2874 dest_offset,
2875 CB_MAX_LENGTH, 0, 0);
2876 if (status) {
2877 IPW_DEBUG_FW_INFO(": Failed\n");
2878 return -1;
2879 } else
2880 IPW_DEBUG_FW_INFO(": Added new cb\n");
2881
2882 src_offset += CB_MAX_LENGTH;
2883 dest_offset += CB_MAX_LENGTH;
2884 bytes_left -= CB_MAX_LENGTH;
2885 }
2886
2887 /* add the buffer tail */
2888 if (bytes_left > 0) {
2889 status =
2890 ipw_fw_dma_add_command_block(priv, src_phys + src_offset,
2891 dest_address + dest_offset,
2892 bytes_left, 0, 0);
2893 if (status) {
2894 IPW_DEBUG_FW_INFO(": Failed on the buffer tail\n");
2895 return -1;
2896 } else
2897 IPW_DEBUG_FW_INFO
2898 (": Adding new cb - the buffer tail\n");
2899 }
2900
2901 IPW_DEBUG_FW("<< \n");
2902 return 0;
2903 }
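
/*
 * Illustrative sketch (not part of the driver): ipw_fw_dma_add_buffer()
 * above splits one large copy into command blocks of at most CB_MAX_LENGTH
 * bytes plus a shorter tail.  The same chunking logic in standalone form,
 * with a caller-supplied callback standing in for
 * ipw_fw_dma_add_command_block(), looks like this:
 */
#include <stdint.h>

/* Returns 0 on success, or the callback's non-zero error code. */
int example_add_chunked(uint32_t src, uint32_t dest, uint32_t length,
			uint32_t max_chunk,
			int (*add_block)(uint32_t src, uint32_t dest,
					 uint32_t len, void *ctx),
			void *ctx)
{
	uint32_t offset = 0;
	int rc;

	while (length - offset > max_chunk) {	/* full-size chunks */
		rc = add_block(src + offset, dest + offset, max_chunk, ctx);
		if (rc)
			return rc;
		offset += max_chunk;
	}

	if (length - offset > 0) {		/* the buffer tail */
		rc = add_block(src + offset, dest + offset,
			       length - offset, ctx);
		if (rc)
			return rc;
	}
	return 0;
}
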
2904
2905 static int ipw_fw_dma_wait(struct ipw_priv *priv)
2906 {
2907 u32 current_index = 0, previous_index;
2908 u32 watchdog = 0;
2909
2910 IPW_DEBUG_FW(">> : \n");
2911
2912 current_index = ipw_fw_dma_command_block_index(priv);
2913 IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%08X\n",
2914 (int)priv->sram_desc.last_cb_index);
2915
2916 while (current_index < priv->sram_desc.last_cb_index) {
2917 udelay(50);
2918 previous_index = current_index;
2919 current_index = ipw_fw_dma_command_block_index(priv);
2920
2921 if (previous_index < current_index) {
2922 watchdog = 0;
2923 continue;
2924 }
2925 if (++watchdog > 400) {
2926 IPW_DEBUG_FW_INFO("Timeout\n");
2927 ipw_fw_dma_dump_command_block(priv);
2928 ipw_fw_dma_abort(priv);
2929 return -1;
2930 }
2931 }
2932
2933 ipw_fw_dma_abort(priv);
2934
2935 /*Disable the DMA in the CSR register */
2936 ipw_set_bit(priv, IPW_RESET_REG,
2937 IPW_RESET_REG_MASTER_DISABLED | IPW_RESET_REG_STOP_MASTER);
2938
2939 IPW_DEBUG_FW("<< dmaWaitSync \n");
2940 return 0;
2941 }
2942
2943 static void ipw_remove_current_network(struct ipw_priv *priv)
2944 {
2945 struct list_head *element, *safe;
2946 struct ieee80211_network *network = NULL;
2947 unsigned long flags;
2948
2949 spin_lock_irqsave(&priv->ieee->lock, flags);
2950 list_for_each_safe(element, safe, &priv->ieee->network_list) {
2951 network = list_entry(element, struct ieee80211_network, list);
2952 if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
2953 list_del(element);
2954 list_add_tail(&network->list,
2955 &priv->ieee->network_free_list);
2956 }
2957 }
2958 spin_unlock_irqrestore(&priv->ieee->lock, flags);
2959 }
2960
2961 /**
2962 * Check that card is still alive.
2963 * Reads debug register from domain0.
2964 * If card is present, pre-defined value should
2965 * be found there.
2966 *
2967 * @param priv
2968 * @return 1 if card is present, 0 otherwise
2969 */
2970 static inline int ipw_alive(struct ipw_priv *priv)
2971 {
2972 return ipw_read32(priv, 0x90) == 0xd55555d5;
2973 }
2974
2975 /* timeout in msec, attempted in 10-msec quanta */
2976 static int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask,
2977 int timeout)
2978 {
2979 int i = 0;
2980
2981 do {
2982 if ((ipw_read32(priv, addr) & mask) == mask)
2983 return i;
2984 mdelay(10);
2985 i += 10;
2986 } while (i < timeout);
2987
2988 return -ETIME;
2989 }
2990
2991 /* These functions load the firmware and microcode for the operation of
2992 * the ipw hardware.  They assume the buffer has all the bits for the
2993 * image and that the caller is handling the memory allocation and clean up.
2994 */
2995
2996 static int ipw_stop_master(struct ipw_priv *priv)
2997 {
2998 int rc;
2999
3000 IPW_DEBUG_TRACE(">> \n");
3001 /* stop master. typical delay - 0 */
3002 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
3003
3004 /* timeout is in msec, polled in 10-msec quanta */
3005 rc = ipw_poll_bit(priv, IPW_RESET_REG,
3006 IPW_RESET_REG_MASTER_DISABLED, 100);
3007 if (rc < 0) {
3008 IPW_ERROR("wait for stop master failed after 100ms\n");
3009 return -1;
3010 }
3011
3012 IPW_DEBUG_INFO("stop master %dms\n", rc);
3013
3014 return rc;
3015 }
3016
3017 static void ipw_arc_release(struct ipw_priv *priv)
3018 {
3019 IPW_DEBUG_TRACE(">> \n");
3020 mdelay(5);
3021
3022 ipw_clear_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
3023
3024 /* no one knows timing, for safety add some delay */
3025 mdelay(5);
3026 }
3027
3028 struct fw_chunk {
3029 u32 address;
3030 u32 length;
3031 };
3032
3033 static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
3034 {
3035 int rc = 0, i, addr;
3036 u8 cr = 0;
3037 u16 *image;
3038
3039 image = (u16 *) data;
3040
3041 IPW_DEBUG_TRACE(">> \n");
3042
3043 rc = ipw_stop_master(priv);
3044
3045 if (rc < 0)
3046 return rc;
3047
3048 for (addr = IPW_SHARED_LOWER_BOUND;
3049 addr < IPW_REGISTER_DOMAIN1_END; addr += 4) {
3050 ipw_write32(priv, addr, 0);
3051 }
3052
3053 /* no ucode (yet) */
3054 memset(&priv->dino_alive, 0, sizeof(priv->dino_alive));
3055 /* destroy DMA queues */
3056 /* reset sequence */
3057
3058 ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_ON);
3059 ipw_arc_release(priv);
3060 ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_OFF);
3061 mdelay(1);
3062
3063 /* reset PHY */
3064 ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, IPW_BASEBAND_POWER_DOWN);
3065 mdelay(1);
3066
3067 ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, 0);
3068 mdelay(1);
3069
3070 /* enable ucode store */
3071 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0x0);
3072 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_CS);
3073 mdelay(1);
3074
3075 /* write ucode */
3076 /**
3077 * @bug
3078 * Do NOT set indirect address register once and then
3079 * store data to indirect data register in the loop.
3080 * It seems very reasonable, but in this case DINO does not
3081 * accept the ucode. It is essential to set the address each time.
3082 */
3083 /* load new ipw uCode */
3084 for (i = 0; i < len / 2; i++)
3085 ipw_write_reg16(priv, IPW_BASEBAND_CONTROL_STORE,
3086 cpu_to_le16(image[i]));
3087
3088 /* enable DINO */
3089 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
3090 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_SYSTEM);
3091
3092 /* this is where the igx / win driver deviates from the VAP driver. */
3093
3094 /* wait for alive response */
3095 for (i = 0; i < 100; i++) {
3096 /* poll for incoming data */
3097 cr = ipw_read_reg8(priv, IPW_BASEBAND_CONTROL_STATUS);
3098 if (cr & DINO_RXFIFO_DATA)
3099 break;
3100 mdelay(1);
3101 }
3102
3103 if (cr & DINO_RXFIFO_DATA) {
3104 /* alive_command_responce size is NOT a multiple of 4 */
3105 u32 response_buffer[(sizeof(priv->dino_alive) + 3) / 4];
3106
3107 for (i = 0; i < ARRAY_SIZE(response_buffer); i++)
3108 response_buffer[i] =
3109 le32_to_cpu(ipw_read_reg32(priv,
3110 IPW_BASEBAND_RX_FIFO_READ));
3111 memcpy(&priv->dino_alive, response_buffer,
3112 sizeof(priv->dino_alive));
3113 if (priv->dino_alive.alive_command == 1
3114 && priv->dino_alive.ucode_valid == 1) {
3115 rc = 0;
3116 IPW_DEBUG_INFO
3117 ("Microcode OK, rev. %d (0x%x) dev. %d (0x%x) "
3118 "of %02d/%02d/%02d %02d:%02d\n",
3119 priv->dino_alive.software_revision,
3120 priv->dino_alive.software_revision,
3121 priv->dino_alive.device_identifier,
3122 priv->dino_alive.device_identifier,
3123 priv->dino_alive.time_stamp[0],
3124 priv->dino_alive.time_stamp[1],
3125 priv->dino_alive.time_stamp[2],
3126 priv->dino_alive.time_stamp[3],
3127 priv->dino_alive.time_stamp[4]);
3128 } else {
3129 IPW_DEBUG_INFO("Microcode is not alive\n");
3130 rc = -EINVAL;
3131 }
3132 } else {
3133 IPW_DEBUG_INFO("No alive response from DINO\n");
3134 rc = -ETIME;
3135 }
3136
3137 /* disable DINO, otherwise for some reason the
3138 firmware has problems getting the alive response. */
3139 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
3140
3141 return rc;
3142 }
3143
3144 static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len)
3145 {
3146 int rc = -1;
3147 int offset = 0;
3148 struct fw_chunk *chunk;
3149 dma_addr_t shared_phys;
3150 u8 *shared_virt;
3151
3152 IPW_DEBUG_TRACE(">> : \n");
3153 shared_virt = pci_alloc_consistent(priv->pci_dev, len, &shared_phys);
3154
3155 if (!shared_virt)
3156 return -ENOMEM;
3157
3158 memmove(shared_virt, data, len);
3159
3160 /* Start the Dma */
3161 rc = ipw_fw_dma_enable(priv);
3162
3163 if (priv->sram_desc.last_cb_index > 0) {
3164 /* the DMA is already ready; this would be a bug. */
3165 BUG();
3166 goto out;
3167 }
3168
3169 do {
3170 chunk = (struct fw_chunk *)(data + offset);
3171 offset += sizeof(struct fw_chunk);
3172 /* build DMA packet and queue up for sending */
3173 /* dma to chunk->address, the chunk->length bytes from data +
3174 * offset */
3175 /* Dma loading */
3176 rc = ipw_fw_dma_add_buffer(priv, shared_phys + offset,
3177 le32_to_cpu(chunk->address),
3178 le32_to_cpu(chunk->length));
3179 if (rc) {
3180 IPW_DEBUG_INFO("dmaAddBuffer Failed\n");
3181 goto out;
3182 }
3183
3184 offset += le32_to_cpu(chunk->length);
3185 } while (offset < len);
3186
3187 /* Run the DMA and wait for the answer */
3188 rc = ipw_fw_dma_kick(priv);
3189 if (rc) {
3190 IPW_ERROR("dmaKick Failed\n");
3191 goto out;
3192 }
3193
3194 rc = ipw_fw_dma_wait(priv);
3195 if (rc) {
3196 IPW_ERROR("dmaWaitSync Failed\n");
3197 goto out;
3198 }
3199 out:
3200 pci_free_consistent(priv->pci_dev, len, shared_virt, shared_phys);
3201 return rc;
3202 }
3203
3204 /* stop nic */
3205 static int ipw_stop_nic(struct ipw_priv *priv)
3206 {
3207 int rc = 0;
3208
3209 /* stop */
3210 ipw_write32(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
3211
3212 rc = ipw_poll_bit(priv, IPW_RESET_REG,
3213 IPW_RESET_REG_MASTER_DISABLED, 500);
3214 if (rc < 0) {
3215 IPW_ERROR("wait for reg master disabled failed after 500ms\n");
3216 return rc;
3217 }
3218
3219 ipw_set_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
3220
3221 return rc;
3222 }
3223
3224 static void ipw_start_nic(struct ipw_priv *priv)
3225 {
3226 IPW_DEBUG_TRACE(">>\n");
3227
3228 /* prvHwStartNic release ARC */
3229 ipw_clear_bit(priv, IPW_RESET_REG,
3230 IPW_RESET_REG_MASTER_DISABLED |
3231 IPW_RESET_REG_STOP_MASTER |
3232 CBD_RESET_REG_PRINCETON_RESET);
3233
3234 /* enable power management */
3235 ipw_set_bit(priv, IPW_GP_CNTRL_RW,
3236 IPW_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY);
3237
3238 IPW_DEBUG_TRACE("<<\n");
3239 }
3240
3241 static int ipw_init_nic(struct ipw_priv *priv)
3242 {
3243 int rc;
3244
3245 IPW_DEBUG_TRACE(">>\n");
3246 /* reset */
3247 /*prvHwInitNic */
3248 /* set "initialization complete" bit to move adapter to D0 state */
3249 ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3250
3251 /* low-level PLL activation */
3252 ipw_write32(priv, IPW_READ_INT_REGISTER,
3253 IPW_BIT_INT_HOST_SRAM_READ_INT_REGISTER);
3254
3255 /* wait for clock stabilization */
3256 rc = ipw_poll_bit(priv, IPW_GP_CNTRL_RW,
3257 IPW_GP_CNTRL_BIT_CLOCK_READY, 250);
3258 if (rc < 0)
3259 IPW_DEBUG_INFO("FAILED wait for clock stabilization\n");
3260
3261 /* assert SW reset */
3262 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_SW_RESET);
3263
3264 udelay(10);
3265
3266 /* set "initialization complete" bit to move adapter to D0 state */
3267 ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3268
3269 IPW_DEBUG_TRACE("<<\n");
3270 return 0;
3271 }
3272
3273 /* Call this function from process context, it will sleep in request_firmware.
3274 * Probe is an ok place to call this from.
3275 */
3276 static int ipw_reset_nic(struct ipw_priv *priv)
3277 {
3278 int rc = 0;
3279 unsigned long flags;
3280
3281 IPW_DEBUG_TRACE(">>\n");
3282
3283 rc = ipw_init_nic(priv);
3284
3285 spin_lock_irqsave(&priv->lock, flags);
3286 /* Clear the 'host command active' bit... */
3287 priv->status &= ~STATUS_HCMD_ACTIVE;
3288 wake_up_interruptible(&priv->wait_command_queue);
3289 priv->status &= ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
3290 wake_up_interruptible(&priv->wait_state);
3291 spin_unlock_irqrestore(&priv->lock, flags);
3292
3293 IPW_DEBUG_TRACE("<<\n");
3294 return rc;
3295 }
3296
3297
3298 struct ipw_fw {
3299 __le32 ver;
3300 __le32 boot_size;
3301 __le32 ucode_size;
3302 __le32 fw_size;
3303 u8 data[0];
3304 };
3305
3306 static int ipw_get_fw(struct ipw_priv *priv,
3307 const struct firmware **raw, const char *name)
3308 {
3309 struct ipw_fw *fw;
3310 int rc;
3311
3312 /* ask firmware_class module to get the boot firmware off disk */
3313 rc = request_firmware(raw, name, &priv->pci_dev->dev);
3314 if (rc < 0) {
3315 IPW_ERROR("%s request_firmware failed: Reason %d\n", name, rc);
3316 return rc;
3317 }
3318
3319 if ((*raw)->size < sizeof(*fw)) {
3320 IPW_ERROR("%s is too small (%zd)\n", name, (*raw)->size);
3321 return -EINVAL;
3322 }
3323
3324 fw = (void *)(*raw)->data;
3325
3326 if ((*raw)->size < sizeof(*fw) + le32_to_cpu(fw->boot_size) +
3327 le32_to_cpu(fw->ucode_size) + le32_to_cpu(fw->fw_size)) {
3328 IPW_ERROR("%s is too small or corrupt (%zd)\n",
3329 name, (*raw)->size);
3330 return -EINVAL;
3331 }
3332
3333 IPW_DEBUG_INFO("Read firmware '%s' image v%d.%d (%zd bytes)\n",
3334 name,
3335 le32_to_cpu(fw->ver) >> 16,
3336 le32_to_cpu(fw->ver) & 0xff,
3337 (*raw)->size - sizeof(*fw));
3338 return 0;
3339 }
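
/*
 * Illustrative sketch (not part of the driver): the firmware file parsed by
 * ipw_get_fw() above and sliced by ipw_load() below is a small little-endian
 * header (a version word and three section sizes) followed by the boot,
 * ucode and runtime images back to back.  A standalone slicer for that
 * layout, using le32toh() in place of le32_to_cpu() and hypothetical names,
 * looks like this (it assumes the blob is suitably aligned, as the driver's
 * own cast does):
 */
#include <endian.h>
#include <stddef.h>
#include <stdint.h>

struct example_fw_header {
	uint32_t ver;
	uint32_t boot_size;
	uint32_t ucode_size;
	uint32_t fw_size;
	/* images follow immediately after the header */
};

struct example_fw_images {
	const uint8_t *boot, *ucode, *fw;
	size_t boot_size, ucode_size, fw_size;
};

/* Returns 0 and fills 'out' on success, -1 if the blob is truncated. */
int example_fw_slice(const uint8_t *blob, size_t len,
		     struct example_fw_images *out)
{
	const struct example_fw_header *hdr = (const void *)blob;

	if (len < sizeof(*hdr))
		return -1;
	len -= sizeof(*hdr);

	out->boot_size = le32toh(hdr->boot_size);
	out->ucode_size = le32toh(hdr->ucode_size);
	out->fw_size = le32toh(hdr->fw_size);

	/* incremental checks avoid overflowing the sum of the sizes */
	if (out->boot_size > len ||
	    out->ucode_size > len - out->boot_size ||
	    out->fw_size > len - out->boot_size - out->ucode_size)
		return -1;

	out->boot = blob + sizeof(*hdr);
	out->ucode = out->boot + out->boot_size;
	out->fw = out->ucode + out->ucode_size;
	return 0;
}
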
3340
3341 #define IPW_RX_BUF_SIZE (3000)
3342
3343 static void ipw_rx_queue_reset(struct ipw_priv *priv,
3344 struct ipw_rx_queue *rxq)
3345 {
3346 unsigned long flags;
3347 int i;
3348
3349 spin_lock_irqsave(&rxq->lock, flags);
3350
3351 INIT_LIST_HEAD(&rxq->rx_free);
3352 INIT_LIST_HEAD(&rxq->rx_used);
3353
3354 /* Fill the rx_used queue with _all_ of the Rx buffers */
3355 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
3356 /* In the reset function, these buffers may have been allocated
3357 * to an SKB, so we need to unmap and free potential storage */
3358 if (rxq->pool[i].skb != NULL) {
3359 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
3360 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
3361 dev_kfree_skb(rxq->pool[i].skb);
3362 rxq->pool[i].skb = NULL;
3363 }
3364 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
3365 }
3366
3367 /* Set us so that we have processed and used all buffers, but have
3368 * not restocked the Rx queue with fresh buffers */
3369 rxq->read = rxq->write = 0;
3370 rxq->processed = RX_QUEUE_SIZE - 1;
3371 rxq->free_count = 0;
3372 spin_unlock_irqrestore(&rxq->lock, flags);
3373 }
3374
3375 #ifdef CONFIG_PM
3376 static int fw_loaded = 0;
3377 static const struct firmware *raw = NULL;
3378
3379 static void free_firmware(void)
3380 {
3381 if (fw_loaded) {
3382 release_firmware(raw);
3383 raw = NULL;
3384 fw_loaded = 0;
3385 }
3386 }
3387 #else
3388 #define free_firmware() do {} while (0)
3389 #endif
3390
3391 static int ipw_load(struct ipw_priv *priv)
3392 {
3393 #ifndef CONFIG_PM
3394 const struct firmware *raw = NULL;
3395 #endif
3396 struct ipw_fw *fw;
3397 u8 *boot_img, *ucode_img, *fw_img;
3398 u8 *name = NULL;
3399 int rc = 0, retries = 3;
3400
3401 switch (priv->ieee->iw_mode) {
3402 case IW_MODE_ADHOC:
3403 name = "ipw2200-ibss.fw";
3404 break;
3405 #ifdef CONFIG_IPW2200_MONITOR
3406 case IW_MODE_MONITOR:
3407 name = "ipw2200-sniffer.fw";
3408 break;
3409 #endif
3410 case IW_MODE_INFRA:
3411 name = "ipw2200-bss.fw";
3412 break;
3413 }
3414
3415 if (!name) {
3416 rc = -EINVAL;
3417 goto error;
3418 }
3419
3420 #ifdef CONFIG_PM
3421 if (!fw_loaded) {
3422 #endif
3423 rc = ipw_get_fw(priv, &raw, name);
3424 if (rc < 0)
3425 goto error;
3426 #ifdef CONFIG_PM
3427 }
3428 #endif
3429
3430 fw = (void *)raw->data;
3431 boot_img = &fw->data[0];
3432 ucode_img = &fw->data[le32_to_cpu(fw->boot_size)];
3433 fw_img = &fw->data[le32_to_cpu(fw->boot_size) +
3434 le32_to_cpu(fw->ucode_size)];
3435
3436 if (rc < 0)
3437 goto error;
3438
3439 if (!priv->rxq)
3440 priv->rxq = ipw_rx_queue_alloc(priv);
3441 else
3442 ipw_rx_queue_reset(priv, priv->rxq);
3443 if (!priv->rxq) {
3444 IPW_ERROR("Unable to initialize Rx queue\n");
3445 goto error;
3446 }
3447
3448 retry:
3449 /* Ensure interrupts are disabled */
3450 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3451 priv->status &= ~STATUS_INT_ENABLED;
3452
3453 /* ack pending interrupts */
3454 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3455
3456 ipw_stop_nic(priv);
3457
3458 rc = ipw_reset_nic(priv);
3459 if (rc < 0) {
3460 IPW_ERROR("Unable to reset NIC\n");
3461 goto error;
3462 }
3463
3464 ipw_zero_memory(priv, IPW_NIC_SRAM_LOWER_BOUND,
3465 IPW_NIC_SRAM_UPPER_BOUND - IPW_NIC_SRAM_LOWER_BOUND);
3466
3467 /* DMA the initial boot firmware into the device */
3468 rc = ipw_load_firmware(priv, boot_img, le32_to_cpu(fw->boot_size));
3469 if (rc < 0) {
3470 IPW_ERROR("Unable to load boot firmware: %d\n", rc);
3471 goto error;
3472 }
3473
3474 /* kick start the device */
3475 ipw_start_nic(priv);
3476
3477 /* wait for the device to finish its initial startup sequence */
3478 rc = ipw_poll_bit(priv, IPW_INTA_RW,
3479 IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3480 if (rc < 0) {
3481 IPW_ERROR("device failed to boot initial fw image\n");
3482 goto error;
3483 }
3484 IPW_DEBUG_INFO("initial device response after %dms\n", rc);
3485
3486 /* ack fw init done interrupt */
3487 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3488
3489 /* DMA the ucode into the device */
3490 rc = ipw_load_ucode(priv, ucode_img, le32_to_cpu(fw->ucode_size));
3491 if (rc < 0) {
3492 IPW_ERROR("Unable to load ucode: %d\n", rc);
3493 goto error;
3494 }
3495
3496 /* stop nic */
3497 ipw_stop_nic(priv);
3498
3499 /* DMA bss firmware into the device */
3500 rc = ipw_load_firmware(priv, fw_img, le32_to_cpu(fw->fw_size));
3501 if (rc < 0) {
3502 IPW_ERROR("Unable to load firmware: %d\n", rc);
3503 goto error;
3504 }
3505 #ifdef CONFIG_PM
3506 fw_loaded = 1;
3507 #endif
3508
3509 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
3510
3511 rc = ipw_queue_reset(priv);
3512 if (rc < 0) {
3513 IPW_ERROR("Unable to initialize queues\n");
3514 goto error;
3515 }
3516
3517 /* Ensure interrupts are disabled */
3518 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3519 /* ack pending interrupts */
3520 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3521
3522 /* kick start the device */
3523 ipw_start_nic(priv);
3524
3525 if (ipw_read32(priv, IPW_INTA_RW) & IPW_INTA_BIT_PARITY_ERROR) {
3526 if (retries > 0) {
3527 IPW_WARNING("Parity error. Retrying init.\n");
3528 retries--;
3529 goto retry;
3530 }
3531
3532 IPW_ERROR("TODO: Handle parity error -- schedule restart?\n");
3533 rc = -EIO;
3534 goto error;
3535 }
3536
3537 /* wait for the device */
3538 rc = ipw_poll_bit(priv, IPW_INTA_RW,
3539 IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3540 if (rc < 0) {
3541 IPW_ERROR("device failed to start within 500ms\n");
3542 goto error;
3543 }
3544 IPW_DEBUG_INFO("device response after %dms\n", rc);
3545
3546 /* ack fw init done interrupt */
3547 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3548
3549 /* read eeprom data and initialize the eeprom region of sram */
3550 priv->eeprom_delay = 1;
3551 ipw_eeprom_init_sram(priv);
3552
3553 /* enable interrupts */
3554 ipw_enable_interrupts(priv);
3555
3556 /* Ensure our queue has valid packets */
3557 ipw_rx_queue_replenish(priv);
3558
3559 ipw_write32(priv, IPW_RX_READ_INDEX, priv->rxq->read);
3560
3561 /* ack pending interrupts */
3562 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3563
3564 #ifndef CONFIG_PM
3565 release_firmware(raw);
3566 #endif
3567 return 0;
3568
3569 error:
3570 if (priv->rxq) {
3571 ipw_rx_queue_free(priv, priv->rxq);
3572 priv->rxq = NULL;
3573 }
3574 ipw_tx_queue_free(priv);
3575 if (raw)
3576 release_firmware(raw);
3577 #ifdef CONFIG_PM
3578 fw_loaded = 0;
3579 raw = NULL;
3580 #endif
3581
3582 return rc;
3583 }
3584
3585 /**
3586 * DMA services
3587 *
3588 * Theory of operation
3589 *
3590 * A queue is a circular buffer with 'Read' and 'Write' pointers.
3591 * Two empty entries are always kept in the buffer to protect from overflow.
3592 *
3593 * For the Tx queues there are low-mark and high-mark limits. If, after
3594 * queuing a packet for Tx, the free space becomes < low mark, the Tx queue
3595 * is stopped. When reclaiming packets (on the 'tx done' IRQ), if the free
3596 * space becomes > high mark, the Tx queue is resumed.
3597 *
3598 * The IPW operates with six queues, one receive queue in the device's
3599 * sram, one transmit queue for sending commands to the device firmware,
3600 * and four transmit queues for data.
3601 *
3602 * The four transmit queues allow for performing quality of service (qos)
3603 * transmissions as per the 802.11 protocol. Currently Linux does not
3604 * provide a mechanism to the user for utilizing prioritized queues, so
3605 * we only utilize the first data transmit queue (queue1).
3606 */
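
/*
 * Illustrative sketch (not part of the driver): a standalone model of the
 * ring accounting described above.  example_ring_space() mirrors
 * ipw_queue_space() further below -- the two-entry reserve keeps a full
 * ring from looking identical to an empty one -- and the two helpers show
 * how the low/high watermarks would gate a producer.  All names here are
 * made up for the example.
 */
struct example_ring {
	int n_bd;		/* total descriptors in the ring */
	int first_empty;	/* producer (write) index */
	int last_used;		/* consumer (read) index */
	int low_mark;		/* stop the producer below this much space */
	int high_mark;		/* resume the producer above this much space */
};

int example_ring_space(const struct example_ring *q)
{
	int s = q->last_used - q->first_empty;

	if (s <= 0)
		s += q->n_bd;
	s -= 2;			/* reserve so that full != empty */
	return s < 0 ? 0 : s;
}

/* After queuing a frame: returns 1 if the caller should stop the queue. */
int example_ring_should_stop(const struct example_ring *q)
{
	return example_ring_space(q) < q->low_mark;
}

/* After reclaiming frames: returns 1 if the caller may wake the queue. */
int example_ring_should_wake(const struct example_ring *q)
{
	return example_ring_space(q) > q->high_mark;
}
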
3607
3608 /**
3609 * Driver allocates buffers of this size for Rx
3610 */
3611
3612 static inline int ipw_queue_space(const struct clx2_queue *q)
3613 {
3614 int s = q->last_used - q->first_empty;
3615 if (s <= 0)
3616 s += q->n_bd;
3617 s -= 2; /* keep some reserve to not confuse empty and full situations */
3618 if (s < 0)
3619 s = 0;
3620 return s;
3621 }
3622
3623 static inline int ipw_queue_inc_wrap(int index, int n_bd)
3624 {
3625 return (++index == n_bd) ? 0 : index;
3626 }
3627
3628 /**
3629 * Initialize common DMA queue structure
3630 *
3631 * @param q queue to init
3632 * @param count Number of BD's to allocate. Should be power of 2
3633 * @param read_register Address for 'read' register
3634 * (not offset within BAR, full address)
3635 * @param write_register Address for 'write' register
3636 * (not offset within BAR, full address)
3637 * @param base_register Address for 'base' register
3638 * (not offset within BAR, full address)
3639 * @param size Address for 'size' register
3640 * (not offset within BAR, full address)
3641 */
3642 static void ipw_queue_init(struct ipw_priv *priv, struct clx2_queue *q,
3643 int count, u32 read, u32 write, u32 base, u32 size)
3644 {
3645 q->n_bd = count;
3646
3647 q->low_mark = q->n_bd / 4;
3648 if (q->low_mark < 4)
3649 q->low_mark = 4;
3650
3651 q->high_mark = q->n_bd / 8;
3652 if (q->high_mark < 2)
3653 q->high_mark = 2;
3654
3655 q->first_empty = q->last_used = 0;
3656 q->reg_r = read;
3657 q->reg_w = write;
3658
3659 ipw_write32(priv, base, q->dma_addr);
3660 ipw_write32(priv, size, count);
3661 ipw_write32(priv, read, 0);
3662 ipw_write32(priv, write, 0);
3663
3664 _ipw_read32(priv, 0x90);
3665 }
3666
3667 static int ipw_queue_tx_init(struct ipw_priv *priv,
3668 struct clx2_tx_queue *q,
3669 int count, u32 read, u32 write, u32 base, u32 size)
3670 {
3671 struct pci_dev *dev = priv->pci_dev;
3672
3673 q->txb = kmalloc(sizeof(q->txb[0]) * count, GFP_KERNEL);
3674 if (!q->txb) {
3675 IPW_ERROR("kmalloc for auxiliary BD structures failed\n");
3676 return -ENOMEM;
3677 }
3678
3679 q->bd =
3680 pci_alloc_consistent(dev, sizeof(q->bd[0]) * count, &q->q.dma_addr);
3681 if (!q->bd) {
3682 IPW_ERROR("pci_alloc_consistent(%zd) failed\n",
3683 sizeof(q->bd[0]) * count);
3684 kfree(q->txb);
3685 q->txb = NULL;
3686 return -ENOMEM;
3687 }
3688
3689 ipw_queue_init(priv, &q->q, count, read, write, base, size);
3690 return 0;
3691 }
3692
3693 /**
3694 * Free one TFD, the one at index [txq->q.last_used].
3695 * Do NOT advance any indexes
3696 *
3697 * @param dev
3698 * @param txq
3699 */
3700 static void ipw_queue_tx_free_tfd(struct ipw_priv *priv,
3701 struct clx2_tx_queue *txq)
3702 {
3703 struct tfd_frame *bd = &txq->bd[txq->q.last_used];
3704 struct pci_dev *dev = priv->pci_dev;
3705 int i;
3706
3707 /* classify bd */
3708 if (bd->control_flags.message_type == TX_HOST_COMMAND_TYPE)
3709 /* nothing to cleanup after for host commands */
3710 return;
3711
3712 /* sanity check */
3713 if (le32_to_cpu(bd->u.data.num_chunks) > NUM_TFD_CHUNKS) {
3714 IPW_ERROR("Too many chunks: %i\n",
3715 le32_to_cpu(bd->u.data.num_chunks));
3716 /** @todo issue a fatal error, it is quite a serious situation */
3717 return;
3718 }
3719
3720 /* unmap chunks if any */
3721 for (i = 0; i < le32_to_cpu(bd->u.data.num_chunks); i++) {
3722 pci_unmap_single(dev, le32_to_cpu(bd->u.data.chunk_ptr[i]),
3723 le16_to_cpu(bd->u.data.chunk_len[i]),
3724 PCI_DMA_TODEVICE);
3725 if (txq->txb[txq->q.last_used]) {
3726 ieee80211_txb_free(txq->txb[txq->q.last_used]);
3727 txq->txb[txq->q.last_used] = NULL;
3728 }
3729 }
3730 }
3731
3732 /**
3733 * Deallocate DMA queue.
3734 *
3735 * Empty queue by removing and destroying all BD's.
3736 * Free all buffers.
3737 *
3738 * @param dev
3739 * @param q
3740 */
3741 static void ipw_queue_tx_free(struct ipw_priv *priv, struct clx2_tx_queue *txq)
3742 {
3743 struct clx2_queue *q = &txq->q;
3744 struct pci_dev *dev = priv->pci_dev;
3745
3746 if (q->n_bd == 0)
3747 return;
3748
3749 /* first, empty all BD's */
3750 for (; q->first_empty != q->last_used;
3751 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
3752 ipw_queue_tx_free_tfd(priv, txq);
3753 }
3754
3755 /* free buffers belonging to queue itself */
3756 pci_free_consistent(dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd,
3757 q->dma_addr);
3758 kfree(txq->txb);
3759
3760 /* 0 fill whole structure */
3761 memset(txq, 0, sizeof(*txq));
3762 }
3763
3764 /**
3765 * Destroy all DMA queues and structures
3766 *
3767 * @param priv
3768 */
3769 static void ipw_tx_queue_free(struct ipw_priv *priv)
3770 {
3771 /* Tx CMD queue */
3772 ipw_queue_tx_free(priv, &priv->txq_cmd);
3773
3774 /* Tx queues */
3775 ipw_queue_tx_free(priv, &priv->txq[0]);
3776 ipw_queue_tx_free(priv, &priv->txq[1]);
3777 ipw_queue_tx_free(priv, &priv->txq[2]);
3778 ipw_queue_tx_free(priv, &priv->txq[3]);
3779 }
3780
3781 static void ipw_create_bssid(struct ipw_priv *priv, u8 * bssid)
3782 {
3783 /* First 3 bytes are the manufacturer's OUI */
3784 bssid[0] = priv->mac_addr[0];
3785 bssid[1] = priv->mac_addr[1];
3786 bssid[2] = priv->mac_addr[2];
3787
3788 /* Last bytes are random */
3789 get_random_bytes(&bssid[3], ETH_ALEN - 3);
3790
3791 bssid[0] &= 0xfe; /* clear multicast bit */
3792 bssid[0] |= 0x02; /* set local assignment bit (IEEE802) */
3793 }
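
/*
 * Illustrative sketch (not part of the driver): ipw_create_bssid() above
 * builds an ad-hoc BSSID by keeping the adapter's OUI, randomizing the
 * remaining bytes, clearing the multicast bit and setting the locally
 * administered bit.  A standalone userspace version of the same recipe,
 * using rand() in place of get_random_bytes() (seed with srand() in real
 * use), looks like this:
 */
#include <stdint.h>
#include <stdlib.h>

#define EXAMPLE_ETH_ALEN 6

void example_create_bssid(uint8_t bssid[EXAMPLE_ETH_ALEN],
			  const uint8_t mac[EXAMPLE_ETH_ALEN])
{
	int i;

	/* keep the manufacturer's OUI */
	bssid[0] = mac[0];
	bssid[1] = mac[1];
	bssid[2] = mac[2];

	/* randomize the device-specific half */
	for (i = 3; i < EXAMPLE_ETH_ALEN; i++)
		bssid[i] = (uint8_t)(rand() & 0xff);

	bssid[0] &= 0xfe;	/* clear the multicast (group) bit */
	bssid[0] |= 0x02;	/* set the locally administered bit */
}
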
3794
3795 static u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid)
3796 {
3797 struct ipw_station_entry entry;
3798 int i;
3799 DECLARE_MAC_BUF(mac);
3800
3801 for (i = 0; i < priv->num_stations; i++) {
3802 if (!memcmp(priv->stations[i], bssid, ETH_ALEN)) {
3803 /* Another node is active in network */
3804 priv->missed_adhoc_beacons = 0;
3805 if (!(priv->config & CFG_STATIC_CHANNEL))
3806 /* when other nodes drop out, we drop out */
3807 priv->config &= ~CFG_ADHOC_PERSIST;
3808
3809 return i;
3810 }
3811 }
3812
3813 if (i == MAX_STATIONS)
3814 return IPW_INVALID_STATION;
3815
3816 IPW_DEBUG_SCAN("Adding AdHoc station: %s\n", print_mac(mac, bssid));
3817
3818 entry.reserved = 0;
3819 entry.support_mode = 0;
3820 memcpy(entry.mac_addr, bssid, ETH_ALEN);
3821 memcpy(priv->stations[i], bssid, ETH_ALEN);
3822 ipw_write_direct(priv, IPW_STATION_TABLE_LOWER + i * sizeof(entry),
3823 &entry, sizeof(entry));
3824 priv->num_stations++;
3825
3826 return i;
3827 }
3828
3829 static u8 ipw_find_station(struct ipw_priv *priv, u8 * bssid)
3830 {
3831 int i;
3832
3833 for (i = 0; i < priv->num_stations; i++)
3834 if (!memcmp(priv->stations[i], bssid, ETH_ALEN))
3835 return i;
3836
3837 return IPW_INVALID_STATION;
3838 }
3839
3840 static void ipw_send_disassociate(struct ipw_priv *priv, int quiet)
3841 {
3842 int err;
3843 DECLARE_MAC_BUF(mac);
3844
3845 if (priv->status & STATUS_ASSOCIATING) {
3846 IPW_DEBUG_ASSOC("Disassociating while associating.\n");
3847 queue_work(priv->workqueue, &priv->disassociate);
3848 return;
3849 }
3850
3851 if (!(priv->status & STATUS_ASSOCIATED)) {
3852 IPW_DEBUG_ASSOC("Disassociating while not associated.\n");
3853 return;
3854 }
3855
3856 IPW_DEBUG_ASSOC("Disassociation attempt from %s "
3857 "on channel %d.\n",
3858 print_mac(mac, priv->assoc_request.bssid),
3859 priv->assoc_request.channel);
3860
3861 priv->status &= ~(STATUS_ASSOCIATING | STATUS_ASSOCIATED);
3862 priv->status |= STATUS_DISASSOCIATING;
3863
3864 if (quiet)
3865 priv->assoc_request.assoc_type = HC_DISASSOC_QUIET;
3866 else
3867 priv->assoc_request.assoc_type = HC_DISASSOCIATE;
3868
3869 err = ipw_send_associate(priv, &priv->assoc_request);
3870 if (err) {
3871 IPW_DEBUG_HC("Attempt to send [dis]associate command "
3872 "failed.\n");
3873 return;
3874 }
3875
3876 }
3877
3878 static int ipw_disassociate(void *data)
3879 {
3880 struct ipw_priv *priv = data;
3881 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)))
3882 return 0;
3883 ipw_send_disassociate(data, 0);
3884 return 1;
3885 }
3886
3887 static void ipw_bg_disassociate(struct work_struct *work)
3888 {
3889 struct ipw_priv *priv =
3890 container_of(work, struct ipw_priv, disassociate);
3891 mutex_lock(&priv->mutex);
3892 ipw_disassociate(priv);
3893 mutex_unlock(&priv->mutex);
3894 }
3895
3896 static void ipw_system_config(struct work_struct *work)
3897 {
3898 struct ipw_priv *priv =
3899 container_of(work, struct ipw_priv, system_config);
3900
3901 #ifdef CONFIG_IPW2200_PROMISCUOUS
3902 if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
3903 priv->sys_config.accept_all_data_frames = 1;
3904 priv->sys_config.accept_non_directed_frames = 1;
3905 priv->sys_config.accept_all_mgmt_bcpr = 1;
3906 priv->sys_config.accept_all_mgmt_frames = 1;
3907 }
3908 #endif
3909
3910 ipw_send_system_config(priv);
3911 }
3912
3913 struct ipw_status_code {
3914 u16 status;
3915 const char *reason;
3916 };
3917
3918 static const struct ipw_status_code ipw_status_codes[] = {
3919 {0x00, "Successful"},
3920 {0x01, "Unspecified failure"},
3921 {0x0A, "Cannot support all requested capabilities in the "
3922 "Capability information field"},
3923 {0x0B, "Reassociation denied due to inability to confirm that "
3924 "association exists"},
3925 {0x0C, "Association denied due to reason outside the scope of this "
3926 "standard"},
3927 {0x0D,
3928 "Responding station does not support the specified authentication "
3929 "algorithm"},
3930 {0x0E,
3931 "Received an Authentication frame with authentication sequence "
3932 "transaction sequence number out of expected sequence"},
3933 {0x0F, "Authentication rejected because of challenge failure"},
3934 {0x10, "Authentication rejected due to timeout waiting for next "
3935 "frame in sequence"},
3936 {0x11, "Association denied because AP is unable to handle additional "
3937 "associated stations"},
3938 {0x12,
3939 "Association denied due to requesting station not supporting all "
3940 "of the datarates in the BSSBasicServiceSet Parameter"},
3941 {0x13,
3942 "Association denied due to requesting station not supporting "
3943 "short preamble operation"},
3944 {0x14,
3945 "Association denied due to requesting station not supporting "
3946 "PBCC encoding"},
3947 {0x15,
3948 "Association denied due to requesting station not supporting "
3949 "channel agility"},
3950 {0x19,
3951 "Association denied due to requesting station not supporting "
3952 "short slot operation"},
3953 {0x1A,
3954 "Association denied due to requesting station not supporting "
3955 "DSSS-OFDM operation"},
3956 {0x28, "Invalid Information Element"},
3957 {0x29, "Group Cipher is not valid"},
3958 {0x2A, "Pairwise Cipher is not valid"},
3959 {0x2B, "AKMP is not valid"},
3960 {0x2C, "Unsupported RSN IE version"},
3961 {0x2D, "Invalid RSN IE Capabilities"},
3962 {0x2E, "Cipher suite is rejected per security policy"},
3963 };
3964
3965 static const char *ipw_get_status_code(u16 status)
3966 {
3967 int i;
3968 for (i = 0; i < ARRAY_SIZE(ipw_status_codes); i++)
3969 if (ipw_status_codes[i].status == (status & 0xff))
3970 return ipw_status_codes[i].reason;
3971 return "Unknown status value.";
3972 }
3973
3974 static inline void average_init(struct average *avg)
3975 {
3976 memset(avg, 0, sizeof(*avg));
3977 }
3978
3979 #define DEPTH_RSSI 8
3980 #define DEPTH_NOISE 16
3981 static s16 exponential_average(s16 prev_avg, s16 val, u8 depth)
3982 {
3983 return ((depth - 1) * prev_avg + val) / depth;
3984 }
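/*
 * Illustrative sketch (not part of the original driver): the helper above
 * is a 1/depth IIR filter.  With depth = 8, a previous average of -60 and
 * a new sample of -52, the result is ((8 - 1) * -60 + -52) / 8 = -59, so
 * each new sample moves the average by roughly 1/depth of the difference.
 * The wrapper below only shows how such a filter is typically fed; the
 * wrapper name is hypothetical, while exp_avg_rssi and DEPTH_RSSI are the
 * driver's own.
 */
#if 0	/* example only -- never compiled */
static void example_feed_rssi_sample(struct ipw_priv *priv, s16 rssi)
{
	/* smooth the raw per-frame RSSI with the 1/DEPTH_RSSI filter above */
	priv->exp_avg_rssi = exponential_average(priv->exp_avg_rssi,
						 rssi, DEPTH_RSSI);
}
#endif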
3985
3986 static void average_add(struct average *avg, s16 val)
3987 {
3988 avg->sum -= avg->entries[avg->pos];
3989 avg->sum += val;
3990 avg->entries[avg->pos++] = val;
3991 if (unlikely(avg->pos == AVG_ENTRIES)) {
3992 avg->init = 1;
3993 avg->pos = 0;
3994 }
3995 }
3996
3997 static s16 average_value(struct average *avg)
3998 {
3999 if (!unlikely(avg->init)) {
4000 if (avg->pos)
4001 return avg->sum / avg->pos;
4002 return 0;
4003 }
4004
4005 return avg->sum / AVG_ENTRIES;
4006 }
4007
4008 static void ipw_reset_stats(struct ipw_priv *priv)
4009 {
4010 u32 len = sizeof(u32);
4011
4012 priv->quality = 0;
4013
4014 average_init(&priv->average_missed_beacons);
4015 priv->exp_avg_rssi = -60;
4016 priv->exp_avg_noise = -85 + 0x100;
4017
4018 priv->last_rate = 0;
4019 priv->last_missed_beacons = 0;
4020 priv->last_rx_packets = 0;
4021 priv->last_tx_packets = 0;
4022 priv->last_tx_failures = 0;
4023
4024 /* Firmware managed, reset only when NIC is restarted, so we have to
4025 * normalize on the current value */
4026 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC,
4027 &priv->last_rx_err, &len);
4028 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE,
4029 &priv->last_tx_failures, &len);
4030
4031 /* Driver managed, reset with each association */
4032 priv->missed_adhoc_beacons = 0;
4033 priv->missed_beacons = 0;
4034 priv->tx_packets = 0;
4035 priv->rx_packets = 0;
4036
4037 }
4038
4039 static u32 ipw_get_max_rate(struct ipw_priv *priv)
4040 {
4041 u32 i = 0x80000000;
4042 u32 mask = priv->rates_mask;
4043 /* If currently associated in B mode, restrict the maximum
4044 * rate match to B rates */
4045 if (priv->assoc_request.ieee_mode == IPW_B_MODE)
4046 mask &= IEEE80211_CCK_RATES_MASK;
4047
4048 /* TODO: Verify that the rate is supported by the current rates
4049 * list. */
4050
4051 while (i && !(mask & i))
4052 i >>= 1;
4053 switch (i) {
4054 case IEEE80211_CCK_RATE_1MB_MASK:
4055 return 1000000;
4056 case IEEE80211_CCK_RATE_2MB_MASK:
4057 return 2000000;
4058 case IEEE80211_CCK_RATE_5MB_MASK:
4059 return 5500000;
4060 case IEEE80211_OFDM_RATE_6MB_MASK:
4061 return 6000000;
4062 case IEEE80211_OFDM_RATE_9MB_MASK:
4063 return 9000000;
4064 case IEEE80211_CCK_RATE_11MB_MASK:
4065 return 11000000;
4066 case IEEE80211_OFDM_RATE_12MB_MASK:
4067 return 12000000;
4068 case IEEE80211_OFDM_RATE_18MB_MASK:
4069 return 18000000;
4070 case IEEE80211_OFDM_RATE_24MB_MASK:
4071 return 24000000;
4072 case IEEE80211_OFDM_RATE_36MB_MASK:
4073 return 36000000;
4074 case IEEE80211_OFDM_RATE_48MB_MASK:
4075 return 48000000;
4076 case IEEE80211_OFDM_RATE_54MB_MASK:
4077 return 54000000;
4078 }
4079
4080 if (priv->ieee->mode == IEEE_B)
4081 return 11000000;
4082 else
4083 return 54000000;
4084 }
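/*
 * Illustrative note (not part of the original driver): the shift loop above
 * scans from the most significant bit down, so it lands on the highest rate
 * still present in the mask.  If the mask were reduced to CCK rates only
 * (the B-mode case above), the first set bit found would be
 * IEEE80211_CCK_RATE_11MB_MASK and the function would return 11000000.
 */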
4085
4086 static u32 ipw_get_current_rate(struct ipw_priv *priv)
4087 {
4088 u32 rate, len = sizeof(rate);
4089 int err;
4090
4091 if (!(priv->status & STATUS_ASSOCIATED))
4092 return 0;
4093
4094 if (priv->tx_packets > IPW_REAL_RATE_RX_PACKET_THRESHOLD) {
4095 err = ipw_get_ordinal(priv, IPW_ORD_STAT_TX_CURR_RATE, &rate,
4096 &len);
4097 if (err) {
4098 IPW_DEBUG_INFO("failed querying ordinals.\n");
4099 return 0;
4100 }
4101 } else
4102 return ipw_get_max_rate(priv);
4103
4104 switch (rate) {
4105 case IPW_TX_RATE_1MB:
4106 return 1000000;
4107 case IPW_TX_RATE_2MB:
4108 return 2000000;
4109 case IPW_TX_RATE_5MB:
4110 return 5500000;
4111 case IPW_TX_RATE_6MB:
4112 return 6000000;
4113 case IPW_TX_RATE_9MB:
4114 return 9000000;
4115 case IPW_TX_RATE_11MB:
4116 return 11000000;
4117 case IPW_TX_RATE_12MB:
4118 return 12000000;
4119 case IPW_TX_RATE_18MB:
4120 return 18000000;
4121 case IPW_TX_RATE_24MB:
4122 return 24000000;
4123 case IPW_TX_RATE_36MB:
4124 return 36000000;
4125 case IPW_TX_RATE_48MB:
4126 return 48000000;
4127 case IPW_TX_RATE_54MB:
4128 return 54000000;
4129 }
4130
4131 return 0;
4132 }
4133
4134 #define IPW_STATS_INTERVAL (2 * HZ)
4135 static void ipw_gather_stats(struct ipw_priv *priv)
4136 {
4137 u32 rx_err, rx_err_delta, rx_packets_delta;
4138 u32 tx_failures, tx_failures_delta, tx_packets_delta;
4139 u32 missed_beacons_percent, missed_beacons_delta;
4140 u32 quality = 0;
4141 u32 len = sizeof(u32);
4142 s16 rssi;
4143 u32 beacon_quality, signal_quality, tx_quality, rx_quality,
4144 rate_quality;
4145 u32 max_rate;
4146
4147 if (!(priv->status & STATUS_ASSOCIATED)) {
4148 priv->quality = 0;
4149 return;
4150 }
4151
4152 /* Update the statistics */
4153 ipw_get_ordinal(priv, IPW_ORD_STAT_MISSED_BEACONS,
4154 &priv->missed_beacons, &len);
4155 missed_beacons_delta = priv->missed_beacons - priv->last_missed_beacons;
4156 priv->last_missed_beacons = priv->missed_beacons;
4157 if (priv->assoc_request.beacon_interval) {
4158 missed_beacons_percent = missed_beacons_delta *
4159 (HZ * priv->assoc_request.beacon_interval) /
4160 (IPW_STATS_INTERVAL * 10);
4161 } else {
4162 missed_beacons_percent = 0;
4163 }
4164 average_add(&priv->average_missed_beacons, missed_beacons_percent);
4165
4166 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC, &rx_err, &len);
4167 rx_err_delta = rx_err - priv->last_rx_err;
4168 priv->last_rx_err = rx_err;
4169
4170 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE, &tx_failures, &len);
4171 tx_failures_delta = tx_failures - priv->last_tx_failures;
4172 priv->last_tx_failures = tx_failures;
4173
4174 rx_packets_delta = priv->rx_packets - priv->last_rx_packets;
4175 priv->last_rx_packets = priv->rx_packets;
4176
4177 tx_packets_delta = priv->tx_packets - priv->last_tx_packets;
4178 priv->last_tx_packets = priv->tx_packets;
4179
4180 /* Calculate quality based on the following:
4181 *
4182 * Missed beacon: 100% = 0, 0% = 70% missed
4183 * Rate: 60% = 1 Mbps, 100% = Max
4184 * Rx and Tx errors represent a straight % of total Rx/Tx
4185 * RSSI: 100% = > -50, 0% = < -80
4186 * Rx errors: 100% = 0, 0% = 50% missed
4187 *
4188 * The lowest computed quality is used.
4189 *
4190 */
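/*
 * Worked example (values assumed for illustration): if 20% of beacons were
 * missed in the last interval, beacon_quality starts at 100 - 20 = 80 and
 * the rescale below maps it to (80 - 5) * 100 / (100 - 5) = 78%.  A miss
 * rate of 95% or more drops straight to 0.
 */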
4191 #define BEACON_THRESHOLD 5
4192 beacon_quality = 100 - missed_beacons_percent;
4193 if (beacon_quality < BEACON_THRESHOLD)
4194 beacon_quality = 0;
4195 else
4196 beacon_quality = (beacon_quality - BEACON_THRESHOLD) * 100 /
4197 (100 - BEACON_THRESHOLD);
4198 IPW_DEBUG_STATS("Missed beacon: %3d%% (%d%%)\n",
4199 beacon_quality, missed_beacons_percent);
4200
4201 priv->last_rate = ipw_get_current_rate(priv);
4202 max_rate = ipw_get_max_rate(priv);
4203 rate_quality = priv->last_rate * 40 / max_rate + 60;
4204 IPW_DEBUG_STATS("Rate quality : %3d%% (%dMbs)\n",
4205 rate_quality, priv->last_rate / 1000000);
4206
4207 if (rx_packets_delta > 100 && rx_packets_delta + rx_err_delta)
4208 rx_quality = 100 - (rx_err_delta * 100) /
4209 (rx_packets_delta + rx_err_delta);
4210 else
4211 rx_quality = 100;
4212 IPW_DEBUG_STATS("Rx quality : %3d%% (%u errors, %u packets)\n",
4213 rx_quality, rx_err_delta, rx_packets_delta);
4214
4215 if (tx_packets_delta > 100 && tx_packets_delta + tx_failures_delta)
4216 tx_quality = 100 - (tx_failures_delta * 100) /
4217 (tx_packets_delta + tx_failures_delta);
4218 else
4219 tx_quality = 100;
4220 IPW_DEBUG_STATS("Tx quality : %3d%% (%u errors, %u packets)\n",
4221 tx_quality, tx_failures_delta, tx_packets_delta);
4222
4223 rssi = priv->exp_avg_rssi;
4224 signal_quality =
4225 (100 *
4226 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4227 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) -
4228 (priv->ieee->perfect_rssi - rssi) *
4229 (15 * (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) +
4230 62 * (priv->ieee->perfect_rssi - rssi))) /
4231 ((priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4232 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi));
4233 if (signal_quality > 100)
4234 signal_quality = 100;
4235 else if (signal_quality < 1)
4236 signal_quality = 0;
4237
4238 IPW_DEBUG_STATS("Signal level : %3d%% (%d dBm)\n",
4239 signal_quality, rssi);
4240
4241 quality = min(beacon_quality,
4242 min(rate_quality,
4243 min(tx_quality, min(rx_quality, signal_quality))));
4244 if (quality == beacon_quality)
4245 IPW_DEBUG_STATS("Quality (%d%%): Clamped to missed beacons.\n",
4246 quality);
4247 if (quality == rate_quality)
4248 IPW_DEBUG_STATS("Quality (%d%%): Clamped to rate quality.\n",
4249 quality);
4250 if (quality == tx_quality)
4251 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Tx quality.\n",
4252 quality);
4253 if (quality == rx_quality)
4254 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Rx quality.\n",
4255 quality);
4256 if (quality == signal_quality)
4257 IPW_DEBUG_STATS("Quality (%d%%): Clamped to signal quality.\n",
4258 quality);
4259
4260 priv->quality = quality;
4261
4262 queue_delayed_work(priv->workqueue, &priv->gather_stats,
4263 IPW_STATS_INTERVAL);
4264 }
4265
4266 static void ipw_bg_gather_stats(struct work_struct *work)
4267 {
4268 struct ipw_priv *priv =
4269 container_of(work, struct ipw_priv, gather_stats.work);
4270 mutex_lock(&priv->mutex);
4271 ipw_gather_stats(priv);
4272 mutex_unlock(&priv->mutex);
4273 }
4274
4275 /* Missed beacon behavior:
4276 * 1st missed -> roaming_threshold, just wait, don't do any scan/roam.
4277 * roaming_threshold -> disassociate_threshold, scan and roam for better signal.
4278 * Above disassociate threshold, give up and stop scanning.
4279 * Roaming is disabled if disassociate_threshold <= roaming_threshold */
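/*
 * Illustrative walk-through (threshold values assumed, not necessarily the
 * driver's defaults): with roaming_threshold = 8 and
 * disassociate_threshold = 24, misses 1-8 are just reported (an active scan
 * may still be aborted), misses 9-24 set STATUS_ROAMING and kick off scans
 * for a better BSS, and a count above 24 disassociates and aborts any scan
 * in progress.
 */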
4280 static void ipw_handle_missed_beacon(struct ipw_priv *priv,
4281 int missed_count)
4282 {
4283 priv->notif_missed_beacons = missed_count;
4284
4285 if (missed_count > priv->disassociate_threshold &&
4286 priv->status & STATUS_ASSOCIATED) {
4287 /* If associated and we've hit the missed
4288 * beacon threshold, disassociate, turn
4289 * off roaming, and abort any active scans */
4290 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4291 IPW_DL_STATE | IPW_DL_ASSOC,
4292 "Missed beacon: %d - disassociate\n", missed_count);
4293 priv->status &= ~STATUS_ROAMING;
4294 if (priv->status & STATUS_SCANNING) {
4295 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4296 IPW_DL_STATE,
4297 "Aborting scan with missed beacon.\n");
4298 queue_work(priv->workqueue, &priv->abort_scan);
4299 }
4300
4301 queue_work(priv->workqueue, &priv->disassociate);
4302 return;
4303 }
4304
4305 if (priv->status & STATUS_ROAMING) {
4306 /* If we are currently roaming, then just
4307 * print a debug statement... */
4308 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4309 "Missed beacon: %d - roam in progress\n",
4310 missed_count);
4311 return;
4312 }
4313
4314 if (roaming &&
4315 (missed_count > priv->roaming_threshold &&
4316 missed_count <= priv->disassociate_threshold)) {
4317 /* If we are not already roaming, set the ROAM
4318 * bit in the status and kick off a scan.
4319 * This can happen several times before we reach
4320 * disassociate_threshold. */
4321 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4322 "Missed beacon: %d - initiate "
4323 "roaming\n", missed_count);
4324 if (!(priv->status & STATUS_ROAMING)) {
4325 priv->status |= STATUS_ROAMING;
4326 if (!(priv->status & STATUS_SCANNING))
4327 queue_delayed_work(priv->workqueue,
4328 &priv->request_scan, 0);
4329 }
4330 return;
4331 }
4332
4333 if (priv->status & STATUS_SCANNING) {
4334 /* Stop scan to keep fw from getting
4335 * stuck (only if we aren't roaming --
4336 * otherwise we'll never scan more than 2 or 3
4337 * channels..) */
4338 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | IPW_DL_STATE,
4339 "Aborting scan with missed beacon.\n");
4340 queue_work(priv->workqueue, &priv->abort_scan);
4341 }
4342
4343 IPW_DEBUG_NOTIF("Missed beacon: %d\n", missed_count);
4344 }
4345
4346 /**
4347 * Handle host notification packet.
4348 * Called from interrupt routine
4349 */
4350 static void ipw_rx_notification(struct ipw_priv *priv,
4351 struct ipw_rx_notification *notif)
4352 {
4353 DECLARE_MAC_BUF(mac);
4354 notif->size = le16_to_cpu(notif->size);
4355
4356 IPW_DEBUG_NOTIF("type = %i (%d bytes)\n", notif->subtype, notif->size);
4357
4358 switch (notif->subtype) {
4359 case HOST_NOTIFICATION_STATUS_ASSOCIATED:{
4360 struct notif_association *assoc = &notif->u.assoc;
4361
4362 switch (assoc->state) {
4363 case CMAS_ASSOCIATED:{
4364 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4365 IPW_DL_ASSOC,
4366 "associated: '%s' %s"
4367 " \n",
4368 escape_essid(priv->essid,
4369 priv->essid_len),
4370 print_mac(mac, priv->bssid));
4371
4372 switch (priv->ieee->iw_mode) {
4373 case IW_MODE_INFRA:
4374 memcpy(priv->ieee->bssid,
4375 priv->bssid, ETH_ALEN);
4376 break;
4377
4378 case IW_MODE_ADHOC:
4379 memcpy(priv->ieee->bssid,
4380 priv->bssid, ETH_ALEN);
4381
4382 /* clear out the station table */
4383 priv->num_stations = 0;
4384
4385 IPW_DEBUG_ASSOC
4386 ("queueing adhoc check\n");
4387 queue_delayed_work(priv->
4388 workqueue,
4389 &priv->
4390 adhoc_check,
4391 priv->
4392 assoc_request.
4393 beacon_interval);
4394 break;
4395 }
4396
4397 priv->status &= ~STATUS_ASSOCIATING;
4398 priv->status |= STATUS_ASSOCIATED;
4399 queue_work(priv->workqueue,
4400 &priv->system_config);
4401
4402 #ifdef CONFIG_IPW2200_QOS
4403 #define IPW_GET_PACKET_STYPE(x) WLAN_FC_GET_STYPE( \
4404 le16_to_cpu(((struct ieee80211_hdr *)(x))->frame_ctl))
4405 if ((priv->status & STATUS_AUTH) &&
4406 (IPW_GET_PACKET_STYPE(&notif->u.raw)
4407 == IEEE80211_STYPE_ASSOC_RESP)) {
4408 if ((sizeof
4409 (struct
4410 ieee80211_assoc_response)
4411 <= notif->size)
4412 && (notif->size <= 2314)) {
4413 struct
4414 ieee80211_rx_stats
4415 stats = {
4416 .len =
4417 notif->
4418 size - 1,
4419 };
4420
4421 IPW_DEBUG_QOS
4422 ("QoS Associate "
4423 "size %d\n",
4424 notif->size);
4425 ieee80211_rx_mgt(priv->
4426 ieee,
4427 (struct
4428 ieee80211_hdr_4addr
4429 *)
4430 &notif->u.raw, &stats);
4431 }
4432 }
4433 #endif
4434
4435 schedule_work(&priv->link_up);
4436
4437 break;
4438 }
4439
4440 case CMAS_AUTHENTICATED:{
4441 if (priv->
4442 status & (STATUS_ASSOCIATED |
4443 STATUS_AUTH)) {
4444 struct notif_authenticate *auth
4445 = &notif->u.auth;
4446 IPW_DEBUG(IPW_DL_NOTIF |
4447 IPW_DL_STATE |
4448 IPW_DL_ASSOC,
4449 "deauthenticated: '%s' "
4450 "%s"
4451 ": (0x%04X) - %s \n",
4452 escape_essid(priv->
4453 essid,
4454 priv->
4455 essid_len),
4456 print_mac(mac, priv->bssid),
4457 ntohs(auth->status),
4458 ipw_get_status_code
4459 (ntohs
4460 (auth->status)));
4461
4462 priv->status &=
4463 ~(STATUS_ASSOCIATING |
4464 STATUS_AUTH |
4465 STATUS_ASSOCIATED);
4466
4467 schedule_work(&priv->link_down);
4468 break;
4469 }
4470
4471 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4472 IPW_DL_ASSOC,
4473 "authenticated: '%s' %s"
4474 "\n",
4475 escape_essid(priv->essid,
4476 priv->essid_len),
4477 print_mac(mac, priv->bssid));
4478 break;
4479 }
4480
4481 case CMAS_INIT:{
4482 if (priv->status & STATUS_AUTH) {
4483 struct
4484 ieee80211_assoc_response
4485 *resp;
4486 resp =
4487 (struct
4488 ieee80211_assoc_response
4489 *)&notif->u.raw;
4490 IPW_DEBUG(IPW_DL_NOTIF |
4491 IPW_DL_STATE |
4492 IPW_DL_ASSOC,
4493 "association failed (0x%04X): %s\n",
4494 ntohs(resp->status),
4495 ipw_get_status_code
4496 (ntohs
4497 (resp->status)));
4498 }
4499
4500 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4501 IPW_DL_ASSOC,
4502 "disassociated: '%s' %s"
4503 " \n",
4504 escape_essid(priv->essid,
4505 priv->essid_len),
4506 print_mac(mac, priv->bssid));
4507
4508 priv->status &=
4509 ~(STATUS_DISASSOCIATING |
4510 STATUS_ASSOCIATING |
4511 STATUS_ASSOCIATED | STATUS_AUTH);
4512 if (priv->assoc_network
4513 && (priv->assoc_network->
4514 capability &
4515 WLAN_CAPABILITY_IBSS))
4516 ipw_remove_current_network
4517 (priv);
4518
4519 schedule_work(&priv->link_down);
4520
4521 break;
4522 }
4523
4524 case CMAS_RX_ASSOC_RESP:
4525 break;
4526
4527 default:
4528 IPW_ERROR("assoc: unknown (%d)\n",
4529 assoc->state);
4530 break;
4531 }
4532
4533 break;
4534 }
4535
4536 case HOST_NOTIFICATION_STATUS_AUTHENTICATE:{
4537 struct notif_authenticate *auth = &notif->u.auth;
4538 switch (auth->state) {
4539 case CMAS_AUTHENTICATED:
4540 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4541 "authenticated: '%s' %s \n",
4542 escape_essid(priv->essid,
4543 priv->essid_len),
4544 print_mac(mac, priv->bssid));
4545 priv->status |= STATUS_AUTH;
4546 break;
4547
4548 case CMAS_INIT:
4549 if (priv->status & STATUS_AUTH) {
4550 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4551 IPW_DL_ASSOC,
4552 "authentication failed (0x%04X): %s\n",
4553 ntohs(auth->status),
4554 ipw_get_status_code(ntohs
4555 (auth->
4556 status)));
4557 }
4558 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4559 IPW_DL_ASSOC,
4560 "deauthenticated: '%s' %s\n",
4561 escape_essid(priv->essid,
4562 priv->essid_len),
4563 print_mac(mac, priv->bssid));
4564
4565 priv->status &= ~(STATUS_ASSOCIATING |
4566 STATUS_AUTH |
4567 STATUS_ASSOCIATED);
4568
4569 schedule_work(&priv->link_down);
4570 break;
4571
4572 case CMAS_TX_AUTH_SEQ_1:
4573 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4574 IPW_DL_ASSOC, "AUTH_SEQ_1\n");
4575 break;
4576 case CMAS_RX_AUTH_SEQ_2:
4577 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4578 IPW_DL_ASSOC, "AUTH_SEQ_2\n");
4579 break;
4580 case CMAS_AUTH_SEQ_1_PASS:
4581 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4582 IPW_DL_ASSOC, "AUTH_SEQ_1_PASS\n");
4583 break;
4584 case CMAS_AUTH_SEQ_1_FAIL:
4585 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4586 IPW_DL_ASSOC, "AUTH_SEQ_1_FAIL\n");
4587 break;
4588 case CMAS_TX_AUTH_SEQ_3:
4589 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4590 IPW_DL_ASSOC, "AUTH_SEQ_3\n");
4591 break;
4592 case CMAS_RX_AUTH_SEQ_4:
4593 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4594 IPW_DL_ASSOC, "RX_AUTH_SEQ_4\n");
4595 break;
4596 case CMAS_AUTH_SEQ_2_PASS:
4597 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4598 IPW_DL_ASSOC, "AUTH_SEQ_2_PASS\n");
4599 break;
4600 case CMAS_AUTH_SEQ_2_FAIL:
4601 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4602 IPW_DL_ASSOC, "AUTH_SEQ_2_FAIL\n");
4603 break;
4604 case CMAS_TX_ASSOC:
4605 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4606 IPW_DL_ASSOC, "TX_ASSOC\n");
4607 break;
4608 case CMAS_RX_ASSOC_RESP:
4609 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4610 IPW_DL_ASSOC, "RX_ASSOC_RESP\n");
4611
4612 break;
4613 case CMAS_ASSOCIATED:
4614 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4615 IPW_DL_ASSOC, "ASSOCIATED\n");
4616 break;
4617 default:
4618 IPW_DEBUG_NOTIF("auth: failure - %d\n",
4619 auth->state);
4620 break;
4621 }
4622 break;
4623 }
4624
4625 case HOST_NOTIFICATION_STATUS_SCAN_CHANNEL_RESULT:{
4626 struct notif_channel_result *x =
4627 &notif->u.channel_result;
4628
4629 if (notif->size == sizeof(*x)) {
4630 IPW_DEBUG_SCAN("Scan result for channel %d\n",
4631 x->channel_num);
4632 } else {
4633 IPW_DEBUG_SCAN("Scan result of wrong size %d "
4634 "(should be %zd)\n",
4635 notif->size, sizeof(*x));
4636 }
4637 break;
4638 }
4639
4640 case HOST_NOTIFICATION_STATUS_SCAN_COMPLETED:{
4641 struct notif_scan_complete *x = &notif->u.scan_complete;
4642 if (notif->size == sizeof(*x)) {
4643 IPW_DEBUG_SCAN
4644 ("Scan completed: type %d, %d channels, "
4645 "%d status\n", x->scan_type,
4646 x->num_channels, x->status);
4647 } else {
4648 IPW_ERROR("Scan completed of wrong size %d "
4649 "(should be %zd)\n",
4650 notif->size, sizeof(*x));
4651 }
4652
4653 priv->status &=
4654 ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
4655
4656 wake_up_interruptible(&priv->wait_state);
4657 cancel_delayed_work(&priv->scan_check);
4658
4659 if (priv->status & STATUS_EXIT_PENDING)
4660 break;
4661
4662 priv->ieee->scans++;
4663
4664 #ifdef CONFIG_IPW2200_MONITOR
4665 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
4666 priv->status |= STATUS_SCAN_FORCED;
4667 queue_delayed_work(priv->workqueue,
4668 &priv->request_scan, 0);
4669 break;
4670 }
4671 priv->status &= ~STATUS_SCAN_FORCED;
4672 #endif /* CONFIG_IPW2200_MONITOR */
4673
4674 if (!(priv->status & (STATUS_ASSOCIATED |
4675 STATUS_ASSOCIATING |
4676 STATUS_ROAMING |
4677 STATUS_DISASSOCIATING)))
4678 queue_work(priv->workqueue, &priv->associate);
4679 else if (priv->status & STATUS_ROAMING) {
4680 if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
4681 /* If a scan completed and we are in roam mode, then
4682 * the scan that completed was the one requested as a
4683 * result of entering roam... so, schedule the
4684 * roam work */
4685 queue_work(priv->workqueue,
4686 &priv->roam);
4687 else
4688 /* Don't schedule if we aborted the scan */
4689 priv->status &= ~STATUS_ROAMING;
4690 } else if (priv->status & STATUS_SCAN_PENDING)
4691 queue_delayed_work(priv->workqueue,
4692 &priv->request_scan, 0);
4693 else if (priv->config & CFG_BACKGROUND_SCAN
4694 && priv->status & STATUS_ASSOCIATED)
4695 queue_delayed_work(priv->workqueue,
4696 &priv->request_scan,
4697 round_jiffies(HZ));
4698
4699 /* Send an empty event to user space.
4700 * We don't send the received data on the event because
4701 * it would require us to do complex transcoding, and
4702 * we want to minimise the work done in the irq handler
4703 * Use a request to extract the data.
4704 * Also, we generate this event for any scan, regardless
4705 * of how the scan was initiated. User space can just
4706 * sync on periodic scan to get fresh data...
4707 * Jean II */
4708 if (x->status == SCAN_COMPLETED_STATUS_COMPLETE) {
4709 union iwreq_data wrqu;
4710
4711 wrqu.data.length = 0;
4712 wrqu.data.flags = 0;
4713 wireless_send_event(priv->net_dev, SIOCGIWSCAN,
4714 &wrqu, NULL);
4715 }
4716 break;
4717 }
4718
4719 case HOST_NOTIFICATION_STATUS_FRAG_LENGTH:{
4720 struct notif_frag_length *x = &notif->u.frag_len;
4721
4722 if (notif->size == sizeof(*x))
4723 IPW_ERROR("Frag length: %d\n",
4724 le16_to_cpu(x->frag_length));
4725 else
4726 IPW_ERROR("Frag length of wrong size %d "
4727 "(should be %zd)\n",
4728 notif->size, sizeof(*x));
4729 break;
4730 }
4731
4732 case HOST_NOTIFICATION_STATUS_LINK_DETERIORATION:{
4733 struct notif_link_deterioration *x =
4734 &notif->u.link_deterioration;
4735
4736 if (notif->size == sizeof(*x)) {
4737 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4738 "link deterioration: type %d, cnt %d\n",
4739 x->silence_notification_type,
4740 x->silence_count);
4741 memcpy(&priv->last_link_deterioration, x,
4742 sizeof(*x));
4743 } else {
4744 IPW_ERROR("Link Deterioration of wrong size %d "
4745 "(should be %zd)\n",
4746 notif->size, sizeof(*x));
4747 }
4748 break;
4749 }
4750
4751 case HOST_NOTIFICATION_DINO_CONFIG_RESPONSE:{
4752 IPW_ERROR("Dino config\n");
4753 if (priv->hcmd
4754 && priv->hcmd->cmd != HOST_CMD_DINO_CONFIG)
4755 IPW_ERROR("Unexpected DINO_CONFIG_RESPONSE\n");
4756
4757 break;
4758 }
4759
4760 case HOST_NOTIFICATION_STATUS_BEACON_STATE:{
4761 struct notif_beacon_state *x = &notif->u.beacon_state;
4762 if (notif->size != sizeof(*x)) {
4763 IPW_ERROR
4764 ("Beacon state of wrong size %d (should "
4765 "be %zd)\n", notif->size, sizeof(*x));
4766 break;
4767 }
4768
4769 if (le32_to_cpu(x->state) ==
4770 HOST_NOTIFICATION_STATUS_BEACON_MISSING)
4771 ipw_handle_missed_beacon(priv,
4772 le32_to_cpu(x->
4773 number));
4774
4775 break;
4776 }
4777
4778 case HOST_NOTIFICATION_STATUS_TGI_TX_KEY:{
4779 struct notif_tgi_tx_key *x = &notif->u.tgi_tx_key;
4780 if (notif->size == sizeof(*x)) {
4781 IPW_ERROR("TGi Tx Key: state 0x%02x sec type "
4782 "0x%02x station %d\n",
4783 x->key_state, x->security_type,
4784 x->station_index);
4785 break;
4786 }
4787
4788 IPW_ERROR
4789 ("TGi Tx Key of wrong size %d (should be %zd)\n",
4790 notif->size, sizeof(*x));
4791 break;
4792 }
4793
4794 case HOST_NOTIFICATION_CALIB_KEEP_RESULTS:{
4795 struct notif_calibration *x = &notif->u.calibration;
4796
4797 if (notif->size == sizeof(*x)) {
4798 memcpy(&priv->calib, x, sizeof(*x));
4799 IPW_DEBUG_INFO("TODO: Calibration\n");
4800 break;
4801 }
4802
4803 IPW_ERROR
4804 ("Calibration of wrong size %d (should be %zd)\n",
4805 notif->size, sizeof(*x));
4806 break;
4807 }
4808
4809 case HOST_NOTIFICATION_NOISE_STATS:{
4810 if (notif->size == sizeof(u32)) {
4811 priv->exp_avg_noise =
4812 exponential_average(priv->exp_avg_noise,
4813 (u8) (le32_to_cpu(notif->u.noise.value) & 0xff),
4814 DEPTH_NOISE);
4815 break;
4816 }
4817
4818 IPW_ERROR
4819 ("Noise stat is wrong size %d (should be %zd)\n",
4820 notif->size, sizeof(u32));
4821 break;
4822 }
4823
4824 default:
4825 IPW_DEBUG_NOTIF("Unknown notification: "
4826 "subtype=%d,flags=0x%2x,size=%d\n",
4827 notif->subtype, notif->flags, notif->size);
4828 }
4829 }
4830
4831 /**
4832 * Destroys all DMA structures and initialises them again
4833 *
4834 * @param priv
4835 * @return error code
4836 */
4837 static int ipw_queue_reset(struct ipw_priv *priv)
4838 {
4839 int rc = 0;
4840 /** @todo customize queue sizes */
4841 int nTx = 64, nTxCmd = 8;
4842 ipw_tx_queue_free(priv);
4843 /* Tx CMD queue */
4844 rc = ipw_queue_tx_init(priv, &priv->txq_cmd, nTxCmd,
4845 IPW_TX_CMD_QUEUE_READ_INDEX,
4846 IPW_TX_CMD_QUEUE_WRITE_INDEX,
4847 IPW_TX_CMD_QUEUE_BD_BASE,
4848 IPW_TX_CMD_QUEUE_BD_SIZE);
4849 if (rc) {
4850 IPW_ERROR("Tx Cmd queue init failed\n");
4851 goto error;
4852 }
4853 /* Tx queue(s) */
4854 rc = ipw_queue_tx_init(priv, &priv->txq[0], nTx,
4855 IPW_TX_QUEUE_0_READ_INDEX,
4856 IPW_TX_QUEUE_0_WRITE_INDEX,
4857 IPW_TX_QUEUE_0_BD_BASE, IPW_TX_QUEUE_0_BD_SIZE);
4858 if (rc) {
4859 IPW_ERROR("Tx 0 queue init failed\n");
4860 goto error;
4861 }
4862 rc = ipw_queue_tx_init(priv, &priv->txq[1], nTx,
4863 IPW_TX_QUEUE_1_READ_INDEX,
4864 IPW_TX_QUEUE_1_WRITE_INDEX,
4865 IPW_TX_QUEUE_1_BD_BASE, IPW_TX_QUEUE_1_BD_SIZE);
4866 if (rc) {
4867 IPW_ERROR("Tx 1 queue init failed\n");
4868 goto error;
4869 }
4870 rc = ipw_queue_tx_init(priv, &priv->txq[2], nTx,
4871 IPW_TX_QUEUE_2_READ_INDEX,
4872 IPW_TX_QUEUE_2_WRITE_INDEX,
4873 IPW_TX_QUEUE_2_BD_BASE, IPW_TX_QUEUE_2_BD_SIZE);
4874 if (rc) {
4875 IPW_ERROR("Tx 2 queue init failed\n");
4876 goto error;
4877 }
4878 rc = ipw_queue_tx_init(priv, &priv->txq[3], nTx,
4879 IPW_TX_QUEUE_3_READ_INDEX,
4880 IPW_TX_QUEUE_3_WRITE_INDEX,
4881 IPW_TX_QUEUE_3_BD_BASE, IPW_TX_QUEUE_3_BD_SIZE);
4882 if (rc) {
4883 IPW_ERROR("Tx 3 queue init failed\n");
4884 goto error;
4885 }
4886 /* statistics */
4887 priv->rx_bufs_min = 0;
4888 priv->rx_pend_max = 0;
4889 return rc;
4890
4891 error:
4892 ipw_tx_queue_free(priv);
4893 return rc;
4894 }
4895
4896 /**
4897 * Reclaim Tx queue entries no longer used by the NIC.
4898 *
4899 * When the FW advances the 'R' index, all entries between the old and
4900 * new 'R' index need to be reclaimed. As a result, some free space
4901 * forms. If there is enough free space (> low mark), wake Tx queue.
4902 *
4903 * @note Need to protect against garbage in 'R' index
4904 * @param priv
4905 * @param txq
4906 * @param qindex
4907 * @return Number of used entries remaining in the queue
4908 */
4909 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
4910 struct clx2_tx_queue *txq, int qindex)
4911 {
4912 u32 hw_tail;
4913 int used;
4914 struct clx2_queue *q = &txq->q;
4915
4916 hw_tail = ipw_read32(priv, q->reg_r);
4917 if (hw_tail >= q->n_bd) {
4918 IPW_ERROR
4919 ("Read index for DMA queue (%d) is out of range [0-%d)\n",
4920 hw_tail, q->n_bd);
4921 goto done;
4922 }
4923 for (; q->last_used != hw_tail;
4924 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
4925 ipw_queue_tx_free_tfd(priv, txq);
4926 priv->tx_packets++;
4927 }
4928 done:
4929 if ((ipw_queue_space(q) > q->low_mark) &&
4930 (qindex >= 0) &&
4931 (priv->status & STATUS_ASSOCIATED) && netif_running(priv->net_dev))
4932 netif_wake_queue(priv->net_dev);
4933 used = q->first_empty - q->last_used;
4934 if (used < 0)
4935 used += q->n_bd;
4936
4937 return used;
4938 }
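/*
 * Illustrative note (values assumed): with n_bd = 64, first_empty = 5 and
 * last_used = 60, the difference 5 - 60 = -55 wraps to -55 + 64 = 9, i.e.
 * nine TFDs would still be owned by the firmware after the reclaim above.
 */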
4939
4940 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
4941 int len, int sync)
4942 {
4943 struct clx2_tx_queue *txq = &priv->txq_cmd;
4944 struct clx2_queue *q = &txq->q;
4945 struct tfd_frame *tfd;
4946
4947 if (ipw_queue_space(q) < (sync ? 1 : 2)) {
4948 IPW_ERROR("No space for Tx\n");
4949 return -EBUSY;
4950 }
4951
4952 tfd = &txq->bd[q->first_empty];
4953 txq->txb[q->first_empty] = NULL;
4954
4955 memset(tfd, 0, sizeof(*tfd));
4956 tfd->control_flags.message_type = TX_HOST_COMMAND_TYPE;
4957 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
4958 priv->hcmd_seq++;
4959 tfd->u.cmd.index = hcmd;
4960 tfd->u.cmd.length = len;
4961 memcpy(tfd->u.cmd.payload, buf, len);
4962 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
4963 ipw_write32(priv, q->reg_w, q->first_empty);
4964 _ipw_read32(priv, 0x90);
4965
4966 return 0;
4967 }
4968
4969 /*
4970 * Rx theory of operation
4971 *
4972 * The host allocates 32 DMA target addresses and passes the host address
4973 * to the firmware at register IPW_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
4974 * 0 to 31
4975 *
4976 * Rx Queue Indexes
4977 * The host/firmware share two index registers for managing the Rx buffers.
4978 *
4979 * The READ index maps to the first position that the firmware may be writing
4980 * to -- the driver can read up to (but not including) this position and get
4981 * good data.
4982 * The READ index is managed by the firmware once the card is enabled.
4983 *
4984 * The WRITE index maps to the last position the driver has read from -- the
4985 * position preceding WRITE is the last slot the firmware can place a packet.
4986 *
4987 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
4988 * WRITE = READ.
4989 *
4990 * During initialization the host sets up the READ queue position to the first
4991 * INDEX position, and WRITE to the last (READ - 1 wrapped)
4992 *
4993 * When the firmware places a packet in a buffer it will advance the READ index
4994 * and fire the RX interrupt. The driver can then query the READ index and
4995 * process as many packets as possible, moving the WRITE index forward as it
4996 * resets the Rx queue buffers with new memory.
4997 *
4998 * The management in the driver is as follows:
4999 * + A list of pre-allocated SKBs is stored in ipw->rxq->rx_free. When
5000 * ipw->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
5001 * to replenish the ipw->rxq->rx_free.
5002 * + In ipw_rx_queue_replenish (scheduled) if 'processed' != 'read' then the
5003 * ipw->rxq is replenished and the READ INDEX is updated (updating the
5004 * 'processed' and 'read' driver indexes as well)
5005 * + A received packet is processed and handed to the kernel network stack,
5006 * detached from the ipw->rxq. The driver 'processed' index is updated.
5007 * + The Host/Firmware ipw->rxq is replenished at tasklet time from the rx_free
5008 * list. If there are no allocated buffers in ipw->rxq->rx_free, the READ
5009 * INDEX is not incremented and ipw->status(RX_STALLED) is set. If there
5010 * were enough free buffers and RX_STALLED is set it is cleared.
5011 *
5012 *
5013 * Driver sequence:
5014 *
5015 * ipw_rx_queue_alloc() Allocates rx_free
5016 * ipw_rx_queue_replenish() Replenishes rx_free list from rx_used, and calls
5017 * ipw_rx_queue_restock
5018 * ipw_rx_queue_restock() Moves available buffers from rx_free into Rx
5019 * queue, updates firmware pointers, and updates
5020 * the WRITE index. If insufficient rx_free buffers
5021 * are available, schedules ipw_rx_queue_replenish
5022 *
5023 * -- enable interrupts --
5024 * ISR - ipw_rx() Detach ipw_rx_mem_buffers from pool up to the
5025 * READ INDEX, detaching the SKB from the pool.
5026 * Moves the packet buffer from queue to rx_used.
5027 * Calls ipw_rx_queue_restock to refill any empty
5028 * slots.
5029 * ...
5030 *
5031 */
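/*
 * Minimal sketch of the index arithmetic described above (illustrative
 * only; the helper name is hypothetical and nothing here is built into the
 * driver).  It assumes both indexes wrap modulo RX_QUEUE_SIZE, as the
 * restock path below does with '% RX_QUEUE_SIZE'.
 */
#if 0	/* example only -- never compiled */
static u32 example_rx_slots_ready(u32 read, u32 write)
{
	/*
	 * Slots holding good data run from WRITE + 1 up to (but not
	 * including) READ: zero when WRITE == READ - 1 (empty) and
	 * RX_QUEUE_SIZE - 1 when WRITE == READ (full).
	 */
	return (read + RX_QUEUE_SIZE - write - 1) % RX_QUEUE_SIZE;
}
#endif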
5032
5033 /*
5034 * If there are slots in the RX queue that need to be restocked,
5035 * and we have free pre-allocated buffers, fill the ranks as much
5036 * as we can pulling from rx_free.
5037 *
5038 * This moves the 'write' index forward to catch up with 'processed', and
5039 * also updates the memory address in the firmware to reference the new
5040 * target buffer.
5041 */
5042 static void ipw_rx_queue_restock(struct ipw_priv *priv)
5043 {
5044 struct ipw_rx_queue *rxq = priv->rxq;
5045 struct list_head *element;
5046 struct ipw_rx_mem_buffer *rxb;
5047 unsigned long flags;
5048 int write;
5049
5050 spin_lock_irqsave(&rxq->lock, flags);
5051 write = rxq->write;
5052 while ((rxq->write != rxq->processed) && (rxq->free_count)) {
5053 element = rxq->rx_free.next;
5054 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
5055 list_del(element);
5056
5057 ipw_write32(priv, IPW_RFDS_TABLE_LOWER + rxq->write * RFD_SIZE,
5058 rxb->dma_addr);
5059 rxq->queue[rxq->write] = rxb;
5060 rxq->write = (rxq->write + 1) % RX_QUEUE_SIZE;
5061 rxq->free_count--;
5062 }
5063 spin_unlock_irqrestore(&rxq->lock, flags);
5064
5065 /* If the pre-allocated buffer pool is dropping low, schedule to
5066 * refill it */
5067 if (rxq->free_count <= RX_LOW_WATERMARK)
5068 queue_work(priv->workqueue, &priv->rx_replenish);
5069
5070 /* If we've added more space for the firmware to place data, tell it */
5071 if (write != rxq->write)
5072 ipw_write32(priv, IPW_RX_WRITE_INDEX, rxq->write);
5073 }
5074
5075 /*
5076 * Move all used packets from rx_used to rx_free, allocating a new SKB for each.
5077 * Also restock the Rx queue via ipw_rx_queue_restock.
5078 *
5079 * This is called as a scheduled work item (except during initialization).
5080 */
5081 static void ipw_rx_queue_replenish(void *data)
5082 {
5083 struct ipw_priv *priv = data;
5084 struct ipw_rx_queue *rxq = priv->rxq;
5085 struct list_head *element;
5086 struct ipw_rx_mem_buffer *rxb;
5087 unsigned long flags;
5088
5089 spin_lock_irqsave(&rxq->lock, flags);
5090 while (!list_empty(&rxq->rx_used)) {
5091 element = rxq->rx_used.next;
5092 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
5093 rxb->skb = alloc_skb(IPW_RX_BUF_SIZE, GFP_ATOMIC);
5094 if (!rxb->skb) {
5095 printk(KERN_CRIT "%s: Can not allocate SKB buffers.\n",
5096 priv->net_dev->name);
5097 /* We don't reschedule replenish work here -- we will
5098 * call the restock method and if it still needs
5099 * more buffers it will schedule replenish */
5100 break;
5101 }
5102 list_del(element);
5103
5104 rxb->dma_addr =
5105 pci_map_single(priv->pci_dev, rxb->skb->data,
5106 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
5107
5108 list_add_tail(&rxb->list, &rxq->rx_free);
5109 rxq->free_count++;
5110 }
5111 spin_unlock_irqrestore(&rxq->lock, flags);
5112
5113 ipw_rx_queue_restock(priv);
5114 }
5115
5116 static void ipw_bg_rx_queue_replenish(struct work_struct *work)
5117 {
5118 struct ipw_priv *priv =
5119 container_of(work, struct ipw_priv, rx_replenish);
5120 mutex_lock(&priv->mutex);
5121 ipw_rx_queue_replenish(priv);
5122 mutex_unlock(&priv->mutex);
5123 }
5124
5125 /* Assumes that the skb field of the buffers in 'pool' is kept accurate.
5126 * If an SKB has been detached, the POOL needs to have its SKB set to NULL.
5127 * This free routine walks the list of POOL entries and, if the SKB is
5128 * non-NULL, it is unmapped and freed.
5129 */
5130 static void ipw_rx_queue_free(struct ipw_priv *priv, struct ipw_rx_queue *rxq)
5131 {
5132 int i;
5133
5134 if (!rxq)
5135 return;
5136
5137 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
5138 if (rxq->pool[i].skb != NULL) {
5139 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
5140 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
5141 dev_kfree_skb(rxq->pool[i].skb);
5142 }
5143 }
5144
5145 kfree(rxq);
5146 }
5147
5148 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *priv)
5149 {
5150 struct ipw_rx_queue *rxq;
5151 int i;
5152
5153 rxq = kzalloc(sizeof(*rxq), GFP_KERNEL);
5154 if (unlikely(!rxq)) {
5155 IPW_ERROR("memory allocation failed\n");
5156 return NULL;
5157 }
5158 spin_lock_init(&rxq->lock);
5159 INIT_LIST_HEAD(&rxq->rx_free);
5160 INIT_LIST_HEAD(&rxq->rx_used);
5161
5162 /* Fill the rx_used queue with _all_ of the Rx buffers */
5163 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
5164 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
5165
5166 /* Set us so that we have processed and used all buffers, but have
5167 * not restocked the Rx queue with fresh buffers */
5168 rxq->read = rxq->write = 0;
5169 rxq->processed = RX_QUEUE_SIZE - 1;
5170 rxq->free_count = 0;
5171
5172 return rxq;
5173 }
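/*
 * Illustrative note (not part of the original driver): starting with
 * write = 0 and processed = RX_QUEUE_SIZE - 1 lets the first
 * ipw_rx_queue_restock() pass advance 'write' through RX_QUEUE_SIZE - 1
 * slots before write == processed stops it, preserving the one-slot gap
 * that the "Rx theory of operation" comment above uses to tell a full
 * queue from an empty one.
 */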
5174
5175 static int ipw_is_rate_in_mask(struct ipw_priv *priv, int ieee_mode, u8 rate)
5176 {
5177 rate &= ~IEEE80211_BASIC_RATE_MASK;
5178 if (ieee_mode == IEEE_A) {
5179 switch (rate) {
5180 case IEEE80211_OFDM_RATE_6MB:
5181 return priv->rates_mask & IEEE80211_OFDM_RATE_6MB_MASK ?
5182 1 : 0;
5183 case IEEE80211_OFDM_RATE_9MB:
5184 return priv->rates_mask & IEEE80211_OFDM_RATE_9MB_MASK ?
5185 1 : 0;
5186 case IEEE80211_OFDM_RATE_12MB:
5187 return priv->
5188 rates_mask & IEEE80211_OFDM_RATE_12MB_MASK ? 1 : 0;
5189 case IEEE80211_OFDM_RATE_18MB:
5190 return priv->
5191 rates_mask & IEEE80211_OFDM_RATE_18MB_MASK ? 1 : 0;
5192 case IEEE80211_OFDM_RATE_24MB:
5193 return priv->
5194 rates_mask & IEEE80211_OFDM_RATE_24MB_MASK ? 1 : 0;
5195 case IEEE80211_OFDM_RATE_36MB:
5196 return priv->
5197 rates_mask & IEEE80211_OFDM_RATE_36MB_MASK ? 1 : 0;
5198 case IEEE80211_OFDM_RATE_48MB:
5199 return priv->
5200 rates_mask & IEEE80211_OFDM_RATE_48MB_MASK ? 1 : 0;
5201 case IEEE80211_OFDM_RATE_54MB:
5202 return priv->
5203 rates_mask & IEEE80211_OFDM_RATE_54MB_MASK ? 1 : 0;
5204 default:
5205 return 0;
5206 }
5207 }
5208
5209 /* B and G mixed */
5210 switch (rate) {
5211 case IEEE80211_CCK_RATE_1MB:
5212 return priv->rates_mask & IEEE80211_CCK_RATE_1MB_MASK ? 1 : 0;
5213 case IEEE80211_CCK_RATE_2MB:
5214 return priv->rates_mask & IEEE80211_CCK_RATE_2MB_MASK ? 1 : 0;
5215 case IEEE80211_CCK_RATE_5MB:
5216 return priv->rates_mask & IEEE80211_CCK_RATE_5MB_MASK ? 1 : 0;
5217 case IEEE80211_CCK_RATE_11MB:
5218 return priv->rates_mask & IEEE80211_CCK_RATE_11MB_MASK ? 1 : 0;
5219 }
5220
5221 /* If we are limited to B modulations, bail at this point */
5222 if (ieee_mode == IEEE_B)
5223 return 0;
5224
5225 /* G */
5226 switch (rate) {
5227 case IEEE80211_OFDM_RATE_6MB:
5228 return priv->rates_mask & IEEE80211_OFDM_RATE_6MB_MASK ? 1 : 0;
5229 case IEEE80211_OFDM_RATE_9MB:
5230 return priv->rates_mask & IEEE80211_OFDM_RATE_9MB_MASK ? 1 : 0;
5231 case IEEE80211_OFDM_RATE_12MB:
5232 return priv->rates_mask & IEEE80211_OFDM_RATE_12MB_MASK ? 1 : 0;
5233 case IEEE80211_OFDM_RATE_18MB:
5234 return priv->rates_mask & IEEE80211_OFDM_RATE_18MB_MASK ? 1 : 0;
5235 case IEEE80211_OFDM_RATE_24MB:
5236 return priv->rates_mask & IEEE80211_OFDM_RATE_24MB_MASK ? 1 : 0;
5237 case IEEE80211_OFDM_RATE_36MB:
5238 return priv->rates_mask & IEEE80211_OFDM_RATE_36MB_MASK ? 1 : 0;
5239 case IEEE80211_OFDM_RATE_48MB:
5240 return priv->rates_mask & IEEE80211_OFDM_RATE_48MB_MASK ? 1 : 0;
5241 case IEEE80211_OFDM_RATE_54MB:
5242 return priv->rates_mask & IEEE80211_OFDM_RATE_54MB_MASK ? 1 : 0;
5243 }
5244
5245 return 0;
5246 }
5247
5248 static int ipw_compatible_rates(struct ipw_priv *priv,
5249 const struct ieee80211_network *network,
5250 struct ipw_supported_rates *rates)
5251 {
5252 int num_rates, i;
5253
5254 memset(rates, 0, sizeof(*rates));
5255 num_rates = min(network->rates_len, (u8) IPW_MAX_RATES);
5256 rates->num_rates = 0;
5257 for (i = 0; i < num_rates; i++) {
5258 if (!ipw_is_rate_in_mask(priv, network->mode,
5259 network->rates[i])) {
5260
5261 if (network->rates[i] & IEEE80211_BASIC_RATE_MASK) {
5262 IPW_DEBUG_SCAN("Adding masked mandatory "
5263 "rate %02X\n",
5264 network->rates[i]);
5265 rates->supported_rates[rates->num_rates++] =
5266 network->rates[i];
5267 continue;
5268 }
5269
5270 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5271 network->rates[i], priv->rates_mask);
5272 continue;
5273 }
5274
5275 rates->supported_rates[rates->num_rates++] = network->rates[i];
5276 }
5277
5278 num_rates = min(network->rates_ex_len,
5279 (u8) (IPW_MAX_RATES - num_rates));
5280 for (i = 0; i < num_rates; i++) {
5281 if (!ipw_is_rate_in_mask(priv, network->mode,
5282 network->rates_ex[i])) {
5283 if (network->rates_ex[i] & IEEE80211_BASIC_RATE_MASK) {
5284 IPW_DEBUG_SCAN("Adding masked mandatory "
5285 "rate %02X\n",
5286 network->rates_ex[i]);
5287 rates->supported_rates[rates->num_rates++] =
5288 network->rates[i];
5289 continue;
5290 }
5291
5292 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5293 network->rates_ex[i], priv->rates_mask);
5294 continue;
5295 }
5296
5297 rates->supported_rates[rates->num_rates++] =
5298 network->rates_ex[i];
5299 }
5300
5301 return 1;
5302 }
5303
5304 static void ipw_copy_rates(struct ipw_supported_rates *dest,
5305 const struct ipw_supported_rates *src)
5306 {
5307 u8 i;
5308 for (i = 0; i < src->num_rates; i++)
5309 dest->supported_rates[i] = src->supported_rates[i];
5310 dest->num_rates = src->num_rates;
5311 }
5312
5313 /* TODO: Look at sniffed packets in the air to determine if the basic rate
5314 * mask should ever be used -- right now all callers to add the scan rates are
5315 * set with the modulation = CCK, so BASIC_RATE_MASK is never set... */
5316 static void ipw_add_cck_scan_rates(struct ipw_supported_rates *rates,
5317 u8 modulation, u32 rate_mask)
5318 {
5319 u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ?
5320 IEEE80211_BASIC_RATE_MASK : 0;
5321
5322 if (rate_mask & IEEE80211_CCK_RATE_1MB_MASK)
5323 rates->supported_rates[rates->num_rates++] =
5324 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB;
5325
5326 if (rate_mask & IEEE80211_CCK_RATE_2MB_MASK)
5327 rates->supported_rates[rates->num_rates++] =
5328 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB;
5329
5330 if (rate_mask & IEEE80211_CCK_RATE_5MB_MASK)
5331 rates->supported_rates[rates->num_rates++] = basic_mask |
5332 IEEE80211_CCK_RATE_5MB;
5333
5334 if (rate_mask & IEEE80211_CCK_RATE_11MB_MASK)
5335 rates->supported_rates[rates->num_rates++] = basic_mask |
5336 IEEE80211_CCK_RATE_11MB;
5337 }
5338
5339 static void ipw_add_ofdm_scan_rates(struct ipw_supported_rates *rates,
5340 u8 modulation, u32 rate_mask)
5341 {
5342 u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ?
5343 IEEE80211_BASIC_RATE_MASK : 0;
5344
5345 if (rate_mask & IEEE80211_OFDM_RATE_6MB_MASK)
5346 rates->supported_rates[rates->num_rates++] = basic_mask |
5347 IEEE80211_OFDM_RATE_6MB;
5348
5349 if (rate_mask & IEEE80211_OFDM_RATE_9MB_MASK)
5350 rates->supported_rates[rates->num_rates++] =
5351 IEEE80211_OFDM_RATE_9MB;
5352
5353 if (rate_mask & IEEE80211_OFDM_RATE_12MB_MASK)
5354 rates->supported_rates[rates->num_rates++] = basic_mask |
5355 IEEE80211_OFDM_RATE_12MB;
5356
5357 if (rate_mask & IEEE80211_OFDM_RATE_18MB_MASK)
5358 rates->supported_rates[rates->num_rates++] =
5359 IEEE80211_OFDM_RATE_18MB;
5360
5361 if (rate_mask & IEEE80211_OFDM_RATE_24MB_MASK)
5362 rates->supported_rates[rates->num_rates++] = basic_mask |
5363 IEEE80211_OFDM_RATE_24MB;
5364
5365 if (rate_mask & IEEE80211_OFDM_RATE_36MB_MASK)
5366 rates->supported_rates[rates->num_rates++] =
5367 IEEE80211_OFDM_RATE_36MB;
5368
5369 if (rate_mask & IEEE80211_OFDM_RATE_48MB_MASK)
5370 rates->supported_rates[rates->num_rates++] =
5371 IEEE80211_OFDM_RATE_48MB;
5372
5373 if (rate_mask & IEEE80211_OFDM_RATE_54MB_MASK)
5374 rates->supported_rates[rates->num_rates++] =
5375 IEEE80211_OFDM_RATE_54MB;
5376 }
5377
5378 struct ipw_network_match {
5379 struct ieee80211_network *network;
5380 struct ipw_supported_rates rates;
5381 };
5382
5383 static int ipw_find_adhoc_network(struct ipw_priv *priv,
5384 struct ipw_network_match *match,
5385 struct ieee80211_network *network,
5386 int roaming)
5387 {
5388 struct ipw_supported_rates rates;
5389 DECLARE_MAC_BUF(mac);
5390 DECLARE_MAC_BUF(mac2);
5391
5392 /* Verify that this network's capability is compatible with the
5393 * current mode (AdHoc or Infrastructure) */
5394 if ((priv->ieee->iw_mode == IW_MODE_ADHOC &&
5395 !(network->capability & WLAN_CAPABILITY_IBSS))) {
5396 IPW_DEBUG_MERGE("Network '%s (%s)' excluded due to "
5397 "capability mismatch.\n",
5398 escape_essid(network->ssid, network->ssid_len),
5399 print_mac(mac, network->bssid));
5400 return 0;
5401 }
5402
5403 /* If we do not have an ESSID for this AP, we can not associate with
5404 * it */
5405 if (network->flags & NETWORK_EMPTY_ESSID) {
5406 IPW_DEBUG_MERGE("Network '%s (%s)' excluded "
5407 "because of hidden ESSID.\n",
5408 escape_essid(network->ssid, network->ssid_len),
5409 print_mac(mac, network->bssid));
5410 return 0;
5411 }
5412
5413 if (unlikely(roaming)) {
5414 /* If we are roaming, then check if this is a valid
5415 * network to try to roam to */
5416 if ((network->ssid_len != match->network->ssid_len) ||
5417 memcmp(network->ssid, match->network->ssid,
5418 network->ssid_len)) {
5419 IPW_DEBUG_MERGE("Network '%s (%s)' excluded "
5420 "because of non-network ESSID.\n",
5421 escape_essid(network->ssid,
5422 network->ssid_len),
5423 print_mac(mac, network->bssid));
5424 return 0;
5425 }
5426 } else {
5427 /* If an ESSID has been configured then compare the broadcast
5428 * ESSID to ours */
5429 if ((priv->config & CFG_STATIC_ESSID) &&
5430 ((network->ssid_len != priv->essid_len) ||
5431 memcmp(network->ssid, priv->essid,
5432 min(network->ssid_len, priv->essid_len)))) {
5433 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5434
5435 strncpy(escaped,
5436 escape_essid(network->ssid, network->ssid_len),
5437 sizeof(escaped));
5438 IPW_DEBUG_MERGE("Network '%s (%s)' excluded "
5439 "because of ESSID mismatch: '%s'.\n",
5440 escaped, print_mac(mac, network->bssid),
5441 escape_essid(priv->essid,
5442 priv->essid_len));
5443 return 0;
5444 }
5445 }
5446
5447 /* If this network's timestamp (TSF) is behind the current match's,
5448 * don't bother testing everything else. */
5449
5450 if (network->time_stamp[0] < match->network->time_stamp[0]) {
5451 IPW_DEBUG_MERGE("Network '%s' excluded because newer than "
5452 "current network.\n",
5453 escape_essid(match->network->ssid,
5454 match->network->ssid_len));
5455 return 0;
5456 } else if (network->time_stamp[1] < match->network->time_stamp[1]) {
5457 IPW_DEBUG_MERGE("Network '%s' excluded because newer than "
5458 "current network.\n",
5459 escape_essid(match->network->ssid,
5460 match->network->ssid_len));
5461 return 0;
5462 }
5463
5464 /* Now go through and see if the requested network is valid... */
5465 if (priv->ieee->scan_age != 0 &&
5466 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5467 IPW_DEBUG_MERGE("Network '%s (%s)' excluded "
5468 "because of age: %ums.\n",
5469 escape_essid(network->ssid, network->ssid_len),
5470 print_mac(mac, network->bssid),
5471 jiffies_to_msecs(jiffies -
5472 network->last_scanned));
5473 return 0;
5474 }
5475
5476 if ((priv->config & CFG_STATIC_CHANNEL) &&
5477 (network->channel != priv->channel)) {
5478 IPW_DEBUG_MERGE("Network '%s (%s)' excluded "
5479 "because of channel mismatch: %d != %d.\n",
5480 escape_essid(network->ssid, network->ssid_len),
5481 print_mac(mac, network->bssid),
5482 network->channel, priv->channel);
5483 return 0;
5484 }
5485
5486 /* Verify privacy compatibility */
5487 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5488 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5489 IPW_DEBUG_MERGE("Network '%s (%s)' excluded "
5490 "because of privacy mismatch: %s != %s.\n",
5491 escape_essid(network->ssid, network->ssid_len),
5492 print_mac(mac, network->bssid),
5493 priv->
5494 capability & CAP_PRIVACY_ON ? "on" : "off",
5495 network->
5496 capability & WLAN_CAPABILITY_PRIVACY ? "on" :
5497 "off");
5498 return 0;
5499 }
5500
5501 if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
5502 IPW_DEBUG_MERGE("Network '%s (%s)' excluded "
5503 "because of the same BSSID match: %s"
5504 ".\n", escape_essid(network->ssid,
5505 network->ssid_len),
5506 print_mac(mac, network->bssid),
5507 print_mac(mac2, priv->bssid));
5508 return 0;
5509 }
5510
5511 /* Filter out any incompatible freq / mode combinations */
5512 if (!ieee80211_is_valid_mode(priv->ieee, network->mode)) {
5513 IPW_DEBUG_MERGE("Network '%s (%s)' excluded "
5514 "because of invalid frequency/mode "
5515 "combination.\n",
5516 escape_essid(network->ssid, network->ssid_len),
5517 print_mac(mac, network->bssid));
5518 return 0;
5519 }
5520
5521 /* Ensure that the rates supported by the driver are compatible with
5522 * this AP, including verification of basic rates (mandatory) */
5523 if (!ipw_compatible_rates(priv, network, &rates)) {
5524 IPW_DEBUG_MERGE("Network '%s (%s)' excluded "
5525 "because configured rate mask excludes "
5526 "AP mandatory rate.\n",
5527 escape_essid(network->ssid, network->ssid_len),
5528 print_mac(mac, network->bssid));
5529 return 0;
5530 }
5531
5532 if (rates.num_rates == 0) {
5533 IPW_DEBUG_MERGE("Network '%s (%s)' excluded "
5534 "because of no compatible rates.\n",
5535 escape_essid(network->ssid, network->ssid_len),
5536 print_mac(mac, network->bssid));
5537 return 0;
5538 }
5539
5540 /* TODO: Perform any further minimal comparative tests. We do not
5541 * want to put too much policy logic here; intelligent scan selection
5542 * should occur within a generic IEEE 802.11 user space tool. */
5543
5544 /* Set up 'new' AP to this network */
5545 ipw_copy_rates(&match->rates, &rates);
5546 match->network = network;
5547 IPW_DEBUG_MERGE("Network '%s (%s)' is a viable match.\n",
5548 escape_essid(network->ssid, network->ssid_len),
5549 print_mac(mac, network->bssid));
5550
5551 return 1;
5552 }
5553
5554 static void ipw_merge_adhoc_network(struct work_struct *work)
5555 {
5556 struct ipw_priv *priv =
5557 container_of(work, struct ipw_priv, merge_networks);
5558 struct ieee80211_network *network = NULL;
5559 struct ipw_network_match match = {
5560 .network = priv->assoc_network
5561 };
5562
5563 if ((priv->status & STATUS_ASSOCIATED) &&
5564 (priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5565 /* First pass through ROAM process -- look for a better
5566 * network */
5567 unsigned long flags;
5568
5569 spin_lock_irqsave(&priv->ieee->lock, flags);
5570 list_for_each_entry(network, &priv->ieee->network_list, list) {
5571 if (network != priv->assoc_network)
5572 ipw_find_adhoc_network(priv, &match, network,
5573 1);
5574 }
5575 spin_unlock_irqrestore(&priv->ieee->lock, flags);
5576
5577 if (match.network == priv->assoc_network) {
5578 IPW_DEBUG_MERGE("No better ADHOC in this network to "
5579 "merge to.\n");
5580 return;
5581 }
5582
5583 mutex_lock(&priv->mutex);
5584 if ((priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5585 IPW_DEBUG_MERGE("remove network %s\n",
5586 escape_essid(priv->essid,
5587 priv->essid_len));
5588 ipw_remove_current_network(priv);
5589 }
5590
5591 ipw_disassociate(priv);
5592 priv->assoc_network = match.network;
5593 mutex_unlock(&priv->mutex);
5594 return;
5595 }
5596 }
5597
5598 static int ipw_best_network(struct ipw_priv *priv,
5599 struct ipw_network_match *match,
5600 struct ieee80211_network *network, int roaming)
5601 {
5602 struct ipw_supported_rates rates;
5603 DECLARE_MAC_BUF(mac);
5604
5605 /* Verify that this network's capability is compatible with the
5606 * current mode (AdHoc or Infrastructure) */
5607 if ((priv->ieee->iw_mode == IW_MODE_INFRA &&
5608 !(network->capability & WLAN_CAPABILITY_ESS)) ||
5609 (priv->ieee->iw_mode == IW_MODE_ADHOC &&
5610 !(network->capability & WLAN_CAPABILITY_IBSS))) {
5611 IPW_DEBUG_ASSOC("Network '%s (%s)' excluded due to "
5612 "capability mismatch.\n",
5613 escape_essid(network->ssid, network->ssid_len),
5614 print_mac(mac, network->bssid));
5615 return 0;
5616 }
5617
5618 /* If we do not have an ESSID for this AP, we can not associate with
5619 * it */
5620 if (network->flags & NETWORK_EMPTY_ESSID) {
5621 IPW_DEBUG_ASSOC("Network '%s (%s)' excluded "
5622 "because of hidden ESSID.\n",
5623 escape_essid(network->ssid, network->ssid_len),
5624 print_mac(mac, network->bssid));
5625 return 0;
5626 }
5627
5628 if (unlikely(roaming)) {
5629 /* If we are roaming, then check if this is a valid
5630 * network to try to roam to */
5631 if ((network->ssid_len != match->network->ssid_len) ||
5632 memcmp(network->ssid, match->network->ssid,
5633 network->ssid_len)) {
5634 IPW_DEBUG_ASSOC("Network '%s (%s)' excluded "
5635 "because of non-network ESSID.\n",
5636 escape_essid(network->ssid,
5637 network->ssid_len),
5638 print_mac(mac, network->bssid));
5639 return 0;
5640 }
5641 } else {
5642 /* If an ESSID has been configured then compare the broadcast
5643 * ESSID to ours */
5644 if ((priv->config & CFG_STATIC_ESSID) &&
5645 ((network->ssid_len != priv->essid_len) ||
5646 memcmp(network->ssid, priv->essid,
5647 min(network->ssid_len, priv->essid_len)))) {
5648 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5649 strncpy(escaped,
5650 escape_essid(network->ssid, network->ssid_len),
5651 sizeof(escaped));
5652 IPW_DEBUG_ASSOC("Network '%s (%s)' excluded "
5653 "because of ESSID mismatch: '%s'.\n",
5654 escaped, print_mac(mac, network->bssid),
5655 escape_essid(priv->essid,
5656 priv->essid_len));
5657 return 0;
5658 }
5659 }
5660
5661 	/* If the currently matched network has a stronger signal than this
5662 	 * one, don't bother testing everything else. */
5663 if (match->network && match->network->stats.rssi > network->stats.rssi) {
5664 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5665 strncpy(escaped,
5666 escape_essid(network->ssid, network->ssid_len),
5667 sizeof(escaped));
5668 IPW_DEBUG_ASSOC("Network '%s (%s)' excluded because "
5669 "'%s (%s)' has a stronger signal.\n",
5670 escaped, print_mac(mac, network->bssid),
5671 escape_essid(match->network->ssid,
5672 match->network->ssid_len),
5673 print_mac(mac, match->network->bssid));
5674 return 0;
5675 }
5676
5677 /* If this network has already had an association attempt within the
5678 	 * last 3 seconds, do not try to associate with it again... */
5679 if (network->last_associate &&
5680 time_after(network->last_associate + (HZ * 3UL), jiffies)) {
5681 IPW_DEBUG_ASSOC("Network '%s (%s)' excluded "
5682 "because of storming (%ums since last "
5683 "assoc attempt).\n",
5684 escape_essid(network->ssid, network->ssid_len),
5685 print_mac(mac, network->bssid),
5686 jiffies_to_msecs(jiffies -
5687 network->last_associate));
5688 return 0;
5689 }
5690
5691 /* Now go through and see if the requested network is valid... */
5692 if (priv->ieee->scan_age != 0 &&
5693 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5694 IPW_DEBUG_ASSOC("Network '%s (%s)' excluded "
5695 "because of age: %ums.\n",
5696 escape_essid(network->ssid, network->ssid_len),
5697 print_mac(mac, network->bssid),
5698 jiffies_to_msecs(jiffies -
5699 network->last_scanned));
5700 return 0;
5701 }
5702
5703 if ((priv->config & CFG_STATIC_CHANNEL) &&
5704 (network->channel != priv->channel)) {
5705 IPW_DEBUG_ASSOC("Network '%s (%s)' excluded "
5706 "because of channel mismatch: %d != %d.\n",
5707 escape_essid(network->ssid, network->ssid_len),
5708 print_mac(mac, network->bssid),
5709 network->channel, priv->channel);
5710 return 0;
5711 }
5712
5713 	/* Verify privacy compatibility */
5714 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5715 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5716 IPW_DEBUG_ASSOC("Network '%s (%s)' excluded "
5717 "because of privacy mismatch: %s != %s.\n",
5718 escape_essid(network->ssid, network->ssid_len),
5719 print_mac(mac, network->bssid),
5720 priv->capability & CAP_PRIVACY_ON ? "on" :
5721 "off",
5722 network->capability &
5723 WLAN_CAPABILITY_PRIVACY ? "on" : "off");
5724 return 0;
5725 }
5726
5727 if ((priv->config & CFG_STATIC_BSSID) &&
5728 memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
5729 IPW_DEBUG_ASSOC("Network '%s (%s)' excluded "
5730 "because of BSSID mismatch: %s.\n",
5731 escape_essid(network->ssid, network->ssid_len),
5732 print_mac(mac, network->bssid), print_mac(mac, priv->bssid));
5733 return 0;
5734 }
5735
5736 /* Filter out any incompatible freq / mode combinations */
5737 if (!ieee80211_is_valid_mode(priv->ieee, network->mode)) {
5738 IPW_DEBUG_ASSOC("Network '%s (%s)' excluded "
5739 "because of invalid frequency/mode "
5740 "combination.\n",
5741 escape_essid(network->ssid, network->ssid_len),
5742 print_mac(mac, network->bssid));
5743 return 0;
5744 }
5745
5746 /* Filter out invalid channel in current GEO */
5747 if (!ieee80211_is_valid_channel(priv->ieee, network->channel)) {
5748 IPW_DEBUG_ASSOC("Network '%s (%s)' excluded "
5749 "because of invalid channel in current GEO\n",
5750 escape_essid(network->ssid, network->ssid_len),
5751 print_mac(mac, network->bssid));
5752 return 0;
5753 }
5754
5755 /* Ensure that the rates supported by the driver are compatible with
5756 * this AP, including verification of basic rates (mandatory) */
5757 if (!ipw_compatible_rates(priv, network, &rates)) {
5758 IPW_DEBUG_ASSOC("Network '%s (%s)' excluded "
5759 "because configured rate mask excludes "
5760 "AP mandatory rate.\n",
5761 escape_essid(network->ssid, network->ssid_len),
5762 print_mac(mac, network->bssid));
5763 return 0;
5764 }
5765
5766 if (rates.num_rates == 0) {
5767 IPW_DEBUG_ASSOC("Network '%s (%s)' excluded "
5768 "because of no compatible rates.\n",
5769 escape_essid(network->ssid, network->ssid_len),
5770 print_mac(mac, network->bssid));
5771 return 0;
5772 }
5773
5774 	/* TODO: Perform any further minimal comparative tests. We do not
5775 * want to put too much policy logic here; intelligent scan selection
5776 * should occur within a generic IEEE 802.11 user space tool. */
5777
5778 /* Set up 'new' AP to this network */
5779 ipw_copy_rates(&match->rates, &rates);
5780 match->network = network;
5781
5782 IPW_DEBUG_ASSOC("Network '%s (%s)' is a viable match.\n",
5783 escape_essid(network->ssid, network->ssid_len),
5784 print_mac(mac, network->bssid));
5785
5786 return 1;
5787 }
5788
5789 static void ipw_adhoc_create(struct ipw_priv *priv,
5790 struct ieee80211_network *network)
5791 {
5792 const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
5793 int i;
5794
5795 /*
5796 * For the purposes of scanning, we can set our wireless mode
5797 * to trigger scans across combinations of bands, but when it
5798 	 * comes to creating a new ad-hoc network, we have to tell the FW
5799 * exactly which band to use.
5800 *
5801 * We also have the possibility of an invalid channel for the
5802 	 * chosen band.  Attempting to create a new ad-hoc network
5803 	 * with an invalid channel for the wireless mode will trigger a
5804 * FW fatal error.
5805 *
5806 */
5807 switch (ieee80211_is_valid_channel(priv->ieee, priv->channel)) {
5808 case IEEE80211_52GHZ_BAND:
5809 network->mode = IEEE_A;
5810 i = ieee80211_channel_to_index(priv->ieee, priv->channel);
5811 BUG_ON(i == -1);
5812 if (geo->a[i].flags & IEEE80211_CH_PASSIVE_ONLY) {
5813 IPW_WARNING("Overriding invalid channel\n");
5814 priv->channel = geo->a[0].channel;
5815 }
5816 break;
5817
5818 case IEEE80211_24GHZ_BAND:
5819 if (priv->ieee->mode & IEEE_G)
5820 network->mode = IEEE_G;
5821 else
5822 network->mode = IEEE_B;
5823 i = ieee80211_channel_to_index(priv->ieee, priv->channel);
5824 BUG_ON(i == -1);
5825 if (geo->bg[i].flags & IEEE80211_CH_PASSIVE_ONLY) {
5826 IPW_WARNING("Overriding invalid channel\n");
5827 priv->channel = geo->bg[0].channel;
5828 }
5829 break;
5830
5831 default:
5832 IPW_WARNING("Overriding invalid channel\n");
5833 if (priv->ieee->mode & IEEE_A) {
5834 network->mode = IEEE_A;
5835 priv->channel = geo->a[0].channel;
5836 } else if (priv->ieee->mode & IEEE_G) {
5837 network->mode = IEEE_G;
5838 priv->channel = geo->bg[0].channel;
5839 } else {
5840 network->mode = IEEE_B;
5841 priv->channel = geo->bg[0].channel;
5842 }
5843 break;
5844 }
5845
5846 network->channel = priv->channel;
5847 priv->config |= CFG_ADHOC_PERSIST;
5848 ipw_create_bssid(priv, network->bssid);
5849 network->ssid_len = priv->essid_len;
5850 memcpy(network->ssid, priv->essid, priv->essid_len);
5851 memset(&network->stats, 0, sizeof(network->stats));
5852 network->capability = WLAN_CAPABILITY_IBSS;
5853 if (!(priv->config & CFG_PREAMBLE_LONG))
5854 network->capability |= WLAN_CAPABILITY_SHORT_PREAMBLE;
5855 if (priv->capability & CAP_PRIVACY_ON)
5856 network->capability |= WLAN_CAPABILITY_PRIVACY;
5857 network->rates_len = min(priv->rates.num_rates, MAX_RATES_LENGTH);
5858 memcpy(network->rates, priv->rates.supported_rates, network->rates_len);
5859 network->rates_ex_len = priv->rates.num_rates - network->rates_len;
5860 memcpy(network->rates_ex,
5861 &priv->rates.supported_rates[network->rates_len],
5862 network->rates_ex_len);
5863 network->last_scanned = 0;
5864 network->flags = 0;
5865 network->last_associate = 0;
5866 network->time_stamp[0] = 0;
5867 network->time_stamp[1] = 0;
5868 network->beacon_interval = 100; /* Default */
5869 network->listen_interval = 10; /* Default */
5870 network->atim_window = 0; /* Default */
5871 network->wpa_ie_len = 0;
5872 network->rsn_ie_len = 0;
5873 }
5874
5875 static void ipw_send_tgi_tx_key(struct ipw_priv *priv, int type, int index)
5876 {
5877 struct ipw_tgi_tx_key key;
5878
5879 if (!(priv->ieee->sec.flags & (1 << index)))
5880 return;
5881
5882 key.key_id = index;
5883 memcpy(key.key, priv->ieee->sec.keys[index], SCM_TEMPORAL_KEY_LENGTH);
5884 key.security_type = type;
5885 key.station_index = 0; /* always 0 for BSS */
5886 key.flags = 0;
5887 /* 0 for new key; previous value of counter (after fatal error) */
5888 key.tx_counter[0] = cpu_to_le32(0);
5889 key.tx_counter[1] = cpu_to_le32(0);
5890
5891 ipw_send_cmd_pdu(priv, IPW_CMD_TGI_TX_KEY, sizeof(key), &key);
5892 }
5893
5894 static void ipw_send_wep_keys(struct ipw_priv *priv, int type)
5895 {
5896 struct ipw_wep_key key;
5897 int i;
5898
5899 key.cmd_id = DINO_CMD_WEP_KEY;
5900 key.seq_num = 0;
5901
5902 	/* Note: AES keys cannot be set multiple times.
5903 	 * Only set them the first time. */
5904 for (i = 0; i < 4; i++) {
5905 key.key_index = i | type;
5906 if (!(priv->ieee->sec.flags & (1 << i))) {
5907 key.key_size = 0;
5908 continue;
5909 }
5910
5911 key.key_size = priv->ieee->sec.key_sizes[i];
5912 memcpy(key.key, priv->ieee->sec.keys[i], key.key_size);
5913
5914 ipw_send_cmd_pdu(priv, IPW_CMD_WEP_KEY, sizeof(key), &key);
5915 }
5916 }
5917
5918 static void ipw_set_hw_decrypt_unicast(struct ipw_priv *priv, int level)
5919 {
5920 if (priv->ieee->host_encrypt)
5921 return;
5922
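	/* The firmware can decrypt WEP (level 1) and CCMP (level 3) unicast
	 * frames itself; TKIP (level 2) still needs host decryption, and
	 * level 0 disables hardware unicast decryption entirely. */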
5923 switch (level) {
5924 case SEC_LEVEL_3:
5925 priv->sys_config.disable_unicast_decryption = 0;
5926 priv->ieee->host_decrypt = 0;
5927 break;
5928 case SEC_LEVEL_2:
5929 priv->sys_config.disable_unicast_decryption = 1;
5930 priv->ieee->host_decrypt = 1;
5931 break;
5932 case SEC_LEVEL_1:
5933 priv->sys_config.disable_unicast_decryption = 0;
5934 priv->ieee->host_decrypt = 0;
5935 break;
5936 case SEC_LEVEL_0:
5937 priv->sys_config.disable_unicast_decryption = 1;
5938 break;
5939 default:
5940 break;
5941 }
5942 }
5943
5944 static void ipw_set_hw_decrypt_multicast(struct ipw_priv *priv, int level)
5945 {
5946 if (priv->ieee->host_encrypt)
5947 return;
5948
5949 switch (level) {
5950 case SEC_LEVEL_3:
5951 priv->sys_config.disable_multicast_decryption = 0;
5952 break;
5953 case SEC_LEVEL_2:
5954 priv->sys_config.disable_multicast_decryption = 1;
5955 break;
5956 case SEC_LEVEL_1:
5957 priv->sys_config.disable_multicast_decryption = 0;
5958 break;
5959 case SEC_LEVEL_0:
5960 priv->sys_config.disable_multicast_decryption = 1;
5961 break;
5962 default:
5963 break;
5964 }
5965 }
5966
5967 static void ipw_set_hwcrypto_keys(struct ipw_priv *priv)
5968 {
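	/* Hand the keys the current security level needs to the firmware:
	 * the CCMP TX key (and, if the firmware decrypts multicast, the
	 * group keys) at level 3, the TKIP TX key at level 2, and the four
	 * WEP keys at level 1. */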
5969 switch (priv->ieee->sec.level) {
5970 case SEC_LEVEL_3:
5971 if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
5972 ipw_send_tgi_tx_key(priv,
5973 DCT_FLAG_EXT_SECURITY_CCM,
5974 priv->ieee->sec.active_key);
5975
5976 if (!priv->ieee->host_mc_decrypt)
5977 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_CCM);
5978 break;
5979 case SEC_LEVEL_2:
5980 if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
5981 ipw_send_tgi_tx_key(priv,
5982 DCT_FLAG_EXT_SECURITY_TKIP,
5983 priv->ieee->sec.active_key);
5984 break;
5985 case SEC_LEVEL_1:
5986 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
5987 ipw_set_hw_decrypt_unicast(priv, priv->ieee->sec.level);
5988 ipw_set_hw_decrypt_multicast(priv, priv->ieee->sec.level);
5989 break;
5990 case SEC_LEVEL_0:
5991 default:
5992 break;
5993 }
5994 }
5995
5996 static void ipw_adhoc_check(void *data)
5997 {
5998 struct ipw_priv *priv = data;
5999
6000 if (priv->missed_adhoc_beacons++ > priv->disassociate_threshold &&
6001 !(priv->config & CFG_ADHOC_PERSIST)) {
6002 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
6003 IPW_DL_STATE | IPW_DL_ASSOC,
6004 "Missed beacon: %d - disassociate\n",
6005 priv->missed_adhoc_beacons);
6006 ipw_remove_current_network(priv);
6007 ipw_disassociate(priv);
6008 return;
6009 }
6010
6011 queue_delayed_work(priv->workqueue, &priv->adhoc_check,
6012 priv->assoc_request.beacon_interval);
6013 }
6014
6015 static void ipw_bg_adhoc_check(struct work_struct *work)
6016 {
6017 struct ipw_priv *priv =
6018 container_of(work, struct ipw_priv, adhoc_check.work);
6019 mutex_lock(&priv->mutex);
6020 ipw_adhoc_check(priv);
6021 mutex_unlock(&priv->mutex);
6022 }
6023
6024 static void ipw_debug_config(struct ipw_priv *priv)
6025 {
6026 DECLARE_MAC_BUF(mac);
6027 IPW_DEBUG_INFO("Scan completed, no valid APs matched "
6028 "[CFG 0x%08X]\n", priv->config);
6029 if (priv->config & CFG_STATIC_CHANNEL)
6030 IPW_DEBUG_INFO("Channel locked to %d\n", priv->channel);
6031 else
6032 IPW_DEBUG_INFO("Channel unlocked.\n");
6033 if (priv->config & CFG_STATIC_ESSID)
6034 IPW_DEBUG_INFO("ESSID locked to '%s'\n",
6035 escape_essid(priv->essid, priv->essid_len));
6036 else
6037 IPW_DEBUG_INFO("ESSID unlocked.\n");
6038 if (priv->config & CFG_STATIC_BSSID)
6039 IPW_DEBUG_INFO("BSSID locked to %s\n",
6040 print_mac(mac, priv->bssid));
6041 else
6042 IPW_DEBUG_INFO("BSSID unlocked.\n");
6043 if (priv->capability & CAP_PRIVACY_ON)
6044 IPW_DEBUG_INFO("PRIVACY on\n");
6045 else
6046 IPW_DEBUG_INFO("PRIVACY off\n");
6047 IPW_DEBUG_INFO("RATE MASK: 0x%08X\n", priv->rates_mask);
6048 }
6049
6050 static void ipw_set_fixed_rate(struct ipw_priv *priv, int mode)
6051 {
6052 /* TODO: Verify that this works... */
6053 struct ipw_fixed_rate fr = {
6054 .tx_rates = priv->rates_mask
6055 };
6056 u32 reg;
6057 u16 mask = 0;
6058
6059 /* Identify 'current FW band' and match it with the fixed
6060 * Tx rates */
6061
6062 switch (priv->ieee->freq_band) {
6063 case IEEE80211_52GHZ_BAND: /* A only */
6064 /* IEEE_A */
6065 if (priv->rates_mask & ~IEEE80211_OFDM_RATES_MASK) {
6066 /* Invalid fixed rate mask */
6067 IPW_DEBUG_WX
6068 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6069 fr.tx_rates = 0;
6070 break;
6071 }
6072
6073 fr.tx_rates >>= IEEE80211_OFDM_SHIFT_MASK_A;
6074 break;
6075
6076 default: /* 2.4Ghz or Mixed */
6077 /* IEEE_B */
6078 if (mode == IEEE_B) {
6079 if (fr.tx_rates & ~IEEE80211_CCK_RATES_MASK) {
6080 /* Invalid fixed rate mask */
6081 IPW_DEBUG_WX
6082 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6083 fr.tx_rates = 0;
6084 }
6085 break;
6086 }
6087
6088 /* IEEE_G */
6089 if (fr.tx_rates & ~(IEEE80211_CCK_RATES_MASK |
6090 IEEE80211_OFDM_RATES_MASK)) {
6091 /* Invalid fixed rate mask */
6092 IPW_DEBUG_WX
6093 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6094 fr.tx_rates = 0;
6095 break;
6096 }
6097
6098 if (IEEE80211_OFDM_RATE_6MB_MASK & fr.tx_rates) {
6099 mask |= (IEEE80211_OFDM_RATE_6MB_MASK >> 1);
6100 fr.tx_rates &= ~IEEE80211_OFDM_RATE_6MB_MASK;
6101 }
6102
6103 if (IEEE80211_OFDM_RATE_9MB_MASK & fr.tx_rates) {
6104 mask |= (IEEE80211_OFDM_RATE_9MB_MASK >> 1);
6105 fr.tx_rates &= ~IEEE80211_OFDM_RATE_9MB_MASK;
6106 }
6107
6108 if (IEEE80211_OFDM_RATE_12MB_MASK & fr.tx_rates) {
6109 mask |= (IEEE80211_OFDM_RATE_12MB_MASK >> 1);
6110 fr.tx_rates &= ~IEEE80211_OFDM_RATE_12MB_MASK;
6111 }
6112
6113 fr.tx_rates |= mask;
6114 break;
6115 }
6116
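	/* Read the location of the firmware's fixed-rate override entry
	 * from IPW_MEM_FIXED_OVERRIDE and write the adjusted rate mask
	 * there as a raw 32-bit value. */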
6117 reg = ipw_read32(priv, IPW_MEM_FIXED_OVERRIDE);
6118 ipw_write_reg32(priv, reg, *(u32 *) & fr);
6119 }
6120
6121 static void ipw_abort_scan(struct ipw_priv *priv)
6122 {
6123 int err;
6124
6125 if (priv->status & STATUS_SCAN_ABORTING) {
6126 IPW_DEBUG_HC("Ignoring concurrent scan abort request.\n");
6127 return;
6128 }
6129 priv->status |= STATUS_SCAN_ABORTING;
6130
6131 err = ipw_send_scan_abort(priv);
6132 if (err)
6133 IPW_DEBUG_HC("Request to abort scan failed.\n");
6134 }
6135
6136 static void ipw_add_scan_channels(struct ipw_priv *priv,
6137 struct ipw_scan_request_ext *scan,
6138 int scan_type)
6139 {
6140 int channel_index = 0;
6141 const struct ieee80211_geo *geo;
6142 int i;
6143
6144 geo = ieee80211_get_geo(priv->ieee);
6145
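	/*
	 * channels_list is built as one segment per band: the byte at
	 * 'start' is a header encoding the band in its top two bits
	 * (IPW_A_MODE/IPW_B_MODE << 6) and the number of channels that
	 * follow in its low six bits, e.g. three 5.2GHz channels would
	 * look like { (IPW_A_MODE << 6) | 3, 36, 40, 44 }.  channel_index
	 * is pre-incremented below so the 'start' slot stays reserved
	 * until the segment is complete.
	 */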
6146 if (priv->ieee->freq_band & IEEE80211_52GHZ_BAND) {
6147 int start = channel_index;
6148 for (i = 0; i < geo->a_channels; i++) {
6149 if ((priv->status & STATUS_ASSOCIATED) &&
6150 geo->a[i].channel == priv->channel)
6151 continue;
6152 channel_index++;
6153 scan->channels_list[channel_index] = geo->a[i].channel;
6154 ipw_set_scan_type(scan, channel_index,
6155 geo->a[i].
6156 flags & IEEE80211_CH_PASSIVE_ONLY ?
6157 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN :
6158 scan_type);
6159 }
6160
6161 if (start != channel_index) {
6162 scan->channels_list[start] = (u8) (IPW_A_MODE << 6) |
6163 (channel_index - start);
6164 channel_index++;
6165 }
6166 }
6167
6168 if (priv->ieee->freq_band & IEEE80211_24GHZ_BAND) {
6169 int start = channel_index;
6170 if (priv->config & CFG_SPEED_SCAN) {
6171 int index;
6172 u8 channels[IEEE80211_24GHZ_CHANNELS] = {
6173 /* nop out the list */
6174 [0] = 0
6175 };
6176
6177 u8 channel;
6178 while (channel_index < IPW_SCAN_CHANNELS) {
6179 channel =
6180 priv->speed_scan[priv->speed_scan_pos];
6181 if (channel == 0) {
6182 priv->speed_scan_pos = 0;
6183 channel = priv->speed_scan[0];
6184 }
6185 if ((priv->status & STATUS_ASSOCIATED) &&
6186 channel == priv->channel) {
6187 priv->speed_scan_pos++;
6188 continue;
6189 }
6190
6191 /* If this channel has already been
6192 * added in scan, break from loop
6193 * and this will be the first channel
6194 * in the next scan.
6195 */
6196 if (channels[channel - 1] != 0)
6197 break;
6198
6199 channels[channel - 1] = 1;
6200 priv->speed_scan_pos++;
6201 channel_index++;
6202 scan->channels_list[channel_index] = channel;
6203 index =
6204 ieee80211_channel_to_index(priv->ieee, channel);
6205 ipw_set_scan_type(scan, channel_index,
6206 geo->bg[index].
6207 flags &
6208 IEEE80211_CH_PASSIVE_ONLY ?
6209 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
6210 : scan_type);
6211 }
6212 } else {
6213 for (i = 0; i < geo->bg_channels; i++) {
6214 if ((priv->status & STATUS_ASSOCIATED) &&
6215 geo->bg[i].channel == priv->channel)
6216 continue;
6217 channel_index++;
6218 scan->channels_list[channel_index] =
6219 geo->bg[i].channel;
6220 ipw_set_scan_type(scan, channel_index,
6221 geo->bg[i].
6222 flags &
6223 IEEE80211_CH_PASSIVE_ONLY ?
6224 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
6225 : scan_type);
6226 }
6227 }
6228
6229 if (start != channel_index) {
6230 scan->channels_list[start] = (u8) (IPW_B_MODE << 6) |
6231 (channel_index - start);
6232 }
6233 }
6234 }
6235
6236 static int ipw_request_scan_helper(struct ipw_priv *priv, int type)
6237 {
6238 struct ipw_scan_request_ext scan;
6239 int err = 0, scan_type;
6240
6241 if (!(priv->status & STATUS_INIT) ||
6242 (priv->status & STATUS_EXIT_PENDING))
6243 return 0;
6244
6245 mutex_lock(&priv->mutex);
6246
6247 if (priv->status & STATUS_SCANNING) {
6248 IPW_DEBUG_HC("Concurrent scan requested. Ignoring.\n");
6249 priv->status |= STATUS_SCAN_PENDING;
6250 goto done;
6251 }
6252
6253 if (!(priv->status & STATUS_SCAN_FORCED) &&
6254 priv->status & STATUS_SCAN_ABORTING) {
6255 IPW_DEBUG_HC("Scan request while abort pending. Queuing.\n");
6256 priv->status |= STATUS_SCAN_PENDING;
6257 goto done;
6258 }
6259
6260 if (priv->status & STATUS_RF_KILL_MASK) {
6261 IPW_DEBUG_HC("Aborting scan due to RF Kill activation\n");
6262 priv->status |= STATUS_SCAN_PENDING;
6263 goto done;
6264 }
6265
6266 memset(&scan, 0, sizeof(scan));
6267 scan.full_scan_index = cpu_to_le32(ieee80211_get_scans(priv->ieee));
6268
6269 if (type == IW_SCAN_TYPE_PASSIVE) {
6270 IPW_DEBUG_WX("use passive scanning\n");
6271 scan_type = IPW_SCAN_PASSIVE_FULL_DWELL_SCAN;
6272 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6273 cpu_to_le16(120);
6274 ipw_add_scan_channels(priv, &scan, scan_type);
6275 goto send_request;
6276 }
6277
6278 /* Use active scan by default. */
6279 if (priv->config & CFG_SPEED_SCAN)
6280 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6281 cpu_to_le16(30);
6282 else
6283 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6284 cpu_to_le16(20);
6285
6286 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] =
6287 cpu_to_le16(20);
6288
6289 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = cpu_to_le16(120);
6290
6291 #ifdef CONFIG_IPW2200_MONITOR
6292 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
6293 u8 channel;
6294 u8 band = 0;
6295
6296 switch (ieee80211_is_valid_channel(priv->ieee, priv->channel)) {
6297 case IEEE80211_52GHZ_BAND:
6298 band = (u8) (IPW_A_MODE << 6) | 1;
6299 channel = priv->channel;
6300 break;
6301
6302 case IEEE80211_24GHZ_BAND:
6303 band = (u8) (IPW_B_MODE << 6) | 1;
6304 channel = priv->channel;
6305 break;
6306
6307 default:
6308 band = (u8) (IPW_B_MODE << 6) | 1;
6309 channel = 9;
6310 break;
6311 }
6312
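		/* Single-channel segment: 'band' encodes the mode in its top
		 * two bits and a channel count of one in its low bits. */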
6313 scan.channels_list[0] = band;
6314 scan.channels_list[1] = channel;
6315 ipw_set_scan_type(&scan, 1, IPW_SCAN_PASSIVE_FULL_DWELL_SCAN);
6316
6317 /* NOTE: The card will sit on this channel for this time
6318 * period. Scan aborts are timing sensitive and frequently
6319 * result in firmware restarts. As such, it is best to
6320 * set a small dwell_time here and just keep re-issuing
6321 * scans. Otherwise fast channel hopping will not actually
6322 * hop channels.
6323 *
6324 * TODO: Move SPEED SCAN support to all modes and bands */
6325 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6326 cpu_to_le16(2000);
6327 } else {
6328 #endif /* CONFIG_IPW2200_MONITOR */
6329 /* If we are roaming, then make this a directed scan for the
6330 * current network. Otherwise, ensure that every other scan
6331 * is a fast channel hop scan */
6332 if ((priv->status & STATUS_ROAMING)
6333 || (!(priv->status & STATUS_ASSOCIATED)
6334 && (priv->config & CFG_STATIC_ESSID)
6335 && (le32_to_cpu(scan.full_scan_index) % 2))) {
6336 err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
6337 if (err) {
6338 IPW_DEBUG_HC("Attempt to send SSID command "
6339 "failed.\n");
6340 goto done;
6341 }
6342
6343 scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
6344 } else
6345 scan_type = IPW_SCAN_ACTIVE_BROADCAST_SCAN;
6346
6347 ipw_add_scan_channels(priv, &scan, scan_type);
6348 #ifdef CONFIG_IPW2200_MONITOR
6349 }
6350 #endif
6351
6352 send_request:
6353 err = ipw_send_scan_request_ext(priv, &scan);
6354 if (err) {
6355 IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
6356 goto done;
6357 }
6358
6359 priv->status |= STATUS_SCANNING;
6360 priv->status &= ~STATUS_SCAN_PENDING;
6361 queue_delayed_work(priv->workqueue, &priv->scan_check,
6362 IPW_SCAN_CHECK_WATCHDOG);
6363 done:
6364 mutex_unlock(&priv->mutex);
6365 return err;
6366 }
6367
6368 static void ipw_request_passive_scan(struct work_struct *work)
6369 {
6370 struct ipw_priv *priv =
6371 container_of(work, struct ipw_priv, request_passive_scan);
6372 ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE);
6373 }
6374
6375 static void ipw_request_scan(struct work_struct *work)
6376 {
6377 struct ipw_priv *priv =
6378 container_of(work, struct ipw_priv, request_scan.work);
6379 ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE);
6380 }
6381
6382 static void ipw_bg_abort_scan(struct work_struct *work)
6383 {
6384 struct ipw_priv *priv =
6385 container_of(work, struct ipw_priv, abort_scan);
6386 mutex_lock(&priv->mutex);
6387 ipw_abort_scan(priv);
6388 mutex_unlock(&priv->mutex);
6389 }
6390
6391 static int ipw_wpa_enable(struct ipw_priv *priv, int value)
6392 {
6393 /* This is called when wpa_supplicant loads and closes the driver
6394 * interface. */
6395 priv->ieee->wpa_enabled = value;
6396 return 0;
6397 }
6398
6399 static int ipw_wpa_set_auth_algs(struct ipw_priv *priv, int value)
6400 {
6401 struct ieee80211_device *ieee = priv->ieee;
6402 struct ieee80211_security sec = {
6403 .flags = SEC_AUTH_MODE,
6404 };
6405 int ret = 0;
6406
6407 if (value & IW_AUTH_ALG_SHARED_KEY) {
6408 sec.auth_mode = WLAN_AUTH_SHARED_KEY;
6409 ieee->open_wep = 0;
6410 } else if (value & IW_AUTH_ALG_OPEN_SYSTEM) {
6411 sec.auth_mode = WLAN_AUTH_OPEN;
6412 ieee->open_wep = 1;
6413 } else if (value & IW_AUTH_ALG_LEAP) {
6414 sec.auth_mode = WLAN_AUTH_LEAP;
6415 ieee->open_wep = 1;
6416 } else
6417 return -EINVAL;
6418
6419 if (ieee->set_security)
6420 ieee->set_security(ieee->dev, &sec);
6421 else
6422 ret = -EOPNOTSUPP;
6423
6424 return ret;
6425 }
6426
6427 static void ipw_wpa_assoc_frame(struct ipw_priv *priv, char *wpa_ie,
6428 int wpa_ie_len)
6429 {
6430 /* make sure WPA is enabled */
6431 ipw_wpa_enable(priv, 1);
6432 }
6433
6434 static int ipw_set_rsn_capa(struct ipw_priv *priv,
6435 char *capabilities, int length)
6436 {
6437 IPW_DEBUG_HC("HOST_CMD_RSN_CAPABILITIES\n");
6438
6439 return ipw_send_cmd_pdu(priv, IPW_CMD_RSN_CAPABILITIES, length,
6440 capabilities);
6441 }
6442
6443 /*
6444 * WE-18 support
6445 */
6446
6447 /* SIOCSIWGENIE */
6448 static int ipw_wx_set_genie(struct net_device *dev,
6449 struct iw_request_info *info,
6450 union iwreq_data *wrqu, char *extra)
6451 {
6452 struct ipw_priv *priv = ieee80211_priv(dev);
6453 struct ieee80211_device *ieee = priv->ieee;
6454 u8 *buf;
6455 int err = 0;
6456
6457 if (wrqu->data.length > MAX_WPA_IE_LEN ||
6458 (wrqu->data.length && extra == NULL))
6459 return -EINVAL;
6460
6461 if (wrqu->data.length) {
6462 buf = kmalloc(wrqu->data.length, GFP_KERNEL);
6463 if (buf == NULL) {
6464 err = -ENOMEM;
6465 goto out;
6466 }
6467
6468 memcpy(buf, extra, wrqu->data.length);
6469 kfree(ieee->wpa_ie);
6470 ieee->wpa_ie = buf;
6471 ieee->wpa_ie_len = wrqu->data.length;
6472 } else {
6473 kfree(ieee->wpa_ie);
6474 ieee->wpa_ie = NULL;
6475 ieee->wpa_ie_len = 0;
6476 }
6477
6478 ipw_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len);
6479 out:
6480 return err;
6481 }
6482
6483 /* SIOCGIWGENIE */
6484 static int ipw_wx_get_genie(struct net_device *dev,
6485 struct iw_request_info *info,
6486 union iwreq_data *wrqu, char *extra)
6487 {
6488 struct ipw_priv *priv = ieee80211_priv(dev);
6489 struct ieee80211_device *ieee = priv->ieee;
6490 int err = 0;
6491
6492 if (ieee->wpa_ie_len == 0 || ieee->wpa_ie == NULL) {
6493 wrqu->data.length = 0;
6494 goto out;
6495 }
6496
6497 if (wrqu->data.length < ieee->wpa_ie_len) {
6498 err = -E2BIG;
6499 goto out;
6500 }
6501
6502 wrqu->data.length = ieee->wpa_ie_len;
6503 memcpy(extra, ieee->wpa_ie, ieee->wpa_ie_len);
6504
6505 out:
6506 return err;
6507 }
6508
6509 static int wext_cipher2level(int cipher)
6510 {
6511 switch (cipher) {
6512 case IW_AUTH_CIPHER_NONE:
6513 return SEC_LEVEL_0;
6514 case IW_AUTH_CIPHER_WEP40:
6515 case IW_AUTH_CIPHER_WEP104:
6516 return SEC_LEVEL_1;
6517 case IW_AUTH_CIPHER_TKIP:
6518 return SEC_LEVEL_2;
6519 case IW_AUTH_CIPHER_CCMP:
6520 return SEC_LEVEL_3;
6521 default:
6522 return -1;
6523 }
6524 }
6525
6526 /* SIOCSIWAUTH */
6527 static int ipw_wx_set_auth(struct net_device *dev,
6528 struct iw_request_info *info,
6529 union iwreq_data *wrqu, char *extra)
6530 {
6531 struct ipw_priv *priv = ieee80211_priv(dev);
6532 struct ieee80211_device *ieee = priv->ieee;
6533 struct iw_param *param = &wrqu->param;
6534 struct ieee80211_crypt_data *crypt;
6535 unsigned long flags;
6536 int ret = 0;
6537
6538 switch (param->flags & IW_AUTH_INDEX) {
6539 case IW_AUTH_WPA_VERSION:
6540 break;
6541 case IW_AUTH_CIPHER_PAIRWISE:
6542 ipw_set_hw_decrypt_unicast(priv,
6543 wext_cipher2level(param->value));
6544 break;
6545 case IW_AUTH_CIPHER_GROUP:
6546 ipw_set_hw_decrypt_multicast(priv,
6547 wext_cipher2level(param->value));
6548 break;
6549 case IW_AUTH_KEY_MGMT:
6550 /*
6551 * ipw2200 does not use these parameters
6552 */
6553 break;
6554
6555 case IW_AUTH_TKIP_COUNTERMEASURES:
6556 crypt = priv->ieee->crypt[priv->ieee->tx_keyidx];
6557 if (!crypt || !crypt->ops->set_flags || !crypt->ops->get_flags)
6558 break;
6559
6560 flags = crypt->ops->get_flags(crypt->priv);
6561
6562 if (param->value)
6563 flags |= IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6564 else
6565 flags &= ~IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6566
6567 crypt->ops->set_flags(flags, crypt->priv);
6568
6569 break;
6570
6571 case IW_AUTH_DROP_UNENCRYPTED:{
6572 /* HACK:
6573 *
6574 * wpa_supplicant calls set_wpa_enabled when the driver
6575 			 * is loaded and unloaded, regardless of whether WPA is
6576 			 * being used.  No other calls are made before an
6577 			 * association is expected that indicate whether
6578 			 * encryption will be used.  If encryption is not being
6579 			 * used, drop_unencrypted is set to false, else true -- we
6580 			 * can use this to determine whether the CAP_PRIVACY_ON
6581 			 * bit should be set.
6582 */
6583 struct ieee80211_security sec = {
6584 .flags = SEC_ENABLED,
6585 .enabled = param->value,
6586 };
6587 priv->ieee->drop_unencrypted = param->value;
6588 /* We only change SEC_LEVEL for open mode. Others
6589 * are set by ipw_wpa_set_encryption.
6590 */
6591 if (!param->value) {
6592 sec.flags |= SEC_LEVEL;
6593 sec.level = SEC_LEVEL_0;
6594 } else {
6595 sec.flags |= SEC_LEVEL;
6596 sec.level = SEC_LEVEL_1;
6597 }
6598 if (priv->ieee->set_security)
6599 priv->ieee->set_security(priv->ieee->dev, &sec);
6600 break;
6601 }
6602
6603 case IW_AUTH_80211_AUTH_ALG:
6604 ret = ipw_wpa_set_auth_algs(priv, param->value);
6605 break;
6606
6607 case IW_AUTH_WPA_ENABLED:
6608 ret = ipw_wpa_enable(priv, param->value);
6609 ipw_disassociate(priv);
6610 break;
6611
6612 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6613 ieee->ieee802_1x = param->value;
6614 break;
6615
6616 case IW_AUTH_PRIVACY_INVOKED:
6617 ieee->privacy_invoked = param->value;
6618 break;
6619
6620 default:
6621 return -EOPNOTSUPP;
6622 }
6623 return ret;
6624 }
6625
6626 /* SIOCGIWAUTH */
6627 static int ipw_wx_get_auth(struct net_device *dev,
6628 struct iw_request_info *info,
6629 union iwreq_data *wrqu, char *extra)
6630 {
6631 struct ipw_priv *priv = ieee80211_priv(dev);
6632 struct ieee80211_device *ieee = priv->ieee;
6633 struct ieee80211_crypt_data *crypt;
6634 struct iw_param *param = &wrqu->param;
6635 int ret = 0;
6636
6637 switch (param->flags & IW_AUTH_INDEX) {
6638 case IW_AUTH_WPA_VERSION:
6639 case IW_AUTH_CIPHER_PAIRWISE:
6640 case IW_AUTH_CIPHER_GROUP:
6641 case IW_AUTH_KEY_MGMT:
6642 /*
6643 * wpa_supplicant will control these internally
6644 */
6645 ret = -EOPNOTSUPP;
6646 break;
6647
6648 case IW_AUTH_TKIP_COUNTERMEASURES:
6649 crypt = priv->ieee->crypt[priv->ieee->tx_keyidx];
6650 if (!crypt || !crypt->ops->get_flags)
6651 break;
6652
6653 param->value = (crypt->ops->get_flags(crypt->priv) &
6654 IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) ? 1 : 0;
6655
6656 break;
6657
6658 case IW_AUTH_DROP_UNENCRYPTED:
6659 param->value = ieee->drop_unencrypted;
6660 break;
6661
6662 case IW_AUTH_80211_AUTH_ALG:
6663 param->value = ieee->sec.auth_mode;
6664 break;
6665
6666 case IW_AUTH_WPA_ENABLED:
6667 param->value = ieee->wpa_enabled;
6668 break;
6669
6670 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6671 param->value = ieee->ieee802_1x;
6672 break;
6673
6674 case IW_AUTH_ROAMING_CONTROL:
6675 case IW_AUTH_PRIVACY_INVOKED:
6676 param->value = ieee->privacy_invoked;
6677 break;
6678
6679 default:
6680 return -EOPNOTSUPP;
6681 }
6682 return 0;
6683 }
6684
6685 /* SIOCSIWENCODEEXT */
6686 static int ipw_wx_set_encodeext(struct net_device *dev,
6687 struct iw_request_info *info,
6688 union iwreq_data *wrqu, char *extra)
6689 {
6690 struct ipw_priv *priv = ieee80211_priv(dev);
6691 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
6692
6693 if (hwcrypto) {
6694 if (ext->alg == IW_ENCODE_ALG_TKIP) {
6695 /* IPW HW can't build TKIP MIC,
6696 host decryption still needed */
6697 if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY)
6698 priv->ieee->host_mc_decrypt = 1;
6699 else {
6700 priv->ieee->host_encrypt = 0;
6701 priv->ieee->host_encrypt_msdu = 1;
6702 priv->ieee->host_decrypt = 1;
6703 }
6704 } else {
6705 priv->ieee->host_encrypt = 0;
6706 priv->ieee->host_encrypt_msdu = 0;
6707 priv->ieee->host_decrypt = 0;
6708 priv->ieee->host_mc_decrypt = 0;
6709 }
6710 }
6711
6712 return ieee80211_wx_set_encodeext(priv->ieee, info, wrqu, extra);
6713 }
6714
6715 /* SIOCGIWENCODEEXT */
6716 static int ipw_wx_get_encodeext(struct net_device *dev,
6717 struct iw_request_info *info,
6718 union iwreq_data *wrqu, char *extra)
6719 {
6720 struct ipw_priv *priv = ieee80211_priv(dev);
6721 return ieee80211_wx_get_encodeext(priv->ieee, info, wrqu, extra);
6722 }
6723
6724 /* SIOCSIWMLME */
6725 static int ipw_wx_set_mlme(struct net_device *dev,
6726 struct iw_request_info *info,
6727 union iwreq_data *wrqu, char *extra)
6728 {
6729 struct ipw_priv *priv = ieee80211_priv(dev);
6730 struct iw_mlme *mlme = (struct iw_mlme *)extra;
6731 u16 reason;
6732
6733 reason = cpu_to_le16(mlme->reason_code);
6734
6735 switch (mlme->cmd) {
6736 case IW_MLME_DEAUTH:
6737 /* silently ignore */
6738 break;
6739
6740 case IW_MLME_DISASSOC:
6741 ipw_disassociate(priv);
6742 break;
6743
6744 default:
6745 return -EOPNOTSUPP;
6746 }
6747 return 0;
6748 }
6749
6750 #ifdef CONFIG_IPW2200_QOS
6751
6752 /* QoS */
6753 /*
6754 * get the modulation type of the current network or
6755  * the card's current mode
6756 */
6757 static u8 ipw_qos_current_mode(struct ipw_priv * priv)
6758 {
6759 u8 mode = 0;
6760
6761 if (priv->status & STATUS_ASSOCIATED) {
6762 unsigned long flags;
6763
6764 spin_lock_irqsave(&priv->ieee->lock, flags);
6765 mode = priv->assoc_network->mode;
6766 spin_unlock_irqrestore(&priv->ieee->lock, flags);
6767 } else {
6768 mode = priv->ieee->mode;
6769 }
6770 IPW_DEBUG_QOS("QoS network/card mode %d \n", mode);
6771 return mode;
6772 }
6773
6774 /*
6775 * Handle management frame beacon and probe response
6776 */
6777 static int ipw_qos_handle_probe_response(struct ipw_priv *priv,
6778 int active_network,
6779 struct ieee80211_network *network)
6780 {
6781 u32 size = sizeof(struct ieee80211_qos_parameters);
6782
6783 if (network->capability & WLAN_CAPABILITY_IBSS)
6784 network->qos_data.active = network->qos_data.supported;
6785
6786 if (network->flags & NETWORK_HAS_QOS_MASK) {
6787 if (active_network &&
6788 (network->flags & NETWORK_HAS_QOS_PARAMETERS))
6789 network->qos_data.active = network->qos_data.supported;
6790
6791 if ((network->qos_data.active == 1) && (active_network == 1) &&
6792 (network->flags & NETWORK_HAS_QOS_PARAMETERS) &&
6793 (network->qos_data.old_param_count !=
6794 network->qos_data.param_count)) {
6795 network->qos_data.old_param_count =
6796 network->qos_data.param_count;
6797 schedule_work(&priv->qos_activate);
6798 IPW_DEBUG_QOS("QoS parameters change call "
6799 "qos_activate\n");
6800 }
6801 } else {
6802 if ((priv->ieee->mode == IEEE_B) || (network->mode == IEEE_B))
6803 memcpy(&network->qos_data.parameters,
6804 &def_parameters_CCK, size);
6805 else
6806 memcpy(&network->qos_data.parameters,
6807 &def_parameters_OFDM, size);
6808
6809 if ((network->qos_data.active == 1) && (active_network == 1)) {
6810 IPW_DEBUG_QOS("QoS was disabled call qos_activate \n");
6811 schedule_work(&priv->qos_activate);
6812 }
6813
6814 network->qos_data.active = 0;
6815 network->qos_data.supported = 0;
6816 }
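	/* While associated in ad-hoc mode, a beacon from a different BSSID
	 * that advertises our (non-hidden) ESSID indicates a split IBSS,
	 * so schedule the merge worker. */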
6817 if ((priv->status & STATUS_ASSOCIATED) &&
6818 (priv->ieee->iw_mode == IW_MODE_ADHOC) && (active_network == 0)) {
6819 if (memcmp(network->bssid, priv->bssid, ETH_ALEN))
6820 if ((network->capability & WLAN_CAPABILITY_IBSS) &&
6821 !(network->flags & NETWORK_EMPTY_ESSID))
6822 if ((network->ssid_len ==
6823 priv->assoc_network->ssid_len) &&
6824 !memcmp(network->ssid,
6825 priv->assoc_network->ssid,
6826 network->ssid_len)) {
6827 queue_work(priv->workqueue,
6828 &priv->merge_networks);
6829 }
6830 }
6831
6832 return 0;
6833 }
6834
6835 /*
6836  * This function sets up the firmware to support QoS. It sends
6837 * IPW_CMD_QOS_PARAMETERS and IPW_CMD_WME_INFO
6838 */
6839 static int ipw_qos_activate(struct ipw_priv *priv,
6840 struct ieee80211_qos_data *qos_network_data)
6841 {
6842 int err;
6843 struct ieee80211_qos_parameters qos_parameters[QOS_QOS_SETS];
6844 struct ieee80211_qos_parameters *active_one = NULL;
6845 u32 size = sizeof(struct ieee80211_qos_parameters);
6846 u32 burst_duration;
6847 int i;
6848 u8 type;
6849
6850 type = ipw_qos_current_mode(priv);
6851
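	/*
	 * The IPW_CMD_QOS_PARAMETERS command carries three parameter sets
	 * back to back: default CCK, default OFDM and the set currently
	 * active for this association.  Fill in the defaults first; the
	 * active set is chosen below.
	 */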
6852 active_one = &(qos_parameters[QOS_PARAM_SET_DEF_CCK]);
6853 memcpy(active_one, priv->qos_data.def_qos_parm_CCK, size);
6854 active_one = &(qos_parameters[QOS_PARAM_SET_DEF_OFDM]);
6855 memcpy(active_one, priv->qos_data.def_qos_parm_OFDM, size);
6856
6857 if (qos_network_data == NULL) {
6858 if (type == IEEE_B) {
6859 IPW_DEBUG_QOS("QoS activate network mode %d\n", type);
6860 active_one = &def_parameters_CCK;
6861 } else
6862 active_one = &def_parameters_OFDM;
6863
6864 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6865 burst_duration = ipw_qos_get_burst_duration(priv);
6866 for (i = 0; i < QOS_QUEUE_NUM; i++)
6867 qos_parameters[QOS_PARAM_SET_ACTIVE].tx_op_limit[i] =
6868 (u16)burst_duration;
6869 } else if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
6870 if (type == IEEE_B) {
6871 			IPW_DEBUG_QOS("QoS activate IBSS network mode %d\n",
6872 type);
6873 if (priv->qos_data.qos_enable == 0)
6874 active_one = &def_parameters_CCK;
6875 else
6876 active_one = priv->qos_data.def_qos_parm_CCK;
6877 } else {
6878 if (priv->qos_data.qos_enable == 0)
6879 active_one = &def_parameters_OFDM;
6880 else
6881 active_one = priv->qos_data.def_qos_parm_OFDM;
6882 }
6883 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6884 } else {
6885 unsigned long flags;
6886 int active;
6887
6888 spin_lock_irqsave(&priv->ieee->lock, flags);
6889 active_one = &(qos_network_data->parameters);
6890 qos_network_data->old_param_count =
6891 qos_network_data->param_count;
6892 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6893 active = qos_network_data->supported;
6894 spin_unlock_irqrestore(&priv->ieee->lock, flags);
6895
6896 if (active == 0) {
6897 burst_duration = ipw_qos_get_burst_duration(priv);
6898 for (i = 0; i < QOS_QUEUE_NUM; i++)
6899 qos_parameters[QOS_PARAM_SET_ACTIVE].
6900 tx_op_limit[i] = (u16)burst_duration;
6901 }
6902 }
6903
6904 IPW_DEBUG_QOS("QoS sending IPW_CMD_QOS_PARAMETERS\n");
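	/* Convert the 16-bit fields (cw_min, cw_max, tx_op_limit) of all
	 * three parameter sets to little-endian in place before handing
	 * them to the firmware. */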
6905 for (i = 0; i < 3; i++) {
6906 int j;
6907 for (j = 0; j < QOS_QUEUE_NUM; j++) {
6908 qos_parameters[i].cw_min[j] = cpu_to_le16(qos_parameters[i].cw_min[j]);
6909 qos_parameters[i].cw_max[j] = cpu_to_le16(qos_parameters[i].cw_max[j]);
6910 qos_parameters[i].tx_op_limit[j] = cpu_to_le16(qos_parameters[i].tx_op_limit[j]);
6911 }
6912 }
6913
6914 err = ipw_send_qos_params_command(priv,
6915 (struct ieee80211_qos_parameters *)
6916 &(qos_parameters[0]));
6917 if (err)
6918 IPW_DEBUG_QOS("QoS IPW_CMD_QOS_PARAMETERS failed\n");
6919
6920 return err;
6921 }
6922
6923 /*
6924 * send IPW_CMD_WME_INFO to the firmware
6925 */
6926 static int ipw_qos_set_info_element(struct ipw_priv *priv)
6927 {
6928 int ret = 0;
6929 struct ieee80211_qos_information_element qos_info;
6930
6931 if (priv == NULL)
6932 return -1;
6933
6934 qos_info.elementID = QOS_ELEMENT_ID;
6935 qos_info.length = sizeof(struct ieee80211_qos_information_element) - 2;
6936
6937 qos_info.version = QOS_VERSION_1;
6938 qos_info.ac_info = 0;
6939
6940 memcpy(qos_info.qui, qos_oui, QOS_OUI_LEN);
6941 qos_info.qui_type = QOS_OUI_TYPE;
6942 qos_info.qui_subtype = QOS_OUI_INFO_SUB_TYPE;
6943
6944 ret = ipw_send_qos_info_command(priv, &qos_info);
6945 if (ret != 0) {
6946 IPW_DEBUG_QOS("QoS error calling ipw_send_qos_info_command\n");
6947 }
6948 return ret;
6949 }
6950
6951 /*
6952  * Set the QoS parameters for the association request structure
6953 */
6954 static int ipw_qos_association(struct ipw_priv *priv,
6955 struct ieee80211_network *network)
6956 {
6957 int err = 0;
6958 struct ieee80211_qos_data *qos_data = NULL;
6959 struct ieee80211_qos_data ibss_data = {
6960 .supported = 1,
6961 .active = 1,
6962 };
6963
6964 switch (priv->ieee->iw_mode) {
6965 case IW_MODE_ADHOC:
6966 BUG_ON(!(network->capability & WLAN_CAPABILITY_IBSS));
6967
6968 qos_data = &ibss_data;
6969 break;
6970
6971 case IW_MODE_INFRA:
6972 qos_data = &network->qos_data;
6973 break;
6974
6975 default:
6976 BUG();
6977 break;
6978 }
6979
6980 err = ipw_qos_activate(priv, qos_data);
6981 if (err) {
6982 priv->assoc_request.policy_support &= ~HC_QOS_SUPPORT_ASSOC;
6983 return err;
6984 }
6985
6986 if (priv->qos_data.qos_enable && qos_data->supported) {
6987 IPW_DEBUG_QOS("QoS will be enabled for this association\n");
6988 priv->assoc_request.policy_support |= HC_QOS_SUPPORT_ASSOC;
6989 return ipw_qos_set_info_element(priv);
6990 }
6991
6992 return 0;
6993 }
6994
6995 /*
6996  * Handle the beaconing responses: if the QoS settings we get from the
6997  * network differ from those recorded at association, adjust the QoS
6998  * settings accordingly.
6999 */
7000 static int ipw_qos_association_resp(struct ipw_priv *priv,
7001 struct ieee80211_network *network)
7002 {
7003 int ret = 0;
7004 unsigned long flags;
7005 u32 size = sizeof(struct ieee80211_qos_parameters);
7006 int set_qos_param = 0;
7007
7008 if ((priv == NULL) || (network == NULL) ||
7009 (priv->assoc_network == NULL))
7010 return ret;
7011
7012 if (!(priv->status & STATUS_ASSOCIATED))
7013 return ret;
7014
7015 	if (priv->ieee->iw_mode != IW_MODE_INFRA)
7016 return ret;
7017
7018 spin_lock_irqsave(&priv->ieee->lock, flags);
7019 if (network->flags & NETWORK_HAS_QOS_PARAMETERS) {
7020 memcpy(&priv->assoc_network->qos_data, &network->qos_data,
7021 sizeof(struct ieee80211_qos_data));
7022 priv->assoc_network->qos_data.active = 1;
7023 if ((network->qos_data.old_param_count !=
7024 network->qos_data.param_count)) {
7025 set_qos_param = 1;
7026 network->qos_data.old_param_count =
7027 network->qos_data.param_count;
7028 }
7029
7030 } else {
7031 if ((network->mode == IEEE_B) || (priv->ieee->mode == IEEE_B))
7032 memcpy(&priv->assoc_network->qos_data.parameters,
7033 &def_parameters_CCK, size);
7034 else
7035 memcpy(&priv->assoc_network->qos_data.parameters,
7036 &def_parameters_OFDM, size);
7037 priv->assoc_network->qos_data.active = 0;
7038 priv->assoc_network->qos_data.supported = 0;
7039 set_qos_param = 1;
7040 }
7041
7042 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7043
7044 if (set_qos_param == 1)
7045 schedule_work(&priv->qos_activate);
7046
7047 return ret;
7048 }
7049
7050 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv)
7051 {
7052 u32 ret = 0;
7053
7054 	if (priv == NULL)
7055 return 0;
7056
7057 if (!(priv->ieee->modulation & IEEE80211_OFDM_MODULATION))
7058 ret = priv->qos_data.burst_duration_CCK;
7059 else
7060 ret = priv->qos_data.burst_duration_OFDM;
7061
7062 return ret;
7063 }
7064
7065 /*
7066  * Initialize the global QoS settings
7067 */
7068 static void ipw_qos_init(struct ipw_priv *priv, int enable,
7069 int burst_enable, u32 burst_duration_CCK,
7070 u32 burst_duration_OFDM)
7071 {
7072 priv->qos_data.qos_enable = enable;
7073
7074 if (priv->qos_data.qos_enable) {
7075 priv->qos_data.def_qos_parm_CCK = &def_qos_parameters_CCK;
7076 priv->qos_data.def_qos_parm_OFDM = &def_qos_parameters_OFDM;
7077 IPW_DEBUG_QOS("QoS is enabled\n");
7078 } else {
7079 priv->qos_data.def_qos_parm_CCK = &def_parameters_CCK;
7080 priv->qos_data.def_qos_parm_OFDM = &def_parameters_OFDM;
7081 IPW_DEBUG_QOS("QoS is not enabled\n");
7082 }
7083
7084 priv->qos_data.burst_enable = burst_enable;
7085
7086 if (burst_enable) {
7087 priv->qos_data.burst_duration_CCK = burst_duration_CCK;
7088 priv->qos_data.burst_duration_OFDM = burst_duration_OFDM;
7089 } else {
7090 priv->qos_data.burst_duration_CCK = 0;
7091 priv->qos_data.burst_duration_OFDM = 0;
7092 }
7093 }
7094
7095 /*
7096 * map the packet priority to the right TX Queue
7097 */
7098 static int ipw_get_tx_queue_number(struct ipw_priv *priv, u16 priority)
7099 {
7100 if (priority > 7 || !priv->qos_data.qos_enable)
7101 priority = 0;
7102
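	/* from_priority_to_tx_queue[] stores 1-based queue numbers, hence
	 * the subtraction to get a zero-based TX queue index. */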
7103 return from_priority_to_tx_queue[priority] - 1;
7104 }
7105
7106 static int ipw_is_qos_active(struct net_device *dev,
7107 struct sk_buff *skb)
7108 {
7109 struct ipw_priv *priv = ieee80211_priv(dev);
7110 struct ieee80211_qos_data *qos_data = NULL;
7111 int active, supported;
7112 u8 *daddr = skb->data + ETH_ALEN;
7113 int unicast = !is_multicast_ether_addr(daddr);
7114
7115 if (!(priv->status & STATUS_ASSOCIATED))
7116 return 0;
7117
7118 qos_data = &priv->assoc_network->qos_data;
7119
7120 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7121 if (unicast == 0)
7122 qos_data->active = 0;
7123 else
7124 qos_data->active = qos_data->supported;
7125 }
7126 active = qos_data->active;
7127 supported = qos_data->supported;
7128 IPW_DEBUG_QOS("QoS %d network is QoS active %d supported %d "
7129 "unicast %d\n",
7130 priv->qos_data.qos_enable, active, supported, unicast);
7131 if (active && priv->qos_data.qos_enable)
7132 return 1;
7133
7134 return 0;
7135
7136 }
7137 /*
7138 * add QoS parameter to the TX command
7139 */
7140 static int ipw_qos_set_tx_queue_command(struct ipw_priv *priv,
7141 u16 priority,
7142 struct tfd_data *tfd)
7143 {
7144 int tx_queue_id = 0;
7145
7146
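	/* Flag the TFD as a QoS frame and, if this queue is in the
	 * configured no-ACK mask, clear the ACK-required flag and set the
	 * no-ACK bit in the QoS control field. */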
7147 tx_queue_id = from_priority_to_tx_queue[priority] - 1;
7148 tfd->tx_flags_ext |= DCT_FLAG_EXT_QOS_ENABLED;
7149
7150 if (priv->qos_data.qos_no_ack_mask & (1UL << tx_queue_id)) {
7151 tfd->tx_flags &= ~DCT_FLAG_ACK_REQD;
7152 tfd->tfd.tfd_26.mchdr.qos_ctrl |= cpu_to_le16(CTRL_QOS_NO_ACK);
7153 }
7154 return 0;
7155 }
7156
7157 /*
7158  * Background work to run the QoS activate functionality
7159 */
7160 static void ipw_bg_qos_activate(struct work_struct *work)
7161 {
7162 struct ipw_priv *priv =
7163 container_of(work, struct ipw_priv, qos_activate);
7164
7165 if (priv == NULL)
7166 return;
7167
7168 mutex_lock(&priv->mutex);
7169
7170 if (priv->status & STATUS_ASSOCIATED)
7171 ipw_qos_activate(priv, &(priv->assoc_network->qos_data));
7172
7173 mutex_unlock(&priv->mutex);
7174 }
7175
7176 static int ipw_handle_probe_response(struct net_device *dev,
7177 struct ieee80211_probe_response *resp,
7178 struct ieee80211_network *network)
7179 {
7180 struct ipw_priv *priv = ieee80211_priv(dev);
7181 int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7182 (network == priv->assoc_network));
7183
7184 ipw_qos_handle_probe_response(priv, active_network, network);
7185
7186 return 0;
7187 }
7188
7189 static int ipw_handle_beacon(struct net_device *dev,
7190 struct ieee80211_beacon *resp,
7191 struct ieee80211_network *network)
7192 {
7193 struct ipw_priv *priv = ieee80211_priv(dev);
7194 int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7195 (network == priv->assoc_network));
7196
7197 ipw_qos_handle_probe_response(priv, active_network, network);
7198
7199 return 0;
7200 }
7201
7202 static int ipw_handle_assoc_response(struct net_device *dev,
7203 struct ieee80211_assoc_response *resp,
7204 struct ieee80211_network *network)
7205 {
7206 struct ipw_priv *priv = ieee80211_priv(dev);
7207 ipw_qos_association_resp(priv, network);
7208 return 0;
7209 }
7210
7211 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct ieee80211_qos_parameters
7212 *qos_param)
7213 {
7214 return ipw_send_cmd_pdu(priv, IPW_CMD_QOS_PARAMETERS,
7215 sizeof(*qos_param) * 3, qos_param);
7216 }
7217
7218 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct ieee80211_qos_information_element
7219 *qos_param)
7220 {
7221 return ipw_send_cmd_pdu(priv, IPW_CMD_WME_INFO, sizeof(*qos_param),
7222 qos_param);
7223 }
7224
7225 #endif /* CONFIG_IPW2200_QOS */
7226
7227 static int ipw_associate_network(struct ipw_priv *priv,
7228 struct ieee80211_network *network,
7229 struct ipw_supported_rates *rates, int roaming)
7230 {
7231 int err;
7232 DECLARE_MAC_BUF(mac);
7233
7234 if (priv->config & CFG_FIXED_RATE)
7235 ipw_set_fixed_rate(priv, network->mode);
7236
7237 if (!(priv->config & CFG_STATIC_ESSID)) {
7238 priv->essid_len = min(network->ssid_len,
7239 (u8) IW_ESSID_MAX_SIZE);
7240 memcpy(priv->essid, network->ssid, priv->essid_len);
7241 }
7242
7243 network->last_associate = jiffies;
7244
7245 memset(&priv->assoc_request, 0, sizeof(priv->assoc_request));
7246 priv->assoc_request.channel = network->channel;
7247 priv->assoc_request.auth_key = 0;
7248
7249 if ((priv->capability & CAP_PRIVACY_ON) &&
7250 (priv->ieee->sec.auth_mode == WLAN_AUTH_SHARED_KEY)) {
7251 priv->assoc_request.auth_type = AUTH_SHARED_KEY;
7252 priv->assoc_request.auth_key = priv->ieee->sec.active_key;
7253
7254 if (priv->ieee->sec.level == SEC_LEVEL_1)
7255 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
7256
7257 } else if ((priv->capability & CAP_PRIVACY_ON) &&
7258 (priv->ieee->sec.auth_mode == WLAN_AUTH_LEAP))
7259 priv->assoc_request.auth_type = AUTH_LEAP;
7260 else
7261 priv->assoc_request.auth_type = AUTH_OPEN;
7262
7263 if (priv->ieee->wpa_ie_len) {
7264 priv->assoc_request.policy_support = 0x02; /* RSN active */
7265 ipw_set_rsn_capa(priv, priv->ieee->wpa_ie,
7266 priv->ieee->wpa_ie_len);
7267 }
7268
7269 /*
7270 * It is valid for our ieee device to support multiple modes, but
7271 	 * when it comes to associating with a given network we have to choose
7272 * just one mode.
7273 */
7274 if (network->mode & priv->ieee->mode & IEEE_A)
7275 priv->assoc_request.ieee_mode = IPW_A_MODE;
7276 else if (network->mode & priv->ieee->mode & IEEE_G)
7277 priv->assoc_request.ieee_mode = IPW_G_MODE;
7278 else if (network->mode & priv->ieee->mode & IEEE_B)
7279 priv->assoc_request.ieee_mode = IPW_B_MODE;
7280
7281 priv->assoc_request.capability = network->capability;
7282 if ((network->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
7283 && !(priv->config & CFG_PREAMBLE_LONG)) {
7284 priv->assoc_request.preamble_length = DCT_FLAG_SHORT_PREAMBLE;
7285 } else {
7286 priv->assoc_request.preamble_length = DCT_FLAG_LONG_PREAMBLE;
7287
7288 /* Clear the short preamble if we won't be supporting it */
7289 priv->assoc_request.capability &=
7290 ~WLAN_CAPABILITY_SHORT_PREAMBLE;
7291 }
7292
7293 /* Clear capability bits that aren't used in Ad Hoc */
7294 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7295 priv->assoc_request.capability &=
7296 ~WLAN_CAPABILITY_SHORT_SLOT_TIME;
7297
7298 	IPW_DEBUG_ASSOC("%sssociation attempt: '%s', channel %d, "
7299 "802.11%c [%d], %s[:%s], enc=%s%s%s%c%c\n",
7300 roaming ? "Rea" : "A",
7301 escape_essid(priv->essid, priv->essid_len),
7302 network->channel,
7303 ipw_modes[priv->assoc_request.ieee_mode],
7304 rates->num_rates,
7305 (priv->assoc_request.preamble_length ==
7306 DCT_FLAG_LONG_PREAMBLE) ? "long" : "short",
7307 network->capability &
7308 WLAN_CAPABILITY_SHORT_PREAMBLE ? "short" : "long",
7309 priv->capability & CAP_PRIVACY_ON ? "on " : "off",
7310 priv->capability & CAP_PRIVACY_ON ?
7311 (priv->capability & CAP_SHARED_KEY ? "(shared)" :
7312 "(open)") : "",
7313 priv->capability & CAP_PRIVACY_ON ? " key=" : "",
7314 priv->capability & CAP_PRIVACY_ON ?
7315 '1' + priv->ieee->sec.active_key : '.',
7316 priv->capability & CAP_PRIVACY_ON ? '.' : ' ');
7317
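	/* A zeroed beacon timestamp means this is an ad-hoc network we
	 * created locally rather than one heard on air, so ask the firmware
	 * to start the IBSS; otherwise (re)associate and seed the request
	 * with the network's TSF. */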
7318 priv->assoc_request.beacon_interval = network->beacon_interval;
7319 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
7320 (network->time_stamp[0] == 0) && (network->time_stamp[1] == 0)) {
7321 priv->assoc_request.assoc_type = HC_IBSS_START;
7322 priv->assoc_request.assoc_tsf_msw = 0;
7323 priv->assoc_request.assoc_tsf_lsw = 0;
7324 } else {
7325 if (unlikely(roaming))
7326 priv->assoc_request.assoc_type = HC_REASSOCIATE;
7327 else
7328 priv->assoc_request.assoc_type = HC_ASSOCIATE;
7329 priv->assoc_request.assoc_tsf_msw = network->time_stamp[1];
7330 priv->assoc_request.assoc_tsf_lsw = network->time_stamp[0];
7331 }
7332
7333 memcpy(priv->assoc_request.bssid, network->bssid, ETH_ALEN);
7334
7335 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7336 memset(&priv->assoc_request.dest, 0xFF, ETH_ALEN);
7337 priv->assoc_request.atim_window = network->atim_window;
7338 } else {
7339 memcpy(priv->assoc_request.dest, network->bssid, ETH_ALEN);
7340 priv->assoc_request.atim_window = 0;
7341 }
7342
7343 priv->assoc_request.listen_interval = network->listen_interval;
7344
7345 err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
7346 if (err) {
7347 IPW_DEBUG_HC("Attempt to send SSID command failed.\n");
7348 return err;
7349 }
7350
7351 rates->ieee_mode = priv->assoc_request.ieee_mode;
7352 rates->purpose = IPW_RATE_CONNECT;
7353 ipw_send_supported_rates(priv, rates);
7354
7355 if (priv->assoc_request.ieee_mode == IPW_G_MODE)
7356 priv->sys_config.dot11g_auto_detection = 1;
7357 else
7358 priv->sys_config.dot11g_auto_detection = 0;
7359
7360 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7361 priv->sys_config.answer_broadcast_ssid_probe = 1;
7362 else
7363 priv->sys_config.answer_broadcast_ssid_probe = 0;
7364
7365 err = ipw_send_system_config(priv);
7366 if (err) {
7367 IPW_DEBUG_HC("Attempt to send sys config command failed.\n");
7368 return err;
7369 }
7370
7371 IPW_DEBUG_ASSOC("Association sensitivity: %d\n", network->stats.rssi);
7372 err = ipw_set_sensitivity(priv, network->stats.rssi + IPW_RSSI_TO_DBM);
7373 if (err) {
7374 IPW_DEBUG_HC("Attempt to send associate command failed.\n");
7375 return err;
7376 }
7377
7378 /*
7379 * If preemption is enabled, it is possible for the association
7380 * to complete before we return from ipw_send_associate. Therefore
7381 	 * we have to be sure to update our private data first.
7382 */
7383 priv->channel = network->channel;
7384 memcpy(priv->bssid, network->bssid, ETH_ALEN);
7385 priv->status |= STATUS_ASSOCIATING;
7386 priv->status &= ~STATUS_SECURITY_UPDATED;
7387
7388 priv->assoc_network = network;
7389
7390 #ifdef CONFIG_IPW2200_QOS
7391 ipw_qos_association(priv, network);
7392 #endif
7393
7394 err = ipw_send_associate(priv, &priv->assoc_request);
7395 if (err) {
7396 IPW_DEBUG_HC("Attempt to send associate command failed.\n");
7397 return err;
7398 }
7399
7400 IPW_DEBUG(IPW_DL_STATE, "associating: '%s' %s \n",
7401 escape_essid(priv->essid, priv->essid_len),
7402 print_mac(mac, priv->bssid));
7403
7404 return 0;
7405 }
7406
7407 static void ipw_roam(void *data)
7408 {
7409 struct ipw_priv *priv = data;
7410 struct ieee80211_network *network = NULL;
7411 struct ipw_network_match match = {
7412 .network = priv->assoc_network
7413 };
7414
7415 /* The roaming process is as follows:
7416 *
7417 * 1. Missed beacon threshold triggers the roaming process by
7418 * setting the status ROAM bit and requesting a scan.
7419 * 2. When the scan completes, it schedules the ROAM work
7420 * 3. The ROAM work looks at all of the known networks for one that
7421 	 * is a better network than the currently associated one.  If
7422 	 * none is found, the ROAM process is over (ROAM bit cleared)
7423 * 4. If a better network is found, a disassociation request is
7424 * sent.
7425 * 5. When the disassociation completes, the roam work is again
7426 * scheduled. The second time through, the driver is no longer
7427 * associated, and the newly selected network is sent an
7428 * association request.
7429 	 * 6. At this point, the roaming process is complete and the ROAM
7430 * status bit is cleared.
7431 */
7432
7433 /* If we are no longer associated, and the roaming bit is no longer
7434 * set, then we are not actively roaming, so just return */
7435 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ROAMING)))
7436 return;
7437
7438 if (priv->status & STATUS_ASSOCIATED) {
7439 /* First pass through ROAM process -- look for a better
7440 * network */
7441 unsigned long flags;
7442 u8 rssi = priv->assoc_network->stats.rssi;
7443 priv->assoc_network->stats.rssi = -128;
7444 spin_lock_irqsave(&priv->ieee->lock, flags);
7445 list_for_each_entry(network, &priv->ieee->network_list, list) {
7446 if (network != priv->assoc_network)
7447 ipw_best_network(priv, &match, network, 1);
7448 }
7449 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7450 priv->assoc_network->stats.rssi = rssi;
7451
7452 if (match.network == priv->assoc_network) {
7453 IPW_DEBUG_ASSOC("No better APs in this network to "
7454 "roam to.\n");
7455 priv->status &= ~STATUS_ROAMING;
7456 ipw_debug_config(priv);
7457 return;
7458 }
7459
7460 ipw_send_disassociate(priv, 1);
7461 priv->assoc_network = match.network;
7462
7463 return;
7464 }
7465
7466 /* Second pass through ROAM process -- request association */
7467 ipw_compatible_rates(priv, priv->assoc_network, &match.rates);
7468 ipw_associate_network(priv, priv->assoc_network, &match.rates, 1);
7469 priv->status &= ~STATUS_ROAMING;
7470 }
7471
7472 static void ipw_bg_roam(struct work_struct *work)
7473 {
7474 struct ipw_priv *priv =
7475 container_of(work, struct ipw_priv, roam);
7476 mutex_lock(&priv->mutex);
7477 ipw_roam(priv);
7478 mutex_unlock(&priv->mutex);
7479 }
7480
7481 static int ipw_associate(void *data)
7482 {
7483 struct ipw_priv *priv = data;
7484
7485 struct ieee80211_network *network = NULL;
7486 struct ipw_network_match match = {
7487 .network = NULL
7488 };
7489 struct ipw_supported_rates *rates;
7490 struct list_head *element;
7491 unsigned long flags;
7492
7493 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
7494 IPW_DEBUG_ASSOC("Not attempting association (monitor mode)\n");
7495 return 0;
7496 }
7497
7498 if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
7499 IPW_DEBUG_ASSOC("Not attempting association (already in "
7500 "progress)\n");
7501 return 0;
7502 }
7503
7504 if (priv->status & STATUS_DISASSOCIATING) {
7505 IPW_DEBUG_ASSOC("Not attempting association (in "
7506 "disassociating)\n ");
7507 queue_work(priv->workqueue, &priv->associate);
7508 return 0;
7509 }
7510
7511 if (!ipw_is_init(priv) || (priv->status & STATUS_SCANNING)) {
7512 IPW_DEBUG_ASSOC("Not attempting association (scanning or not "
7513 "initialized)\n");
7514 return 0;
7515 }
7516
7517 if (!(priv->config & CFG_ASSOCIATE) &&
7518 !(priv->config & (CFG_STATIC_ESSID |
7519 CFG_STATIC_CHANNEL | CFG_STATIC_BSSID))) {
7520 IPW_DEBUG_ASSOC("Not attempting association (associate=0)\n");
7521 return 0;
7522 }
7523
7524 /* Protect our use of the network_list */
7525 spin_lock_irqsave(&priv->ieee->lock, flags);
7526 list_for_each_entry(network, &priv->ieee->network_list, list)
7527 ipw_best_network(priv, &match, network, 0);
7528
7529 network = match.network;
7530 rates = &match.rates;
7531
7532 if (network == NULL &&
7533 priv->ieee->iw_mode == IW_MODE_ADHOC &&
7534 priv->config & CFG_ADHOC_CREATE &&
7535 priv->config & CFG_STATIC_ESSID &&
7536 priv->config & CFG_STATIC_CHANNEL &&
7537 !list_empty(&priv->ieee->network_free_list)) {
7538 element = priv->ieee->network_free_list.next;
7539 network = list_entry(element, struct ieee80211_network, list);
7540 ipw_adhoc_create(priv, network);
7541 rates = &priv->rates;
7542 list_del(element);
7543 list_add_tail(&network->list, &priv->ieee->network_list);
7544 }
7545 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7546
7547 /* If we reached the end of the list, then we don't have any valid
7548 * matching APs */
7549 if (!network) {
7550 ipw_debug_config(priv);
7551
7552 if (!(priv->status & STATUS_SCANNING)) {
7553 if (!(priv->config & CFG_SPEED_SCAN))
7554 queue_delayed_work(priv->workqueue,
7555 &priv->request_scan,
7556 SCAN_INTERVAL);
7557 else
7558 queue_delayed_work(priv->workqueue,
7559 &priv->request_scan, 0);
7560 }
7561
7562 return 0;
7563 }
7564
7565 ipw_associate_network(priv, network, rates, 0);
7566
7567 return 1;
7568 }
7569
7570 static void ipw_bg_associate(struct work_struct *work)
7571 {
7572 struct ipw_priv *priv =
7573 container_of(work, struct ipw_priv, associate);
7574 mutex_lock(&priv->mutex);
7575 ipw_associate(priv);
7576 mutex_unlock(&priv->mutex);
7577 }
7578
7579 static void ipw_rebuild_decrypted_skb(struct ipw_priv *priv,
7580 struct sk_buff *skb)
7581 {
7582 struct ieee80211_hdr *hdr;
7583 u16 fc;
7584
7585 hdr = (struct ieee80211_hdr *)skb->data;
7586 fc = le16_to_cpu(hdr->frame_ctl);
7587 if (!(fc & IEEE80211_FCTL_PROTECTED))
7588 return;
7589
7590 fc &= ~IEEE80211_FCTL_PROTECTED;
7591 hdr->frame_ctl = cpu_to_le16(fc);
7592 switch (priv->ieee->sec.level) {
7593 case SEC_LEVEL_3:
7594 /* Remove CCMP HDR */
7595 memmove(skb->data + IEEE80211_3ADDR_LEN,
7596 skb->data + IEEE80211_3ADDR_LEN + 8,
7597 skb->len - IEEE80211_3ADDR_LEN - 8);
7598 skb_trim(skb, skb->len - 16); /* CCMP_HDR_LEN + CCMP_MIC_LEN */
7599 break;
7600 case SEC_LEVEL_2:
7601 break;
7602 case SEC_LEVEL_1:
7603 /* Remove IV */
7604 memmove(skb->data + IEEE80211_3ADDR_LEN,
7605 skb->data + IEEE80211_3ADDR_LEN + 4,
7606 skb->len - IEEE80211_3ADDR_LEN - 4);
7607 skb_trim(skb, skb->len - 8); /* IV + ICV */
7608 break;
7609 case SEC_LEVEL_0:
7610 break;
7611 default:
7612  printk(KERN_ERR "Unknown security level %d\n",
7613 priv->ieee->sec.level);
7614 break;
7615 }
7616 }
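
/* Sketch only: the byte counts stripped by ipw_rebuild_decrypted_skb()
 * collected in one place.  The struct and helper below are illustrative
 * and unused; the numbers simply restate the memmove()/skb_trim()
 * arithmetic above (CCMP: 8-byte header + 8-byte MIC, WEP: 4-byte IV +
 * 4-byte ICV). */
struct ipw_example_sec_overhead {
	int hdr;	/* bytes removed right after the 802.11 header */
	int total;	/* total reduction of skb->len */
};

static inline struct ipw_example_sec_overhead ipw_example_overhead(int level)
{
	struct ipw_example_sec_overhead o = { 0, 0 };

	switch (level) {
	case SEC_LEVEL_3:	/* CCMP */
		o.hdr = 8;
		o.total = 16;
		break;
	case SEC_LEVEL_1:	/* WEP */
		o.hdr = 4;
		o.total = 8;
		break;
	}
	return o;
}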
7617
7618 static void ipw_handle_data_packet(struct ipw_priv *priv,
7619 struct ipw_rx_mem_buffer *rxb,
7620 struct ieee80211_rx_stats *stats)
7621 {
7622 struct ieee80211_hdr_4addr *hdr;
7623 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7624
7625 /* We received data from the HW, so stop the watchdog */
7626 priv->net_dev->trans_start = jiffies;
7627
7628 /* We only process data packets if the
7629 * interface is open */
7630 if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7631 skb_tailroom(rxb->skb))) {
7632 priv->ieee->stats.rx_errors++;
7633 priv->wstats.discard.misc++;
7634 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7635 return;
7636 } else if (unlikely(!netif_running(priv->net_dev))) {
7637 priv->ieee->stats.rx_dropped++;
7638 priv->wstats.discard.misc++;
7639 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7640 return;
7641 }
7642
7643 /* Advance skb->data to the start of the actual payload */
7644 skb_reserve(rxb->skb, offsetof(struct ipw_rx_packet, u.frame.data));
7645
7646 /* Set the size of the skb to the size of the frame */
7647 skb_put(rxb->skb, le16_to_cpu(pkt->u.frame.length));
7648
7649 IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7650
7651 /* HW decrypt will not clear the WEP bit, MIC, PN, etc. */
7652 hdr = (struct ieee80211_hdr_4addr *)rxb->skb->data;
7653 if (priv->ieee->iw_mode != IW_MODE_MONITOR &&
7654 (is_multicast_ether_addr(hdr->addr1) ?
7655 !priv->ieee->host_mc_decrypt : !priv->ieee->host_decrypt))
7656 ipw_rebuild_decrypted_skb(priv, rxb->skb);
7657
7658 if (!ieee80211_rx(priv->ieee, rxb->skb, stats))
7659 priv->ieee->stats.rx_errors++;
7660 else { /* ieee80211_rx succeeded, so it now owns the SKB */
7661 rxb->skb = NULL;
7662 __ipw_led_activity_on(priv);
7663 }
7664 }
7665
7666 #ifdef CONFIG_IPW2200_RADIOTAP
7667 static void ipw_handle_data_packet_monitor(struct ipw_priv *priv,
7668 struct ipw_rx_mem_buffer *rxb,
7669 struct ieee80211_rx_stats *stats)
7670 {
7671 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7672 struct ipw_rx_frame *frame = &pkt->u.frame;
7673
7674 /* initial pull of some data */
7675 u16 received_channel = frame->received_channel;
7676 u8 antennaAndPhy = frame->antennaAndPhy;
7677 s8 antsignal = frame->rssi_dbm - IPW_RSSI_TO_DBM; /* call it signed anyhow */
7678 u16 pktrate = frame->rate;
7679
7680 /* Magic struct that slots into the radiotap header -- no reason
7681 * to build this manually element by element, we can write it much
7682 * more efficiently than we can parse it. ORDER MATTERS HERE */
7683 struct ipw_rt_hdr *ipw_rt;
7684
7685 short len = le16_to_cpu(pkt->u.frame.length);
7686
7687 /* We received data from the HW, so stop the watchdog */
7688 priv->net_dev->trans_start = jiffies;
7689
7690 /* We only process data packets if the
7691 * interface is open */
7692 if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7693 skb_tailroom(rxb->skb))) {
7694 priv->ieee->stats.rx_errors++;
7695 priv->wstats.discard.misc++;
7696 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7697 return;
7698 } else if (unlikely(!netif_running(priv->net_dev))) {
7699 priv->ieee->stats.rx_dropped++;
7700 priv->wstats.discard.misc++;
7701 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7702 return;
7703 }
7704
7705 /* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
7706 * that now */
7707 if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
7708 /* FIXME: Should alloc bigger skb instead */
7709 priv->ieee->stats.rx_dropped++;
7710 priv->wstats.discard.misc++;
7711 IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
7712 return;
7713 }
7714
7715 /* copy the frame itself */
7716 memmove(rxb->skb->data + sizeof(struct ipw_rt_hdr),
7717 rxb->skb->data + IPW_RX_FRAME_SIZE, len);
7718
7719 /* Zero the radiotap static buffer ... We only need to zero the bytes NOT
7720 * part of our real header, saves a little time.
7721 *
7722 * No longer necessary since we fill in all our data. Purge before merging
7723 * patch officially.
7724 * memset(rxb->skb->data + sizeof(struct ipw_rt_hdr), 0,
7725 * IEEE80211_RADIOTAP_HDRLEN - sizeof(struct ipw_rt_hdr));
7726 */
7727
7728 ipw_rt = (struct ipw_rt_hdr *)rxb->skb->data;
7729
7730 ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
7731 ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */
7732 ipw_rt->rt_hdr.it_len = sizeof(struct ipw_rt_hdr); /* total header+data */
7733
7734 /* Big bitfield of all the fields we provide in radiotap */
7735 ipw_rt->rt_hdr.it_present =
7736 ((1 << IEEE80211_RADIOTAP_TSFT) |
7737 (1 << IEEE80211_RADIOTAP_FLAGS) |
7738 (1 << IEEE80211_RADIOTAP_RATE) |
7739 (1 << IEEE80211_RADIOTAP_CHANNEL) |
7740 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
7741 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
7742 (1 << IEEE80211_RADIOTAP_ANTENNA));
7743
7744 /* Zero the flags, we'll add to them as we go */
7745 ipw_rt->rt_flags = 0;
7746 ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 |
7747 frame->parent_tsf[2] << 16 |
7748 frame->parent_tsf[1] << 8 |
7749 frame->parent_tsf[0]);
7750
7751 /* Convert signal to DBM */
7752 ipw_rt->rt_dbmsignal = antsignal;
7753 ipw_rt->rt_dbmnoise = frame->noise;
7754
7755 /* Convert the channel data and set the flags */
7756 ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(received_channel));
7757 if (received_channel > 14) { /* 802.11a */
7758 ipw_rt->rt_chbitmask =
7759 cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
7760 } else if (antennaAndPhy & 32) { /* 802.11b */
7761 ipw_rt->rt_chbitmask =
7762 cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
7763 } else { /* 802.11g */
7764 ipw_rt->rt_chbitmask =
7765 (IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
7766 }
7767
7768 /* set the rate in multiples of 500k/s */
7769 switch (pktrate) {
7770 case IPW_TX_RATE_1MB:
7771 ipw_rt->rt_rate = 2;
7772 break;
7773 case IPW_TX_RATE_2MB:
7774 ipw_rt->rt_rate = 4;
7775 break;
7776 case IPW_TX_RATE_5MB:
7777 ipw_rt->rt_rate = 10;
7778 break;
7779 case IPW_TX_RATE_6MB:
7780 ipw_rt->rt_rate = 12;
7781 break;
7782 case IPW_TX_RATE_9MB:
7783 ipw_rt->rt_rate = 18;
7784 break;
7785 case IPW_TX_RATE_11MB:
7786 ipw_rt->rt_rate = 22;
7787 break;
7788 case IPW_TX_RATE_12MB:
7789 ipw_rt->rt_rate = 24;
7790 break;
7791 case IPW_TX_RATE_18MB:
7792 ipw_rt->rt_rate = 36;
7793 break;
7794 case IPW_TX_RATE_24MB:
7795 ipw_rt->rt_rate = 48;
7796 break;
7797 case IPW_TX_RATE_36MB:
7798 ipw_rt->rt_rate = 72;
7799 break;
7800 case IPW_TX_RATE_48MB:
7801 ipw_rt->rt_rate = 96;
7802 break;
7803 case IPW_TX_RATE_54MB:
7804 ipw_rt->rt_rate = 108;
7805 break;
7806 default:
7807 ipw_rt->rt_rate = 0;
7808 break;
7809 }
7810
7811 /* antenna number */
7812 ipw_rt->rt_antenna = (antennaAndPhy & 3); /* Is this right? */
7813
7814 /* set the preamble flag if we have it */
7815 if ((antennaAndPhy & 64))
7816 ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
7817
7818 /* Set the size of the skb to the size of the frame */
7819 skb_put(rxb->skb, len + sizeof(struct ipw_rt_hdr));
7820
7821 IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7822
7823 if (!ieee80211_rx(priv->ieee, rxb->skb, stats))
7824 priv->ieee->stats.rx_errors++;
7825 else { /* ieee80211_rx succeeded, so it now owns the SKB */
7826 rxb->skb = NULL;
7827 /* no LED during capture */
7828 }
7829 }
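
/* Sketch only: the rate switch above just expresses the bit rate in the
 * radiotap unit of 500 kb/s (1 Mb/s -> 2, 54 Mb/s -> 108).  The helper
 * below is hypothetical and not used by the driver. */
static inline u8 ipw_example_radiotap_rate(u32 rate_in_kbps)
{
	return rate_in_kbps / 500;
}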
7830 #endif
7831
7832 #ifdef CONFIG_IPW2200_PROMISCUOUS
7833 #define ieee80211_is_probe_response(fc) \
7834 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT && \
7835 (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP )
7836
7837 #define ieee80211_is_management(fc) \
7838 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)
7839
7840 #define ieee80211_is_control(fc) \
7841 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL)
7842
7843 #define ieee80211_is_data(fc) \
7844 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)
7845
7846 #define ieee80211_is_assoc_request(fc) \
7847 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ)
7848
7849 #define ieee80211_is_reassoc_request(fc) \
7850 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ)
7851
7852 static void ipw_handle_promiscuous_rx(struct ipw_priv *priv,
7853 struct ipw_rx_mem_buffer *rxb,
7854 struct ieee80211_rx_stats *stats)
7855 {
7856 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7857 struct ipw_rx_frame *frame = &pkt->u.frame;
7858 struct ipw_rt_hdr *ipw_rt;
7859
7860 /* First cache any information we need before we overwrite
7861 * the information provided in the skb from the hardware */
7862 struct ieee80211_hdr *hdr;
7863 u16 channel = frame->received_channel;
7864 u8 phy_flags = frame->antennaAndPhy;
7865 s8 signal = frame->rssi_dbm - IPW_RSSI_TO_DBM;
7866 s8 noise = frame->noise;
7867 u8 rate = frame->rate;
7868 short len = le16_to_cpu(pkt->u.frame.length);
7869 struct sk_buff *skb;
7870 int hdr_only = 0;
7871 u16 filter = priv->prom_priv->filter;
7872
7873 /* If the filter is set to not include Rx frames then return */
7874 if (filter & IPW_PROM_NO_RX)
7875 return;
7876
7877 /* We received data from the HW, so stop the watchdog */
7878 priv->prom_net_dev->trans_start = jiffies;
7879
7880 if (unlikely((len + IPW_RX_FRAME_SIZE) > skb_tailroom(rxb->skb))) {
7881 priv->prom_priv->ieee->stats.rx_errors++;
7882 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7883 return;
7884 }
7885
7886 /* We only process data packets if the interface is open */
7887 if (unlikely(!netif_running(priv->prom_net_dev))) {
7888 priv->prom_priv->ieee->stats.rx_dropped++;
7889 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7890 return;
7891 }
7892
7893 /* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
7894 * that now */
7895 if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
7896 /* FIXME: Should alloc bigger skb instead */
7897 priv->prom_priv->ieee->stats.rx_dropped++;
7898 IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
7899 return;
7900 }
7901
7902 hdr = (void *)rxb->skb->data + IPW_RX_FRAME_SIZE;
7903 if (ieee80211_is_management(le16_to_cpu(hdr->frame_ctl))) {
7904 if (filter & IPW_PROM_NO_MGMT)
7905 return;
7906 if (filter & IPW_PROM_MGMT_HEADER_ONLY)
7907 hdr_only = 1;
7908 } else if (ieee80211_is_control(le16_to_cpu(hdr->frame_ctl))) {
7909 if (filter & IPW_PROM_NO_CTL)
7910 return;
7911 if (filter & IPW_PROM_CTL_HEADER_ONLY)
7912 hdr_only = 1;
7913 } else if (ieee80211_is_data(le16_to_cpu(hdr->frame_ctl))) {
7914 if (filter & IPW_PROM_NO_DATA)
7915 return;
7916 if (filter & IPW_PROM_DATA_HEADER_ONLY)
7917 hdr_only = 1;
7918 }
7919
7920 /* Copy the SKB since this is for the promiscuous side */
7921 skb = skb_copy(rxb->skb, GFP_ATOMIC);
7922 if (skb == NULL) {
7923  IPW_ERROR("skb_copy failed for promiscuous copy.\n");
7924 return;
7925 }
7926
7927 /* copy the frame data to write after where the radiotap header goes */
7928 ipw_rt = (void *)skb->data;
7929
7930 if (hdr_only)
7931 len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
7932
7933 memcpy(ipw_rt->payload, hdr, len);
7934
7935 /* Zero the radiotap static buffer ... We only need to zero the bytes
7936 * NOT part of our real header, saves a little time.
7937 *
7938 * No longer necessary since we fill in all our data. Purge before
7939 * merging patch officially.
7940 * memset(rxb->skb->data + sizeof(struct ipw_rt_hdr), 0,
7941 * IEEE80211_RADIOTAP_HDRLEN - sizeof(struct ipw_rt_hdr));
7942 */
7943
7944 ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
7945 ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */
7946 ipw_rt->rt_hdr.it_len = sizeof(*ipw_rt); /* total header+data */
7947
7948 /* Set the size of the skb to the size of the frame */
7949 skb_put(skb, ipw_rt->rt_hdr.it_len + len);
7950
7951 /* Big bitfield of all the fields we provide in radiotap */
7952 ipw_rt->rt_hdr.it_present =
7953 ((1 << IEEE80211_RADIOTAP_TSFT) |
7954 (1 << IEEE80211_RADIOTAP_FLAGS) |
7955 (1 << IEEE80211_RADIOTAP_RATE) |
7956 (1 << IEEE80211_RADIOTAP_CHANNEL) |
7957 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
7958 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
7959 (1 << IEEE80211_RADIOTAP_ANTENNA));
7960
7961 /* Zero the flags, we'll add to them as we go */
7962 ipw_rt->rt_flags = 0;
7963 ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 |
7964 frame->parent_tsf[2] << 16 |
7965 frame->parent_tsf[1] << 8 |
7966 frame->parent_tsf[0]);
7967
7968 /* Convert to DBM */
7969 ipw_rt->rt_dbmsignal = signal;
7970 ipw_rt->rt_dbmnoise = noise;
7971
7972 /* Convert the channel data and set the flags */
7973 ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(channel));
7974 if (channel > 14) { /* 802.11a */
7975 ipw_rt->rt_chbitmask =
7976 cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
7977 } else if (phy_flags & (1 << 5)) { /* 802.11b */
7978 ipw_rt->rt_chbitmask =
7979 cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
7980 } else { /* 802.11g */
7981 ipw_rt->rt_chbitmask =
7982 (IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
7983 }
7984
7985 /* set the rate in multiples of 500k/s */
7986 switch (rate) {
7987 case IPW_TX_RATE_1MB:
7988 ipw_rt->rt_rate = 2;
7989 break;
7990 case IPW_TX_RATE_2MB:
7991 ipw_rt->rt_rate = 4;
7992 break;
7993 case IPW_TX_RATE_5MB:
7994 ipw_rt->rt_rate = 10;
7995 break;
7996 case IPW_TX_RATE_6MB:
7997 ipw_rt->rt_rate = 12;
7998 break;
7999 case IPW_TX_RATE_9MB:
8000 ipw_rt->rt_rate = 18;
8001 break;
8002 case IPW_TX_RATE_11MB:
8003 ipw_rt->rt_rate = 22;
8004 break;
8005 case IPW_TX_RATE_12MB:
8006 ipw_rt->rt_rate = 24;
8007 break;
8008 case IPW_TX_RATE_18MB:
8009 ipw_rt->rt_rate = 36;
8010 break;
8011 case IPW_TX_RATE_24MB:
8012 ipw_rt->rt_rate = 48;
8013 break;
8014 case IPW_TX_RATE_36MB:
8015 ipw_rt->rt_rate = 72;
8016 break;
8017 case IPW_TX_RATE_48MB:
8018 ipw_rt->rt_rate = 96;
8019 break;
8020 case IPW_TX_RATE_54MB:
8021 ipw_rt->rt_rate = 108;
8022 break;
8023 default:
8024 ipw_rt->rt_rate = 0;
8025 break;
8026 }
8027
8028 /* antenna number */
8029 ipw_rt->rt_antenna = (phy_flags & 3);
8030
8031 /* set the preamble flag if we have it */
8032 if (phy_flags & (1 << 6))
8033 ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
8034
8035 IPW_DEBUG_RX("Rx packet of %d bytes.\n", skb->len);
8036
8037 if (!ieee80211_rx(priv->prom_priv->ieee, skb, stats)) {
8038 priv->prom_priv->ieee->stats.rx_errors++;
8039 dev_kfree_skb_any(skb);
8040 }
8041 }
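
/* Sketch only: the management/control/data filtering block above reduced
 * to a single hypothetical helper that the driver does not call.  The
 * return convention is illustrative: -1 = drop the frame, 1 = keep the
 * 802.11 header only, 0 = keep the whole frame. */
static inline int ipw_example_prom_filter(u16 fc, u16 filter)
{
	if (ieee80211_is_management(fc))
		return (filter & IPW_PROM_NO_MGMT) ? -1 :
		    (filter & IPW_PROM_MGMT_HEADER_ONLY) ? 1 : 0;
	if (ieee80211_is_control(fc))
		return (filter & IPW_PROM_NO_CTL) ? -1 :
		    (filter & IPW_PROM_CTL_HEADER_ONLY) ? 1 : 0;
	if (ieee80211_is_data(fc))
		return (filter & IPW_PROM_NO_DATA) ? -1 :
		    (filter & IPW_PROM_DATA_HEADER_ONLY) ? 1 : 0;
	return 0;
}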
8042 #endif
8043
8044 static int is_network_packet(struct ipw_priv *priv,
8045 struct ieee80211_hdr_4addr *header)
8046 {
8047  /* Filter incoming packets to determine if they are targeted toward
8048 * this network, discarding packets coming from ourselves */
8049 switch (priv->ieee->iw_mode) {
8050 case IW_MODE_ADHOC: /* Header: Dest. | Source | BSSID */
8051 /* packets from our adapter are dropped (echo) */
8052 if (!memcmp(header->addr2, priv->net_dev->dev_addr, ETH_ALEN))
8053 return 0;
8054
8055 /* {broad,multi}cast packets to our BSSID go through */
8056 if (is_multicast_ether_addr(header->addr1))
8057 return !memcmp(header->addr3, priv->bssid, ETH_ALEN);
8058
8059 /* packets to our adapter go through */
8060 return !memcmp(header->addr1, priv->net_dev->dev_addr,
8061 ETH_ALEN);
8062
8063 case IW_MODE_INFRA: /* Header: Dest. | BSSID | Source */
8064 /* packets from our adapter are dropped (echo) */
8065 if (!memcmp(header->addr3, priv->net_dev->dev_addr, ETH_ALEN))
8066 return 0;
8067
8068 /* {broad,multi}cast packets to our BSS go through */
8069 if (is_multicast_ether_addr(header->addr1))
8070 return !memcmp(header->addr2, priv->bssid, ETH_ALEN);
8071
8072 /* packets to our adapter go through */
8073 return !memcmp(header->addr1, priv->net_dev->dev_addr,
8074 ETH_ALEN);
8075 }
8076
8077 return 1;
8078 }
8079
8080 #define IPW_PACKET_RETRY_TIME HZ
8081
8082 static int is_duplicate_packet(struct ipw_priv *priv,
8083 struct ieee80211_hdr_4addr *header)
8084 {
8085 u16 sc = le16_to_cpu(header->seq_ctl);
8086 u16 seq = WLAN_GET_SEQ_SEQ(sc);
8087 u16 frag = WLAN_GET_SEQ_FRAG(sc);
8088 u16 *last_seq, *last_frag;
8089 unsigned long *last_time;
8090
8091 switch (priv->ieee->iw_mode) {
8092 case IW_MODE_ADHOC:
8093 {
8094 struct list_head *p;
8095 struct ipw_ibss_seq *entry = NULL;
8096 u8 *mac = header->addr2;
8097 int index = mac[5] % IPW_IBSS_MAC_HASH_SIZE;
8098
8099 __list_for_each(p, &priv->ibss_mac_hash[index]) {
8100 entry =
8101 list_entry(p, struct ipw_ibss_seq, list);
8102 if (!memcmp(entry->mac, mac, ETH_ALEN))
8103 break;
8104 }
8105 if (p == &priv->ibss_mac_hash[index]) {
8106 entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
8107 if (!entry) {
8108 IPW_ERROR
8109 ("Cannot malloc new mac entry\n");
8110 return 0;
8111 }
8112 memcpy(entry->mac, mac, ETH_ALEN);
8113 entry->seq_num = seq;
8114 entry->frag_num = frag;
8115 entry->packet_time = jiffies;
8116 list_add(&entry->list,
8117 &priv->ibss_mac_hash[index]);
8118 return 0;
8119 }
8120 last_seq = &entry->seq_num;
8121 last_frag = &entry->frag_num;
8122 last_time = &entry->packet_time;
8123 break;
8124 }
8125 case IW_MODE_INFRA:
8126 last_seq = &priv->last_seq_num;
8127 last_frag = &priv->last_frag_num;
8128 last_time = &priv->last_packet_time;
8129 break;
8130 default:
8131 return 0;
8132 }
8133 if ((*last_seq == seq) &&
8134 time_after(*last_time + IPW_PACKET_RETRY_TIME, jiffies)) {
8135 if (*last_frag == frag)
8136 goto drop;
8137 if (*last_frag + 1 != frag)
8138 /* out-of-order fragment */
8139 goto drop;
8140 } else
8141 *last_seq = seq;
8142
8143 *last_frag = frag;
8144 *last_time = jiffies;
8145 return 0;
8146
8147 drop:
8148  /* This BUG_ON is commented out because we have observed the card
8149  * receiving duplicate packets without the FCTL_RETRY bit set in
8150  * IBSS mode with fragmentation enabled.
8151  BUG_ON(!(le16_to_cpu(header->frame_ctl) & IEEE80211_FCTL_RETRY)); */
8152 return 1;
8153 }
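
/* Sketch only: the WLAN_GET_SEQ_SEQ()/WLAN_GET_SEQ_FRAG() split used by
 * is_duplicate_packet().  The 802.11 sequence-control field packs a
 * 4-bit fragment number in bits 0-3 and a 12-bit sequence number in
 * bits 4-15, so sc == 0x1234 yields frag == 4 and seq == 0x123.  The
 * helper below is hypothetical and unused. */
static inline void ipw_example_split_seq_ctl(u16 sc, u16 *seq, u16 *frag)
{
	*frag = sc & 0x000f;
	*seq = (sc >> 4) & 0x0fff;
}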
8154
8155 static void ipw_handle_mgmt_packet(struct ipw_priv *priv,
8156 struct ipw_rx_mem_buffer *rxb,
8157 struct ieee80211_rx_stats *stats)
8158 {
8159 struct sk_buff *skb = rxb->skb;
8160 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)skb->data;
8161 struct ieee80211_hdr_4addr *header = (struct ieee80211_hdr_4addr *)
8162 (skb->data + IPW_RX_FRAME_SIZE);
8163
8164 ieee80211_rx_mgt(priv->ieee, header, stats);
8165
8166 if (priv->ieee->iw_mode == IW_MODE_ADHOC &&
8167 ((WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
8168 IEEE80211_STYPE_PROBE_RESP) ||
8169 (WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
8170 IEEE80211_STYPE_BEACON))) {
8171 if (!memcmp(header->addr3, priv->bssid, ETH_ALEN))
8172 ipw_add_station(priv, header->addr2);
8173 }
8174
8175 if (priv->config & CFG_NET_STATS) {
8176 IPW_DEBUG_HC("sending stat packet\n");
8177
8178 /* Set the size of the skb to the size of the full
8179 * ipw header and 802.11 frame */
8180 skb_put(skb, le16_to_cpu(pkt->u.frame.length) +
8181 IPW_RX_FRAME_SIZE);
8182
8183 /* Advance past the ipw packet header to the 802.11 frame */
8184 skb_pull(skb, IPW_RX_FRAME_SIZE);
8185
8186 /* Push the ieee80211_rx_stats before the 802.11 frame */
8187 memcpy(skb_push(skb, sizeof(*stats)), stats, sizeof(*stats));
8188
8189 skb->dev = priv->ieee->dev;
8190
8191 /* Point raw at the ieee80211_stats */
8192 skb_reset_mac_header(skb);
8193
8194 skb->pkt_type = PACKET_OTHERHOST;
8195 skb->protocol = __constant_htons(ETH_P_80211_STATS);
8196 memset(skb->cb, 0, sizeof(rxb->skb->cb));
8197 netif_rx(skb);
8198 rxb->skb = NULL;
8199 }
8200 }
8201
8202 /*
8203  * Main entry function for receiving a packet with 802.11 headers. This
8204  * should be called whenever the FW has notified us that there is a new
8205  * skb in the receive queue.
8206 */
8207 static void ipw_rx(struct ipw_priv *priv)
8208 {
8209 struct ipw_rx_mem_buffer *rxb;
8210 struct ipw_rx_packet *pkt;
8211 struct ieee80211_hdr_4addr *header;
8212 u32 r, w, i;
8213 u8 network_packet;
8214 DECLARE_MAC_BUF(mac);
8215 DECLARE_MAC_BUF(mac2);
8216 DECLARE_MAC_BUF(mac3);
8217
8218 r = ipw_read32(priv, IPW_RX_READ_INDEX);
8219 w = ipw_read32(priv, IPW_RX_WRITE_INDEX);
8220 i = (priv->rxq->processed + 1) % RX_QUEUE_SIZE;
8221
8222 while (i != r) {
8223 rxb = priv->rxq->queue[i];
8224 if (unlikely(rxb == NULL)) {
8225 printk(KERN_CRIT "Queue not allocated!\n");
8226 break;
8227 }
8228 priv->rxq->queue[i] = NULL;
8229
8230 pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
8231 IPW_RX_BUF_SIZE,
8232 PCI_DMA_FROMDEVICE);
8233
8234 pkt = (struct ipw_rx_packet *)rxb->skb->data;
8235 IPW_DEBUG_RX("Packet: type=%02X seq=%02X bits=%02X\n",
8236 pkt->header.message_type,
8237 pkt->header.rx_seq_num, pkt->header.control_bits);
8238
8239 switch (pkt->header.message_type) {
8240 case RX_FRAME_TYPE: /* 802.11 frame */ {
8241 struct ieee80211_rx_stats stats = {
8242 .rssi = pkt->u.frame.rssi_dbm -
8243 IPW_RSSI_TO_DBM,
8244 .signal =
8245 le16_to_cpu(pkt->u.frame.rssi_dbm) -
8246 IPW_RSSI_TO_DBM + 0x100,
8247 .noise =
8248 le16_to_cpu(pkt->u.frame.noise),
8249 .rate = pkt->u.frame.rate,
8250 .mac_time = jiffies,
8251 .received_channel =
8252 pkt->u.frame.received_channel,
8253 .freq =
8254 (pkt->u.frame.
8255 control & (1 << 0)) ?
8256 IEEE80211_24GHZ_BAND :
8257 IEEE80211_52GHZ_BAND,
8258 .len = le16_to_cpu(pkt->u.frame.length),
8259 };
8260
8261 if (stats.rssi != 0)
8262 stats.mask |= IEEE80211_STATMASK_RSSI;
8263 if (stats.signal != 0)
8264 stats.mask |= IEEE80211_STATMASK_SIGNAL;
8265 if (stats.noise != 0)
8266 stats.mask |= IEEE80211_STATMASK_NOISE;
8267 if (stats.rate != 0)
8268 stats.mask |= IEEE80211_STATMASK_RATE;
8269
8270 priv->rx_packets++;
8271
8272 #ifdef CONFIG_IPW2200_PROMISCUOUS
8273 if (priv->prom_net_dev && netif_running(priv->prom_net_dev))
8274 ipw_handle_promiscuous_rx(priv, rxb, &stats);
8275 #endif
8276
8277 #ifdef CONFIG_IPW2200_MONITOR
8278 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
8279 #ifdef CONFIG_IPW2200_RADIOTAP
8280
8281 ipw_handle_data_packet_monitor(priv,
8282 rxb,
8283 &stats);
8284 #else
8285 ipw_handle_data_packet(priv, rxb,
8286 &stats);
8287 #endif
8288 break;
8289 }
8290 #endif
8291
8292 header =
8293 (struct ieee80211_hdr_4addr *)(rxb->skb->
8294 data +
8295 IPW_RX_FRAME_SIZE);
8296 /* TODO: Check Ad-Hoc dest/source and make sure
8297 * that we are actually parsing these packets
8298 * correctly -- we should probably use the
8299 * frame control of the packet and disregard
8300 * the current iw_mode */
8301
8302 network_packet =
8303 is_network_packet(priv, header);
8304 if (network_packet && priv->assoc_network) {
8305 priv->assoc_network->stats.rssi =
8306 stats.rssi;
8307 priv->exp_avg_rssi =
8308 exponential_average(priv->exp_avg_rssi,
8309 stats.rssi, DEPTH_RSSI);
8310 }
8311
8312 IPW_DEBUG_RX("Frame: len=%u\n",
8313 le16_to_cpu(pkt->u.frame.length));
8314
8315 if (le16_to_cpu(pkt->u.frame.length) <
8316 ieee80211_get_hdrlen(le16_to_cpu(
8317 header->frame_ctl))) {
8318 IPW_DEBUG_DROP
8319 ("Received packet is too small. "
8320 "Dropping.\n");
8321 priv->ieee->stats.rx_errors++;
8322 priv->wstats.discard.misc++;
8323 break;
8324 }
8325
8326 switch (WLAN_FC_GET_TYPE
8327 (le16_to_cpu(header->frame_ctl))) {
8328
8329 case IEEE80211_FTYPE_MGMT:
8330 ipw_handle_mgmt_packet(priv, rxb,
8331 &stats);
8332 break;
8333
8334 case IEEE80211_FTYPE_CTL:
8335 break;
8336
8337 case IEEE80211_FTYPE_DATA:
8338 if (unlikely(!network_packet ||
8339 is_duplicate_packet(priv,
8340 header)))
8341 {
8342 IPW_DEBUG_DROP("Dropping: "
8343 "%s, "
8344 "%s, "
8345 "%s\n",
8346 print_mac(mac,
8347 header->
8348 addr1),
8349 print_mac(mac2,
8350 header->
8351 addr2),
8352 print_mac(mac3,
8353 header->
8354 addr3));
8355 break;
8356 }
8357
8358 ipw_handle_data_packet(priv, rxb,
8359 &stats);
8360
8361 break;
8362 }
8363 break;
8364 }
8365
8366 case RX_HOST_NOTIFICATION_TYPE:{
8367 IPW_DEBUG_RX
8368 ("Notification: subtype=%02X flags=%02X size=%d\n",
8369 pkt->u.notification.subtype,
8370 pkt->u.notification.flags,
8371 le16_to_cpu(pkt->u.notification.size));
8372 ipw_rx_notification(priv, &pkt->u.notification);
8373 break;
8374 }
8375
8376 default:
8377 IPW_DEBUG_RX("Bad Rx packet of type %d\n",
8378 pkt->header.message_type);
8379 break;
8380 }
8381
8382 /* For now we just don't re-use anything. We can tweak this
8383 * later to try and re-use notification packets and SKBs that
8384 * fail to Rx correctly */
8385 if (rxb->skb != NULL) {
8386 dev_kfree_skb_any(rxb->skb);
8387 rxb->skb = NULL;
8388 }
8389
8390 pci_unmap_single(priv->pci_dev, rxb->dma_addr,
8391 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
8392 list_add_tail(&rxb->list, &priv->rxq->rx_used);
8393
8394 i = (i + 1) % RX_QUEUE_SIZE;
8395 }
8396
8397 /* Backtrack one entry */
8398 priv->rxq->processed = (i ? i : RX_QUEUE_SIZE) - 1;
8399
8400 ipw_rx_queue_restock(priv);
8401 }
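
/* Sketch only: the ring-index arithmetic used by ipw_rx().  Processing
 * starts one slot past 'processed', stops at the hardware read index,
 * and the final "backtrack one entry" records the last slot actually
 * handled, wrapping at RX_QUEUE_SIZE.  The helper below is hypothetical
 * and unused. */
static inline u32 ipw_example_last_processed(u32 next_unprocessed)
{
	return (next_unprocessed ? next_unprocessed : RX_QUEUE_SIZE) - 1;
}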
8402
8403 #define DEFAULT_RTS_THRESHOLD 2304U
8404 #define MIN_RTS_THRESHOLD 1U
8405 #define MAX_RTS_THRESHOLD 2304U
8406 #define DEFAULT_BEACON_INTERVAL 100U
8407 #define DEFAULT_SHORT_RETRY_LIMIT 7U
8408 #define DEFAULT_LONG_RETRY_LIMIT 4U
8409
8410 /**
8411 * ipw_sw_reset
8412 * @option: options to control different reset behaviour
8413 * 0 = reset everything except the 'disable' module_param
8414 * 1 = reset everything and print out driver info (for probe only)
8415 * 2 = reset everything
8416 */
8417 static int ipw_sw_reset(struct ipw_priv *priv, int option)
8418 {
8419 int band, modulation;
8420 int old_mode = priv->ieee->iw_mode;
8421
8422 /* Initialize module parameter values here */
8423 priv->config = 0;
8424
8425 /* We default to disabling the LED code as right now it causes
8426 * too many systems to lock up... */
8427 if (!led)
8428 priv->config |= CFG_NO_LED;
8429
8430 if (associate)
8431 priv->config |= CFG_ASSOCIATE;
8432 else
8433 IPW_DEBUG_INFO("Auto associate disabled.\n");
8434
8435 if (auto_create)
8436 priv->config |= CFG_ADHOC_CREATE;
8437 else
8438 IPW_DEBUG_INFO("Auto adhoc creation disabled.\n");
8439
8440 priv->config &= ~CFG_STATIC_ESSID;
8441 priv->essid_len = 0;
8442 memset(priv->essid, 0, IW_ESSID_MAX_SIZE);
8443
8444 if (disable && option) {
8445 priv->status |= STATUS_RF_KILL_SW;
8446 IPW_DEBUG_INFO("Radio disabled.\n");
8447 }
8448
8449 if (channel != 0) {
8450 priv->config |= CFG_STATIC_CHANNEL;
8451 priv->channel = channel;
8452 IPW_DEBUG_INFO("Bind to static channel %d\n", channel);
8453 /* TODO: Validate that provided channel is in range */
8454 }
8455 #ifdef CONFIG_IPW2200_QOS
8456 ipw_qos_init(priv, qos_enable, qos_burst_enable,
8457 burst_duration_CCK, burst_duration_OFDM);
8458 #endif /* CONFIG_IPW2200_QOS */
8459
8460 switch (mode) {
8461 case 1:
8462 priv->ieee->iw_mode = IW_MODE_ADHOC;
8463 priv->net_dev->type = ARPHRD_ETHER;
8464
8465 break;
8466 #ifdef CONFIG_IPW2200_MONITOR
8467 case 2:
8468 priv->ieee->iw_mode = IW_MODE_MONITOR;
8469 #ifdef CONFIG_IPW2200_RADIOTAP
8470 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8471 #else
8472 priv->net_dev->type = ARPHRD_IEEE80211;
8473 #endif
8474 break;
8475 #endif
8476 default:
8477 case 0:
8478 priv->net_dev->type = ARPHRD_ETHER;
8479 priv->ieee->iw_mode = IW_MODE_INFRA;
8480 break;
8481 }
8482
8483 if (hwcrypto) {
8484 priv->ieee->host_encrypt = 0;
8485 priv->ieee->host_encrypt_msdu = 0;
8486 priv->ieee->host_decrypt = 0;
8487 priv->ieee->host_mc_decrypt = 0;
8488 }
8489 IPW_DEBUG_INFO("Hardware crypto [%s]\n", hwcrypto ? "on" : "off");
8490
8491  /* IPW2200/2915 is able to do hardware fragmentation. */
8492 priv->ieee->host_open_frag = 0;
8493
8494 if ((priv->pci_dev->device == 0x4223) ||
8495 (priv->pci_dev->device == 0x4224)) {
8496 if (option == 1)
8497 printk(KERN_INFO DRV_NAME
8498 ": Detected Intel PRO/Wireless 2915ABG Network "
8499 "Connection\n");
8500 priv->ieee->abg_true = 1;
8501 band = IEEE80211_52GHZ_BAND | IEEE80211_24GHZ_BAND;
8502 modulation = IEEE80211_OFDM_MODULATION |
8503 IEEE80211_CCK_MODULATION;
8504 priv->adapter = IPW_2915ABG;
8505 priv->ieee->mode = IEEE_A | IEEE_G | IEEE_B;
8506 } else {
8507 if (option == 1)
8508 printk(KERN_INFO DRV_NAME
8509 ": Detected Intel PRO/Wireless 2200BG Network "
8510 "Connection\n");
8511
8512 priv->ieee->abg_true = 0;
8513 band = IEEE80211_24GHZ_BAND;
8514 modulation = IEEE80211_OFDM_MODULATION |
8515 IEEE80211_CCK_MODULATION;
8516 priv->adapter = IPW_2200BG;
8517 priv->ieee->mode = IEEE_G | IEEE_B;
8518 }
8519
8520 priv->ieee->freq_band = band;
8521 priv->ieee->modulation = modulation;
8522
8523 priv->rates_mask = IEEE80211_DEFAULT_RATES_MASK;
8524
8525 priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
8526 priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
8527
8528 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
8529 priv->short_retry_limit = DEFAULT_SHORT_RETRY_LIMIT;
8530 priv->long_retry_limit = DEFAULT_LONG_RETRY_LIMIT;
8531
8532 /* If power management is turned on, default to AC mode */
8533 priv->power_mode = IPW_POWER_AC;
8534 priv->tx_power = IPW_TX_POWER_DEFAULT;
8535
8536 return old_mode == priv->ieee->iw_mode;
8537 }
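
/* Sketch only: the PCI device-id test used by ipw_sw_reset() to tell
 * the A/B/G 2915ABG (0x4223/0x4224) from the B/G-only 2200BG, as a
 * hypothetical helper that the driver does not call. */
static inline int ipw_example_is_2915(unsigned short device)
{
	return device == 0x4223 || device == 0x4224;
}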
8538
8539 /*
8540 * This file defines the Wireless Extension handlers. It does not
8541 * define any methods of hardware manipulation and relies on the
8542 * functions defined in ipw_main to provide the HW interaction.
8543 *
8544 * The exception to this is the use of the ipw_get_ordinal()
8545  * function used to poll the hardware vs. making unnecessary calls.
8546 *
8547 */
8548
8549 static int ipw_wx_get_name(struct net_device *dev,
8550 struct iw_request_info *info,
8551 union iwreq_data *wrqu, char *extra)
8552 {
8553 struct ipw_priv *priv = ieee80211_priv(dev);
8554 mutex_lock(&priv->mutex);
8555 if (priv->status & STATUS_RF_KILL_MASK)
8556 strcpy(wrqu->name, "radio off");
8557 else if (!(priv->status & STATUS_ASSOCIATED))
8558 strcpy(wrqu->name, "unassociated");
8559 else
8560 snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11%c",
8561 ipw_modes[priv->assoc_request.ieee_mode]);
8562 IPW_DEBUG_WX("Name: %s\n", wrqu->name);
8563 mutex_unlock(&priv->mutex);
8564 return 0;
8565 }
8566
8567 static int ipw_set_channel(struct ipw_priv *priv, u8 channel)
8568 {
8569 if (channel == 0) {
8570 IPW_DEBUG_INFO("Setting channel to ANY (0)\n");
8571 priv->config &= ~CFG_STATIC_CHANNEL;
8572 IPW_DEBUG_ASSOC("Attempting to associate with new "
8573 "parameters.\n");
8574 ipw_associate(priv);
8575 return 0;
8576 }
8577
8578 priv->config |= CFG_STATIC_CHANNEL;
8579
8580 if (priv->channel == channel) {
8581 IPW_DEBUG_INFO("Request to set channel to current value (%d)\n",
8582 channel);
8583 return 0;
8584 }
8585
8586 IPW_DEBUG_INFO("Setting channel to %i\n", (int)channel);
8587 priv->channel = channel;
8588
8589 #ifdef CONFIG_IPW2200_MONITOR
8590 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
8591 int i;
8592 if (priv->status & STATUS_SCANNING) {
8593 IPW_DEBUG_SCAN("Scan abort triggered due to "
8594 "channel change.\n");
8595 ipw_abort_scan(priv);
8596 }
8597
8598 for (i = 1000; i && (priv->status & STATUS_SCANNING); i--)
8599 udelay(10);
8600
8601 if (priv->status & STATUS_SCANNING)
8602 IPW_DEBUG_SCAN("Still scanning...\n");
8603 else
8604 IPW_DEBUG_SCAN("Took %dms to abort current scan\n",
8605 1000 - i);
8606
8607 return 0;
8608 }
8609 #endif /* CONFIG_IPW2200_MONITOR */
8610
8611 /* Network configuration changed -- force [re]association */
8612 IPW_DEBUG_ASSOC("[re]association triggered due to channel change.\n");
8613 if (!ipw_disassociate(priv))
8614 ipw_associate(priv);
8615
8616 return 0;
8617 }
8618
8619 static int ipw_wx_set_freq(struct net_device *dev,
8620 struct iw_request_info *info,
8621 union iwreq_data *wrqu, char *extra)
8622 {
8623 struct ipw_priv *priv = ieee80211_priv(dev);
8624 const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
8625 struct iw_freq *fwrq = &wrqu->freq;
8626 int ret = 0, i;
8627 u8 channel, flags;
8628 int band;
8629
8630 if (fwrq->m == 0) {
8631 IPW_DEBUG_WX("SET Freq/Channel -> any\n");
8632 mutex_lock(&priv->mutex);
8633 ret = ipw_set_channel(priv, 0);
8634 mutex_unlock(&priv->mutex);
8635 return ret;
8636 }
8637 /* if setting by freq convert to channel */
8638 if (fwrq->e == 1) {
8639 channel = ieee80211_freq_to_channel(priv->ieee, fwrq->m);
8640 if (channel == 0)
8641 return -EINVAL;
8642 } else
8643 channel = fwrq->m;
8644
8645 if (!(band = ieee80211_is_valid_channel(priv->ieee, channel)))
8646 return -EINVAL;
8647
8648 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
8649 i = ieee80211_channel_to_index(priv->ieee, channel);
8650 if (i == -1)
8651 return -EINVAL;
8652
8653 flags = (band == IEEE80211_24GHZ_BAND) ?
8654 geo->bg[i].flags : geo->a[i].flags;
8655 if (flags & IEEE80211_CH_PASSIVE_ONLY) {
8656 IPW_DEBUG_WX("Invalid Ad-Hoc channel for 802.11a\n");
8657 return -EINVAL;
8658 }
8659 }
8660
8661 IPW_DEBUG_WX("SET Freq/Channel -> %d \n", fwrq->m);
8662 mutex_lock(&priv->mutex);
8663 ret = ipw_set_channel(priv, channel);
8664 mutex_unlock(&priv->mutex);
8665 return ret;
8666 }
8667
8668 static int ipw_wx_get_freq(struct net_device *dev,
8669 struct iw_request_info *info,
8670 union iwreq_data *wrqu, char *extra)
8671 {
8672 struct ipw_priv *priv = ieee80211_priv(dev);
8673
8674 wrqu->freq.e = 0;
8675
8676 /* If we are associated, trying to associate, or have a statically
8677 * configured CHANNEL then return that; otherwise return ANY */
8678 mutex_lock(&priv->mutex);
8679 if (priv->config & CFG_STATIC_CHANNEL ||
8680 priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED)) {
8681 int i;
8682
8683 i = ieee80211_channel_to_index(priv->ieee, priv->channel);
8684 BUG_ON(i == -1);
8685 wrqu->freq.e = 1;
8686
8687 switch (ieee80211_is_valid_channel(priv->ieee, priv->channel)) {
8688 case IEEE80211_52GHZ_BAND:
8689 wrqu->freq.m = priv->ieee->geo.a[i].freq * 100000;
8690 break;
8691
8692 case IEEE80211_24GHZ_BAND:
8693 wrqu->freq.m = priv->ieee->geo.bg[i].freq * 100000;
8694 break;
8695
8696 default:
8697 BUG();
8698 }
8699 } else
8700 wrqu->freq.m = 0;
8701
8702 mutex_unlock(&priv->mutex);
8703 IPW_DEBUG_WX("GET Freq/Channel -> %d \n", priv->channel);
8704 return 0;
8705 }
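
/* Sketch only: the Wireless Extensions iw_freq encoding used above is
 * value = m * 10^e.  ipw_wx_get_freq() reports m = MHz * 100000 with
 * e = 1, i.e. the frequency in Hz; channel 6 at 2437 MHz becomes
 * m = 243700000, e = 1.  The helper below is hypothetical and unused. */
static inline int ipw_example_freq_mantissa(int freq_mhz)
{
	return freq_mhz * 100000;	/* paired with exponent e = 1 */
}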
8706
8707 static int ipw_wx_set_mode(struct net_device *dev,
8708 struct iw_request_info *info,
8709 union iwreq_data *wrqu, char *extra)
8710 {
8711 struct ipw_priv *priv = ieee80211_priv(dev);
8712 int err = 0;
8713
8714 IPW_DEBUG_WX("Set MODE: %d\n", wrqu->mode);
8715
8716 switch (wrqu->mode) {
8717 #ifdef CONFIG_IPW2200_MONITOR
8718 case IW_MODE_MONITOR:
8719 #endif
8720 case IW_MODE_ADHOC:
8721 case IW_MODE_INFRA:
8722 break;
8723 case IW_MODE_AUTO:
8724 wrqu->mode = IW_MODE_INFRA;
8725 break;
8726 default:
8727 return -EINVAL;
8728 }
8729 if (wrqu->mode == priv->ieee->iw_mode)
8730 return 0;
8731
8732 mutex_lock(&priv->mutex);
8733
8734 ipw_sw_reset(priv, 0);
8735
8736 #ifdef CONFIG_IPW2200_MONITOR
8737 if (priv->ieee->iw_mode == IW_MODE_MONITOR)
8738 priv->net_dev->type = ARPHRD_ETHER;
8739
8740 if (wrqu->mode == IW_MODE_MONITOR)
8741 #ifdef CONFIG_IPW2200_RADIOTAP
8742 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8743 #else
8744 priv->net_dev->type = ARPHRD_IEEE80211;
8745 #endif
8746 #endif /* CONFIG_IPW2200_MONITOR */
8747
8748 /* Free the existing firmware and reset the fw_loaded
8749  * flag so ipw_load() will bring in the new firmware */
8750 free_firmware();
8751
8752 priv->ieee->iw_mode = wrqu->mode;
8753
8754 queue_work(priv->workqueue, &priv->adapter_restart);
8755 mutex_unlock(&priv->mutex);
8756 return err;
8757 }
8758
8759 static int ipw_wx_get_mode(struct net_device *dev,
8760 struct iw_request_info *info,
8761 union iwreq_data *wrqu, char *extra)
8762 {
8763 struct ipw_priv *priv = ieee80211_priv(dev);
8764 mutex_lock(&priv->mutex);
8765 wrqu->mode = priv->ieee->iw_mode;
8766 IPW_DEBUG_WX("Get MODE -> %d\n", wrqu->mode);
8767 mutex_unlock(&priv->mutex);
8768 return 0;
8769 }
8770
8771 /* Values are in microsecond */
8772 static const s32 timeout_duration[] = {
8773 350000,
8774 250000,
8775 75000,
8776 37000,
8777 25000,
8778 };
8779
8780 static const s32 period_duration[] = {
8781 400000,
8782 700000,
8783 1000000,
8784 1000000,
8785 1000000
8786 };
8787
8788 static int ipw_wx_get_range(struct net_device *dev,
8789 struct iw_request_info *info,
8790 union iwreq_data *wrqu, char *extra)
8791 {
8792 struct ipw_priv *priv = ieee80211_priv(dev);
8793 struct iw_range *range = (struct iw_range *)extra;
8794 const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
8795 int i = 0, j;
8796
8797 wrqu->data.length = sizeof(*range);
8798 memset(range, 0, sizeof(*range));
8799
8800  /* 54 Mb/s == ~27 Mb/s real (802.11g) */
8801 range->throughput = 27 * 1000 * 1000;
8802
8803 range->max_qual.qual = 100;
8804 /* TODO: Find real max RSSI and stick here */
8805 range->max_qual.level = 0;
8806 range->max_qual.noise = 0;
8807 range->max_qual.updated = 7; /* Updated all three */
8808
8809 range->avg_qual.qual = 70;
8810  /* TODO: Find real 'good' to 'bad' threshold value for RSSI */
8811 range->avg_qual.level = 0; /* FIXME to real average level */
8812 range->avg_qual.noise = 0;
8813 range->avg_qual.updated = 7; /* Updated all three */
8814 mutex_lock(&priv->mutex);
8815 range->num_bitrates = min(priv->rates.num_rates, (u8) IW_MAX_BITRATES);
8816
8817 for (i = 0; i < range->num_bitrates; i++)
8818 range->bitrate[i] = (priv->rates.supported_rates[i] & 0x7F) *
8819 500000;
8820
8821 range->max_rts = DEFAULT_RTS_THRESHOLD;
8822 range->min_frag = MIN_FRAG_THRESHOLD;
8823 range->max_frag = MAX_FRAG_THRESHOLD;
8824
8825 range->encoding_size[0] = 5;
8826 range->encoding_size[1] = 13;
8827 range->num_encoding_sizes = 2;
8828 range->max_encoding_tokens = WEP_KEYS;
8829
8830 /* Set the Wireless Extension versions */
8831 range->we_version_compiled = WIRELESS_EXT;
8832 range->we_version_source = 18;
8833
8834 i = 0;
8835 if (priv->ieee->mode & (IEEE_B | IEEE_G)) {
8836 for (j = 0; j < geo->bg_channels && i < IW_MAX_FREQUENCIES; j++) {
8837 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
8838 (geo->bg[j].flags & IEEE80211_CH_PASSIVE_ONLY))
8839 continue;
8840
8841 range->freq[i].i = geo->bg[j].channel;
8842 range->freq[i].m = geo->bg[j].freq * 100000;
8843 range->freq[i].e = 1;
8844 i++;
8845 }
8846 }
8847
8848 if (priv->ieee->mode & IEEE_A) {
8849 for (j = 0; j < geo->a_channels && i < IW_MAX_FREQUENCIES; j++) {
8850 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
8851 (geo->a[j].flags & IEEE80211_CH_PASSIVE_ONLY))
8852 continue;
8853
8854 range->freq[i].i = geo->a[j].channel;
8855 range->freq[i].m = geo->a[j].freq * 100000;
8856 range->freq[i].e = 1;
8857 i++;
8858 }
8859 }
8860
8861 range->num_channels = i;
8862 range->num_frequency = i;
8863
8864 mutex_unlock(&priv->mutex);
8865
8866 /* Event capability (kernel + driver) */
8867 range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
8868 IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) |
8869 IW_EVENT_CAPA_MASK(SIOCGIWAP) |
8870 IW_EVENT_CAPA_MASK(SIOCGIWSCAN));
8871 range->event_capa[1] = IW_EVENT_CAPA_K_1;
8872
8873 range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
8874 IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
8875
8876 IPW_DEBUG_WX("GET Range\n");
8877 return 0;
8878 }
8879
8880 static int ipw_wx_set_wap(struct net_device *dev,
8881 struct iw_request_info *info,
8882 union iwreq_data *wrqu, char *extra)
8883 {
8884 struct ipw_priv *priv = ieee80211_priv(dev);
8885 DECLARE_MAC_BUF(mac);
8886
8887 static const unsigned char any[] = {
8888 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
8889 };
8890 static const unsigned char off[] = {
8891 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
8892 };
8893
8894 if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
8895 return -EINVAL;
8896 mutex_lock(&priv->mutex);
8897 if (!memcmp(any, wrqu->ap_addr.sa_data, ETH_ALEN) ||
8898 !memcmp(off, wrqu->ap_addr.sa_data, ETH_ALEN)) {
8899 /* we disable mandatory BSSID association */
8900 IPW_DEBUG_WX("Setting AP BSSID to ANY\n");
8901 priv->config &= ~CFG_STATIC_BSSID;
8902 IPW_DEBUG_ASSOC("Attempting to associate with new "
8903 "parameters.\n");
8904 ipw_associate(priv);
8905 mutex_unlock(&priv->mutex);
8906 return 0;
8907 }
8908
8909 priv->config |= CFG_STATIC_BSSID;
8910 if (!memcmp(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN)) {
8911 IPW_DEBUG_WX("BSSID set to current BSSID.\n");
8912 mutex_unlock(&priv->mutex);
8913 return 0;
8914 }
8915
8916 IPW_DEBUG_WX("Setting mandatory BSSID to %s\n",
8917 print_mac(mac, wrqu->ap_addr.sa_data));
8918
8919 memcpy(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN);
8920
8921 /* Network configuration changed -- force [re]association */
8922 IPW_DEBUG_ASSOC("[re]association triggered due to BSSID change.\n");
8923 if (!ipw_disassociate(priv))
8924 ipw_associate(priv);
8925
8926 mutex_unlock(&priv->mutex);
8927 return 0;
8928 }
8929
8930 static int ipw_wx_get_wap(struct net_device *dev,
8931 struct iw_request_info *info,
8932 union iwreq_data *wrqu, char *extra)
8933 {
8934 struct ipw_priv *priv = ieee80211_priv(dev);
8935 DECLARE_MAC_BUF(mac);
8936
8937 /* If we are associated, trying to associate, or have a statically
8938 * configured BSSID then return that; otherwise return ANY */
8939 mutex_lock(&priv->mutex);
8940 if (priv->config & CFG_STATIC_BSSID ||
8941 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
8942 wrqu->ap_addr.sa_family = ARPHRD_ETHER;
8943 memcpy(wrqu->ap_addr.sa_data, priv->bssid, ETH_ALEN);
8944 } else
8945 memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
8946
8947 IPW_DEBUG_WX("Getting WAP BSSID: %s\n",
8948 print_mac(mac, wrqu->ap_addr.sa_data));
8949 mutex_unlock(&priv->mutex);
8950 return 0;
8951 }
8952
8953 static int ipw_wx_set_essid(struct net_device *dev,
8954 struct iw_request_info *info,
8955 union iwreq_data *wrqu, char *extra)
8956 {
8957 struct ipw_priv *priv = ieee80211_priv(dev);
8958 int length;
8959
8960 mutex_lock(&priv->mutex);
8961
8962 if (!wrqu->essid.flags)
8963 {
8964 IPW_DEBUG_WX("Setting ESSID to ANY\n");
8965 ipw_disassociate(priv);
8966 priv->config &= ~CFG_STATIC_ESSID;
8967 ipw_associate(priv);
8968 mutex_unlock(&priv->mutex);
8969 return 0;
8970 }
8971
8972 length = min((int)wrqu->essid.length, IW_ESSID_MAX_SIZE);
8973
8974 priv->config |= CFG_STATIC_ESSID;
8975
8976 if (priv->essid_len == length && !memcmp(priv->essid, extra, length)
8977 && (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING))) {
8978 IPW_DEBUG_WX("ESSID set to current ESSID.\n");
8979 mutex_unlock(&priv->mutex);
8980 return 0;
8981 }
8982
8983 IPW_DEBUG_WX("Setting ESSID: '%s' (%d)\n", escape_essid(extra, length),
8984 length);
8985
8986 priv->essid_len = length;
8987 memcpy(priv->essid, extra, priv->essid_len);
8988
8989 /* Network configuration changed -- force [re]association */
8990 IPW_DEBUG_ASSOC("[re]association triggered due to ESSID change.\n");
8991 if (!ipw_disassociate(priv))
8992 ipw_associate(priv);
8993
8994 mutex_unlock(&priv->mutex);
8995 return 0;
8996 }
8997
8998 static int ipw_wx_get_essid(struct net_device *dev,
8999 struct iw_request_info *info,
9000 union iwreq_data *wrqu, char *extra)
9001 {
9002 struct ipw_priv *priv = ieee80211_priv(dev);
9003
9004 /* If we are associated, trying to associate, or have a statically
9005 * configured ESSID then return that; otherwise return ANY */
9006 mutex_lock(&priv->mutex);
9007 if (priv->config & CFG_STATIC_ESSID ||
9008 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
9009 IPW_DEBUG_WX("Getting essid: '%s'\n",
9010 escape_essid(priv->essid, priv->essid_len));
9011 memcpy(extra, priv->essid, priv->essid_len);
9012 wrqu->essid.length = priv->essid_len;
9013 wrqu->essid.flags = 1; /* active */
9014 } else {
9015 IPW_DEBUG_WX("Getting essid: ANY\n");
9016 wrqu->essid.length = 0;
9017 wrqu->essid.flags = 0; /* active */
9018 }
9019 mutex_unlock(&priv->mutex);
9020 return 0;
9021 }
9022
9023 static int ipw_wx_set_nick(struct net_device *dev,
9024 struct iw_request_info *info,
9025 union iwreq_data *wrqu, char *extra)
9026 {
9027 struct ipw_priv *priv = ieee80211_priv(dev);
9028
9029 IPW_DEBUG_WX("Setting nick to '%s'\n", extra);
9030 if (wrqu->data.length > IW_ESSID_MAX_SIZE)
9031 return -E2BIG;
9032 mutex_lock(&priv->mutex);
9033 wrqu->data.length = min((size_t) wrqu->data.length, sizeof(priv->nick));
9034 memset(priv->nick, 0, sizeof(priv->nick));
9035 memcpy(priv->nick, extra, wrqu->data.length);
9036 IPW_DEBUG_TRACE("<<\n");
9037 mutex_unlock(&priv->mutex);
9038 return 0;
9039
9040 }
9041
9042 static int ipw_wx_get_nick(struct net_device *dev,
9043 struct iw_request_info *info,
9044 union iwreq_data *wrqu, char *extra)
9045 {
9046 struct ipw_priv *priv = ieee80211_priv(dev);
9047 IPW_DEBUG_WX("Getting nick\n");
9048 mutex_lock(&priv->mutex);
9049 wrqu->data.length = strlen(priv->nick);
9050 memcpy(extra, priv->nick, wrqu->data.length);
9051 wrqu->data.flags = 1; /* active */
9052 mutex_unlock(&priv->mutex);
9053 return 0;
9054 }
9055
9056 static int ipw_wx_set_sens(struct net_device *dev,
9057 struct iw_request_info *info,
9058 union iwreq_data *wrqu, char *extra)
9059 {
9060 struct ipw_priv *priv = ieee80211_priv(dev);
9061 int err = 0;
9062
9063 IPW_DEBUG_WX("Setting roaming threshold to %d\n", wrqu->sens.value);
9064 IPW_DEBUG_WX("Setting disassociate threshold to %d\n", 3*wrqu->sens.value);
9065 mutex_lock(&priv->mutex);
9066
9067 if (wrqu->sens.fixed == 0)
9068 {
9069 priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
9070 priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
9071 goto out;
9072 }
9073 if ((wrqu->sens.value > IPW_MB_ROAMING_THRESHOLD_MAX) ||
9074 (wrqu->sens.value < IPW_MB_ROAMING_THRESHOLD_MIN)) {
9075 err = -EINVAL;
9076 goto out;
9077 }
9078
9079 priv->roaming_threshold = wrqu->sens.value;
9080 priv->disassociate_threshold = 3*wrqu->sens.value;
9081 out:
9082 mutex_unlock(&priv->mutex);
9083 return err;
9084 }
9085
9086 static int ipw_wx_get_sens(struct net_device *dev,
9087 struct iw_request_info *info,
9088 union iwreq_data *wrqu, char *extra)
9089 {
9090 struct ipw_priv *priv = ieee80211_priv(dev);
9091 mutex_lock(&priv->mutex);
9092 wrqu->sens.fixed = 1;
9093 wrqu->sens.value = priv->roaming_threshold;
9094 mutex_unlock(&priv->mutex);
9095
9096 IPW_DEBUG_WX("GET roaming threshold -> %s %d \n",
9097 wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
9098
9099 return 0;
9100 }
9101
9102 static int ipw_wx_set_rate(struct net_device *dev,
9103 struct iw_request_info *info,
9104 union iwreq_data *wrqu, char *extra)
9105 {
9106 /* TODO: We should use semaphores or locks for access to priv */
9107 struct ipw_priv *priv = ieee80211_priv(dev);
9108 u32 target_rate = wrqu->bitrate.value;
9109 u32 fixed, mask;
9110
9111 /* value = -1, fixed = 0 means auto only, so we should use all rates offered by AP */
9112 /* value = X, fixed = 1 means only rate X */
9113 /* value = X, fixed = 0 means all rates lower equal X */
9114
9115 if (target_rate == -1) {
9116 fixed = 0;
9117 mask = IEEE80211_DEFAULT_RATES_MASK;
9118 /* Now we should reassociate */
9119 goto apply;
9120 }
9121
9122 mask = 0;
9123 fixed = wrqu->bitrate.fixed;
9124
9125 if (target_rate == 1000000 || !fixed)
9126 mask |= IEEE80211_CCK_RATE_1MB_MASK;
9127 if (target_rate == 1000000)
9128 goto apply;
9129
9130 if (target_rate == 2000000 || !fixed)
9131 mask |= IEEE80211_CCK_RATE_2MB_MASK;
9132 if (target_rate == 2000000)
9133 goto apply;
9134
9135 if (target_rate == 5500000 || !fixed)
9136 mask |= IEEE80211_CCK_RATE_5MB_MASK;
9137 if (target_rate == 5500000)
9138 goto apply;
9139
9140 if (target_rate == 6000000 || !fixed)
9141 mask |= IEEE80211_OFDM_RATE_6MB_MASK;
9142 if (target_rate == 6000000)
9143 goto apply;
9144
9145 if (target_rate == 9000000 || !fixed)
9146 mask |= IEEE80211_OFDM_RATE_9MB_MASK;
9147 if (target_rate == 9000000)
9148 goto apply;
9149
9150 if (target_rate == 11000000 || !fixed)
9151 mask |= IEEE80211_CCK_RATE_11MB_MASK;
9152 if (target_rate == 11000000)
9153 goto apply;
9154
9155 if (target_rate == 12000000 || !fixed)
9156 mask |= IEEE80211_OFDM_RATE_12MB_MASK;
9157 if (target_rate == 12000000)
9158 goto apply;
9159
9160 if (target_rate == 18000000 || !fixed)
9161 mask |= IEEE80211_OFDM_RATE_18MB_MASK;
9162 if (target_rate == 18000000)
9163 goto apply;
9164
9165 if (target_rate == 24000000 || !fixed)
9166 mask |= IEEE80211_OFDM_RATE_24MB_MASK;
9167 if (target_rate == 24000000)
9168 goto apply;
9169
9170 if (target_rate == 36000000 || !fixed)
9171 mask |= IEEE80211_OFDM_RATE_36MB_MASK;
9172 if (target_rate == 36000000)
9173 goto apply;
9174
9175 if (target_rate == 48000000 || !fixed)
9176 mask |= IEEE80211_OFDM_RATE_48MB_MASK;
9177 if (target_rate == 48000000)
9178 goto apply;
9179
9180 if (target_rate == 54000000 || !fixed)
9181 mask |= IEEE80211_OFDM_RATE_54MB_MASK;
9182 if (target_rate == 54000000)
9183 goto apply;
9184
9185 IPW_DEBUG_WX("invalid rate specified, returning error\n");
9186 return -EINVAL;
9187
9188 apply:
9189 IPW_DEBUG_WX("Setting rate mask to 0x%08X [%s]\n",
9190 mask, fixed ? "fixed" : "sub-rates");
9191 mutex_lock(&priv->mutex);
9192 if (mask == IEEE80211_DEFAULT_RATES_MASK) {
9193 priv->config &= ~CFG_FIXED_RATE;
9194 ipw_set_fixed_rate(priv, priv->ieee->mode);
9195 } else
9196 priv->config |= CFG_FIXED_RATE;
9197
9198 if (priv->rates_mask == mask) {
9199 IPW_DEBUG_WX("Mask set to current mask.\n");
9200 mutex_unlock(&priv->mutex);
9201 return 0;
9202 }
9203
9204 priv->rates_mask = mask;
9205
9206 /* Network configuration changed -- force [re]association */
9207 IPW_DEBUG_ASSOC("[re]association triggered due to rates change.\n");
9208 if (!ipw_disassociate(priv))
9209 ipw_associate(priv);
9210
9211 mutex_unlock(&priv->mutex);
9212 return 0;
9213 }
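
/* Sketch only: the if-chain in ipw_wx_set_rate() restated as a table
 * walk.  The pairs below repeat the rate -> mask values already used
 * above (the value == -1 "auto" case is handled separately before the
 * chain); a zero result corresponds to the -EINVAL path.  Table and
 * helper are hypothetical and not used by the driver. */
static const struct {
	u32 bps;
	u32 mask;
} ipw_example_rates[] = {
	{1000000, IEEE80211_CCK_RATE_1MB_MASK},
	{2000000, IEEE80211_CCK_RATE_2MB_MASK},
	{5500000, IEEE80211_CCK_RATE_5MB_MASK},
	{6000000, IEEE80211_OFDM_RATE_6MB_MASK},
	{9000000, IEEE80211_OFDM_RATE_9MB_MASK},
	{11000000, IEEE80211_CCK_RATE_11MB_MASK},
	{12000000, IEEE80211_OFDM_RATE_12MB_MASK},
	{18000000, IEEE80211_OFDM_RATE_18MB_MASK},
	{24000000, IEEE80211_OFDM_RATE_24MB_MASK},
	{36000000, IEEE80211_OFDM_RATE_36MB_MASK},
	{48000000, IEEE80211_OFDM_RATE_48MB_MASK},
	{54000000, IEEE80211_OFDM_RATE_54MB_MASK},
};

static inline u32 ipw_example_rate_mask(u32 target, int fixed)
{
	u32 mask = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(ipw_example_rates); i++) {
		if (!fixed || ipw_example_rates[i].bps == target)
			mask |= ipw_example_rates[i].mask;
		if (ipw_example_rates[i].bps == target)
			return mask;
	}
	return 0;	/* unknown rate, matches the -EINVAL case above */
}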
9214
9215 static int ipw_wx_get_rate(struct net_device *dev,
9216 struct iw_request_info *info,
9217 union iwreq_data *wrqu, char *extra)
9218 {
9219 struct ipw_priv *priv = ieee80211_priv(dev);
9220 mutex_lock(&priv->mutex);
9221 wrqu->bitrate.value = priv->last_rate;
9222 wrqu->bitrate.fixed = (priv->config & CFG_FIXED_RATE) ? 1 : 0;
9223 mutex_unlock(&priv->mutex);
9224 IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value);
9225 return 0;
9226 }
9227
9228 static int ipw_wx_set_rts(struct net_device *dev,
9229 struct iw_request_info *info,
9230 union iwreq_data *wrqu, char *extra)
9231 {
9232 struct ipw_priv *priv = ieee80211_priv(dev);
9233 mutex_lock(&priv->mutex);
9234 if (wrqu->rts.disabled || !wrqu->rts.fixed)
9235 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
9236 else {
9237 if (wrqu->rts.value < MIN_RTS_THRESHOLD ||
9238 wrqu->rts.value > MAX_RTS_THRESHOLD) {
9239 mutex_unlock(&priv->mutex);
9240 return -EINVAL;
9241 }
9242 priv->rts_threshold = wrqu->rts.value;
9243 }
9244
9245 ipw_send_rts_threshold(priv, priv->rts_threshold);
9246 mutex_unlock(&priv->mutex);
9247 IPW_DEBUG_WX("SET RTS Threshold -> %d \n", priv->rts_threshold);
9248 return 0;
9249 }
9250
9251 static int ipw_wx_get_rts(struct net_device *dev,
9252 struct iw_request_info *info,
9253 union iwreq_data *wrqu, char *extra)
9254 {
9255 struct ipw_priv *priv = ieee80211_priv(dev);
9256 mutex_lock(&priv->mutex);
9257 wrqu->rts.value = priv->rts_threshold;
9258 wrqu->rts.fixed = 0; /* no auto select */
9259 wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD);
9260 mutex_unlock(&priv->mutex);
9261 IPW_DEBUG_WX("GET RTS Threshold -> %d \n", wrqu->rts.value);
9262 return 0;
9263 }
9264
9265 static int ipw_wx_set_txpow(struct net_device *dev,
9266 struct iw_request_info *info,
9267 union iwreq_data *wrqu, char *extra)
9268 {
9269 struct ipw_priv *priv = ieee80211_priv(dev);
9270 int err = 0;
9271
9272 mutex_lock(&priv->mutex);
9273 if (ipw_radio_kill_sw(priv, wrqu->power.disabled)) {
9274 err = -EINPROGRESS;
9275 goto out;
9276 }
9277
9278 if (!wrqu->power.fixed)
9279 wrqu->power.value = IPW_TX_POWER_DEFAULT;
9280
9281 if (wrqu->power.flags != IW_TXPOW_DBM) {
9282 err = -EINVAL;
9283 goto out;
9284 }
9285
9286 if ((wrqu->power.value > IPW_TX_POWER_MAX) ||
9287 (wrqu->power.value < IPW_TX_POWER_MIN)) {
9288 err = -EINVAL;
9289 goto out;
9290 }
9291
9292 priv->tx_power = wrqu->power.value;
9293 err = ipw_set_tx_power(priv);
9294 out:
9295 mutex_unlock(&priv->mutex);
9296 return err;
9297 }
9298
9299 static int ipw_wx_get_txpow(struct net_device *dev,
9300 struct iw_request_info *info,
9301 union iwreq_data *wrqu, char *extra)
9302 {
9303 struct ipw_priv *priv = ieee80211_priv(dev);
9304 mutex_lock(&priv->mutex);
9305 wrqu->power.value = priv->tx_power;
9306 wrqu->power.fixed = 1;
9307 wrqu->power.flags = IW_TXPOW_DBM;
9308 wrqu->power.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0;
9309 mutex_unlock(&priv->mutex);
9310
9311 IPW_DEBUG_WX("GET TX Power -> %s %d \n",
9312 wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
9313
9314 return 0;
9315 }
9316
9317 static int ipw_wx_set_frag(struct net_device *dev,
9318 struct iw_request_info *info,
9319 union iwreq_data *wrqu, char *extra)
9320 {
9321 struct ipw_priv *priv = ieee80211_priv(dev);
9322 mutex_lock(&priv->mutex);
9323 if (wrqu->frag.disabled || !wrqu->frag.fixed)
9324 priv->ieee->fts = DEFAULT_FTS;
9325 else {
9326 if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
9327 wrqu->frag.value > MAX_FRAG_THRESHOLD) {
9328 mutex_unlock(&priv->mutex);
9329 return -EINVAL;
9330 }
9331
9332 priv->ieee->fts = wrqu->frag.value & ~0x1;
9333 }
9334
9335 ipw_send_frag_threshold(priv, wrqu->frag.value);
9336 mutex_unlock(&priv->mutex);
9337 IPW_DEBUG_WX("SET Frag Threshold -> %d \n", wrqu->frag.value);
9338 return 0;
9339 }
9340
9341 static int ipw_wx_get_frag(struct net_device *dev,
9342 struct iw_request_info *info,
9343 union iwreq_data *wrqu, char *extra)
9344 {
9345 struct ipw_priv *priv = ieee80211_priv(dev);
9346 mutex_lock(&priv->mutex);
9347 wrqu->frag.value = priv->ieee->fts;
9348 wrqu->frag.fixed = 0; /* no auto select */
9349 wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FTS);
9350 mutex_unlock(&priv->mutex);
9351 IPW_DEBUG_WX("GET Frag Threshold -> %d \n", wrqu->frag.value);
9352
9353 return 0;
9354 }
9355
9356 static int ipw_wx_set_retry(struct net_device *dev,
9357 struct iw_request_info *info,
9358 union iwreq_data *wrqu, char *extra)
9359 {
9360 struct ipw_priv *priv = ieee80211_priv(dev);
9361
9362 if (wrqu->retry.flags & IW_RETRY_LIFETIME || wrqu->retry.disabled)
9363 return -EINVAL;
9364
9365 if (!(wrqu->retry.flags & IW_RETRY_LIMIT))
9366 return 0;
9367
9368 if (wrqu->retry.value < 0 || wrqu->retry.value >= 255)
9369 return -EINVAL;
9370
9371 mutex_lock(&priv->mutex);
9372 if (wrqu->retry.flags & IW_RETRY_SHORT)
9373 priv->short_retry_limit = (u8) wrqu->retry.value;
9374 else if (wrqu->retry.flags & IW_RETRY_LONG)
9375 priv->long_retry_limit = (u8) wrqu->retry.value;
9376 else {
9377 priv->short_retry_limit = (u8) wrqu->retry.value;
9378 priv->long_retry_limit = (u8) wrqu->retry.value;
9379 }
9380
9381 ipw_send_retry_limit(priv, priv->short_retry_limit,
9382 priv->long_retry_limit);
9383 mutex_unlock(&priv->mutex);
9384 IPW_DEBUG_WX("SET retry limit -> short:%d long:%d\n",
9385 priv->short_retry_limit, priv->long_retry_limit);
9386 return 0;
9387 }
9388
9389 static int ipw_wx_get_retry(struct net_device *dev,
9390 struct iw_request_info *info,
9391 union iwreq_data *wrqu, char *extra)
9392 {
9393 struct ipw_priv *priv = ieee80211_priv(dev);
9394
9395 mutex_lock(&priv->mutex);
9396 wrqu->retry.disabled = 0;
9397
9398 if ((wrqu->retry.flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
9399 mutex_unlock(&priv->mutex);
9400 return -EINVAL;
9401 }
9402
9403 if (wrqu->retry.flags & IW_RETRY_LONG) {
9404 wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_LONG;
9405 wrqu->retry.value = priv->long_retry_limit;
9406 } else if (wrqu->retry.flags & IW_RETRY_SHORT) {
9407 wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_SHORT;
9408 wrqu->retry.value = priv->short_retry_limit;
9409 } else {
9410 wrqu->retry.flags = IW_RETRY_LIMIT;
9411 wrqu->retry.value = priv->short_retry_limit;
9412 }
9413 mutex_unlock(&priv->mutex);
9414
9415 IPW_DEBUG_WX("GET retry -> %d \n", wrqu->retry.value);
9416
9417 return 0;
9418 }
9419
9420 static int ipw_request_direct_scan(struct ipw_priv *priv, char *essid,
9421 int essid_len)
9422 {
9423 struct ipw_scan_request_ext scan;
9424 int err = 0, scan_type;
9425
9426 if (!(priv->status & STATUS_INIT) ||
9427 (priv->status & STATUS_EXIT_PENDING))
9428 return 0;
9429
9430 mutex_lock(&priv->mutex);
9431
9432 if (priv->status & STATUS_RF_KILL_MASK) {
9433 IPW_DEBUG_HC("Aborting scan due to RF kill activation\n");
9434 priv->status |= STATUS_SCAN_PENDING;
9435 goto done;
9436 }
9437
9438 IPW_DEBUG_HC("starting request direct scan!\n");
9439
9440 if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) {
9441 /* We should not sleep here; otherwise we will block most
9442 * of the system (for instance, we hold rtnl_lock when we
9443 * get here).
9444 */
9445 err = -EAGAIN;
9446 goto done;
9447 }
9448 memset(&scan, 0, sizeof(scan));
9449
9450 if (priv->config & CFG_SPEED_SCAN)
9451 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
9452 cpu_to_le16(30);
9453 else
9454 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
9455 cpu_to_le16(20);
9456
9457 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] =
9458 cpu_to_le16(20);
9459 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = cpu_to_le16(120);
9460 scan.dwell_time[IPW_SCAN_ACTIVE_DIRECT_SCAN] = cpu_to_le16(20);
9461
9462 scan.full_scan_index = cpu_to_le32(ieee80211_get_scans(priv->ieee));
9463
9464 err = ipw_send_ssid(priv, essid, essid_len);
9465 if (err) {
9466 IPW_DEBUG_HC("Attempt to send SSID command failed\n");
9467 goto done;
9468 }
9469 scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
9470
9471 ipw_add_scan_channels(priv, &scan, scan_type);
9472
9473 err = ipw_send_scan_request_ext(priv, &scan);
9474 if (err) {
9475 IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
9476 goto done;
9477 }
9478
9479 priv->status |= STATUS_SCANNING;
9480
9481 done:
9482 mutex_unlock(&priv->mutex);
9483 return err;
9484 }
9485
9486 static int ipw_wx_set_scan(struct net_device *dev,
9487 struct iw_request_info *info,
9488 union iwreq_data *wrqu, char *extra)
9489 {
9490 struct ipw_priv *priv = ieee80211_priv(dev);
9491 struct iw_scan_req *req = (struct iw_scan_req *)extra;
9492
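/* A struct iw_scan_req in 'extra' requests either a direct scan for a
 * specific ESSID or a passive scan; anything else falls through to the
 * normal broadcast active scan queued below. */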
9493 if (wrqu->data.length == sizeof(struct iw_scan_req)) {
9494 if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
9495 ipw_request_direct_scan(priv, req->essid,
9496 req->essid_len);
9497 return 0;
9498 }
9499 if (req->scan_type == IW_SCAN_TYPE_PASSIVE) {
9500 queue_work(priv->workqueue,
9501 &priv->request_passive_scan);
9502 return 0;
9503 }
9504 }
9505
9506 IPW_DEBUG_WX("Start scan\n");
9507
9508 queue_delayed_work(priv->workqueue, &priv->request_scan, 0);
9509
9510 return 0;
9511 }
9512
9513 static int ipw_wx_get_scan(struct net_device *dev,
9514 struct iw_request_info *info,
9515 union iwreq_data *wrqu, char *extra)
9516 {
9517 struct ipw_priv *priv = ieee80211_priv(dev);
9518 return ieee80211_wx_get_scan(priv->ieee, info, wrqu, extra);
9519 }
9520
9521 static int ipw_wx_set_encode(struct net_device *dev,
9522 struct iw_request_info *info,
9523 union iwreq_data *wrqu, char *key)
9524 {
9525 struct ipw_priv *priv = ieee80211_priv(dev);
9526 int ret;
9527 u32 cap = priv->capability;
9528
9529 mutex_lock(&priv->mutex);
9530 ret = ieee80211_wx_set_encode(priv->ieee, info, wrqu, key);
9531
9532 /* In IBSS mode, we need to notify the firmware to update
9533 * the beacon info after we changed the capability. */
9534 if (cap != priv->capability &&
9535 priv->ieee->iw_mode == IW_MODE_ADHOC &&
9536 priv->status & STATUS_ASSOCIATED)
9537 ipw_disassociate(priv);
9538
9539 mutex_unlock(&priv->mutex);
9540 return ret;
9541 }
9542
9543 static int ipw_wx_get_encode(struct net_device *dev,
9544 struct iw_request_info *info,
9545 union iwreq_data *wrqu, char *key)
9546 {
9547 struct ipw_priv *priv = ieee80211_priv(dev);
9548 return ieee80211_wx_get_encode(priv->ieee, info, wrqu, key);
9549 }
9550
9551 static int ipw_wx_set_power(struct net_device *dev,
9552 struct iw_request_info *info,
9553 union iwreq_data *wrqu, char *extra)
9554 {
9555 struct ipw_priv *priv = ieee80211_priv(dev);
9556 int err;
9557 mutex_lock(&priv->mutex);
9558 if (wrqu->power.disabled) {
9559 priv->power_mode = IPW_POWER_LEVEL(priv->power_mode);
9560 err = ipw_send_power_mode(priv, IPW_POWER_MODE_CAM);
9561 if (err) {
9562 IPW_DEBUG_WX("failed setting power mode.\n");
9563 mutex_unlock(&priv->mutex);
9564 return err;
9565 }
9566 IPW_DEBUG_WX("SET Power Management Mode -> off\n");
9567 mutex_unlock(&priv->mutex);
9568 return 0;
9569 }
9570
9571 switch (wrqu->power.flags & IW_POWER_MODE) {
9572 case IW_POWER_ON: /* If not specified */
9573 case IW_POWER_MODE: /* If set all mask */
9574 case IW_POWER_ALL_R: /* If explicitly stated all */
9575 break;
9576 default: /* Otherwise we don't support it */
9577 IPW_DEBUG_WX("SET PM Mode: %X not supported.\n",
9578 wrqu->power.flags);
9579 mutex_unlock(&priv->mutex);
9580 return -EOPNOTSUPP;
9581 }
9582
9583 /* If the user hasn't specified a power management mode yet, default
9584 * to BATTERY */
9585 if (IPW_POWER_LEVEL(priv->power_mode) == IPW_POWER_AC)
9586 priv->power_mode = IPW_POWER_ENABLED | IPW_POWER_BATTERY;
9587 else
9588 priv->power_mode = IPW_POWER_ENABLED | priv->power_mode;
9589
9590 err = ipw_send_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode));
9591 if (err) {
9592 IPW_DEBUG_WX("failed setting power mode.\n");
9593 mutex_unlock(&priv->mutex);
9594 return err;
9595 }
9596
9597 IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", priv->power_mode);
9598 mutex_unlock(&priv->mutex);
9599 return 0;
9600 }
9601
9602 static int ipw_wx_get_power(struct net_device *dev,
9603 struct iw_request_info *info,
9604 union iwreq_data *wrqu, char *extra)
9605 {
9606 struct ipw_priv *priv = ieee80211_priv(dev);
9607 mutex_lock(&priv->mutex);
9608 if (!(priv->power_mode & IPW_POWER_ENABLED))
9609 wrqu->power.disabled = 1;
9610 else
9611 wrqu->power.disabled = 0;
9612
9613 mutex_unlock(&priv->mutex);
9614 IPW_DEBUG_WX("GET Power Management Mode -> %02X\n", priv->power_mode);
9615
9616 return 0;
9617 }
9618
9619 static int ipw_wx_set_powermode(struct net_device *dev,
9620 struct iw_request_info *info,
9621 union iwreq_data *wrqu, char *extra)
9622 {
9623 struct ipw_priv *priv = ieee80211_priv(dev);
9624 int mode = *(int *)extra;
9625 int err;
9626
9627 mutex_lock(&priv->mutex);
9628 if ((mode < 1) || (mode > IPW_POWER_LIMIT))
9629 mode = IPW_POWER_AC;
9630
9631 if (IPW_POWER_LEVEL(priv->power_mode) != mode) {
9632 err = ipw_send_power_mode(priv, mode);
9633 if (err) {
9634 IPW_DEBUG_WX("failed setting power mode.\n");
9635 mutex_unlock(&priv->mutex);
9636 return err;
9637 }
9638 priv->power_mode = IPW_POWER_ENABLED | mode;
9639 }
9640 mutex_unlock(&priv->mutex);
9641 return 0;
9642 }
9643
9644 #define MAX_WX_STRING 80
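/* MAX_WX_STRING is the buffer budget for the string-returning private
 * handlers below (get_powermode / get_wireless_mode); it matches the
 * IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING sizes declared
 * for "get_power" and "get_mode" in ipw_priv_args further down. */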
9645 static int ipw_wx_get_powermode(struct net_device *dev,
9646 struct iw_request_info *info,
9647 union iwreq_data *wrqu, char *extra)
9648 {
9649 struct ipw_priv *priv = ieee80211_priv(dev);
9650 int level = IPW_POWER_LEVEL(priv->power_mode);
9651 char *p = extra;
9652
9653 p += snprintf(p, MAX_WX_STRING, "Power save level: %d ", level);
9654
9655 switch (level) {
9656 case IPW_POWER_AC:
9657 p += snprintf(p, MAX_WX_STRING - (p - extra), "(AC)");
9658 break;
9659 case IPW_POWER_BATTERY:
9660 p += snprintf(p, MAX_WX_STRING - (p - extra), "(BATTERY)");
9661 break;
9662 default:
9663 p += snprintf(p, MAX_WX_STRING - (p - extra),
9664 "(Timeout %dms, Period %dms)",
9665 timeout_duration[level - 1] / 1000,
9666 period_duration[level - 1] / 1000);
9667 }
9668
9669 if (!(priv->power_mode & IPW_POWER_ENABLED))
9670 p += snprintf(p, MAX_WX_STRING - (p - extra), " OFF");
9671
9672 wrqu->data.length = p - extra + 1;
9673
9674 return 0;
9675 }
9676
9677 static int ipw_wx_set_wireless_mode(struct net_device *dev,
9678 struct iw_request_info *info,
9679 union iwreq_data *wrqu, char *extra)
9680 {
9681 struct ipw_priv *priv = ieee80211_priv(dev);
9682 int mode = *(int *)extra;
9683 u8 band = 0, modulation = 0;
9684
9685 if (mode == 0 || mode & ~IEEE_MODE_MASK) {
9686 IPW_WARNING("Attempt to set invalid wireless mode: %d\n", mode);
9687 return -EINVAL;
9688 }
9689 mutex_lock(&priv->mutex);
9690 if (priv->adapter == IPW_2915ABG) {
9691 priv->ieee->abg_true = 1;
9692 if (mode & IEEE_A) {
9693 band |= IEEE80211_52GHZ_BAND;
9694 modulation |= IEEE80211_OFDM_MODULATION;
9695 } else
9696 priv->ieee->abg_true = 0;
9697 } else {
9698 if (mode & IEEE_A) {
9699 IPW_WARNING("Attempt to set 2200BG into "
9700 "802.11a mode\n");
9701 mutex_unlock(&priv->mutex);
9702 return -EINVAL;
9703 }
9704
9705 priv->ieee->abg_true = 0;
9706 }
9707
9708 if (mode & IEEE_B) {
9709 band |= IEEE80211_24GHZ_BAND;
9710 modulation |= IEEE80211_CCK_MODULATION;
9711 } else
9712 priv->ieee->abg_true = 0;
9713
9714 if (mode & IEEE_G) {
9715 band |= IEEE80211_24GHZ_BAND;
9716 modulation |= IEEE80211_OFDM_MODULATION;
9717 } else
9718 priv->ieee->abg_true = 0;
9719
9720 priv->ieee->mode = mode;
9721 priv->ieee->freq_band = band;
9722 priv->ieee->modulation = modulation;
9723 init_supported_rates(priv, &priv->rates);
9724
9725 /* Network configuration changed -- force [re]association */
9726 IPW_DEBUG_ASSOC("[re]association triggered due to mode change.\n");
9727 if (!ipw_disassociate(priv)) {
9728 ipw_send_supported_rates(priv, &priv->rates);
9729 ipw_associate(priv);
9730 }
9731
9732 /* Update the band LEDs */
9733 ipw_led_band_on(priv);
9734
9735 IPW_DEBUG_WX("PRIV SET MODE: %c%c%c\n",
9736 mode & IEEE_A ? 'a' : '.',
9737 mode & IEEE_B ? 'b' : '.', mode & IEEE_G ? 'g' : '.');
9738 mutex_unlock(&priv->mutex);
9739 return 0;
9740 }
9741
9742 static int ipw_wx_get_wireless_mode(struct net_device *dev,
9743 struct iw_request_info *info,
9744 union iwreq_data *wrqu, char *extra)
9745 {
9746 struct ipw_priv *priv = ieee80211_priv(dev);
9747 mutex_lock(&priv->mutex);
9748 switch (priv->ieee->mode) {
9749 case IEEE_A:
9750 strncpy(extra, "802.11a (1)", MAX_WX_STRING);
9751 break;
9752 case IEEE_B:
9753 strncpy(extra, "802.11b (2)", MAX_WX_STRING);
9754 break;
9755 case IEEE_A | IEEE_B:
9756 strncpy(extra, "802.11ab (3)", MAX_WX_STRING);
9757 break;
9758 case IEEE_G:
9759 strncpy(extra, "802.11g (4)", MAX_WX_STRING);
9760 break;
9761 case IEEE_A | IEEE_G:
9762 strncpy(extra, "802.11ag (5)", MAX_WX_STRING);
9763 break;
9764 case IEEE_B | IEEE_G:
9765 strncpy(extra, "802.11bg (6)", MAX_WX_STRING);
9766 break;
9767 case IEEE_A | IEEE_B | IEEE_G:
9768 strncpy(extra, "802.11abg (7)", MAX_WX_STRING);
9769 break;
9770 default:
9771 strncpy(extra, "unknown", MAX_WX_STRING);
9772 break;
9773 }
9774
9775 IPW_DEBUG_WX("PRIV GET MODE: %s\n", extra);
9776
9777 wrqu->data.length = strlen(extra) + 1;
9778 mutex_unlock(&priv->mutex);
9779
9780 return 0;
9781 }
9782
9783 static int ipw_wx_set_preamble(struct net_device *dev,
9784 struct iw_request_info *info,
9785 union iwreq_data *wrqu, char *extra)
9786 {
9787 struct ipw_priv *priv = ieee80211_priv(dev);
9788 int mode = *(int *)extra;
9789 mutex_lock(&priv->mutex);
9790 /* Switching from SHORT -> LONG requires a disassociation */
9791 if (mode == 1) {
9792 if (!(priv->config & CFG_PREAMBLE_LONG)) {
9793 priv->config |= CFG_PREAMBLE_LONG;
9794
9795 /* Network configuration changed -- force [re]association */
9796 IPW_DEBUG_ASSOC
9797 ("[re]association triggered due to preamble change.\n");
9798 if (!ipw_disassociate(priv))
9799 ipw_associate(priv);
9800 }
9801 goto done;
9802 }
9803
9804 if (mode == 0) {
9805 priv->config &= ~CFG_PREAMBLE_LONG;
9806 goto done;
9807 }
9808 mutex_unlock(&priv->mutex);
9809 return -EINVAL;
9810
9811 done:
9812 mutex_unlock(&priv->mutex);
9813 return 0;
9814 }
9815
9816 static int ipw_wx_get_preamble(struct net_device *dev,
9817 struct iw_request_info *info,
9818 union iwreq_data *wrqu, char *extra)
9819 {
9820 struct ipw_priv *priv = ieee80211_priv(dev);
9821 mutex_lock(&priv->mutex);
9822 if (priv->config & CFG_PREAMBLE_LONG)
9823 snprintf(wrqu->name, IFNAMSIZ, "long (1)");
9824 else
9825 snprintf(wrqu->name, IFNAMSIZ, "auto (0)");
9826 mutex_unlock(&priv->mutex);
9827 return 0;
9828 }
9829
9830 #ifdef CONFIG_IPW2200_MONITOR
9831 static int ipw_wx_set_monitor(struct net_device *dev,
9832 struct iw_request_info *info,
9833 union iwreq_data *wrqu, char *extra)
9834 {
9835 struct ipw_priv *priv = ieee80211_priv(dev);
9836 int *parms = (int *)extra;
9837 int enable = (parms[0] > 0);
9838 mutex_lock(&priv->mutex);
9839 IPW_DEBUG_WX("SET MONITOR: %d %d\n", enable, parms[1]);
9840 if (enable) {
9841 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9842 #ifdef CONFIG_IPW2200_RADIOTAP
9843 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
9844 #else
9845 priv->net_dev->type = ARPHRD_IEEE80211;
9846 #endif
9847 queue_work(priv->workqueue, &priv->adapter_restart);
9848 }
9849
9850 ipw_set_channel(priv, parms[1]);
9851 } else {
9852 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9853 mutex_unlock(&priv->mutex);
9854 return 0;
9855 }
9856 priv->net_dev->type = ARPHRD_ETHER;
9857 queue_work(priv->workqueue, &priv->adapter_restart);
9858 }
9859 mutex_unlock(&priv->mutex);
9860 return 0;
9861 }
9862
9863 #endif /* CONFIG_IPW2200_MONITOR */
9864
9865 static int ipw_wx_reset(struct net_device *dev,
9866 struct iw_request_info *info,
9867 union iwreq_data *wrqu, char *extra)
9868 {
9869 struct ipw_priv *priv = ieee80211_priv(dev);
9870 IPW_DEBUG_WX("RESET\n");
9871 queue_work(priv->workqueue, &priv->adapter_restart);
9872 return 0;
9873 }
9874
9875 static int ipw_wx_sw_reset(struct net_device *dev,
9876 struct iw_request_info *info,
9877 union iwreq_data *wrqu, char *extra)
9878 {
9879 struct ipw_priv *priv = ieee80211_priv(dev);
9880 union iwreq_data wrqu_sec = {
9881 .encoding = {
9882 .flags = IW_ENCODE_DISABLED,
9883 },
9884 };
9885 int ret;
9886
9887 IPW_DEBUG_WX("SW_RESET\n");
9888
9889 mutex_lock(&priv->mutex);
9890
9891 ret = ipw_sw_reset(priv, 2);
9892 if (!ret) {
9893 free_firmware();
9894 ipw_adapter_restart(priv);
9895 }
9896
9897 /* The SW reset bit might have been toggled on by the 'disable'
9898 * module parameter, so take appropriate action */
9899 ipw_radio_kill_sw(priv, priv->status & STATUS_RF_KILL_SW);
9900
9901 mutex_unlock(&priv->mutex);
9902 ieee80211_wx_set_encode(priv->ieee, info, &wrqu_sec, NULL);
9903 mutex_lock(&priv->mutex);
9904
9905 if (!(priv->status & STATUS_RF_KILL_MASK)) {
9906 /* Configuration likely changed -- force [re]association */
9907 IPW_DEBUG_ASSOC("[re]association triggered due to sw "
9908 "reset.\n");
9909 if (!ipw_disassociate(priv))
9910 ipw_associate(priv);
9911 }
9912
9913 mutex_unlock(&priv->mutex);
9914
9915 return 0;
9916 }
9917
9918 /* Rebase the WE IOCTLs to zero for the handler array */
9919 #define IW_IOCTL(x) [(x)-SIOCSIWCOMMIT]
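/* For example, IW_IOCTL(SIOCGIWNAME) = ipw_wx_get_name expands to the
 * designated initializer [SIOCGIWNAME - SIOCSIWCOMMIT] = ipw_wx_get_name,
 * so each handler lands at its ioctl's offset from SIOCSIWCOMMIT. */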
9920 static iw_handler ipw_wx_handlers[] = {
9921 IW_IOCTL(SIOCGIWNAME) = ipw_wx_get_name,
9922 IW_IOCTL(SIOCSIWFREQ) = ipw_wx_set_freq,
9923 IW_IOCTL(SIOCGIWFREQ) = ipw_wx_get_freq,
9924 IW_IOCTL(SIOCSIWMODE) = ipw_wx_set_mode,
9925 IW_IOCTL(SIOCGIWMODE) = ipw_wx_get_mode,
9926 IW_IOCTL(SIOCSIWSENS) = ipw_wx_set_sens,
9927 IW_IOCTL(SIOCGIWSENS) = ipw_wx_get_sens,
9928 IW_IOCTL(SIOCGIWRANGE) = ipw_wx_get_range,
9929 IW_IOCTL(SIOCSIWAP) = ipw_wx_set_wap,
9930 IW_IOCTL(SIOCGIWAP) = ipw_wx_get_wap,
9931 IW_IOCTL(SIOCSIWSCAN) = ipw_wx_set_scan,
9932 IW_IOCTL(SIOCGIWSCAN) = ipw_wx_get_scan,
9933 IW_IOCTL(SIOCSIWESSID) = ipw_wx_set_essid,
9934 IW_IOCTL(SIOCGIWESSID) = ipw_wx_get_essid,
9935 IW_IOCTL(SIOCSIWNICKN) = ipw_wx_set_nick,
9936 IW_IOCTL(SIOCGIWNICKN) = ipw_wx_get_nick,
9937 IW_IOCTL(SIOCSIWRATE) = ipw_wx_set_rate,
9938 IW_IOCTL(SIOCGIWRATE) = ipw_wx_get_rate,
9939 IW_IOCTL(SIOCSIWRTS) = ipw_wx_set_rts,
9940 IW_IOCTL(SIOCGIWRTS) = ipw_wx_get_rts,
9941 IW_IOCTL(SIOCSIWFRAG) = ipw_wx_set_frag,
9942 IW_IOCTL(SIOCGIWFRAG) = ipw_wx_get_frag,
9943 IW_IOCTL(SIOCSIWTXPOW) = ipw_wx_set_txpow,
9944 IW_IOCTL(SIOCGIWTXPOW) = ipw_wx_get_txpow,
9945 IW_IOCTL(SIOCSIWRETRY) = ipw_wx_set_retry,
9946 IW_IOCTL(SIOCGIWRETRY) = ipw_wx_get_retry,
9947 IW_IOCTL(SIOCSIWENCODE) = ipw_wx_set_encode,
9948 IW_IOCTL(SIOCGIWENCODE) = ipw_wx_get_encode,
9949 IW_IOCTL(SIOCSIWPOWER) = ipw_wx_set_power,
9950 IW_IOCTL(SIOCGIWPOWER) = ipw_wx_get_power,
9951 IW_IOCTL(SIOCSIWSPY) = iw_handler_set_spy,
9952 IW_IOCTL(SIOCGIWSPY) = iw_handler_get_spy,
9953 IW_IOCTL(SIOCSIWTHRSPY) = iw_handler_set_thrspy,
9954 IW_IOCTL(SIOCGIWTHRSPY) = iw_handler_get_thrspy,
9955 IW_IOCTL(SIOCSIWGENIE) = ipw_wx_set_genie,
9956 IW_IOCTL(SIOCGIWGENIE) = ipw_wx_get_genie,
9957 IW_IOCTL(SIOCSIWMLME) = ipw_wx_set_mlme,
9958 IW_IOCTL(SIOCSIWAUTH) = ipw_wx_set_auth,
9959 IW_IOCTL(SIOCGIWAUTH) = ipw_wx_get_auth,
9960 IW_IOCTL(SIOCSIWENCODEEXT) = ipw_wx_set_encodeext,
9961 IW_IOCTL(SIOCGIWENCODEEXT) = ipw_wx_get_encodeext,
9962 };
9963
9964 enum {
9965 IPW_PRIV_SET_POWER = SIOCIWFIRSTPRIV,
9966 IPW_PRIV_GET_POWER,
9967 IPW_PRIV_SET_MODE,
9968 IPW_PRIV_GET_MODE,
9969 IPW_PRIV_SET_PREAMBLE,
9970 IPW_PRIV_GET_PREAMBLE,
9971 IPW_PRIV_RESET,
9972 IPW_PRIV_SW_RESET,
9973 #ifdef CONFIG_IPW2200_MONITOR
9974 IPW_PRIV_SET_MONITOR,
9975 #endif
9976 };
9977
9978 static struct iw_priv_args ipw_priv_args[] = {
9979 {
9980 .cmd = IPW_PRIV_SET_POWER,
9981 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
9982 .name = "set_power"},
9983 {
9984 .cmd = IPW_PRIV_GET_POWER,
9985 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
9986 .name = "get_power"},
9987 {
9988 .cmd = IPW_PRIV_SET_MODE,
9989 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
9990 .name = "set_mode"},
9991 {
9992 .cmd = IPW_PRIV_GET_MODE,
9993 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
9994 .name = "get_mode"},
9995 {
9996 .cmd = IPW_PRIV_SET_PREAMBLE,
9997 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
9998 .name = "set_preamble"},
9999 {
10000 .cmd = IPW_PRIV_GET_PREAMBLE,
10001 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | IFNAMSIZ,
10002 .name = "get_preamble"},
10003 {
10004 IPW_PRIV_RESET,
10005 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "reset"},
10006 {
10007 IPW_PRIV_SW_RESET,
10008 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "sw_reset"},
10009 #ifdef CONFIG_IPW2200_MONITOR
10010 {
10011 IPW_PRIV_SET_MONITOR,
10012 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "monitor"},
10013 #endif /* CONFIG_IPW2200_MONITOR */
10014 };
10015
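/* The order of this table must mirror the IPW_PRIV_* enum above, since
 * the wireless extensions core dispatches private ioctls by their offset
 * from SIOCIWFIRSTPRIV. */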
10016 static iw_handler ipw_priv_handler[] = {
10017 ipw_wx_set_powermode,
10018 ipw_wx_get_powermode,
10019 ipw_wx_set_wireless_mode,
10020 ipw_wx_get_wireless_mode,
10021 ipw_wx_set_preamble,
10022 ipw_wx_get_preamble,
10023 ipw_wx_reset,
10024 ipw_wx_sw_reset,
10025 #ifdef CONFIG_IPW2200_MONITOR
10026 ipw_wx_set_monitor,
10027 #endif
10028 };
10029
10030 static struct iw_handler_def ipw_wx_handler_def = {
10031 .standard = ipw_wx_handlers,
10032 .num_standard = ARRAY_SIZE(ipw_wx_handlers),
10033 .num_private = ARRAY_SIZE(ipw_priv_handler),
10034 .num_private_args = ARRAY_SIZE(ipw_priv_args),
10035 .private = ipw_priv_handler,
10036 .private_args = ipw_priv_args,
10037 .get_wireless_stats = ipw_get_wireless_stats,
10038 };
10039
10040 /*
10041 * Get wireless statistics.
10042 * Called by /proc/net/wireless
10043 * Also called by SIOCGIWSTATS
10044 */
10045 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev)
10046 {
10047 struct ipw_priv *priv = ieee80211_priv(dev);
10048 struct iw_statistics *wstats;
10049
10050 wstats = &priv->wstats;
10051
10052 /* if hw is disabled, then ipw_get_ordinal() can't be called.
10053 * netdev->get_wireless_stats seems to be called before fw is
10054 * initialized. STATUS_ASSOCIATED will only be set if the hw is up
10055 * and associated; if not associated, the values are all meaningless
10056 * anyway, so set them all to NULL and INVALID */
10057 if (!(priv->status & STATUS_ASSOCIATED)) {
10058 wstats->miss.beacon = 0;
10059 wstats->discard.retries = 0;
10060 wstats->qual.qual = 0;
10061 wstats->qual.level = 0;
10062 wstats->qual.noise = 0;
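/* 0x7 marks qual, level and noise as updated (assuming the standard
 * IW_QUAL_*_UPDATED bit layout); the INVALID bits OR'd in below then
 * flag those zeroed values as meaningless to user space. */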
10063 wstats->qual.updated = 7;
10064 wstats->qual.updated |= IW_QUAL_NOISE_INVALID |
10065 IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID;
10066 return wstats;
10067 }
10068
10069 wstats->qual.qual = priv->quality;
10070 wstats->qual.level = priv->exp_avg_rssi;
10071 wstats->qual.noise = priv->exp_avg_noise;
10072 wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED |
10073 IW_QUAL_NOISE_UPDATED | IW_QUAL_DBM;
10074
10075 wstats->miss.beacon = average_value(&priv->average_missed_beacons);
10076 wstats->discard.retries = priv->last_tx_failures;
10077 wstats->discard.code = priv->ieee->ieee_stats.rx_discards_undecryptable;
10078
10079 /* if (ipw_get_ordinal(priv, IPW_ORD_STAT_TX_RETRY, &tx_retry, &len))
10080 goto fail_get_ordinal;
10081 wstats->discard.retries += tx_retry; */
10082
10083 return wstats;
10084 }
10085
10086 /* net device stuff */
10087
10088 static void init_sys_config(struct ipw_sys_config *sys_config)
10089 {
10090 memset(sys_config, 0, sizeof(struct ipw_sys_config));
10091 sys_config->bt_coexistence = 0;
10092 sys_config->answer_broadcast_ssid_probe = 0;
10093 sys_config->accept_all_data_frames = 0;
10094 sys_config->accept_non_directed_frames = 1;
10095 sys_config->exclude_unicast_unencrypted = 0;
10096 sys_config->disable_unicast_decryption = 1;
10097 sys_config->exclude_multicast_unencrypted = 0;
10098 sys_config->disable_multicast_decryption = 1;
10099 if (antenna < CFG_SYS_ANTENNA_BOTH || antenna > CFG_SYS_ANTENNA_B)
10100 antenna = CFG_SYS_ANTENNA_BOTH;
10101 sys_config->antenna_diversity = antenna;
10102 sys_config->pass_crc_to_host = 0; /* TODO: See if 1 gives us FCS */
10103 sys_config->dot11g_auto_detection = 0;
10104 sys_config->enable_cts_to_self = 0;
10105 sys_config->bt_coexist_collision_thr = 0;
10106 sys_config->pass_noise_stats_to_host = 1; /* 1 -- fix for 256 */
10107 sys_config->silence_threshold = 0x1e;
10108 }
10109
10110 static int ipw_net_open(struct net_device *dev)
10111 {
10112 struct ipw_priv *priv = ieee80211_priv(dev);
10113 IPW_DEBUG_INFO("dev->open\n");
10114 /* we should be verifying the device is ready to be opened */
10115 mutex_lock(&priv->mutex);
10116 if (!(priv->status & STATUS_RF_KILL_MASK) &&
10117 (priv->status & STATUS_ASSOCIATED))
10118 netif_start_queue(dev);
10119 mutex_unlock(&priv->mutex);
10120 return 0;
10121 }
10122
10123 static int ipw_net_stop(struct net_device *dev)
10124 {
10125 IPW_DEBUG_INFO("dev->close\n");
10126 netif_stop_queue(dev);
10127 return 0;
10128 }
10129
10130 /*
10131 todo:
10132
10133 Modify to send one TFD per fragment instead of using chunking; otherwise
10134 we need to heavily modify ieee80211_skb_to_txb().
10135 */
10136
10137 static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb,
10138 int pri)
10139 {
10140 struct ieee80211_hdr_3addrqos *hdr = (struct ieee80211_hdr_3addrqos *)
10141 txb->fragments[0]->data;
10142 int i = 0;
10143 struct tfd_frame *tfd;
10144 #ifdef CONFIG_IPW2200_QOS
10145 int tx_id = ipw_get_tx_queue_number(priv, pri);
10146 struct clx2_tx_queue *txq = &priv->txq[tx_id];
10147 #else
10148 struct clx2_tx_queue *txq = &priv->txq[0];
10149 #endif
10150 struct clx2_queue *q = &txq->q;
10151 u8 id, hdr_len, unicast;
10152 u16 remaining_bytes;
10153 int fc;
10154 DECLARE_MAC_BUF(mac);
10155
10156 hdr_len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
10157 switch (priv->ieee->iw_mode) {
10158 case IW_MODE_ADHOC:
10159 unicast = !is_multicast_ether_addr(hdr->addr1);
10160 id = ipw_find_station(priv, hdr->addr1);
10161 if (id == IPW_INVALID_STATION) {
10162 id = ipw_add_station(priv, hdr->addr1);
10163 if (id == IPW_INVALID_STATION) {
10164 IPW_WARNING("Attempt to send data to "
10165 "invalid cell: %s\n",
10166 print_mac(mac, hdr->addr1));
10167 goto drop;
10168 }
10169 }
10170 break;
10171
10172 case IW_MODE_INFRA:
10173 default:
10174 unicast = !is_multicast_ether_addr(hdr->addr3);
10175 id = 0;
10176 break;
10177 }
10178
10179 tfd = &txq->bd[q->first_empty];
10180 txq->txb[q->first_empty] = txb;
10181 memset(tfd, 0, sizeof(*tfd));
10182 tfd->u.data.station_number = id;
10183
10184 tfd->control_flags.message_type = TX_FRAME_TYPE;
10185 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
10186
10187 tfd->u.data.cmd_id = DINO_CMD_TX;
10188 tfd->u.data.len = cpu_to_le16(txb->payload_size);
10189 remaining_bytes = txb->payload_size;
10190
10191 if (priv->assoc_request.ieee_mode == IPW_B_MODE)
10192 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_CCK;
10193 else
10194 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_OFDM;
10195
10196 if (priv->assoc_request.preamble_length == DCT_FLAG_SHORT_PREAMBLE)
10197 tfd->u.data.tx_flags |= DCT_FLAG_SHORT_PREAMBLE;
10198
10199 fc = le16_to_cpu(hdr->frame_ctl);
10200 hdr->frame_ctl = cpu_to_le16(fc & ~IEEE80211_FCTL_MOREFRAGS);
10201
10202 memcpy(&tfd->u.data.tfd.tfd_24.mchdr, hdr, hdr_len);
10203
10204 if (likely(unicast))
10205 tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
10206
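/* Hardware encryption setup: SEC_LEVEL_3 selects CCMP (AES-CCM),
 * SEC_LEVEL_2 selects TKIP, and SEC_LEVEL_1 selects static WEP with a
 * 64- or 128-bit key depending on the configured key size. */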
10207 if (txb->encrypted && !priv->ieee->host_encrypt) {
10208 switch (priv->ieee->sec.level) {
10209 case SEC_LEVEL_3:
10210 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10211 cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10212 /* XXX: ACK flag must be set for CCMP even if it
10213 * is a multicast/broadcast packet, because CCMP
10214 * group communication encrypted by GTK is
10215 * actually done by the AP. */
10216 if (!unicast)
10217 tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
10218
10219 tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
10220 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_CCM;
10221 tfd->u.data.key_index = 0;
10222 tfd->u.data.key_index |= DCT_WEP_INDEX_USE_IMMEDIATE;
10223 break;
10224 case SEC_LEVEL_2:
10225 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10226 cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10227 tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
10228 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_TKIP;
10229 tfd->u.data.key_index = DCT_WEP_INDEX_USE_IMMEDIATE;
10230 break;
10231 case SEC_LEVEL_1:
10232 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10233 cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10234 tfd->u.data.key_index = priv->ieee->tx_keyidx;
10235 if (priv->ieee->sec.key_sizes[priv->ieee->tx_keyidx] <=
10236 40)
10237 tfd->u.data.key_index |= DCT_WEP_KEY_64Bit;
10238 else
10239 tfd->u.data.key_index |= DCT_WEP_KEY_128Bit;
10240 break;
10241 case SEC_LEVEL_0:
10242 break;
10243 default:
10244 printk(KERN_ERR "Unknown security level %d\n",
10245 priv->ieee->sec.level);
10246 break;
10247 }
10248 } else
10249 /* No hardware encryption */
10250 tfd->u.data.tx_flags |= DCT_FLAG_NO_WEP;
10251
10252 #ifdef CONFIG_IPW2200_QOS
10253 if (fc & IEEE80211_STYPE_QOS_DATA)
10254 ipw_qos_set_tx_queue_command(priv, pri, &(tfd->u.data));
10255 #endif /* CONFIG_IPW2200_QOS */
10256
10257 /* payload */
10258 tfd->u.data.num_chunks = cpu_to_le32(min((u8) (NUM_TFD_CHUNKS - 2),
10259 txb->nr_frags));
10260 IPW_DEBUG_FRAG("%i fragments being sent as %i chunks.\n",
10261 txb->nr_frags, le32_to_cpu(tfd->u.data.num_chunks));
10262 for (i = 0; i < le32_to_cpu(tfd->u.data.num_chunks); i++) {
10263 IPW_DEBUG_FRAG("Adding fragment %i of %i (%d bytes).\n",
10264 i, le32_to_cpu(tfd->u.data.num_chunks),
10265 txb->fragments[i]->len - hdr_len);
10266 IPW_DEBUG_TX("Dumping TX packet frag %i of %i (%d bytes):\n",
10267 i, tfd->u.data.num_chunks,
10268 txb->fragments[i]->len - hdr_len);
10269 printk_buf(IPW_DL_TX, txb->fragments[i]->data + hdr_len,
10270 txb->fragments[i]->len - hdr_len);
10271
10272 tfd->u.data.chunk_ptr[i] =
10273 cpu_to_le32(pci_map_single
10274 (priv->pci_dev,
10275 txb->fragments[i]->data + hdr_len,
10276 txb->fragments[i]->len - hdr_len,
10277 PCI_DMA_TODEVICE));
10278 tfd->u.data.chunk_len[i] =
10279 cpu_to_le16(txb->fragments[i]->len - hdr_len);
10280 }
10281
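/* The TFD can describe at most NUM_TFD_CHUNKS - 2 chunks, so if any
 * fragments remain, copy their payloads into one linear skb and append
 * it as a final extra chunk. */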
10282 if (i != txb->nr_frags) {
10283 struct sk_buff *skb;
10284 u16 remaining_bytes = 0;
10285 int j;
10286
10287 for (j = i; j < txb->nr_frags; j++)
10288 remaining_bytes += txb->fragments[j]->len - hdr_len;
10289
10290 printk(KERN_INFO "Trying to reallocate for %d bytes\n",
10291 remaining_bytes);
10292 skb = alloc_skb(remaining_bytes, GFP_ATOMIC);
10293 if (skb != NULL) {
10294 tfd->u.data.chunk_len[i] = cpu_to_le16(remaining_bytes);
10295 for (j = i; j < txb->nr_frags; j++) {
10296 int size = txb->fragments[j]->len - hdr_len;
10297
10298 printk(KERN_INFO "Adding frag %d %d...\n",
10299 j, size);
10300 memcpy(skb_put(skb, size),
10301 txb->fragments[j]->data + hdr_len, size);
10302 }
10303 dev_kfree_skb_any(txb->fragments[i]);
10304 txb->fragments[i] = skb;
10305 tfd->u.data.chunk_ptr[i] =
10306 cpu_to_le32(pci_map_single
10307 (priv->pci_dev, skb->data,
10308 tfd->u.data.chunk_len[i],
10309 PCI_DMA_TODEVICE));
10310
10311 tfd->u.data.num_chunks =
10312 cpu_to_le32(le32_to_cpu(tfd->u.data.num_chunks) +
10313 1);
10314 }
10315 }
10316
10317 /* kick DMA */
10318 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
10319 ipw_write32(priv, q->reg_w, q->first_empty);
10320
10321 if (ipw_queue_space(q) < q->high_mark)
10322 netif_stop_queue(priv->net_dev);
10323
10324 return NETDEV_TX_OK;
10325
10326 drop:
10327 IPW_DEBUG_DROP("Silently dropping Tx packet.\n");
10328 ieee80211_txb_free(txb);
10329 return NETDEV_TX_OK;
10330 }
10331
10332 static int ipw_net_is_queue_full(struct net_device *dev, int pri)
10333 {
10334 struct ipw_priv *priv = ieee80211_priv(dev);
10335 #ifdef CONFIG_IPW2200_QOS
10336 int tx_id = ipw_get_tx_queue_number(priv, pri);
10337 struct clx2_tx_queue *txq = &priv->txq[tx_id];
10338 #else
10339 struct clx2_tx_queue *txq = &priv->txq[0];
10340 #endif /* CONFIG_IPW2200_QOS */
10341
10342 if (ipw_queue_space(&txq->q) < txq->q.high_mark)
10343 return 1;
10344
10345 return 0;
10346 }
10347
10348 #ifdef CONFIG_IPW2200_PROMISCUOUS
10349 static void ipw_handle_promiscuous_tx(struct ipw_priv *priv,
10350 struct ieee80211_txb *txb)
10351 {
10352 struct ieee80211_rx_stats dummystats;
10353 struct ieee80211_hdr *hdr;
10354 u8 n;
10355 u16 filter = priv->prom_priv->filter;
10356 int hdr_only = 0;
10357
10358 if (filter & IPW_PROM_NO_TX)
10359 return;
10360
10361 memset(&dummystats, 0, sizeof(dummystats));
10362
10363 /* Filtering of fragment chains is done against the first fragment */
10364 hdr = (void *)txb->fragments[0]->data;
10365 if (ieee80211_is_management(le16_to_cpu(hdr->frame_ctl))) {
10366 if (filter & IPW_PROM_NO_MGMT)
10367 return;
10368 if (filter & IPW_PROM_MGMT_HEADER_ONLY)
10369 hdr_only = 1;
10370 } else if (ieee80211_is_control(le16_to_cpu(hdr->frame_ctl))) {
10371 if (filter & IPW_PROM_NO_CTL)
10372 return;
10373 if (filter & IPW_PROM_CTL_HEADER_ONLY)
10374 hdr_only = 1;
10375 } else if (ieee80211_is_data(le16_to_cpu(hdr->frame_ctl))) {
10376 if (filter & IPW_PROM_NO_DATA)
10377 return;
10378 if (filter & IPW_PROM_DATA_HEADER_ONLY)
10379 hdr_only = 1;
10380 }
10381
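/* Hand each fragment (or just its 802.11 header, if header-only
 * filtering is selected) to the rtap interface, prefixed with a minimal
 * radiotap header whose only present field is the channel (frequency
 * plus band/modulation flags). */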
10382 for (n = 0; n < txb->nr_frags; ++n) {
10383 struct sk_buff *src = txb->fragments[n];
10384 struct sk_buff *dst;
10385 struct ieee80211_radiotap_header *rt_hdr;
10386 int len;
10387
10388 if (hdr_only) {
10389 hdr = (void *)src->data;
10390 len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
10391 } else
10392 len = src->len;
10393
10394 dst = alloc_skb(
10395 len + IEEE80211_RADIOTAP_HDRLEN, GFP_ATOMIC);
10396 if (!dst) continue;
10397
10398 rt_hdr = (void *)skb_put(dst, sizeof(*rt_hdr));
10399
10400 rt_hdr->it_version = PKTHDR_RADIOTAP_VERSION;
10401 rt_hdr->it_pad = 0;
10402 rt_hdr->it_present = 0; /* after all, it's just an idea */
10403 rt_hdr->it_present |= (1 << IEEE80211_RADIOTAP_CHANNEL);
10404
10405 *(u16*)skb_put(dst, sizeof(u16)) = cpu_to_le16(
10406 ieee80211chan2mhz(priv->channel));
10407 if (priv->channel > 14) /* 802.11a */
10408 *(u16*)skb_put(dst, sizeof(u16)) =
10409 cpu_to_le16(IEEE80211_CHAN_OFDM |
10410 IEEE80211_CHAN_5GHZ);
10411 else if (priv->ieee->mode == IEEE_B) /* 802.11b */
10412 *(u16*)skb_put(dst, sizeof(u16)) =
10413 cpu_to_le16(IEEE80211_CHAN_CCK |
10414 IEEE80211_CHAN_2GHZ);
10415 else /* 802.11g */
10416 *(u16*)skb_put(dst, sizeof(u16)) =
10417 cpu_to_le16(IEEE80211_CHAN_OFDM |
10418 IEEE80211_CHAN_2GHZ);
10419
10420 rt_hdr->it_len = dst->len;
10421
10422 skb_copy_from_linear_data(src, skb_put(dst, len), len);
10423
10424 if (!ieee80211_rx(priv->prom_priv->ieee, dst, &dummystats))
10425 dev_kfree_skb_any(dst);
10426 }
10427 }
10428 #endif
10429
10430 static int ipw_net_hard_start_xmit(struct ieee80211_txb *txb,
10431 struct net_device *dev, int pri)
10432 {
10433 struct ipw_priv *priv = ieee80211_priv(dev);
10434 unsigned long flags;
10435 int ret;
10436
10437 IPW_DEBUG_TX("dev->xmit(%d bytes)\n", txb->payload_size);
10438 spin_lock_irqsave(&priv->lock, flags);
10439
10440 if (!(priv->status & STATUS_ASSOCIATED)) {
10441 IPW_DEBUG_INFO("Tx attempt while not associated.\n");
10442 priv->ieee->stats.tx_carrier_errors++;
10443 netif_stop_queue(dev);
10444 goto fail_unlock;
10445 }
10446
10447 #ifdef CONFIG_IPW2200_PROMISCUOUS
10448 if (rtap_iface && netif_running(priv->prom_net_dev))
10449 ipw_handle_promiscuous_tx(priv, txb);
10450 #endif
10451
10452 ret = ipw_tx_skb(priv, txb, pri);
10453 if (ret == NETDEV_TX_OK)
10454 __ipw_led_activity_on(priv);
10455 spin_unlock_irqrestore(&priv->lock, flags);
10456
10457 return ret;
10458
10459 fail_unlock:
10460 spin_unlock_irqrestore(&priv->lock, flags);
10461 return 1;
10462 }
10463
10464 static struct net_device_stats *ipw_net_get_stats(struct net_device *dev)
10465 {
10466 struct ipw_priv *priv = ieee80211_priv(dev);
10467
10468 priv->ieee->stats.tx_packets = priv->tx_packets;
10469 priv->ieee->stats.rx_packets = priv->rx_packets;
10470 return &priv->ieee->stats;
10471 }
10472
10473 static void ipw_net_set_multicast_list(struct net_device *dev)
10474 {
10475
10476 }
10477
10478 static int ipw_net_set_mac_address(struct net_device *dev, void *p)
10479 {
10480 struct ipw_priv *priv = ieee80211_priv(dev);
10481 struct sockaddr *addr = p;
10482 DECLARE_MAC_BUF(mac);
10483
10484 if (!is_valid_ether_addr(addr->sa_data))
10485 return -EADDRNOTAVAIL;
10486 mutex_lock(&priv->mutex);
10487 priv->config |= CFG_CUSTOM_MAC;
10488 memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
10489 printk(KERN_INFO "%s: Setting MAC to %s\n",
10490 priv->net_dev->name, print_mac(mac, priv->mac_addr));
10491 queue_work(priv->workqueue, &priv->adapter_restart);
10492 mutex_unlock(&priv->mutex);
10493 return 0;
10494 }
10495
10496 static void ipw_ethtool_get_drvinfo(struct net_device *dev,
10497 struct ethtool_drvinfo *info)
10498 {
10499 struct ipw_priv *p = ieee80211_priv(dev);
10500 char vers[64];
10501 char date[32];
10502 u32 len;
10503
10504 strcpy(info->driver, DRV_NAME);
10505 strcpy(info->version, DRV_VERSION);
10506
10507 len = sizeof(vers);
10508 ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len);
10509 len = sizeof(date);
10510 ipw_get_ordinal(p, IPW_ORD_STAT_FW_DATE, date, &len);
10511
10512 snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)",
10513 vers, date);
10514 strcpy(info->bus_info, pci_name(p->pci_dev));
10515 info->eedump_len = IPW_EEPROM_IMAGE_SIZE;
10516 }
10517
10518 static u32 ipw_ethtool_get_link(struct net_device *dev)
10519 {
10520 struct ipw_priv *priv = ieee80211_priv(dev);
10521 return (priv->status & STATUS_ASSOCIATED) != 0;
10522 }
10523
10524 static int ipw_ethtool_get_eeprom_len(struct net_device *dev)
10525 {
10526 return IPW_EEPROM_IMAGE_SIZE;
10527 }
10528
10529 static int ipw_ethtool_get_eeprom(struct net_device *dev,
10530 struct ethtool_eeprom *eeprom, u8 * bytes)
10531 {
10532 struct ipw_priv *p = ieee80211_priv(dev);
10533
10534 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
10535 return -EINVAL;
10536 mutex_lock(&p->mutex);
10537 memcpy(bytes, &p->eeprom[eeprom->offset], eeprom->len);
10538 mutex_unlock(&p->mutex);
10539 return 0;
10540 }
10541
10542 static int ipw_ethtool_set_eeprom(struct net_device *dev,
10543 struct ethtool_eeprom *eeprom, u8 * bytes)
10544 {
10545 struct ipw_priv *p = ieee80211_priv(dev);
10546 int i;
10547
10548 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
10549 return -EINVAL;
10550 mutex_lock(&p->mutex);
10551 memcpy(&p->eeprom[eeprom->offset], bytes, eeprom->len);
10552 for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
10553 ipw_write8(p, i + IPW_EEPROM_DATA, p->eeprom[i]);
10554 mutex_unlock(&p->mutex);
10555 return 0;
10556 }
10557
10558 static const struct ethtool_ops ipw_ethtool_ops = {
10559 .get_link = ipw_ethtool_get_link,
10560 .get_drvinfo = ipw_ethtool_get_drvinfo,
10561 .get_eeprom_len = ipw_ethtool_get_eeprom_len,
10562 .get_eeprom = ipw_ethtool_get_eeprom,
10563 .set_eeprom = ipw_ethtool_set_eeprom,
10564 };
10565
10566 static irqreturn_t ipw_isr(int irq, void *data)
10567 {
10568 struct ipw_priv *priv = data;
10569 u32 inta, inta_mask;
10570
10571 if (!priv)
10572 return IRQ_NONE;
10573
10574 spin_lock(&priv->irq_lock);
10575
10576 if (!(priv->status & STATUS_INT_ENABLED)) {
10577 /* IRQ is disabled */
10578 goto none;
10579 }
10580
10581 inta = ipw_read32(priv, IPW_INTA_RW);
10582 inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
10583
10584 if (inta == 0xFFFFFFFF) {
10585 /* Hardware disappeared */
10586 IPW_WARNING("IRQ INTA == 0xFFFFFFFF\n");
10587 goto none;
10588 }
10589
10590 if (!(inta & (IPW_INTA_MASK_ALL & inta_mask))) {
10591 /* Shared interrupt */
10592 goto none;
10593 }
10594
10595 /* tell the device to stop sending interrupts */
10596 __ipw_disable_interrupts(priv);
10597
10598 /* ack current interrupts */
10599 inta &= (IPW_INTA_MASK_ALL & inta_mask);
10600 ipw_write32(priv, IPW_INTA_RW, inta);
10601
10602 /* Cache INTA value for our tasklet */
10603 priv->isr_inta = inta;
10604
10605 tasklet_schedule(&priv->irq_tasklet);
10606
10607 spin_unlock(&priv->irq_lock);
10608
10609 return IRQ_HANDLED;
10610 none:
10611 spin_unlock(&priv->irq_lock);
10612 return IRQ_NONE;
10613 }
10614
10615 static void ipw_rf_kill(void *adapter)
10616 {
10617 struct ipw_priv *priv = adapter;
10618 unsigned long flags;
10619
10620 spin_lock_irqsave(&priv->lock, flags);
10621
10622 if (rf_kill_active(priv)) {
10623 IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
10624 if (priv->workqueue)
10625 queue_delayed_work(priv->workqueue,
10626 &priv->rf_kill, 2 * HZ);
10627 goto exit_unlock;
10628 }
10629
10630 /* RF Kill is now disabled, so bring the device back up */
10631
10632 if (!(priv->status & STATUS_RF_KILL_MASK)) {
10633 IPW_DEBUG_RF_KILL("HW RF Kill no longer active, restarting "
10634 "device\n");
10635
10636 /* We cannot do an adapter restart while inside an irq lock */
10637 queue_work(priv->workqueue, &priv->adapter_restart);
10638 } else
10639 IPW_DEBUG_RF_KILL("HW RF Kill deactivated. SW RF Kill still "
10640 "enabled\n");
10641
10642 exit_unlock:
10643 spin_unlock_irqrestore(&priv->lock, flags);
10644 }
10645
10646 static void ipw_bg_rf_kill(struct work_struct *work)
10647 {
10648 struct ipw_priv *priv =
10649 container_of(work, struct ipw_priv, rf_kill.work);
10650 mutex_lock(&priv->mutex);
10651 ipw_rf_kill(priv);
10652 mutex_unlock(&priv->mutex);
10653 }
10654
10655 static void ipw_link_up(struct ipw_priv *priv)
10656 {
10657 priv->last_seq_num = -1;
10658 priv->last_frag_num = -1;
10659 priv->last_packet_time = 0;
10660
10661 netif_carrier_on(priv->net_dev);
10662 if (netif_queue_stopped(priv->net_dev)) {
10663 IPW_DEBUG_NOTIF("waking queue\n");
10664 netif_wake_queue(priv->net_dev);
10665 } else {
10666 IPW_DEBUG_NOTIF("starting queue\n");
10667 netif_start_queue(priv->net_dev);
10668 }
10669
10670 cancel_delayed_work(&priv->request_scan);
10671 ipw_reset_stats(priv);
10672 /* Ensure the rate is updated immediately */
10673 priv->last_rate = ipw_get_current_rate(priv);
10674 ipw_gather_stats(priv);
10675 ipw_led_link_up(priv);
10676 notify_wx_assoc_event(priv);
10677
10678 if (priv->config & CFG_BACKGROUND_SCAN)
10679 queue_delayed_work(priv->workqueue, &priv->request_scan, HZ);
10680 }
10681
10682 static void ipw_bg_link_up(struct work_struct *work)
10683 {
10684 struct ipw_priv *priv =
10685 container_of(work, struct ipw_priv, link_up);
10686 mutex_lock(&priv->mutex);
10687 ipw_link_up(priv);
10688 mutex_unlock(&priv->mutex);
10689 }
10690
10691 static void ipw_link_down(struct ipw_priv *priv)
10692 {
10693 ipw_led_link_down(priv);
10694 netif_carrier_off(priv->net_dev);
10695 netif_stop_queue(priv->net_dev);
10696 notify_wx_assoc_event(priv);
10697
10698 /* Cancel any queued work ... */
10699 cancel_delayed_work(&priv->request_scan);
10700 cancel_delayed_work(&priv->adhoc_check);
10701 cancel_delayed_work(&priv->gather_stats);
10702
10703 ipw_reset_stats(priv);
10704
10705 if (!(priv->status & STATUS_EXIT_PENDING)) {
10706 /* Queue up another scan... */
10707 queue_delayed_work(priv->workqueue, &priv->request_scan, 0);
10708 }
10709 }
10710
10711 static void ipw_bg_link_down(struct work_struct *work)
10712 {
10713 struct ipw_priv *priv =
10714 container_of(work, struct ipw_priv, link_down);
10715 mutex_lock(&priv->mutex);
10716 ipw_link_down(priv);
10717 mutex_unlock(&priv->mutex);
10718 }
10719
10720 static int ipw_setup_deferred_work(struct ipw_priv *priv)
10721 {
10722 int ret = 0;
10723
10724 priv->workqueue = create_workqueue(DRV_NAME);
10725 init_waitqueue_head(&priv->wait_command_queue);
10726 init_waitqueue_head(&priv->wait_state);
10727
10728 INIT_DELAYED_WORK(&priv->adhoc_check, ipw_bg_adhoc_check);
10729 INIT_WORK(&priv->associate, ipw_bg_associate);
10730 INIT_WORK(&priv->disassociate, ipw_bg_disassociate);
10731 INIT_WORK(&priv->system_config, ipw_system_config);
10732 INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish);
10733 INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart);
10734 INIT_DELAYED_WORK(&priv->rf_kill, ipw_bg_rf_kill);
10735 INIT_WORK(&priv->up, ipw_bg_up);
10736 INIT_WORK(&priv->down, ipw_bg_down);
10737 INIT_DELAYED_WORK(&priv->request_scan, ipw_request_scan);
10738 INIT_WORK(&priv->request_passive_scan, ipw_request_passive_scan);
10739 INIT_DELAYED_WORK(&priv->gather_stats, ipw_bg_gather_stats);
10740 INIT_WORK(&priv->abort_scan, ipw_bg_abort_scan);
10741 INIT_WORK(&priv->roam, ipw_bg_roam);
10742 INIT_DELAYED_WORK(&priv->scan_check, ipw_bg_scan_check);
10743 INIT_WORK(&priv->link_up, ipw_bg_link_up);
10744 INIT_WORK(&priv->link_down, ipw_bg_link_down);
10745 INIT_DELAYED_WORK(&priv->led_link_on, ipw_bg_led_link_on);
10746 INIT_DELAYED_WORK(&priv->led_link_off, ipw_bg_led_link_off);
10747 INIT_DELAYED_WORK(&priv->led_act_off, ipw_bg_led_activity_off);
10748 INIT_WORK(&priv->merge_networks, ipw_merge_adhoc_network);
10749
10750 #ifdef CONFIG_IPW2200_QOS
10751 INIT_WORK(&priv->qos_activate, ipw_bg_qos_activate);
10752 #endif /* CONFIG_IPW2200_QOS */
10753
10754 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
10755 ipw_irq_tasklet, (unsigned long)priv);
10756
10757 return ret;
10758 }
10759
10760 static void shim__set_security(struct net_device *dev,
10761 struct ieee80211_security *sec)
10762 {
10763 struct ipw_priv *priv = ieee80211_priv(dev);
10764 int i;
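/* Bits 0-3 of sec->flags select which of the four key slots carry
 * updated key material; a zero-length key clears that slot's flag. */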
10765 for (i = 0; i < 4; i++) {
10766 if (sec->flags & (1 << i)) {
10767 priv->ieee->sec.encode_alg[i] = sec->encode_alg[i];
10768 priv->ieee->sec.key_sizes[i] = sec->key_sizes[i];
10769 if (sec->key_sizes[i] == 0)
10770 priv->ieee->sec.flags &= ~(1 << i);
10771 else {
10772 memcpy(priv->ieee->sec.keys[i], sec->keys[i],
10773 sec->key_sizes[i]);
10774 priv->ieee->sec.flags |= (1 << i);
10775 }
10776 priv->status |= STATUS_SECURITY_UPDATED;
10777 } else if (sec->level != SEC_LEVEL_1)
10778 priv->ieee->sec.flags &= ~(1 << i);
10779 }
10780
10781 if (sec->flags & SEC_ACTIVE_KEY) {
10782 if (sec->active_key <= 3) {
10783 priv->ieee->sec.active_key = sec->active_key;
10784 priv->ieee->sec.flags |= SEC_ACTIVE_KEY;
10785 } else
10786 priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
10787 priv->status |= STATUS_SECURITY_UPDATED;
10788 } else
10789 priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
10790
10791 if ((sec->flags & SEC_AUTH_MODE) &&
10792 (priv->ieee->sec.auth_mode != sec->auth_mode)) {
10793 priv->ieee->sec.auth_mode = sec->auth_mode;
10794 priv->ieee->sec.flags |= SEC_AUTH_MODE;
10795 if (sec->auth_mode == WLAN_AUTH_SHARED_KEY)
10796 priv->capability |= CAP_SHARED_KEY;
10797 else
10798 priv->capability &= ~CAP_SHARED_KEY;
10799 priv->status |= STATUS_SECURITY_UPDATED;
10800 }
10801
10802 if (sec->flags & SEC_ENABLED && priv->ieee->sec.enabled != sec->enabled) {
10803 priv->ieee->sec.flags |= SEC_ENABLED;
10804 priv->ieee->sec.enabled = sec->enabled;
10805 priv->status |= STATUS_SECURITY_UPDATED;
10806 if (sec->enabled)
10807 priv->capability |= CAP_PRIVACY_ON;
10808 else
10809 priv->capability &= ~CAP_PRIVACY_ON;
10810 }
10811
10812 if (sec->flags & SEC_ENCRYPT)
10813 priv->ieee->sec.encrypt = sec->encrypt;
10814
10815 if (sec->flags & SEC_LEVEL && priv->ieee->sec.level != sec->level) {
10816 priv->ieee->sec.level = sec->level;
10817 priv->ieee->sec.flags |= SEC_LEVEL;
10818 priv->status |= STATUS_SECURITY_UPDATED;
10819 }
10820
10821 if (!priv->ieee->host_encrypt && (sec->flags & SEC_ENCRYPT))
10822 ipw_set_hwcrypto_keys(priv);
10823
10824 /* To match the current functionality of ipw2100 (which works well
10825 * with various supplicants), we don't force a disassociate if the
10826 * privacy capability changes ... */
10827 #if 0
10828 if ((priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) &&
10829 (((priv->assoc_request.capability &
10830 WLAN_CAPABILITY_PRIVACY) && !sec->enabled) ||
10831 (!(priv->assoc_request.capability &
10832 WLAN_CAPABILITY_PRIVACY) && sec->enabled))) {
10833 IPW_DEBUG_ASSOC("Disassociating due to capability "
10834 "change.\n");
10835 ipw_disassociate(priv);
10836 }
10837 #endif
10838 }
10839
10840 static int init_supported_rates(struct ipw_priv *priv,
10841 struct ipw_supported_rates *rates)
10842 {
10843 /* TODO: Mask out rates based on priv->rates_mask */
10844
10845 memset(rates, 0, sizeof(*rates));
10846 /* configure supported rates */
10847 switch (priv->ieee->freq_band) {
10848 case IEEE80211_52GHZ_BAND:
10849 rates->ieee_mode = IPW_A_MODE;
10850 rates->purpose = IPW_RATE_CAPABILITIES;
10851 ipw_add_ofdm_scan_rates(rates, IEEE80211_CCK_MODULATION,
10852 IEEE80211_OFDM_DEFAULT_RATES_MASK);
10853 break;
10854
10855 default: /* Mixed or 2.4Ghz */
10856 rates->ieee_mode = IPW_G_MODE;
10857 rates->purpose = IPW_RATE_CAPABILITIES;
10858 ipw_add_cck_scan_rates(rates, IEEE80211_CCK_MODULATION,
10859 IEEE80211_CCK_DEFAULT_RATES_MASK);
10860 if (priv->ieee->modulation & IEEE80211_OFDM_MODULATION) {
10861 ipw_add_ofdm_scan_rates(rates, IEEE80211_CCK_MODULATION,
10862 IEEE80211_OFDM_DEFAULT_RATES_MASK);
10863 }
10864 break;
10865 }
10866
10867 return 0;
10868 }
10869
10870 static int ipw_config(struct ipw_priv *priv)
10871 {
10872 /* This is only called from ipw_up, which resets/reloads the firmware
10873 so we don't need to first disable the card before we configure
10874 it */
10875 if (ipw_set_tx_power(priv))
10876 goto error;
10877
10878 /* initialize adapter address */
10879 if (ipw_send_adapter_address(priv, priv->net_dev->dev_addr))
10880 goto error;
10881
10882 /* set basic system config settings */
10883 init_sys_config(&priv->sys_config);
10884
10885 /* Support Bluetooth if we have BT h/w on board and the user wants to.
10886 * Does not support BT priority yet (don't abort or defer our Tx) */
10887 if (bt_coexist) {
10888 unsigned char bt_caps = priv->eeprom[EEPROM_SKU_CAPABILITY];
10889
10890 if (bt_caps & EEPROM_SKU_CAP_BT_CHANNEL_SIG)
10891 priv->sys_config.bt_coexistence
10892 |= CFG_BT_COEXISTENCE_SIGNAL_CHNL;
10893 if (bt_caps & EEPROM_SKU_CAP_BT_OOB)
10894 priv->sys_config.bt_coexistence
10895 |= CFG_BT_COEXISTENCE_OOB;
10896 }
10897
10898 #ifdef CONFIG_IPW2200_PROMISCUOUS
10899 if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
10900 priv->sys_config.accept_all_data_frames = 1;
10901 priv->sys_config.accept_non_directed_frames = 1;
10902 priv->sys_config.accept_all_mgmt_bcpr = 1;
10903 priv->sys_config.accept_all_mgmt_frames = 1;
10904 }
10905 #endif
10906
10907 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
10908 priv->sys_config.answer_broadcast_ssid_probe = 1;
10909 else
10910 priv->sys_config.answer_broadcast_ssid_probe = 0;
10911
10912 if (ipw_send_system_config(priv))
10913 goto error;
10914
10915 init_supported_rates(priv, &priv->rates);
10916 if (ipw_send_supported_rates(priv, &priv->rates))
10917 goto error;
10918
10919 /* Set request-to-send threshold */
10920 if (priv->rts_threshold) {
10921 if (ipw_send_rts_threshold(priv, priv->rts_threshold))
10922 goto error;
10923 }
10924 #ifdef CONFIG_IPW2200_QOS
10925 IPW_DEBUG_QOS("QoS: call ipw_qos_activate\n");
10926 ipw_qos_activate(priv, NULL);
10927 #endif /* CONFIG_IPW2200_QOS */
10928
10929 if (ipw_set_random_seed(priv))
10930 goto error;
10931
10932 /* final state transition to the RUN state */
10933 if (ipw_send_host_complete(priv))
10934 goto error;
10935
10936 priv->status |= STATUS_INIT;
10937
10938 ipw_led_init(priv);
10939 ipw_led_radio_on(priv);
10940 priv->notif_missed_beacons = 0;
10941
10942 /* Set hardware WEP key if it is configured. */
10943 if ((priv->capability & CAP_PRIVACY_ON) &&
10944 (priv->ieee->sec.level == SEC_LEVEL_1) &&
10945 !(priv->ieee->host_encrypt || priv->ieee->host_decrypt))
10946 ipw_set_hwcrypto_keys(priv);
10947
10948 return 0;
10949
10950 error:
10951 return -EIO;
10952 }
10953
10954 /*
10955 * NOTE:
10956 *
10957 * These tables have been tested in conjunction with the
10958 * Intel PRO/Wireless 2200BG and 2915ABG Network Connection Adapters.
10959 *
10960 * Altering these values, using them on other hardware, or using them
10961 * in geographies not intended for resale of the above-mentioned Intel
10962 * adapters has not been tested.
10963 *
10964 * Remember to update the table in README.ipw2200 when changing this
10965 * table.
10966 *
10967 */
10968 static const struct ieee80211_geo ipw_geos[] = {
10969 { /* Restricted */
10970 "---",
10971 .bg_channels = 11,
10972 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10973 {2427, 4}, {2432, 5}, {2437, 6},
10974 {2442, 7}, {2447, 8}, {2452, 9},
10975 {2457, 10}, {2462, 11}},
10976 },
10977
10978 { /* Custom US/Canada */
10979 "ZZF",
10980 .bg_channels = 11,
10981 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10982 {2427, 4}, {2432, 5}, {2437, 6},
10983 {2442, 7}, {2447, 8}, {2452, 9},
10984 {2457, 10}, {2462, 11}},
10985 .a_channels = 8,
10986 .a = {{5180, 36},
10987 {5200, 40},
10988 {5220, 44},
10989 {5240, 48},
10990 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
10991 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
10992 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
10993 {5320, 64, IEEE80211_CH_PASSIVE_ONLY}},
10994 },
10995
10996 { /* Rest of World */
10997 "ZZD",
10998 .bg_channels = 13,
10999 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11000 {2427, 4}, {2432, 5}, {2437, 6},
11001 {2442, 7}, {2447, 8}, {2452, 9},
11002 {2457, 10}, {2462, 11}, {2467, 12},
11003 {2472, 13}},
11004 },
11005
11006 { /* Custom USA & Europe & High */
11007 "ZZA",
11008 .bg_channels = 11,
11009 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11010 {2427, 4}, {2432, 5}, {2437, 6},
11011 {2442, 7}, {2447, 8}, {2452, 9},
11012 {2457, 10}, {2462, 11}},
11013 .a_channels = 13,
11014 .a = {{5180, 36},
11015 {5200, 40},
11016 {5220, 44},
11017 {5240, 48},
11018 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
11019 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
11020 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
11021 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
11022 {5745, 149},
11023 {5765, 153},
11024 {5785, 157},
11025 {5805, 161},
11026 {5825, 165}},
11027 },
11028
11029 { /* Custom NA & Europe */
11030 "ZZB",
11031 .bg_channels = 11,
11032 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11033 {2427, 4}, {2432, 5}, {2437, 6},
11034 {2442, 7}, {2447, 8}, {2452, 9},
11035 {2457, 10}, {2462, 11}},
11036 .a_channels = 13,
11037 .a = {{5180, 36},
11038 {5200, 40},
11039 {5220, 44},
11040 {5240, 48},
11041 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
11042 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
11043 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
11044 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
11045 {5745, 149, IEEE80211_CH_PASSIVE_ONLY},
11046 {5765, 153, IEEE80211_CH_PASSIVE_ONLY},
11047 {5785, 157, IEEE80211_CH_PASSIVE_ONLY},
11048 {5805, 161, IEEE80211_CH_PASSIVE_ONLY},
11049 {5825, 165, IEEE80211_CH_PASSIVE_ONLY}},
11050 },
11051
11052 { /* Custom Japan */
11053 "ZZC",
11054 .bg_channels = 11,
11055 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11056 {2427, 4}, {2432, 5}, {2437, 6},
11057 {2442, 7}, {2447, 8}, {2452, 9},
11058 {2457, 10}, {2462, 11}},
11059 .a_channels = 4,
11060 .a = {{5170, 34}, {5190, 38},
11061 {5210, 42}, {5230, 46}},
11062 },
11063
11064 { /* Custom */
11065 "ZZM",
11066 .bg_channels = 11,
11067 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11068 {2427, 4}, {2432, 5}, {2437, 6},
11069 {2442, 7}, {2447, 8}, {2452, 9},
11070 {2457, 10}, {2462, 11}},
11071 },
11072
11073 { /* Europe */
11074 "ZZE",
11075 .bg_channels = 13,
11076 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11077 {2427, 4}, {2432, 5}, {2437, 6},
11078 {2442, 7}, {2447, 8}, {2452, 9},
11079 {2457, 10}, {2462, 11}, {2467, 12},
11080 {2472, 13}},
11081 .a_channels = 19,
11082 .a = {{5180, 36},
11083 {5200, 40},
11084 {5220, 44},
11085 {5240, 48},
11086 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
11087 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
11088 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
11089 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
11090 {5500, 100, IEEE80211_CH_PASSIVE_ONLY},
11091 {5520, 104, IEEE80211_CH_PASSIVE_ONLY},
11092 {5540, 108, IEEE80211_CH_PASSIVE_ONLY},
11093 {5560, 112, IEEE80211_CH_PASSIVE_ONLY},
11094 {5580, 116, IEEE80211_CH_PASSIVE_ONLY},
11095 {5600, 120, IEEE80211_CH_PASSIVE_ONLY},
11096 {5620, 124, IEEE80211_CH_PASSIVE_ONLY},
11097 {5640, 128, IEEE80211_CH_PASSIVE_ONLY},
11098 {5660, 132, IEEE80211_CH_PASSIVE_ONLY},
11099 {5680, 136, IEEE80211_CH_PASSIVE_ONLY},
11100 {5700, 140, IEEE80211_CH_PASSIVE_ONLY}},
11101 },
11102
11103 { /* Custom Japan */
11104 "ZZJ",
11105 .bg_channels = 14,
11106 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11107 {2427, 4}, {2432, 5}, {2437, 6},
11108 {2442, 7}, {2447, 8}, {2452, 9},
11109 {2457, 10}, {2462, 11}, {2467, 12},
11110 {2472, 13}, {2484, 14, IEEE80211_CH_B_ONLY}},
11111 .a_channels = 4,
11112 .a = {{5170, 34}, {5190, 38},
11113 {5210, 42}, {5230, 46}},
11114 },
11115
11116 { /* Rest of World */
11117 "ZZR",
11118 .bg_channels = 14,
11119 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11120 {2427, 4}, {2432, 5}, {2437, 6},
11121 {2442, 7}, {2447, 8}, {2452, 9},
11122 {2457, 10}, {2462, 11}, {2467, 12},
11123 {2472, 13}, {2484, 14, IEEE80211_CH_B_ONLY |
11124 IEEE80211_CH_PASSIVE_ONLY}},
11125 },
11126
11127 { /* High Band */
11128 "ZZH",
11129 .bg_channels = 13,
11130 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11131 {2427, 4}, {2432, 5}, {2437, 6},
11132 {2442, 7}, {2447, 8}, {2452, 9},
11133 {2457, 10}, {2462, 11},
11134 {2467, 12, IEEE80211_CH_PASSIVE_ONLY},
11135 {2472, 13, IEEE80211_CH_PASSIVE_ONLY}},
11136 .a_channels = 4,
11137 .a = {{5745, 149}, {5765, 153},
11138 {5785, 157}, {5805, 161}},
11139 },
11140
11141 { /* Custom Europe */
11142 "ZZG",
11143 .bg_channels = 13,
11144 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11145 {2427, 4}, {2432, 5}, {2437, 6},
11146 {2442, 7}, {2447, 8}, {2452, 9},
11147 {2457, 10}, {2462, 11},
11148 {2467, 12}, {2472, 13}},
11149 .a_channels = 4,
11150 .a = {{5180, 36}, {5200, 40},
11151 {5220, 44}, {5240, 48}},
11152 },
11153
11154 { /* Europe */
11155 "ZZK",
11156 .bg_channels = 13,
11157 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11158 {2427, 4}, {2432, 5}, {2437, 6},
11159 {2442, 7}, {2447, 8}, {2452, 9},
11160 {2457, 10}, {2462, 11},
11161 {2467, 12, IEEE80211_CH_PASSIVE_ONLY},
11162 {2472, 13, IEEE80211_CH_PASSIVE_ONLY}},
11163 .a_channels = 24,
11164 .a = {{5180, 36, IEEE80211_CH_PASSIVE_ONLY},
11165 {5200, 40, IEEE80211_CH_PASSIVE_ONLY},
11166 {5220, 44, IEEE80211_CH_PASSIVE_ONLY},
11167 {5240, 48, IEEE80211_CH_PASSIVE_ONLY},
11168 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
11169 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
11170 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
11171 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
11172 {5500, 100, IEEE80211_CH_PASSIVE_ONLY},
11173 {5520, 104, IEEE80211_CH_PASSIVE_ONLY},
11174 {5540, 108, IEEE80211_CH_PASSIVE_ONLY},
11175 {5560, 112, IEEE80211_CH_PASSIVE_ONLY},
11176 {5580, 116, IEEE80211_CH_PASSIVE_ONLY},
11177 {5600, 120, IEEE80211_CH_PASSIVE_ONLY},
11178 {5620, 124, IEEE80211_CH_PASSIVE_ONLY},
11179 {5640, 128, IEEE80211_CH_PASSIVE_ONLY},
11180 {5660, 132, IEEE80211_CH_PASSIVE_ONLY},
11181 {5680, 136, IEEE80211_CH_PASSIVE_ONLY},
11182 {5700, 140, IEEE80211_CH_PASSIVE_ONLY},
11183 {5745, 149, IEEE80211_CH_PASSIVE_ONLY},
11184 {5765, 153, IEEE80211_CH_PASSIVE_ONLY},
11185 {5785, 157, IEEE80211_CH_PASSIVE_ONLY},
11186 {5805, 161, IEEE80211_CH_PASSIVE_ONLY},
11187 {5825, 165, IEEE80211_CH_PASSIVE_ONLY}},
11188 },
11189
11190 { /* Europe */
11191 "ZZL",
11192 .bg_channels = 11,
11193 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11194 {2427, 4}, {2432, 5}, {2437, 6},
11195 {2442, 7}, {2447, 8}, {2452, 9},
11196 {2457, 10}, {2462, 11}},
11197 .a_channels = 13,
11198 .a = {{5180, 36, IEEE80211_CH_PASSIVE_ONLY},
11199 {5200, 40, IEEE80211_CH_PASSIVE_ONLY},
11200 {5220, 44, IEEE80211_CH_PASSIVE_ONLY},
11201 {5240, 48, IEEE80211_CH_PASSIVE_ONLY},
11202 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
11203 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
11204 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
11205 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
11206 {5745, 149, IEEE80211_CH_PASSIVE_ONLY},
11207 {5765, 153, IEEE80211_CH_PASSIVE_ONLY},
11208 {5785, 157, IEEE80211_CH_PASSIVE_ONLY},
11209 {5805, 161, IEEE80211_CH_PASSIVE_ONLY},
11210 {5825, 165, IEEE80211_CH_PASSIVE_ONLY}},
11211 }
11212 };
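/* The ipw_geos[] table above is selected by matching the 3-character SKU
 * read from the EEPROM country-code field in ipw_up(); the first entry is
 * used as the fallback when the SKU is not recognized. */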
11213
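/* Maximum number of firmware load / configure attempts ipw_up() makes
 * before giving up on the hardware. */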
11214 #define MAX_HW_RESTARTS 5
11215 static int ipw_up(struct ipw_priv *priv)
11216 {
11217 int rc, i, j;
11218
11219 if (priv->status & STATUS_EXIT_PENDING)
11220 return -EIO;
11221
11222 if (cmdlog && !priv->cmdlog) {
11223 priv->cmdlog = kcalloc(cmdlog, sizeof(*priv->cmdlog),
11224 GFP_KERNEL);
11225 if (priv->cmdlog == NULL) {
11226 IPW_ERROR("Error allocating %d command log entries.\n",
11227 cmdlog);
11228 return -ENOMEM;
11229 } else {
11230 priv->cmdlog_len = cmdlog;
11231 }
11232 }
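	/* The cmdlog buffer just allocated is treated as a ring of the last
	 * 'cmdlog' firmware commands; it is exported through the cmd_log
	 * sysfs attribute and freed again in ipw_pci_remove(). */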
11233
11234 for (i = 0; i < MAX_HW_RESTARTS; i++) {
11235 /* Load the microcode, firmware, and eeprom.
11236 * Also start the clocks. */
11237 rc = ipw_load(priv);
11238 if (rc) {
11239 IPW_ERROR("Unable to load firmware: %d\n", rc);
11240 return rc;
11241 }
11242
11243 ipw_init_ordinals(priv);
11244 if (!(priv->config & CFG_CUSTOM_MAC))
11245 eeprom_parse_mac(priv, priv->mac_addr);
11246 memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
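		{
			/* Illustrative sketch only (not part of the original
			 * flow): the DECLARE_MAC_BUF()/print_mac() helpers
			 * introduced by this change can format the freshly
			 * parsed address for a debug message. */
			DECLARE_MAC_BUF(mac);
			IPW_DEBUG_INFO("EEPROM MAC address: %s\n",
				       print_mac(mac, priv->mac_addr));
		}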
11247
11248 for (j = 0; j < ARRAY_SIZE(ipw_geos); j++) {
11249 if (!memcmp(&priv->eeprom[EEPROM_COUNTRY_CODE],
11250 ipw_geos[j].name, 3))
11251 break;
11252 }
11253 if (j == ARRAY_SIZE(ipw_geos)) {
11254 IPW_WARNING("SKU [%c%c%c] not recognized.\n",
11255 priv->eeprom[EEPROM_COUNTRY_CODE + 0],
11256 priv->eeprom[EEPROM_COUNTRY_CODE + 1],
11257 priv->eeprom[EEPROM_COUNTRY_CODE + 2]);
11258 j = 0;
11259 }
11260 if (ieee80211_set_geo(priv->ieee, &ipw_geos[j])) {
11261 			IPW_WARNING("Could not set geography.\n");
11262 return 0;
11263 }
11264
11265 if (priv->status & STATUS_RF_KILL_SW) {
11266 IPW_WARNING("Radio disabled by module parameter.\n");
11267 return 0;
11268 } else if (rf_kill_active(priv)) {
11269 IPW_WARNING("Radio Frequency Kill Switch is On:\n"
11270 "Kill switch must be turned off for "
11271 "wireless networking to work.\n");
11272 queue_delayed_work(priv->workqueue, &priv->rf_kill,
11273 2 * HZ);
11274 return 0;
11275 }
11276
11277 rc = ipw_config(priv);
11278 if (!rc) {
11279 IPW_DEBUG_INFO("Configured device on count %i\n", i);
11280
11281 			/* If configured to auto-associate, kick
11282 			 * off a scan. */
11283 queue_delayed_work(priv->workqueue,
11284 &priv->request_scan, 0);
11285
11286 return 0;
11287 }
11288
11289 IPW_DEBUG_INFO("Device configuration failed: 0x%08X\n", rc);
11290 IPW_DEBUG_INFO("Failed to config device on retry %d of %d\n",
11291 i, MAX_HW_RESTARTS);
11292
11293 /* We had an error bringing up the hardware, so take it
11294 * all the way back down so we can try again */
11295 ipw_down(priv);
11296 }
11297
11298 	/* Tried to restart and configure the device for as long as our
11299 * patience could withstand */
11300 IPW_ERROR("Unable to initialize device after %d attempts.\n", i);
11301
11302 return -EIO;
11303 }
11304
11305 static void ipw_bg_up(struct work_struct *work)
11306 {
11307 struct ipw_priv *priv =
11308 container_of(work, struct ipw_priv, up);
11309 mutex_lock(&priv->mutex);
11310 ipw_up(priv);
11311 mutex_unlock(&priv->mutex);
11312 }
11313
11314 static void ipw_deinit(struct ipw_priv *priv)
11315 {
11316 int i;
11317
11318 if (priv->status & STATUS_SCANNING) {
11319 IPW_DEBUG_INFO("Aborting scan during shutdown.\n");
11320 ipw_abort_scan(priv);
11321 }
11322
11323 if (priv->status & STATUS_ASSOCIATED) {
11324 IPW_DEBUG_INFO("Disassociating during shutdown.\n");
11325 ipw_disassociate(priv);
11326 }
11327
11328 ipw_led_shutdown(priv);
11329
11330 /* Wait up to 1s for status to change to not scanning and not
11331 	 * associated (disassociation can take a while for a full 802.11
11332 	 * exchange) */
11333 for (i = 1000; i && (priv->status &
11334 (STATUS_DISASSOCIATING |
11335 STATUS_ASSOCIATED | STATUS_SCANNING)); i--)
11336 udelay(10);
11337
11338 if (priv->status & (STATUS_DISASSOCIATING |
11339 STATUS_ASSOCIATED | STATUS_SCANNING))
11340 IPW_DEBUG_INFO("Still associated or scanning...\n");
11341 else
11342 IPW_DEBUG_INFO("Took %dms to de-init\n", 1000 - i);
11343
11344 /* Attempt to disable the card */
11345 ipw_send_card_disable(priv, 0);
11346
11347 priv->status &= ~STATUS_INIT;
11348 }
11349
11350 static void ipw_down(struct ipw_priv *priv)
11351 {
11352 int exit_pending = priv->status & STATUS_EXIT_PENDING;
11353
11354 priv->status |= STATUS_EXIT_PENDING;
11355
11356 if (ipw_is_init(priv))
11357 ipw_deinit(priv);
11358
11359 /* Wipe out the EXIT_PENDING status bit if we are not actually
11360 * exiting the module */
11361 if (!exit_pending)
11362 priv->status &= ~STATUS_EXIT_PENDING;
11363
11364 /* tell the device to stop sending interrupts */
11365 ipw_disable_interrupts(priv);
11366
11367 	/* Clear all status bits except RF kill and exit-pending */
11368 priv->status &= STATUS_RF_KILL_MASK | STATUS_EXIT_PENDING;
11369 netif_carrier_off(priv->net_dev);
11370 netif_stop_queue(priv->net_dev);
11371
11372 ipw_stop_nic(priv);
11373
11374 ipw_led_radio_off(priv);
11375 }
11376
11377 static void ipw_bg_down(struct work_struct *work)
11378 {
11379 struct ipw_priv *priv =
11380 container_of(work, struct ipw_priv, down);
11381 mutex_lock(&priv->mutex);
11382 ipw_down(priv);
11383 mutex_unlock(&priv->mutex);
11384 }
11385
11386 /* Called by register_netdev() */
11387 static int ipw_net_init(struct net_device *dev)
11388 {
11389 struct ipw_priv *priv = ieee80211_priv(dev);
11390 mutex_lock(&priv->mutex);
11391
11392 if (ipw_up(priv)) {
11393 mutex_unlock(&priv->mutex);
11394 return -EIO;
11395 }
11396
11397 mutex_unlock(&priv->mutex);
11398 return 0;
11399 }
11400
11401 /* PCI driver glue: device table, probe/remove and power-management hooks */
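/* Each positional initializer below is { vendor, device, subvendor,
 * subdevice, class, class_mask, driver_data }. */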
11402 static struct pci_device_id card_ids[] = {
11403 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2701, 0, 0, 0},
11404 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2702, 0, 0, 0},
11405 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2711, 0, 0, 0},
11406 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2712, 0, 0, 0},
11407 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2721, 0, 0, 0},
11408 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2722, 0, 0, 0},
11409 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2731, 0, 0, 0},
11410 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2732, 0, 0, 0},
11411 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2741, 0, 0, 0},
11412 {PCI_VENDOR_ID_INTEL, 0x1043, 0x103c, 0x2741, 0, 0, 0},
11413 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2742, 0, 0, 0},
11414 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2751, 0, 0, 0},
11415 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2752, 0, 0, 0},
11416 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2753, 0, 0, 0},
11417 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2754, 0, 0, 0},
11418 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2761, 0, 0, 0},
11419 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2762, 0, 0, 0},
11420 {PCI_VENDOR_ID_INTEL, 0x104f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
11421 {PCI_VENDOR_ID_INTEL, 0x4220, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* BG */
11422 {PCI_VENDOR_ID_INTEL, 0x4221, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* BG */
11423 {PCI_VENDOR_ID_INTEL, 0x4223, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* ABG */
11424 {PCI_VENDOR_ID_INTEL, 0x4224, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* ABG */
11425
11426 /* required last entry */
11427 {0,}
11428 };
11429
11430 MODULE_DEVICE_TABLE(pci, card_ids);
11431
11432 static struct attribute *ipw_sysfs_entries[] = {
11433 &dev_attr_rf_kill.attr,
11434 &dev_attr_direct_dword.attr,
11435 &dev_attr_indirect_byte.attr,
11436 &dev_attr_indirect_dword.attr,
11437 &dev_attr_mem_gpio_reg.attr,
11438 &dev_attr_command_event_reg.attr,
11439 &dev_attr_nic_type.attr,
11440 &dev_attr_status.attr,
11441 &dev_attr_cfg.attr,
11442 &dev_attr_error.attr,
11443 &dev_attr_event_log.attr,
11444 &dev_attr_cmd_log.attr,
11445 &dev_attr_eeprom_delay.attr,
11446 &dev_attr_ucode_version.attr,
11447 &dev_attr_rtc.attr,
11448 &dev_attr_scan_age.attr,
11449 &dev_attr_led.attr,
11450 &dev_attr_speed_scan.attr,
11451 &dev_attr_net_stats.attr,
11452 &dev_attr_channels.attr,
11453 #ifdef CONFIG_IPW2200_PROMISCUOUS
11454 &dev_attr_rtap_iface.attr,
11455 &dev_attr_rtap_filter.attr,
11456 #endif
11457 NULL
11458 };
11459
11460 static struct attribute_group ipw_attribute_group = {
11461 .name = NULL, /* put in device directory */
11462 .attrs = ipw_sysfs_entries,
11463 };
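/* The group is attached to the PCI device's kobject in ipw_pci_probe(), so
 * these attributes show up in the device's sysfs directory. */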
11464
11465 #ifdef CONFIG_IPW2200_PROMISCUOUS
11466 static int ipw_prom_open(struct net_device *dev)
11467 {
11468 struct ipw_prom_priv *prom_priv = ieee80211_priv(dev);
11469 struct ipw_priv *priv = prom_priv->priv;
11470
11471 IPW_DEBUG_INFO("prom dev->open\n");
11472 netif_carrier_off(dev);
11473 netif_stop_queue(dev);
11474
11475 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
11476 priv->sys_config.accept_all_data_frames = 1;
11477 priv->sys_config.accept_non_directed_frames = 1;
11478 priv->sys_config.accept_all_mgmt_bcpr = 1;
11479 priv->sys_config.accept_all_mgmt_frames = 1;
11480
11481 ipw_send_system_config(priv);
11482 }
11483
11484 return 0;
11485 }
11486
11487 static int ipw_prom_stop(struct net_device *dev)
11488 {
11489 struct ipw_prom_priv *prom_priv = ieee80211_priv(dev);
11490 struct ipw_priv *priv = prom_priv->priv;
11491
11492 IPW_DEBUG_INFO("prom dev->stop\n");
11493
11494 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
11495 priv->sys_config.accept_all_data_frames = 0;
11496 priv->sys_config.accept_non_directed_frames = 0;
11497 priv->sys_config.accept_all_mgmt_bcpr = 0;
11498 priv->sys_config.accept_all_mgmt_frames = 0;
11499
11500 ipw_send_system_config(priv);
11501 }
11502
11503 return 0;
11504 }
11505
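/* The radiotap interface is receive-only: any transmit attempt just stops
 * the queue and is rejected. */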
11506 static int ipw_prom_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
11507 {
11508 IPW_DEBUG_INFO("prom dev->xmit\n");
11509 netif_stop_queue(dev);
11510 return -EOPNOTSUPP;
11511 }
11512
11513 static struct net_device_stats *ipw_prom_get_stats(struct net_device *dev)
11514 {
11515 struct ipw_prom_priv *prom_priv = ieee80211_priv(dev);
11516 return &prom_priv->ieee->stats;
11517 }
11518
11519 static int ipw_prom_alloc(struct ipw_priv *priv)
11520 {
11521 int rc = 0;
11522
11523 if (priv->prom_net_dev)
11524 return -EPERM;
11525
11526 priv->prom_net_dev = alloc_ieee80211(sizeof(struct ipw_prom_priv));
11527 if (priv->prom_net_dev == NULL)
11528 return -ENOMEM;
11529
11530 priv->prom_priv = ieee80211_priv(priv->prom_net_dev);
11531 priv->prom_priv->ieee = netdev_priv(priv->prom_net_dev);
11532 priv->prom_priv->priv = priv;
11533
11534 strcpy(priv->prom_net_dev->name, "rtap%d");
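	/* The "%d" in the name template is replaced with the first free index
	 * (rtap0, rtap1, ...) when register_netdev() allocates the name below. */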
11535
11536 priv->prom_net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
11537 priv->prom_net_dev->open = ipw_prom_open;
11538 priv->prom_net_dev->stop = ipw_prom_stop;
11539 priv->prom_net_dev->get_stats = ipw_prom_get_stats;
11540 priv->prom_net_dev->hard_start_xmit = ipw_prom_hard_start_xmit;
11541
11542 priv->prom_priv->ieee->iw_mode = IW_MODE_MONITOR;
11543
11544 rc = register_netdev(priv->prom_net_dev);
11545 if (rc) {
11546 free_ieee80211(priv->prom_net_dev);
11547 priv->prom_net_dev = NULL;
11548 return rc;
11549 }
11550
11551 return 0;
11552 }
11553
11554 static void ipw_prom_free(struct ipw_priv *priv)
11555 {
11556 if (!priv->prom_net_dev)
11557 return;
11558
11559 unregister_netdev(priv->prom_net_dev);
11560 free_ieee80211(priv->prom_net_dev);
11561
11562 priv->prom_net_dev = NULL;
11563 }
11564
11565 #endif
11566
11567
11568 static int ipw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
11569 {
11570 int err = 0;
11571 struct net_device *net_dev;
11572 void __iomem *base;
11573 u32 length, val;
11574 struct ipw_priv *priv;
11575 int i;
11576
11577 net_dev = alloc_ieee80211(sizeof(struct ipw_priv));
11578 if (net_dev == NULL) {
11579 err = -ENOMEM;
11580 goto out;
11581 }
11582
11583 priv = ieee80211_priv(net_dev);
11584 priv->ieee = netdev_priv(net_dev);
11585
11586 priv->net_dev = net_dev;
11587 priv->pci_dev = pdev;
11588 ipw_debug_level = debug;
11589 spin_lock_init(&priv->irq_lock);
11590 spin_lock_init(&priv->lock);
11591 for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++)
11592 INIT_LIST_HEAD(&priv->ibss_mac_hash[i]);
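	/* Each hash bucket holds a list of struct ipw_ibss_seq entries used
	 * for per-peer sequence bookkeeping in ad-hoc (IBSS) mode; the lists
	 * are emptied again in ipw_pci_remove(). */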
11593
11594 mutex_init(&priv->mutex);
11595 if (pci_enable_device(pdev)) {
11596 err = -ENODEV;
11597 goto out_free_ieee80211;
11598 }
11599
11600 pci_set_master(pdev);
11601
11602 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
11603 if (!err)
11604 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
11605 if (err) {
11606 printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n");
11607 goto out_pci_disable_device;
11608 }
11609
11610 pci_set_drvdata(pdev, priv);
11611
11612 err = pci_request_regions(pdev, DRV_NAME);
11613 if (err)
11614 goto out_pci_disable_device;
11615
11616 /* We disable the RETRY_TIMEOUT register (0x41) to keep
11617 * PCI Tx retries from interfering with C3 CPU state */
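	/* The register sits at byte offset 0x41, i.e. bits 8-15 of the dword
	 * read at 0x40, so the 0x0000ff00 test and the 0xffff00ff mask below
	 * touch only that byte. */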
11618 pci_read_config_dword(pdev, 0x40, &val);
11619 if ((val & 0x0000ff00) != 0)
11620 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
11621
11622 length = pci_resource_len(pdev, 0);
11623 priv->hw_len = length;
11624
11625 base = ioremap_nocache(pci_resource_start(pdev, 0), length);
11626 if (!base) {
11627 err = -ENODEV;
11628 goto out_pci_release_regions;
11629 }
11630
11631 priv->hw_base = base;
11632 IPW_DEBUG_INFO("pci_resource_len = 0x%08x\n", length);
11633 IPW_DEBUG_INFO("pci_resource_base = %p\n", base);
11634
11635 err = ipw_setup_deferred_work(priv);
11636 if (err) {
11637 IPW_ERROR("Unable to setup deferred work\n");
11638 goto out_iounmap;
11639 }
11640
11641 ipw_sw_reset(priv, 1);
11642
11643 err = request_irq(pdev->irq, ipw_isr, IRQF_SHARED, DRV_NAME, priv);
11644 if (err) {
11645 IPW_ERROR("Error allocating IRQ %d\n", pdev->irq);
11646 goto out_destroy_workqueue;
11647 }
11648
11649 SET_NETDEV_DEV(net_dev, &pdev->dev);
11650
11651 mutex_lock(&priv->mutex);
11652
11653 priv->ieee->hard_start_xmit = ipw_net_hard_start_xmit;
11654 priv->ieee->set_security = shim__set_security;
11655 priv->ieee->is_queue_full = ipw_net_is_queue_full;
11656
11657 #ifdef CONFIG_IPW2200_QOS
11658 priv->ieee->is_qos_active = ipw_is_qos_active;
11659 priv->ieee->handle_probe_response = ipw_handle_beacon;
11660 priv->ieee->handle_beacon = ipw_handle_probe_response;
11661 priv->ieee->handle_assoc_response = ipw_handle_assoc_response;
11662 #endif /* CONFIG_IPW2200_QOS */
11663
11664 priv->ieee->perfect_rssi = -20;
11665 priv->ieee->worst_rssi = -85;
11666
11667 net_dev->open = ipw_net_open;
11668 net_dev->stop = ipw_net_stop;
11669 net_dev->init = ipw_net_init;
11670 net_dev->get_stats = ipw_net_get_stats;
11671 net_dev->set_multicast_list = ipw_net_set_multicast_list;
11672 net_dev->set_mac_address = ipw_net_set_mac_address;
11673 priv->wireless_data.spy_data = &priv->ieee->spy_data;
11674 net_dev->wireless_data = &priv->wireless_data;
11675 net_dev->wireless_handlers = &ipw_wx_handler_def;
11676 net_dev->ethtool_ops = &ipw_ethtool_ops;
11677 net_dev->irq = pdev->irq;
11678 net_dev->base_addr = (unsigned long)priv->hw_base;
11679 net_dev->mem_start = pci_resource_start(pdev, 0);
11680 net_dev->mem_end = net_dev->mem_start + pci_resource_len(pdev, 0) - 1;
11681
11682 err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group);
11683 if (err) {
11684 IPW_ERROR("failed to create sysfs device attributes\n");
11685 mutex_unlock(&priv->mutex);
11686 goto out_release_irq;
11687 }
11688
11689 mutex_unlock(&priv->mutex);
11690 err = register_netdev(net_dev);
11691 if (err) {
11692 IPW_ERROR("failed to register network device\n");
11693 goto out_remove_sysfs;
11694 }
11695
11696 #ifdef CONFIG_IPW2200_PROMISCUOUS
11697 if (rtap_iface) {
11698 err = ipw_prom_alloc(priv);
11699 if (err) {
11700 IPW_ERROR("Failed to register promiscuous network "
11701 "device (error %d).\n", err);
11702 unregister_netdev(priv->net_dev);
11703 goto out_remove_sysfs;
11704 }
11705 }
11706 #endif
11707
11708 printk(KERN_INFO DRV_NAME ": Detected geography %s (%d 802.11bg "
11709 "channels, %d 802.11a channels)\n",
11710 priv->ieee->geo.name, priv->ieee->geo.bg_channels,
11711 priv->ieee->geo.a_channels);
11712
11713 return 0;
11714
11715 out_remove_sysfs:
11716 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11717 out_release_irq:
11718 free_irq(pdev->irq, priv);
11719 out_destroy_workqueue:
11720 destroy_workqueue(priv->workqueue);
11721 priv->workqueue = NULL;
11722 out_iounmap:
11723 iounmap(priv->hw_base);
11724 out_pci_release_regions:
11725 pci_release_regions(pdev);
11726 out_pci_disable_device:
11727 pci_disable_device(pdev);
11728 pci_set_drvdata(pdev, NULL);
11729 out_free_ieee80211:
11730 free_ieee80211(priv->net_dev);
11731 out:
11732 return err;
11733 }
11734
11735 static void ipw_pci_remove(struct pci_dev *pdev)
11736 {
11737 struct ipw_priv *priv = pci_get_drvdata(pdev);
11738 struct list_head *p, *q;
11739 int i;
11740
11741 if (!priv)
11742 return;
11743
11744 mutex_lock(&priv->mutex);
11745
11746 priv->status |= STATUS_EXIT_PENDING;
11747 ipw_down(priv);
11748 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11749
11750 mutex_unlock(&priv->mutex);
11751
11752 unregister_netdev(priv->net_dev);
11753
11754 if (priv->rxq) {
11755 ipw_rx_queue_free(priv, priv->rxq);
11756 priv->rxq = NULL;
11757 }
11758 ipw_tx_queue_free(priv);
11759
11760 if (priv->cmdlog) {
11761 kfree(priv->cmdlog);
11762 priv->cmdlog = NULL;
11763 }
11764 	/* ipw_down will ensure that there is no more pending work
11765 	 * in the workqueue, so we can safely cancel the delayed work
11766 	 * and destroy the queue now. */
11766 cancel_delayed_work(&priv->adhoc_check);
11767 cancel_delayed_work(&priv->gather_stats);
11768 cancel_delayed_work(&priv->request_scan);
11769 cancel_delayed_work(&priv->rf_kill);
11770 cancel_delayed_work(&priv->scan_check);
11771 destroy_workqueue(priv->workqueue);
11772 priv->workqueue = NULL;
11773
11774 /* Free MAC hash list for ADHOC */
11775 for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) {
11776 list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) {
11777 list_del(p);
11778 kfree(list_entry(p, struct ipw_ibss_seq, list));
11779 }
11780 }
11781
11782 kfree(priv->error);
11783 priv->error = NULL;
11784
11785 #ifdef CONFIG_IPW2200_PROMISCUOUS
11786 ipw_prom_free(priv);
11787 #endif
11788
11789 free_irq(pdev->irq, priv);
11790 iounmap(priv->hw_base);
11791 pci_release_regions(pdev);
11792 pci_disable_device(pdev);
11793 pci_set_drvdata(pdev, NULL);
11794 free_ieee80211(priv->net_dev);
11795 free_firmware();
11796 }
11797
11798 #ifdef CONFIG_PM
11799 static int ipw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
11800 {
11801 struct ipw_priv *priv = pci_get_drvdata(pdev);
11802 struct net_device *dev = priv->net_dev;
11803
11804 printk(KERN_INFO "%s: Going into suspend...\n", dev->name);
11805
11806 /* Take down the device; powers it off, etc. */
11807 ipw_down(priv);
11808
11809 /* Remove the PRESENT state of the device */
11810 netif_device_detach(dev);
11811
11812 pci_save_state(pdev);
11813 pci_disable_device(pdev);
11814 pci_set_power_state(pdev, pci_choose_state(pdev, state));
11815
11816 return 0;
11817 }
11818
11819 static int ipw_pci_resume(struct pci_dev *pdev)
11820 {
11821 struct ipw_priv *priv = pci_get_drvdata(pdev);
11822 struct net_device *dev = priv->net_dev;
11823 int err;
11824 u32 val;
11825
11826 printk(KERN_INFO "%s: Coming out of suspend...\n", dev->name);
11827
11828 pci_set_power_state(pdev, PCI_D0);
11829 err = pci_enable_device(pdev);
11830 if (err) {
11831 printk(KERN_ERR "%s: pci_enable_device failed on resume\n",
11832 dev->name);
11833 return err;
11834 }
11835 pci_restore_state(pdev);
11836
11837 /*
11838 * Suspend/Resume resets the PCI configuration space, so we have to
11839 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
11840 * from interfering with C3 CPU state. pci_restore_state won't help
11841 	 * here since it only restores the first 64 bytes of the PCI config header.
11842 */
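	/* (Offset 0x40 lies just past the 0x00-0x3f standard header region
	 * that pci_restore_state() covers.) */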
11843 pci_read_config_dword(pdev, 0x40, &val);
11844 if ((val & 0x0000ff00) != 0)
11845 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
11846
11847 	/* Set the device back into the PRESENT state; this will also wake
11848 	 * the queue if needed */
11849 netif_device_attach(dev);
11850
11851 /* Bring the device back up */
11852 queue_work(priv->workqueue, &priv->up);
11853
11854 return 0;
11855 }
11856 #endif
11857
11858 static void ipw_pci_shutdown(struct pci_dev *pdev)
11859 {
11860 struct ipw_priv *priv = pci_get_drvdata(pdev);
11861
11862 /* Take down the device; powers it off, etc. */
11863 ipw_down(priv);
11864
11865 pci_disable_device(pdev);
11866 }
11867
11868 /* Driver registration and module entry points */
11869 static struct pci_driver ipw_driver = {
11870 .name = DRV_NAME,
11871 .id_table = card_ids,
11872 .probe = ipw_pci_probe,
11873 .remove = __devexit_p(ipw_pci_remove),
11874 #ifdef CONFIG_PM
11875 .suspend = ipw_pci_suspend,
11876 .resume = ipw_pci_resume,
11877 #endif
11878 .shutdown = ipw_pci_shutdown,
11879 };
11880
11881 static int __init ipw_init(void)
11882 {
11883 int ret;
11884
11885 printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
11886 printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");
11887
11888 ret = pci_register_driver(&ipw_driver);
11889 if (ret) {
11890 IPW_ERROR("Unable to initialize PCI module\n");
11891 return ret;
11892 }
11893
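	/* Driver attributes appear under /sys/bus/pci/drivers/<DRV_NAME>/,
	 * so this exposes a debug_level knob alongside the bound devices. */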
11894 ret = driver_create_file(&ipw_driver.driver, &driver_attr_debug_level);
11895 if (ret) {
11896 IPW_ERROR("Unable to create driver sysfs file\n");
11897 pci_unregister_driver(&ipw_driver);
11898 return ret;
11899 }
11900
11901 return ret;
11902 }
11903
11904 static void __exit ipw_exit(void)
11905 {
11906 driver_remove_file(&ipw_driver.driver, &driver_attr_debug_level);
11907 pci_unregister_driver(&ipw_driver);
11908 }
11909
11910 module_param(disable, int, 0444);
11911 MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
11912
11913 module_param(associate, int, 0444);
11914 MODULE_PARM_DESC(associate, "auto associate when scanning (default on)");
11915
11916 module_param(auto_create, int, 0444);
11917 MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)");
11918
11919 module_param(led, int, 0444);
11920 MODULE_PARM_DESC(led, "enable led control on some systems (default 0 off)");
11921
11922 module_param(debug, int, 0444);
11923 MODULE_PARM_DESC(debug, "debug output mask");
11924
11925 module_param(channel, int, 0444);
11926 MODULE_PARM_DESC(channel, "channel to limit associate to (default 0 [ANY])");
11927
11928 #ifdef CONFIG_IPW2200_PROMISCUOUS
11929 module_param(rtap_iface, int, 0444);
11930 MODULE_PARM_DESC(rtap_iface, "create the rtap interface (1 - create, default 0)");
11931 #endif
11932
11933 #ifdef CONFIG_IPW2200_QOS
11934 module_param(qos_enable, int, 0444);
11935 MODULE_PARM_DESC(qos_enable, "enable all QoS functionality");
11936
11937 module_param(qos_burst_enable, int, 0444);
11938 MODULE_PARM_DESC(qos_burst_enable, "enable QoS burst mode");
11939
11940 module_param(qos_no_ack_mask, int, 0444);
11941 MODULE_PARM_DESC(qos_no_ack_mask, "bitmask of Tx queues whose frames are sent without requiring an ack");
11942
11943 module_param(burst_duration_CCK, int, 0444);
11944 MODULE_PARM_DESC(burst_duration_CCK, "set CCK burst value");
11945
11946 module_param(burst_duration_OFDM, int, 0444);
11947 MODULE_PARM_DESC(burst_duration_OFDM, "set OFDM burst value");
11948 #endif /* CONFIG_IPW2200_QOS */
11949
11950 #ifdef CONFIG_IPW2200_MONITOR
11951 module_param(mode, int, 0444);
11952 MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS,2=Monitor)");
11953 #else
11954 module_param(mode, int, 0444);
11955 MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS)");
11956 #endif
11957
11958 module_param(bt_coexist, int, 0444);
11959 MODULE_PARM_DESC(bt_coexist, "enable bluetooth coexistence (default off)");
11960
11961 module_param(hwcrypto, int, 0444);
11962 MODULE_PARM_DESC(hwcrypto, "enable hardware crypto (default off)");
11963
11964 module_param(cmdlog, int, 0444);
11965 MODULE_PARM_DESC(cmdlog,
11966 "allocate a ring buffer for logging firmware commands");
11967
11968 module_param(roaming, int, 0444);
11969 MODULE_PARM_DESC(roaming, "enable roaming support (default on)");
11970
11971 module_param(antenna, int, 0444);
11972 MODULE_PARM_DESC(antenna, "select antenna (default 0 [both], 1=Main, 2=slow_diversity [choose the one with lower background noise], 3=Aux)");
11973
11974 module_exit(ipw_exit);
11975 module_init(ipw_init);