BUG_ON() Conversion in drivers/net/
[deliverable/linux.git] / drivers / net / wireless / ipw2200.c
1 /******************************************************************************
2
3 Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved.
4
5 802.11 status code portion of this file from ethereal-0.10.6:
6 Copyright 2000, Axis Communications AB
7 Ethereal - Network traffic analyzer
8 By Gerald Combs <gerald@ethereal.com>
9 Copyright 1998 Gerald Combs
10
11 This program is free software; you can redistribute it and/or modify it
12 under the terms of version 2 of the GNU General Public License as
13 published by the Free Software Foundation.
14
15 This program is distributed in the hope that it will be useful, but WITHOUT
16 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 more details.
19
20 You should have received a copy of the GNU General Public License along with
21 this program; if not, write to the Free Software Foundation, Inc., 59
22 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23
24 The full GNU General Public License is included in this distribution in the
25 file called LICENSE.
26
27 Contact Information:
28 James P. Ketrenos <ipw2100-admin@linux.intel.com>
29 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30
31 ******************************************************************************/
32
33 #include "ipw2200.h"
34 #include <linux/version.h>
35
36 #define IPW2200_VERSION "git-1.1.1"
37 #define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2200/2915 Network Driver"
38 #define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation"
39 #define DRV_VERSION IPW2200_VERSION
40
41 #define ETH_P_80211_STATS (ETH_P_80211_RAW + 1)
42
43 MODULE_DESCRIPTION(DRV_DESCRIPTION);
44 MODULE_VERSION(DRV_VERSION);
45 MODULE_AUTHOR(DRV_COPYRIGHT);
46 MODULE_LICENSE("GPL");
47
48 static int cmdlog = 0;
49 static int debug = 0;
50 static int channel = 0;
51 static int mode = 0;
52
53 static u32 ipw_debug_level;
54 static int associate = 1;
55 static int auto_create = 1;
56 static int led = 0;
57 static int disable = 0;
58 static int bt_coexist = 0;
59 static int hwcrypto = 0;
60 static int roaming = 1;
61 static const char ipw_modes[] = {
62 'a', 'b', 'g', '?'
63 };
64
65 #ifdef CONFIG_IPW_QOS
66 static int qos_enable = 0;
67 static int qos_burst_enable = 0;
68 static int qos_no_ack_mask = 0;
69 static int burst_duration_CCK = 0;
70 static int burst_duration_OFDM = 0;
71
72 static struct ieee80211_qos_parameters def_qos_parameters_OFDM = {
73 {QOS_TX0_CW_MIN_OFDM, QOS_TX1_CW_MIN_OFDM, QOS_TX2_CW_MIN_OFDM,
74 QOS_TX3_CW_MIN_OFDM},
75 {QOS_TX0_CW_MAX_OFDM, QOS_TX1_CW_MAX_OFDM, QOS_TX2_CW_MAX_OFDM,
76 QOS_TX3_CW_MAX_OFDM},
77 {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
78 {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
79 {QOS_TX0_TXOP_LIMIT_OFDM, QOS_TX1_TXOP_LIMIT_OFDM,
80 QOS_TX2_TXOP_LIMIT_OFDM, QOS_TX3_TXOP_LIMIT_OFDM}
81 };
82
83 static struct ieee80211_qos_parameters def_qos_parameters_CCK = {
84 {QOS_TX0_CW_MIN_CCK, QOS_TX1_CW_MIN_CCK, QOS_TX2_CW_MIN_CCK,
85 QOS_TX3_CW_MIN_CCK},
86 {QOS_TX0_CW_MAX_CCK, QOS_TX1_CW_MAX_CCK, QOS_TX2_CW_MAX_CCK,
87 QOS_TX3_CW_MAX_CCK},
88 {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
89 {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
90 {QOS_TX0_TXOP_LIMIT_CCK, QOS_TX1_TXOP_LIMIT_CCK, QOS_TX2_TXOP_LIMIT_CCK,
91 QOS_TX3_TXOP_LIMIT_CCK}
92 };
93
94 static struct ieee80211_qos_parameters def_parameters_OFDM = {
95 {DEF_TX0_CW_MIN_OFDM, DEF_TX1_CW_MIN_OFDM, DEF_TX2_CW_MIN_OFDM,
96 DEF_TX3_CW_MIN_OFDM},
97 {DEF_TX0_CW_MAX_OFDM, DEF_TX1_CW_MAX_OFDM, DEF_TX2_CW_MAX_OFDM,
98 DEF_TX3_CW_MAX_OFDM},
99 {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
100 {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
101 {DEF_TX0_TXOP_LIMIT_OFDM, DEF_TX1_TXOP_LIMIT_OFDM,
102 DEF_TX2_TXOP_LIMIT_OFDM, DEF_TX3_TXOP_LIMIT_OFDM}
103 };
104
105 static struct ieee80211_qos_parameters def_parameters_CCK = {
106 {DEF_TX0_CW_MIN_CCK, DEF_TX1_CW_MIN_CCK, DEF_TX2_CW_MIN_CCK,
107 DEF_TX3_CW_MIN_CCK},
108 {DEF_TX0_CW_MAX_CCK, DEF_TX1_CW_MAX_CCK, DEF_TX2_CW_MAX_CCK,
109 DEF_TX3_CW_MAX_CCK},
110 {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
111 {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
112 {DEF_TX0_TXOP_LIMIT_CCK, DEF_TX1_TXOP_LIMIT_CCK, DEF_TX2_TXOP_LIMIT_CCK,
113 DEF_TX3_TXOP_LIMIT_CCK}
114 };
115
116 static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 };
117
118 static int from_priority_to_tx_queue[] = {
119 IPW_TX_QUEUE_1, IPW_TX_QUEUE_2, IPW_TX_QUEUE_2, IPW_TX_QUEUE_1,
120 IPW_TX_QUEUE_3, IPW_TX_QUEUE_3, IPW_TX_QUEUE_4, IPW_TX_QUEUE_4
121 };
122
123 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv);
124
125 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct ieee80211_qos_parameters
126 *qos_param);
127 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct ieee80211_qos_information_element
128 *qos_param);
129 #endif /* CONFIG_IPW_QOS */
130
131 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev);
132 static void ipw_remove_current_network(struct ipw_priv *priv);
133 static void ipw_rx(struct ipw_priv *priv);
134 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
135 struct clx2_tx_queue *txq, int qindex);
136 static int ipw_queue_reset(struct ipw_priv *priv);
137
138 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
139 int len, int sync);
140
141 static void ipw_tx_queue_free(struct ipw_priv *);
142
143 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *);
144 static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *);
145 static void ipw_rx_queue_replenish(void *);
146 static int ipw_up(struct ipw_priv *);
147 static void ipw_bg_up(void *);
148 static void ipw_down(struct ipw_priv *);
149 static void ipw_bg_down(void *);
150 static int ipw_config(struct ipw_priv *);
151 static int init_supported_rates(struct ipw_priv *priv,
152 struct ipw_supported_rates *prates);
153 static void ipw_set_hwcrypto_keys(struct ipw_priv *);
154 static void ipw_send_wep_keys(struct ipw_priv *, int);
155
/*
 * Format one hexdump line into buf: an 8-digit hex offset, up to 16 data
 * bytes as hex (two groups of 8), then the same bytes as printable ASCII
 * (non-printable bytes shown as '.').  Returns the number of characters
 * written (snprintf-style: may exceed 'count' if truncated).
 *
 * NOTE(review): the pad/separator string literals here may have had their
 * widths collapsed by formatting — confirm column alignment against output.
 */
static int snprint_line(char *buf, size_t count,
			const u8 * data, u32 len, u32 ofs)
{
	int out, i, j, l;
	char c;

	/* offset column */
	out = snprintf(buf, count, "%08X", ofs);

	/* hex columns: two groups of 8 bytes; pad groups short of 8 */
	for (l = 0, i = 0; i < 2; i++) {
		out += snprintf(buf + out, count - out, " ");
		for (j = 0; j < 8 && l < len; j++, l++)
			out += snprintf(buf + out, count - out, "%02X ",
					data[(i * 8 + j)]);
		for (; j < 8; j++)
			out += snprintf(buf + out, count - out, " ");
	}

	/* ASCII columns: same bytes, '.' for non-printable characters */
	out += snprintf(buf + out, count - out, " ");
	for (l = 0, i = 0; i < 2; i++) {
		out += snprintf(buf + out, count - out, " ");
		for (j = 0; j < 8 && l < len; j++, l++) {
			c = data[(i * 8 + j)];
			if (!isascii(c) || !isprint(c))
				c = '.';

			out += snprintf(buf + out, count - out, "%c", c);
		}

		for (; j < 8; j++)
			out += snprintf(buf + out, count - out, " ");
	}

	return out;
}
190
191 static void printk_buf(int level, const u8 * data, u32 len)
192 {
193 char line[81];
194 u32 ofs = 0;
195 if (!(ipw_debug_level & level))
196 return;
197
198 while (len) {
199 snprint_line(line, sizeof(line), &data[ofs],
200 min(len, 16U), ofs);
201 printk(KERN_DEBUG "%s\n", line);
202 ofs += 16;
203 len -= min(len, 16U);
204 }
205 }
206
/*
 * Hexdump 'len' bytes of 'data' into the string 'output' (at most 'size'
 * bytes), one 16-byte line per call to snprint_line().  Returns the total
 * number of characters produced.
 *
 * NOTE(review): snprint_line() returns the snprintf-style "would-be" length,
 * which can exceed the 'size' passed in; 'size -= out' would then wrap the
 * unsigned size_t.  In-tree callers appear to pass generously sized buffers —
 * confirm before reusing with tight buffers.
 */
static int snprintk_buf(u8 * output, size_t size, const u8 * data, size_t len)
{
	size_t out = size;
	u32 ofs = 0;
	int total = 0;

	while (size && len) {
		out = snprint_line(output, size, &data[ofs],
				   min_t(size_t, len, 16U), ofs);

		ofs += 16;
		output += out;
		size -= out;
		len -= min_t(size_t, len, 16U);
		total += out;
	}
	return total;
}
225
226 /* alias for 32-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
227 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg);
228 #define ipw_read_reg32(a, b) _ipw_read_reg32(a, b)
229
230 /* alias for 8-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
231 static u8 _ipw_read_reg8(struct ipw_priv *ipw, u32 reg);
232 #define ipw_read_reg8(a, b) _ipw_read_reg8(a, b)
233
234 /* 8-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
235 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value);
236 static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c)
237 {
238 IPW_DEBUG_IO("%s %d: write_indirect8(0x%08X, 0x%08X)\n", __FILE__,
239 __LINE__, (u32) (b), (u32) (c));
240 _ipw_write_reg8(a, b, c);
241 }
242
243 /* 16-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
244 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value);
245 static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c)
246 {
247 IPW_DEBUG_IO("%s %d: write_indirect16(0x%08X, 0x%08X)\n", __FILE__,
248 __LINE__, (u32) (b), (u32) (c));
249 _ipw_write_reg16(a, b, c);
250 }
251
252 /* 32-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
253 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value);
254 static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c)
255 {
256 IPW_DEBUG_IO("%s %d: write_indirect32(0x%08X, 0x%08X)\n", __FILE__,
257 __LINE__, (u32) (b), (u32) (c));
258 _ipw_write_reg32(a, b, c);
259 }
260
/* 8-bit direct write (low 4K) */
#define _ipw_write8(ipw, ofs, val) writeb((val), (ipw)->hw_base + (ofs))

/* 8-bit direct write (for low 4K of SRAM/regs), with debug wrapper.
 * Wrapped in do { } while (0): the previous form expanded to two bare
 * statements, so an un-braced `if (cond) ipw_write8(...)` guarded only
 * the debug printout and performed the hardware write unconditionally. */
#define ipw_write8(ipw, ofs, val) do { \
	IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
	_ipw_write8(ipw, ofs, val); \
} while (0)

/* 16-bit direct write (low 4K) */
#define _ipw_write16(ipw, ofs, val) writew((val), (ipw)->hw_base + (ofs))

/* 16-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
#define ipw_write16(ipw, ofs, val) do { \
	IPW_DEBUG_IO("%s %d: write_direct16(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
	_ipw_write16(ipw, ofs, val); \
} while (0)

/* 32-bit direct write (low 4K) */
#define _ipw_write32(ipw, ofs, val) writel((val), (ipw)->hw_base + (ofs))

/* 32-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
#define ipw_write32(ipw, ofs, val) do { \
	IPW_DEBUG_IO("%s %d: write_direct32(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
	_ipw_write32(ipw, ofs, val); \
} while (0)
284
285 /* 8-bit direct read (low 4K) */
286 #define _ipw_read8(ipw, ofs) readb((ipw)->hw_base + (ofs))
287
288 /* 8-bit direct read (low 4K), with debug wrapper */
289 static inline u8 __ipw_read8(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
290 {
291 IPW_DEBUG_IO("%s %d: read_direct8(0x%08X)\n", f, l, (u32) (ofs));
292 return _ipw_read8(ipw, ofs);
293 }
294
295 /* alias to 8-bit direct read (low 4K of SRAM/regs), with debug wrapper */
296 #define ipw_read8(ipw, ofs) __ipw_read8(__FILE__, __LINE__, ipw, ofs)
297
298 /* 16-bit direct read (low 4K) */
299 #define _ipw_read16(ipw, ofs) readw((ipw)->hw_base + (ofs))
300
301 /* 16-bit direct read (low 4K), with debug wrapper */
302 static inline u16 __ipw_read16(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
303 {
304 IPW_DEBUG_IO("%s %d: read_direct16(0x%08X)\n", f, l, (u32) (ofs));
305 return _ipw_read16(ipw, ofs);
306 }
307
308 /* alias to 16-bit direct read (low 4K of SRAM/regs), with debug wrapper */
309 #define ipw_read16(ipw, ofs) __ipw_read16(__FILE__, __LINE__, ipw, ofs)
310
311 /* 32-bit direct read (low 4K) */
312 #define _ipw_read32(ipw, ofs) readl((ipw)->hw_base + (ofs))
313
314 /* 32-bit direct read (low 4K), with debug wrapper */
315 static inline u32 __ipw_read32(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
316 {
317 IPW_DEBUG_IO("%s %d: read_direct32(0x%08X)\n", f, l, (u32) (ofs));
318 return _ipw_read32(ipw, ofs);
319 }
320
321 /* alias to 32-bit direct read (low 4K of SRAM/regs), with debug wrapper */
322 #define ipw_read32(ipw, ofs) __ipw_read32(__FILE__, __LINE__, ipw, ofs)
323
324 /* multi-byte read (above 4K), with debug wrapper */
325 static void _ipw_read_indirect(struct ipw_priv *, u32, u8 *, int);
326 static inline void __ipw_read_indirect(const char *f, int l,
327 struct ipw_priv *a, u32 b, u8 * c, int d)
328 {
329 IPW_DEBUG_IO("%s %d: read_indirect(0x%08X) %d bytes\n", f, l, (u32) (b),
330 d);
331 _ipw_read_indirect(a, b, c, d);
332 }
333
334 /* alias to multi-byte read (SRAM/regs above 4K), with debug wrapper */
335 #define ipw_read_indirect(a, b, c, d) __ipw_read_indirect(__FILE__, __LINE__, a, b, c, d)
336
337 /* alias to multi-byte read (SRAM/regs above 4K), with debug wrapper */
338 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * data,
339 int num);
340 #define ipw_write_indirect(a, b, c, d) \
341 IPW_DEBUG_IO("%s %d: write_indirect(0x%08X) %d bytes\n", __FILE__, __LINE__, (u32)(b), d); \
342 _ipw_write_indirect(a, b, c, d)
343
/* 32-bit indirect write (above 4K): latch the target address into the
 * indirect-address register, then write the data; the device decodes the
 * pair.  'reg' is assumed dword-aligned here. */
static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value)
{
	IPW_DEBUG_IO(" %p : reg = 0x%8X : value = 0x%8X\n", priv, reg, value);
	_ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
	_ipw_write32(priv, IPW_INDIRECT_DATA, value);
}
351
/* 8-bit indirect write (above 4K): align the address down to a dword
 * boundary for the indirect-address register, then write the single byte
 * at the matching offset within the indirect-data window. */
static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value)
{
	u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK;	/* dword align */
	u32 dif_len = reg - aligned_addr;	/* byte offset within the dword */

	IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
	_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
	_ipw_write8(priv, IPW_INDIRECT_DATA + dif_len, value);
}
362
/* 16-bit indirect write (above 4K): dword-align the address for the
 * indirect-address register, then write the half-word at the matching
 * offset within the indirect-data window.
 * NOTE(review): the ~0x1 mask forces the data offset to be 16-bit aligned,
 * so an odd 'reg' silently targets the even address below it — confirm
 * callers only pass aligned registers. */
static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value)
{
	u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK;	/* dword align */
	u32 dif_len = (reg - aligned_addr) & (~0x1ul);

	IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
	_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
	_ipw_write16(priv, IPW_INDIRECT_DATA + dif_len, value);
}
373
/* 8-bit indirect read (above 4K): read the whole aligned dword through the
 * indirect window, then shift/mask out the byte selected by the low two
 * address bits. */
static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg)
{
	u32 word;
	_ipw_write32(priv, IPW_INDIRECT_ADDR, reg & IPW_INDIRECT_ADDR_MASK);
	IPW_DEBUG_IO(" reg = 0x%8X : \n", reg);
	word = _ipw_read32(priv, IPW_INDIRECT_DATA);
	return (word >> ((reg & 0x3) * 8)) & 0xff;
}
383
/* 32-bit indirect read (above 4K): latch the (dword-aligned) address into
 * the indirect-address register, then read the data register. */
static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg)
{
	u32 value;

	IPW_DEBUG_IO("%p : reg = 0x%08x\n", priv, reg);

	_ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
	value = _ipw_read32(priv, IPW_INDIRECT_DATA);
	IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x \n", reg, value);
	return value;
}
396
397 /* General purpose, no alignment requirement, iterative (multi-byte) read, */
398 /* for area above 1st 4K of SRAM/reg space */
399 static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
400 int num)
401 {
402 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */
403 u32 dif_len = addr - aligned_addr;
404 u32 i;
405
406 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
407
408 if (num <= 0) {
409 return;
410 }
411
412 /* Read the first dword (or portion) byte by byte */
413 if (unlikely(dif_len)) {
414 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
415 /* Start reading at aligned_addr + dif_len */
416 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--)
417 *buf++ = _ipw_read8(priv, IPW_INDIRECT_DATA + i);
418 aligned_addr += 4;
419 }
420
421 /* Read all of the middle dwords as dwords, with auto-increment */
422 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
423 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
424 *(u32 *) buf = _ipw_read32(priv, IPW_AUTOINC_DATA);
425
426 /* Read the last dword (or portion) byte by byte */
427 if (unlikely(num)) {
428 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
429 for (i = 0; num > 0; i++, num--)
430 *buf++ = ipw_read8(priv, IPW_INDIRECT_DATA + i);
431 }
432 }
433
/* General purpose, no alignment requirement, iterative (multi-byte) write, */
/* for area above 1st 4K of SRAM/reg space.
 *
 * Writes 'num' bytes from 'buf' to device address 'addr': a leading
 * unaligned fragment byte-by-byte, the aligned middle as auto-incrementing
 * dword writes, and any trailing fragment byte-by-byte. */
static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
				int num)
{
	u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK;	/* dword align */
	u32 dif_len = addr - aligned_addr;	/* byte offset within first dword */
	u32 i;

	IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);

	if (num <= 0) {
		return;
	}

	/* Write the first dword (or portion) byte by byte */
	if (unlikely(dif_len)) {
		_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
		/* Start writing at aligned_addr + dif_len */
		for (i = dif_len; ((i < 4) && (num > 0)); i++, num--, buf++)
			_ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
		aligned_addr += 4;
	}

	/* Write all of the middle dwords as dwords, with auto-increment */
	_ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
	for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
		_ipw_write32(priv, IPW_AUTOINC_DATA, *(u32 *) buf);

	/* Write the last dword (or portion) byte by byte */
	if (unlikely(num)) {
		_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
		for (i = 0; num > 0; i++, num--, buf++)
			_ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
	}
}
470
/* General purpose, no alignment requirement, iterative (multi-byte) write, */
/* for 1st 4K of SRAM/regs space: a straight memcpy into the memory-mapped
 * window at hw_base + addr. */
static void ipw_write_direct(struct ipw_priv *priv, u32 addr, void *buf,
			     int num)
{
	memcpy_toio((priv->hw_base + addr), buf, num);
}
478
479 /* Set bit(s) in low 4K of SRAM/regs */
480 static inline void ipw_set_bit(struct ipw_priv *priv, u32 reg, u32 mask)
481 {
482 ipw_write32(priv, reg, ipw_read32(priv, reg) | mask);
483 }
484
485 /* Clear bit(s) in low 4K of SRAM/regs */
486 static inline void ipw_clear_bit(struct ipw_priv *priv, u32 reg, u32 mask)
487 {
488 ipw_write32(priv, reg, ipw_read32(priv, reg) & ~mask);
489 }
490
/* Unmask all device interrupts; no-op if already enabled.  The software
 * flag is set before the hardware write so the state tracked in 'status'
 * never lags the unmask. */
static inline void ipw_enable_interrupts(struct ipw_priv *priv)
{
	if (priv->status & STATUS_INT_ENABLED)
		return;
	priv->status |= STATUS_INT_ENABLED;
	ipw_write32(priv, IPW_INTA_MASK_R, IPW_INTA_MASK_ALL);
}
498
/* Mask all device interrupts; no-op if already disabled.  Mirrors
 * ipw_enable_interrupts(): flag cleared before the hardware write. */
static inline void ipw_disable_interrupts(struct ipw_priv *priv)
{
	if (!(priv->status & STATUS_INT_ENABLED))
		return;
	priv->status &= ~STATUS_INT_ENABLED;
	ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
}
506
507 #ifdef CONFIG_IPW2200_DEBUG
508 static char *ipw_error_desc(u32 val)
509 {
510 switch (val) {
511 case IPW_FW_ERROR_OK:
512 return "ERROR_OK";
513 case IPW_FW_ERROR_FAIL:
514 return "ERROR_FAIL";
515 case IPW_FW_ERROR_MEMORY_UNDERFLOW:
516 return "MEMORY_UNDERFLOW";
517 case IPW_FW_ERROR_MEMORY_OVERFLOW:
518 return "MEMORY_OVERFLOW";
519 case IPW_FW_ERROR_BAD_PARAM:
520 return "BAD_PARAM";
521 case IPW_FW_ERROR_BAD_CHECKSUM:
522 return "BAD_CHECKSUM";
523 case IPW_FW_ERROR_NMI_INTERRUPT:
524 return "NMI_INTERRUPT";
525 case IPW_FW_ERROR_BAD_DATABASE:
526 return "BAD_DATABASE";
527 case IPW_FW_ERROR_ALLOC_FAIL:
528 return "ALLOC_FAIL";
529 case IPW_FW_ERROR_DMA_UNDERRUN:
530 return "DMA_UNDERRUN";
531 case IPW_FW_ERROR_DMA_STATUS:
532 return "DMA_STATUS";
533 case IPW_FW_ERROR_DINO_ERROR:
534 return "DINO_ERROR";
535 case IPW_FW_ERROR_EEPROM_ERROR:
536 return "EEPROM_ERROR";
537 case IPW_FW_ERROR_SYSASSERT:
538 return "SYSASSERT";
539 case IPW_FW_ERROR_FATAL_ERROR:
540 return "FATAL_ERROR";
541 default:
542 return "UNKNOWN_ERROR";
543 }
544 }
545
/* Dump a captured firmware error log to the kernel log: overall status and
 * config, then each error element and each event-log entry.  Tolerates a
 * NULL 'error' (capture/allocation may have failed). */
static void ipw_dump_error_log(struct ipw_priv *priv,
			       struct ipw_fw_error *error)
{
	u32 i;

	if (!error) {
		IPW_ERROR("Error allocating and capturing error log. "
			  "Nothing to dump.\n");
		return;
	}

	IPW_ERROR("Start IPW Error Log Dump:\n");
	IPW_ERROR("Status: 0x%08X, Config: %08X\n",
		  error->status, error->config);

	/* one line per captured error element */
	for (i = 0; i < error->elem_len; i++)
		IPW_ERROR("%s %i 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
			  ipw_error_desc(error->elem[i].desc),
			  error->elem[i].time,
			  error->elem[i].blink1,
			  error->elem[i].blink2,
			  error->elem[i].link1,
			  error->elem[i].link2, error->elem[i].data);
	/* then the raw firmware event log */
	for (i = 0; i < error->log_len; i++)
		IPW_ERROR("%i\t0x%08x\t%i\n",
			  error->log[i].time,
			  error->log[i].data, error->log[i].event);
}
574 #endif
575
576 static inline int ipw_is_init(struct ipw_priv *priv)
577 {
578 return (priv->status & STATUS_INIT) ? 1 : 0;
579 }
580
/*
 * Read an "ordinal" (a firmware-exported statistic/parameter) into 'val'.
 *
 * 'ord' encodes a table id (IPW_ORD_TABLE_ID_MASK bits) plus an index.
 * On entry *len is the caller's buffer size; on success it is updated to
 * the number of bytes actually stored.  Returns 0 on success or -EINVAL
 * on bad arguments, uninitialized tables, out-of-range index, or a buffer
 * that is too small (in which case *len is set to the required size for
 * table 2 lookups).
 */
static int ipw_get_ordinal(struct ipw_priv *priv, u32 ord, void *val, u32 * len)
{
	u32 addr, field_info, field_len, field_count, total_len;

	IPW_DEBUG_ORD("ordinal = %i\n", ord);

	if (!priv || !val || !len) {
		IPW_DEBUG_ORD("Invalid argument\n");
		return -EINVAL;
	}

	/* verify device ordinal tables have been initialized */
	if (!priv->table0_addr || !priv->table1_addr || !priv->table2_addr) {
		IPW_DEBUG_ORD("Access ordinals before initialization\n");
		return -EINVAL;
	}

	switch (IPW_ORD_TABLE_ID_MASK & ord) {
	case IPW_ORD_TABLE_0_MASK:
		/*
		 * TABLE 0: Direct access to a table of 32 bit values
		 *
		 * This is a very simple table with the data directly
		 * read from the table
		 */

		/* remove the table id from the ordinal */
		ord &= IPW_ORD_TABLE_VALUE_MASK;

		/* boundary check
		 * NOTE(review): '>' (not '>=') — whether table0_len is a
		 * count or a max index is set by firmware; confirm. */
		if (ord > priv->table0_len) {
			IPW_DEBUG_ORD("ordinal value (%i) longer then "
				      "max (%i)\n", ord, priv->table0_len);
			return -EINVAL;
		}

		/* verify we have enough room to store the value */
		if (*len < sizeof(u32)) {
			IPW_DEBUG_ORD("ordinal buffer length too small, "
				      "need %zd\n", sizeof(u32));
			return -EINVAL;
		}

		IPW_DEBUG_ORD("Reading TABLE0[%i] from offset 0x%08x\n",
			      ord, priv->table0_addr + (ord << 2));

		*len = sizeof(u32);
		ord <<= 2;	/* index -> byte offset (4 bytes/entry) */
		*((u32 *) val) = ipw_read32(priv, priv->table0_addr + ord);
		break;

	case IPW_ORD_TABLE_1_MASK:
		/*
		 * TABLE 1: Indirect access to a table of 32 bit values
		 *
		 * This is a fairly large table of u32 values each
		 * representing starting addr for the data (which is
		 * also a u32)
		 */

		/* remove the table id from the ordinal */
		ord &= IPW_ORD_TABLE_VALUE_MASK;

		/* boundary check */
		if (ord > priv->table1_len) {
			IPW_DEBUG_ORD("ordinal value too long\n");
			return -EINVAL;
		}

		/* verify we have enough room to store the value */
		if (*len < sizeof(u32)) {
			IPW_DEBUG_ORD("ordinal buffer length too small, "
				      "need %zd\n", sizeof(u32));
			return -EINVAL;
		}

		*((u32 *) val) =
		    ipw_read_reg32(priv, (priv->table1_addr + (ord << 2)));
		*len = sizeof(u32);
		break;

	case IPW_ORD_TABLE_2_MASK:
		/*
		 * TABLE 2: Indirect access to a table of variable sized values
		 *
		 * This table consist of six values, each containing
		 * - dword containing the starting offset of the data
		 * - dword containing the lengh in the first 16bits
		 *   and the count in the second 16bits
		 */

		/* remove the table id from the ordinal */
		ord &= IPW_ORD_TABLE_VALUE_MASK;

		/* boundary check */
		if (ord > priv->table2_len) {
			IPW_DEBUG_ORD("ordinal value too long\n");
			return -EINVAL;
		}

		/* get the address of statistic */
		addr = ipw_read_reg32(priv, priv->table2_addr + (ord << 3));

		/* get the second DW of statistics ;
		 * two 16-bit words - first is length, second is count */
		field_info =
		    ipw_read_reg32(priv,
				   priv->table2_addr + (ord << 3) +
				   sizeof(u32));

		/* get each entry length
		 * NOTE(review): this pointer-punning picks the halves by
		 * host byte order — assumes little-endian layout matches
		 * the device; confirm on big-endian targets. */
		field_len = *((u16 *) & field_info);

		/* get number of entries */
		field_count = *(((u16 *) & field_info) + 1);

		/* abort if not enought memory */
		total_len = field_len * field_count;
		if (total_len > *len) {
			*len = total_len;	/* report required size */
			return -EINVAL;
		}

		*len = total_len;
		if (!total_len)
			return 0;

		IPW_DEBUG_ORD("addr = 0x%08x, total_len = %i, "
			      "field_info = 0x%08x\n",
			      addr, total_len, field_info);
		ipw_read_indirect(priv, addr, val, total_len);
		break;

	default:
		IPW_DEBUG_ORD("Invalid ordinal!\n");
		return -EINVAL;

	}

	return 0;
}
722
/* Cache the addresses and lengths of the three firmware ordinal tables.
 * Table 0 lives at a fixed SRAM location; tables 1 and 2 are located via
 * pointers read from fixed locations, and each table stores its own
 * length in its first dword. */
static void ipw_init_ordinals(struct ipw_priv *priv)
{
	priv->table0_addr = IPW_ORDINALS_TABLE_LOWER;
	priv->table0_len = ipw_read32(priv, priv->table0_addr);

	IPW_DEBUG_ORD("table 0 offset at 0x%08x, len = %i\n",
		      priv->table0_addr, priv->table0_len);

	priv->table1_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_1);
	priv->table1_len = ipw_read_reg32(priv, priv->table1_addr);

	IPW_DEBUG_ORD("table 1 offset at 0x%08x, len = %i\n",
		      priv->table1_addr, priv->table1_len);

	priv->table2_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_2);
	priv->table2_len = ipw_read_reg32(priv, priv->table2_addr);
	priv->table2_len &= 0x0000ffff;	/* use first two bytes */

	IPW_DEBUG_ORD("table 2 offset at 0x%08x, len = %i\n",
		      priv->table2_addr, priv->table2_len);

}
745
746 static u32 ipw_register_toggle(u32 reg)
747 {
748 reg &= ~IPW_START_STANDBY;
749 if (reg & IPW_GATE_ODMA)
750 reg &= ~IPW_GATE_ODMA;
751 if (reg & IPW_GATE_IDMA)
752 reg &= ~IPW_GATE_IDMA;
753 if (reg & IPW_GATE_ADMA)
754 reg &= ~IPW_GATE_ADMA;
755 return reg;
756 }
757
758 /*
759 * LED behavior:
760 * - On radio ON, turn on any LEDs that require to be on during start
761 * - On initialization, start unassociated blink
762 * - On association, disable unassociated blink
763 * - On disassociation, start unassociated blink
764 * - On radio OFF, turn off any LEDs started during radio on
765 *
766 */
767 #define LD_TIME_LINK_ON msecs_to_jiffies(300)
768 #define LD_TIME_LINK_OFF msecs_to_jiffies(2700)
769 #define LD_TIME_ACT_ON msecs_to_jiffies(250)
770
/* Turn the link LED on (unless LEDs are disabled or this is a nic_type-1
 * board, which has no separate link LED).  Caller need not hold any lock;
 * priv->lock is taken here to protect the LED register read-modify-write
 * and the STATUS_LED_LINK_ON flag. */
static void ipw_led_link_on(struct ipw_priv *priv)
{
	unsigned long flags;
	u32 led;

	/* If configured to not use LEDs, or nic_type is 1,
	 * then we don't toggle a LINK led */
	if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
		return;

	spin_lock_irqsave(&priv->lock, flags);

	/* only act when the radio is on and the LED is not already lit */
	if (!(priv->status & STATUS_RF_KILL_MASK) &&
	    !(priv->status & STATUS_LED_LINK_ON)) {
		IPW_DEBUG_LED("Link LED On\n");
		led = ipw_read_reg32(priv, IPW_EVENT_REG);
		led |= priv->led_association_on;

		led = ipw_register_toggle(led);

		IPW_DEBUG_LED("Reg: 0x%08X\n", led);
		ipw_write_reg32(priv, IPW_EVENT_REG, led);

		priv->status |= STATUS_LED_LINK_ON;

		/* If we aren't associated, schedule turning the LED off
		 * (produces the unassociated blink pattern) */
		if (!(priv->status & STATUS_ASSOCIATED))
			queue_delayed_work(priv->workqueue,
					   &priv->led_link_off,
					   LD_TIME_LINK_ON);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
}
805
806 static void ipw_bg_led_link_on(void *data)
807 {
808 struct ipw_priv *priv = data;
809 mutex_lock(&priv->mutex);
810 ipw_led_link_on(data);
811 mutex_unlock(&priv->mutex);
812 }
813
/* Turn the link LED off (unless LEDs are disabled or this is a nic_type-1
 * board).  Takes priv->lock to protect the register read-modify-write and
 * the STATUS_LED_LINK_ON flag. */
static void ipw_led_link_off(struct ipw_priv *priv)
{
	unsigned long flags;
	u32 led;

	/* If configured not to use LEDs, or nic type is 1,
	 * then we don't toggle the LINK led. */
	if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
		return;

	spin_lock_irqsave(&priv->lock, flags);

	if (priv->status & STATUS_LED_LINK_ON) {
		led = ipw_read_reg32(priv, IPW_EVENT_REG);
		led &= priv->led_association_off;
		led = ipw_register_toggle(led);

		IPW_DEBUG_LED("Reg: 0x%08X\n", led);
		ipw_write_reg32(priv, IPW_EVENT_REG, led);

		IPW_DEBUG_LED("Link LED Off\n");

		priv->status &= ~STATUS_LED_LINK_ON;

		/* If we aren't associated and the radio is on, schedule
		 * turning the LED on (blink while unassociated) */
		if (!(priv->status & STATUS_RF_KILL_MASK) &&
		    !(priv->status & STATUS_ASSOCIATED))
			queue_delayed_work(priv->workqueue, &priv->led_link_on,
					   LD_TIME_LINK_OFF);

	}

	spin_unlock_irqrestore(&priv->lock, flags);
}
849
850 static void ipw_bg_led_link_off(void *data)
851 {
852 struct ipw_priv *priv = data;
853 mutex_lock(&priv->mutex);
854 ipw_led_link_off(data);
855 mutex_unlock(&priv->mutex);
856 }
857
858 static void __ipw_led_activity_on(struct ipw_priv *priv)
859 {
860 u32 led;
861
862 if (priv->config & CFG_NO_LED)
863 return;
864
865 if (priv->status & STATUS_RF_KILL_MASK)
866 return;
867
868 if (!(priv->status & STATUS_LED_ACT_ON)) {
869 led = ipw_read_reg32(priv, IPW_EVENT_REG);
870 led |= priv->led_activity_on;
871
872 led = ipw_register_toggle(led);
873
874 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
875 ipw_write_reg32(priv, IPW_EVENT_REG, led);
876
877 IPW_DEBUG_LED("Activity LED On\n");
878
879 priv->status |= STATUS_LED_ACT_ON;
880
881 cancel_delayed_work(&priv->led_act_off);
882 queue_delayed_work(priv->workqueue, &priv->led_act_off,
883 LD_TIME_ACT_ON);
884 } else {
885 /* Reschedule LED off for full time period */
886 cancel_delayed_work(&priv->led_act_off);
887 queue_delayed_work(priv->workqueue, &priv->led_act_off,
888 LD_TIME_ACT_ON);
889 }
890 }
891
#if 0
/* Locked wrapper around __ipw_led_activity_on(); currently compiled out
 * (no callers in this build). */
void ipw_led_activity_on(struct ipw_priv *priv)
{
	unsigned long flags;
	spin_lock_irqsave(&priv->lock, flags);
	__ipw_led_activity_on(priv);
	spin_unlock_irqrestore(&priv->lock, flags);
}
#endif				/* 0 */
901
/* Turn the activity LED off (if lit).  Takes priv->lock itself, unlike
 * __ipw_led_activity_on() which expects the caller to hold it. */
static void ipw_led_activity_off(struct ipw_priv *priv)
{
	unsigned long flags;
	u32 led;

	if (priv->config & CFG_NO_LED)
		return;

	spin_lock_irqsave(&priv->lock, flags);

	if (priv->status & STATUS_LED_ACT_ON) {
		led = ipw_read_reg32(priv, IPW_EVENT_REG);
		led &= priv->led_activity_off;

		led = ipw_register_toggle(led);

		IPW_DEBUG_LED("Reg: 0x%08X\n", led);
		ipw_write_reg32(priv, IPW_EVENT_REG, led);

		IPW_DEBUG_LED("Activity LED Off\n");

		priv->status &= ~STATUS_LED_ACT_ON;
	}

	spin_unlock_irqrestore(&priv->lock, flags);
}
928
929 static void ipw_bg_led_activity_off(void *data)
930 {
931 struct ipw_priv *priv = data;
932 mutex_lock(&priv->mutex);
933 ipw_led_activity_off(data);
934 mutex_unlock(&priv->mutex);
935 }
936
/* Set the band/mode LED according to the associated network's band
 * (a/b/g).  Only nic_type-1 boards have mode LEDs; requires an active
 * association (assoc_network).  Takes priv->lock for the register RMW. */
static void ipw_led_band_on(struct ipw_priv *priv)
{
	unsigned long flags;
	u32 led;

	/* Only nic type 1 supports mode LEDs */
	if (priv->config & CFG_NO_LED ||
	    priv->nic_type != EEPROM_NIC_TYPE_1 || !priv->assoc_network)
		return;

	spin_lock_irqsave(&priv->lock, flags);

	led = ipw_read_reg32(priv, IPW_EVENT_REG);
	if (priv->assoc_network->mode == IEEE_A) {
		/* 802.11a: OFDM LED only */
		led |= priv->led_ofdm_on;
		led &= priv->led_association_off;
		IPW_DEBUG_LED("Mode LED On: 802.11a\n");
	} else if (priv->assoc_network->mode == IEEE_G) {
		/* 802.11g: both OFDM and association LEDs */
		led |= priv->led_ofdm_on;
		led |= priv->led_association_on;
		IPW_DEBUG_LED("Mode LED On: 802.11g\n");
	} else {
		/* 802.11b: association LED only */
		led &= priv->led_ofdm_off;
		led |= priv->led_association_on;
		IPW_DEBUG_LED("Mode LED On: 802.11b\n");
	}

	led = ipw_register_toggle(led);

	IPW_DEBUG_LED("Reg: 0x%08X\n", led);
	ipw_write_reg32(priv, IPW_EVENT_REG, led);

	spin_unlock_irqrestore(&priv->lock, flags);
}
971
/* Turn off the band/mode LEDs (nic_type-1 boards only).  Takes priv->lock
 * for the register read-modify-write. */
static void ipw_led_band_off(struct ipw_priv *priv)
{
	unsigned long flags;
	u32 led;

	/* Only nic type 1 supports mode LEDs */
	if (priv->config & CFG_NO_LED || priv->nic_type != EEPROM_NIC_TYPE_1)
		return;

	spin_lock_irqsave(&priv->lock, flags);

	led = ipw_read_reg32(priv, IPW_EVENT_REG);
	led &= priv->led_ofdm_off;
	led &= priv->led_association_off;

	led = ipw_register_toggle(led);

	IPW_DEBUG_LED("Reg: 0x%08X\n", led);
	ipw_write_reg32(priv, IPW_EVENT_REG, led);

	spin_unlock_irqrestore(&priv->lock, flags);
}
994
/* Radio turned on: indicate by lighting the link LED. */
static void ipw_led_radio_on(struct ipw_priv *priv)
{
	ipw_led_link_on(priv);
}
999
/* Radio turned off: extinguish both the activity and link LEDs. */
static void ipw_led_radio_off(struct ipw_priv *priv)
{
	ipw_led_activity_off(priv);
	ipw_led_link_off(priv);
}
1005
/* Association established: turn the link LED on. */
static void ipw_led_link_up(struct ipw_priv *priv)
{
	/* Set the Link Led on for all nic types */
	ipw_led_link_on(priv);
}
1011
/*
 * Link lost: turn off the activity and link LEDs; if RF-kill is
 * asserted, also drop the radio LED state.
 */
static void ipw_led_link_down(struct ipw_priv *priv)
{
	ipw_led_activity_off(priv);
	ipw_led_link_off(priv);

	if (priv->status & STATUS_RF_KILL_MASK)
		ipw_led_radio_off(priv);
}
1020
/*
 * Read the NIC type from the EEPROM, set up the LED pin assignments
 * for that hardware, and reflect the current association state on the
 * LEDs (unless LED control is disabled via CFG_NO_LED).
 */
static void ipw_led_init(struct ipw_priv *priv)
{
	priv->nic_type = priv->eeprom[EEPROM_NIC_TYPE];

	/* Set the default PINs for the link and activity leds */
	priv->led_activity_on = IPW_ACTIVITY_LED;
	priv->led_activity_off = ~(IPW_ACTIVITY_LED);

	priv->led_association_on = IPW_ASSOCIATED_LED;
	priv->led_association_off = ~(IPW_ASSOCIATED_LED);

	/* Set the default PINs for the OFDM leds */
	priv->led_ofdm_on = IPW_OFDM_LED;
	priv->led_ofdm_off = ~(IPW_OFDM_LED);

	switch (priv->nic_type) {
	case EEPROM_NIC_TYPE_1:
		/* In this NIC type, the LEDs are reversed.... */
		priv->led_activity_on = IPW_ASSOCIATED_LED;
		priv->led_activity_off = ~(IPW_ASSOCIATED_LED);
		priv->led_association_on = IPW_ACTIVITY_LED;
		priv->led_association_off = ~(IPW_ACTIVITY_LED);

		if (!(priv->config & CFG_NO_LED))
			ipw_led_band_on(priv);

		/* And we don't blink link LEDs for this nic, so
		 * just return here */
		return;

	case EEPROM_NIC_TYPE_3:
	case EEPROM_NIC_TYPE_2:
	case EEPROM_NIC_TYPE_4:
	case EEPROM_NIC_TYPE_0:
		break;

	default:
		/* Unknown hardware: fall back to the type 0 pin layout. */
		IPW_DEBUG_INFO("Unknown NIC type from EEPROM: %d\n",
			       priv->nic_type);
		priv->nic_type = EEPROM_NIC_TYPE_0;
		break;
	}

	if (!(priv->config & CFG_NO_LED)) {
		if (priv->status & STATUS_ASSOCIATED)
			ipw_led_link_on(priv);
		else
			ipw_led_link_off(priv);
	}
}
1071
/* Quiesce LED state: turn all LEDs off and cancel pending LED work. */
static void ipw_led_shutdown(struct ipw_priv *priv)
{
	ipw_led_activity_off(priv);
	ipw_led_link_off(priv);
	ipw_led_band_off(priv);
	cancel_delayed_work(&priv->led_link_on);
	cancel_delayed_work(&priv->led_link_off);
	cancel_delayed_work(&priv->led_act_off);
}
1081
/*
 * The following adds a new attribute to the sysfs representation
 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/ipw/)
 * used for controlling the debug level.
 *
 * See the level definitions in ipw for details.
 */
/* sysfs read of the driver-wide debug level bitmask. */
static ssize_t show_debug_level(struct device_driver *d, char *buf)
{
	return sprintf(buf, "0x%08X\n", ipw_debug_level);
}
1093
/*
 * sysfs write of the driver-wide debug level.  Accepts a decimal
 * value, or hex with an optional "0x"/"x" prefix; invalid input is
 * rejected with a console message and the old level is kept.
 */
static ssize_t store_debug_level(struct device_driver *d, const char *buf,
				 size_t count)
{
	char *p = (char *)buf;
	u32 val;

	/* NOTE(review): p[1] is examined before p[0]; sysfs normally
	 * supplies a NUL-terminated page-sized buffer so this cannot
	 * read past the input in practice — confirm before reuse. */
	if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
		p++;
		if (p[0] == 'x' || p[0] == 'X')
			p++;
		val = simple_strtoul(p, &p, 16);
	} else
		val = simple_strtoul(p, &p, 10);
	if (p == buf)
		/* No digits were consumed: reject the input. */
		printk(KERN_INFO DRV_NAME
		       ": %s is not in hex or decimal form.\n", buf);
	else
		ipw_debug_level = val;

	return strnlen(buf, count);
}

static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO,
		   show_debug_level, store_debug_level);
1118
/* Number of records currently in the firmware event log. */
static inline u32 ipw_get_event_log_len(struct ipw_priv *priv)
{
	/* length = 1st dword in log */
	return ipw_read_reg32(priv, ipw_read32(priv, IPW_EVENT_LOG));
}
1124
1125 static void ipw_capture_event_log(struct ipw_priv *priv,
1126 u32 log_len, struct ipw_event *log)
1127 {
1128 u32 base;
1129
1130 if (log_len) {
1131 base = ipw_read32(priv, IPW_EVENT_LOG);
1132 ipw_read_indirect(priv, base + sizeof(base) + sizeof(u32),
1133 (u8 *) log, sizeof(*log) * log_len);
1134 }
1135 }
1136
/*
 * Snapshot the firmware error log and event log into one heap
 * allocation: header, then elem_len error elements, then log_len event
 * records, all carved out of error->payload.  Uses GFP_ATOMIC because
 * this is called from the interrupt tasklet.  Returns NULL on
 * allocation failure; the caller frees with ipw_free_error_log().
 */
static struct ipw_fw_error *ipw_alloc_error_log(struct ipw_priv *priv)
{
	struct ipw_fw_error *error;
	u32 log_len = ipw_get_event_log_len(priv);
	u32 base = ipw_read32(priv, IPW_ERROR_LOG);
	u32 elem_len = ipw_read_reg32(priv, base);

	/* NOTE(review): elem_len/log_len come straight from hardware; a
	 * misbehaving device could make this size computation overflow. */
	error = kmalloc(sizeof(*error) +
			sizeof(*error->elem) * elem_len +
			sizeof(*error->log) * log_len, GFP_ATOMIC);
	if (!error) {
		IPW_ERROR("Memory allocation for firmware error log "
			  "failed.\n");
		return NULL;
	}
	error->jiffies = jiffies;
	error->status = priv->status;
	error->config = priv->config;
	error->elem_len = elem_len;
	error->log_len = log_len;
	/* Carve the element and event arrays out of the trailing payload. */
	error->elem = (struct ipw_error_elem *)error->payload;
	error->log = (struct ipw_event *)(error->elem + elem_len);

	ipw_capture_event_log(priv, log_len, error->log);

	if (elem_len)
		ipw_read_indirect(priv, base + sizeof(base), (u8 *) error->elem,
				  sizeof(*error->elem) * elem_len);

	return error;
}
1168
/*
 * Free a firmware error log allocated by ipw_alloc_error_log().
 * kfree(NULL) is a no-op, so the previous NULL guard was redundant.
 */
static void ipw_free_error_log(struct ipw_fw_error *error)
{
	kfree(error);
}
1174
1175 static ssize_t show_event_log(struct device *d,
1176 struct device_attribute *attr, char *buf)
1177 {
1178 struct ipw_priv *priv = dev_get_drvdata(d);
1179 u32 log_len = ipw_get_event_log_len(priv);
1180 struct ipw_event log[log_len];
1181 u32 len = 0, i;
1182
1183 ipw_capture_event_log(priv, log_len, log);
1184
1185 len += snprintf(buf + len, PAGE_SIZE - len, "%08X", log_len);
1186 for (i = 0; i < log_len; i++)
1187 len += snprintf(buf + len, PAGE_SIZE - len,
1188 "\n%08X%08X%08X",
1189 log[i].time, log[i].event, log[i].data);
1190 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1191 return len;
1192 }
1193
1194 static DEVICE_ATTR(event_log, S_IRUGO, show_event_log, NULL);
1195
/*
 * sysfs read: dump the captured firmware error log as fixed-width hex
 * fields — a header line, elem_len element records, then the event
 * log.  Returns 0 (empty read) when no error has been captured.
 */
static ssize_t show_error(struct device *d,
			  struct device_attribute *attr, char *buf)
{
	struct ipw_priv *priv = dev_get_drvdata(d);
	u32 len = 0, i;
	if (!priv->error)
		return 0;
	/* Header: jiffies, status, config, element count. */
	len += snprintf(buf + len, PAGE_SIZE - len,
			"%08lX%08X%08X%08X",
			priv->error->jiffies,
			priv->error->status,
			priv->error->config, priv->error->elem_len);
	for (i = 0; i < priv->error->elem_len; i++)
		len += snprintf(buf + len, PAGE_SIZE - len,
				"\n%08X%08X%08X%08X%08X%08X%08X",
				priv->error->elem[i].time,
				priv->error->elem[i].desc,
				priv->error->elem[i].blink1,
				priv->error->elem[i].blink2,
				priv->error->elem[i].link1,
				priv->error->elem[i].link2,
				priv->error->elem[i].data);

	/* Followed by the event log count and its records. */
	len += snprintf(buf + len, PAGE_SIZE - len,
			"\n%08X", priv->error->log_len);
	for (i = 0; i < priv->error->log_len; i++)
		len += snprintf(buf + len, PAGE_SIZE - len,
				"\n%08X%08X%08X",
				priv->error->log[i].time,
				priv->error->log[i].event,
				priv->error->log[i].data);
	len += snprintf(buf + len, PAGE_SIZE - len, "\n");
	return len;
}
1230
1231 static ssize_t clear_error(struct device *d,
1232 struct device_attribute *attr,
1233 const char *buf, size_t count)
1234 {
1235 struct ipw_priv *priv = dev_get_drvdata(d);
1236 if (priv->error) {
1237 ipw_free_error_log(priv->error);
1238 priv->error = NULL;
1239 }
1240 return count;
1241 }
1242
1243 static DEVICE_ATTR(error, S_IRUGO | S_IWUSR, show_error, clear_error);
1244
/*
 * sysfs read: dump the host-command ring buffer, oldest entry first
 * (walk starts just past cmdlog_pos), stopping when the one-page
 * sysfs buffer is full.  Empty read if no log was allocated.
 */
static ssize_t show_cmd_log(struct device *d,
			    struct device_attribute *attr, char *buf)
{
	struct ipw_priv *priv = dev_get_drvdata(d);
	u32 len = 0, i;
	if (!priv->cmdlog)
		return 0;
	for (i = (priv->cmdlog_pos + 1) % priv->cmdlog_len;
	     (i != priv->cmdlog_pos) && (PAGE_SIZE - len);
	     i = (i + 1) % priv->cmdlog_len) {
		/* Header line: jiffies, return code, opcode, length. */
		len +=
		    snprintf(buf + len, PAGE_SIZE - len,
			     "\n%08lX%08X%08X%08X\n", priv->cmdlog[i].jiffies,
			     priv->cmdlog[i].retcode, priv->cmdlog[i].cmd.cmd,
			     priv->cmdlog[i].cmd.len);
		/* Followed by a hex dump of the command payload. */
		len +=
		    snprintk_buf(buf + len, PAGE_SIZE - len,
				 (u8 *) priv->cmdlog[i].cmd.param,
				 priv->cmdlog[i].cmd.len);
		len += snprintf(buf + len, PAGE_SIZE - len, "\n");
	}
	len += snprintf(buf + len, PAGE_SIZE - len, "\n");
	return len;
}

static DEVICE_ATTR(cmd_log, S_IRUGO, show_cmd_log, NULL);
1271
1272 static ssize_t show_scan_age(struct device *d, struct device_attribute *attr,
1273 char *buf)
1274 {
1275 struct ipw_priv *priv = dev_get_drvdata(d);
1276 return sprintf(buf, "%d\n", priv->ieee->scan_age);
1277 }
1278
/*
 * sysfs write: set the ieee80211 scan result aging time.  Accepts a
 * decimal value, or hex with an optional "0x"/"x" prefix; invalid
 * input is ignored with a debug message.
 */
static ssize_t store_scan_age(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct ipw_priv *priv = dev_get_drvdata(d);
#ifdef CONFIG_IPW2200_DEBUG
	struct net_device *dev = priv->net_dev;
#endif
	/* Bounded local copy: at most 8 characters are parsed. */
	char buffer[] = "00000000";
	unsigned long len =
	    (sizeof(buffer) - 1) > count ? count : sizeof(buffer) - 1;
	unsigned long val;
	char *p = buffer;

	IPW_DEBUG_INFO("enter\n");

	strncpy(buffer, buf, len);
	buffer[len] = 0;

	if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
		p++;
		if (p[0] == 'x' || p[0] == 'X')
			p++;
		val = simple_strtoul(p, &p, 16);
	} else
		val = simple_strtoul(p, &p, 10);
	if (p == buffer) {
		/* NOTE(review): 'dev' only exists under
		 * CONFIG_IPW2200_DEBUG; presumably IPW_DEBUG_INFO
		 * compiles away otherwise or this would not build —
		 * confirm against the macro definition. */
		IPW_DEBUG_INFO("%s: user supplied invalid value.\n", dev->name);
	} else {
		priv->ieee->scan_age = val;
		IPW_DEBUG_INFO("set scan_age = %u\n", priv->ieee->scan_age);
	}

	IPW_DEBUG_INFO("exit\n");
	return len;
}

static DEVICE_ATTR(scan_age, S_IWUSR | S_IRUGO, show_scan_age, store_scan_age);
1316
1317 static ssize_t show_led(struct device *d, struct device_attribute *attr,
1318 char *buf)
1319 {
1320 struct ipw_priv *priv = dev_get_drvdata(d);
1321 return sprintf(buf, "%d\n", (priv->config & CFG_NO_LED) ? 0 : 1);
1322 }
1323
/* sysfs write: enable or disable driver LED control. */
static ssize_t store_led(struct device *d, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	struct ipw_priv *priv = dev_get_drvdata(d);

	IPW_DEBUG_INFO("enter\n");

	if (count == 0)
		return 0;

	/* NOTE(review): this tests for a literal NUL byte, not the
	 * character '0', so `echo 0 > led` takes the *enable* branch.
	 * Looks suspicious — confirm the intended semantics. */
	if (*buf == 0) {
		IPW_DEBUG_LED("Disabling LED control.\n");
		priv->config |= CFG_NO_LED;
		ipw_led_shutdown(priv);
	} else {
		IPW_DEBUG_LED("Enabling LED control.\n");
		priv->config &= ~CFG_NO_LED;
		ipw_led_init(priv);
	}

	IPW_DEBUG_INFO("exit\n");
	return count;
}

static DEVICE_ATTR(led, S_IWUSR | S_IRUGO, show_led, store_led);
1349
1350 static ssize_t show_status(struct device *d,
1351 struct device_attribute *attr, char *buf)
1352 {
1353 struct ipw_priv *p = d->driver_data;
1354 return sprintf(buf, "0x%08x\n", (int)p->status);
1355 }
1356
1357 static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
1358
1359 static ssize_t show_cfg(struct device *d, struct device_attribute *attr,
1360 char *buf)
1361 {
1362 struct ipw_priv *p = d->driver_data;
1363 return sprintf(buf, "0x%08x\n", (int)p->config);
1364 }
1365
1366 static DEVICE_ATTR(cfg, S_IRUGO, show_cfg, NULL);
1367
1368 static ssize_t show_nic_type(struct device *d,
1369 struct device_attribute *attr, char *buf)
1370 {
1371 struct ipw_priv *priv = d->driver_data;
1372 return sprintf(buf, "TYPE: %d\n", priv->nic_type);
1373 }
1374
1375 static DEVICE_ATTR(nic_type, S_IRUGO, show_nic_type, NULL);
1376
/* sysfs read: the microcode version, fetched via the ordinal table. */
static ssize_t show_ucode_version(struct device *d,
				  struct device_attribute *attr, char *buf)
{
	u32 len = sizeof(u32), tmp = 0;
	struct ipw_priv *p = d->driver_data;

	/* Empty read if the ordinal fetch fails. */
	if (ipw_get_ordinal(p, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len))
		return 0;

	return sprintf(buf, "0x%08x\n", tmp);
}

/* NOTE(review): S_IWUSR is set but the store hook is NULL, so the
 * write permission bit is misleading — consider S_IRUGO only. */
static DEVICE_ATTR(ucode_version, S_IWUSR | S_IRUGO, show_ucode_version, NULL);
1390
/* sysfs read: the device RTC value, fetched via the ordinal table. */
static ssize_t show_rtc(struct device *d, struct device_attribute *attr,
			char *buf)
{
	u32 len = sizeof(u32), tmp = 0;
	struct ipw_priv *p = d->driver_data;

	/* Empty read if the ordinal fetch fails. */
	if (ipw_get_ordinal(p, IPW_ORD_STAT_RTC, &tmp, &len))
		return 0;

	return sprintf(buf, "0x%08x\n", tmp);
}

/* NOTE(review): S_IWUSR is set but the store hook is NULL, so the
 * write permission bit is misleading — consider S_IRUGO only. */
static DEVICE_ATTR(rtc, S_IWUSR | S_IRUGO, show_rtc, NULL);
1404
/*
 * Add a device attribute to view/control the delay between eeprom
 * operations.
 */
static ssize_t show_eeprom_delay(struct device *d,
				 struct device_attribute *attr, char *buf)
{
	int n = ((struct ipw_priv *)d->driver_data)->eeprom_delay;
	return sprintf(buf, "%i\n", n);
}
static ssize_t store_eeprom_delay(struct device *d,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct ipw_priv *p = d->driver_data;
	/* NOTE(review): sscanf result unchecked — non-numeric input
	 * silently leaves eeprom_delay unchanged. */
	sscanf(buf, "%i", &p->eeprom_delay);
	return strnlen(buf, count);
}

static DEVICE_ATTR(eeprom_delay, S_IWUSR | S_IRUGO,
		   show_eeprom_delay, store_eeprom_delay);
1426
1427 static ssize_t show_command_event_reg(struct device *d,
1428 struct device_attribute *attr, char *buf)
1429 {
1430 u32 reg = 0;
1431 struct ipw_priv *p = d->driver_data;
1432
1433 reg = ipw_read_reg32(p, IPW_INTERNAL_CMD_EVENT);
1434 return sprintf(buf, "0x%08x\n", reg);
1435 }
1436 static ssize_t store_command_event_reg(struct device *d,
1437 struct device_attribute *attr,
1438 const char *buf, size_t count)
1439 {
1440 u32 reg;
1441 struct ipw_priv *p = d->driver_data;
1442
1443 sscanf(buf, "%x", &reg);
1444 ipw_write_reg32(p, IPW_INTERNAL_CMD_EVENT, reg);
1445 return strnlen(buf, count);
1446 }
1447
1448 static DEVICE_ATTR(command_event_reg, S_IWUSR | S_IRUGO,
1449 show_command_event_reg, store_command_event_reg);
1450
1451 static ssize_t show_mem_gpio_reg(struct device *d,
1452 struct device_attribute *attr, char *buf)
1453 {
1454 u32 reg = 0;
1455 struct ipw_priv *p = d->driver_data;
1456
1457 reg = ipw_read_reg32(p, 0x301100);
1458 return sprintf(buf, "0x%08x\n", reg);
1459 }
1460 static ssize_t store_mem_gpio_reg(struct device *d,
1461 struct device_attribute *attr,
1462 const char *buf, size_t count)
1463 {
1464 u32 reg;
1465 struct ipw_priv *p = d->driver_data;
1466
1467 sscanf(buf, "%x", &reg);
1468 ipw_write_reg32(p, 0x301100, reg);
1469 return strnlen(buf, count);
1470 }
1471
1472 static DEVICE_ATTR(mem_gpio_reg, S_IWUSR | S_IRUGO,
1473 show_mem_gpio_reg, store_mem_gpio_reg);
1474
/*
 * Debug helpers: latch an arbitrary indirect dword address on write;
 * a subsequent read returns the value at that address.
 */
static ssize_t show_indirect_dword(struct device *d,
				   struct device_attribute *attr, char *buf)
{
	u32 reg = 0;
	struct ipw_priv *priv = d->driver_data;

	/* Only touch the hardware once an address has been stored. */
	if (priv->status & STATUS_INDIRECT_DWORD)
		reg = ipw_read_reg32(priv, priv->indirect_dword);
	else
		reg = 0;

	return sprintf(buf, "0x%08x\n", reg);
}
static ssize_t store_indirect_dword(struct device *d,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct ipw_priv *priv = d->driver_data;

	/* NOTE(review): sscanf result unchecked; invalid input still
	 * arms the status flag with the previous address. */
	sscanf(buf, "%x", &priv->indirect_dword);
	priv->status |= STATUS_INDIRECT_DWORD;
	return strnlen(buf, count);
}

static DEVICE_ATTR(indirect_dword, S_IWUSR | S_IRUGO,
		   show_indirect_dword, store_indirect_dword);
1501
/*
 * Debug helpers: latch an arbitrary indirect byte address on write;
 * a subsequent read returns the byte at that address.
 */
static ssize_t show_indirect_byte(struct device *d,
				  struct device_attribute *attr, char *buf)
{
	u8 reg = 0;
	struct ipw_priv *priv = d->driver_data;

	/* Only touch the hardware once an address has been stored. */
	if (priv->status & STATUS_INDIRECT_BYTE)
		reg = ipw_read_reg8(priv, priv->indirect_byte);
	else
		reg = 0;

	return sprintf(buf, "0x%02x\n", reg);
}
static ssize_t store_indirect_byte(struct device *d,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct ipw_priv *priv = d->driver_data;

	/* NOTE(review): "%x" stores an unsigned int; assumes
	 * priv->indirect_byte is at least int-sized — confirm against
	 * the struct definition in ipw2200.h. */
	sscanf(buf, "%x", &priv->indirect_byte);
	priv->status |= STATUS_INDIRECT_BYTE;
	return strnlen(buf, count);
}

static DEVICE_ATTR(indirect_byte, S_IWUSR | S_IRUGO,
		   show_indirect_byte, store_indirect_byte);
1528
/*
 * Debug helpers: latch an arbitrary direct dword address on write;
 * a subsequent read returns the value at that address.
 */
static ssize_t show_direct_dword(struct device *d,
				 struct device_attribute *attr, char *buf)
{
	u32 reg = 0;
	struct ipw_priv *priv = d->driver_data;

	/* Only touch the hardware once an address has been stored. */
	if (priv->status & STATUS_DIRECT_DWORD)
		reg = ipw_read32(priv, priv->direct_dword);
	else
		reg = 0;

	return sprintf(buf, "0x%08x\n", reg);
}
static ssize_t store_direct_dword(struct device *d,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct ipw_priv *priv = d->driver_data;

	/* NOTE(review): sscanf result unchecked; invalid input still
	 * arms the status flag with the previous address. */
	sscanf(buf, "%x", &priv->direct_dword);
	priv->status |= STATUS_DIRECT_DWORD;
	return strnlen(buf, count);
}

static DEVICE_ATTR(direct_dword, S_IWUSR | S_IRUGO,
		   show_direct_dword, store_direct_dword);
1555
1556 static int rf_kill_active(struct ipw_priv *priv)
1557 {
1558 if (0 == (ipw_read32(priv, 0x30) & 0x10000))
1559 priv->status |= STATUS_RF_KILL_HW;
1560 else
1561 priv->status &= ~STATUS_RF_KILL_HW;
1562
1563 return (priv->status & STATUS_RF_KILL_HW) ? 1 : 0;
1564 }
1565
/* sysfs read of the combined software + hardware RF-kill state. */
static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr,
			    char *buf)
{
	/* 0 - RF kill not enabled
	   1 - SW based RF kill active (sysfs)
	   2 - HW based RF kill active
	   3 - Both HW and SW based RF kill active */
	struct ipw_priv *priv = d->driver_data;
	int val = ((priv->status & STATUS_RF_KILL_SW) ? 0x1 : 0x0) |
	    (rf_kill_active(priv) ? 0x2 : 0x0);
	return sprintf(buf, "%i\n", val);
}
1578
/*
 * Set or clear the software RF-kill state.  Returns 0 if the requested
 * state was already in effect, 1 if a transition was started (device
 * brought down, brought up, or the RF-kill poll rescheduled).
 */
static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
{
	/* Nothing to do when SW kill already matches the request. */
	if ((disable_radio ? 1 : 0) ==
	    ((priv->status & STATUS_RF_KILL_SW) ? 1 : 0))
		return 0;

	IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO %s\n",
			  disable_radio ? "OFF" : "ON");

	if (disable_radio) {
		priv->status |= STATUS_RF_KILL_SW;

		/* Stop any pending scan before tearing the device down. */
		if (priv->workqueue)
			cancel_delayed_work(&priv->request_scan);
		queue_work(priv->workqueue, &priv->down);
	} else {
		priv->status &= ~STATUS_RF_KILL_SW;
		if (rf_kill_active(priv)) {
			IPW_DEBUG_RF_KILL("Can not turn radio back on - "
					  "disabled by HW switch\n");
			/* Make sure the RF_KILL check timer is running */
			cancel_delayed_work(&priv->rf_kill);
			queue_delayed_work(priv->workqueue, &priv->rf_kill,
					   2 * HZ);
		} else
			queue_work(priv->workqueue, &priv->up);
	}

	return 1;
}
1609
1610 static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr,
1611 const char *buf, size_t count)
1612 {
1613 struct ipw_priv *priv = d->driver_data;
1614
1615 ipw_radio_kill_sw(priv, buf[0] == '1');
1616
1617 return count;
1618 }
1619
1620 static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill);
1621
1622 static ssize_t show_speed_scan(struct device *d, struct device_attribute *attr,
1623 char *buf)
1624 {
1625 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1626 int pos = 0, len = 0;
1627 if (priv->config & CFG_SPEED_SCAN) {
1628 while (priv->speed_scan[pos] != 0)
1629 len += sprintf(&buf[len], "%d ",
1630 priv->speed_scan[pos++]);
1631 return len + sprintf(&buf[len], "\n");
1632 }
1633
1634 return sprintf(buf, "0\n");
1635 }
1636
/*
 * sysfs write: accept a space-separated list of channels for speed
 * scanning.  An empty/zero list disables the feature; invalid channels
 * are skipped with a warning.
 */
static ssize_t store_speed_scan(struct device *d, struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
	int channel, pos = 0;
	const char *p = buf;

	/* list of space separated channels to scan, optionally ending with 0 */
	while ((channel = simple_strtol(p, NULL, 0))) {
		/* Always leave room for the terminating 0 entry. */
		if (pos == MAX_SPEED_SCAN - 1) {
			priv->speed_scan[pos] = 0;
			break;
		}

		if (ieee80211_is_valid_channel(priv->ieee, channel))
			priv->speed_scan[pos++] = channel;
		else
			IPW_WARNING("Skipping invalid channel request: %d\n",
				    channel);
		/* Advance past the current token and any separators. */
		p = strchr(p, ' ');
		if (!p)
			break;
		while (*p == ' ' || *p == '\t')
			p++;
	}

	if (pos == 0)
		priv->config &= ~CFG_SPEED_SCAN;
	else {
		/* Restart the channel rotation from the beginning. */
		priv->speed_scan_pos = 0;
		priv->config |= CFG_SPEED_SCAN;
	}

	return count;
}

static DEVICE_ATTR(speed_scan, S_IWUSR | S_IRUGO, show_speed_scan,
		   store_speed_scan);
1675
1676 static ssize_t show_net_stats(struct device *d, struct device_attribute *attr,
1677 char *buf)
1678 {
1679 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1680 return sprintf(buf, "%c\n", (priv->config & CFG_NET_STATS) ? '1' : '0');
1681 }
1682
1683 static ssize_t store_net_stats(struct device *d, struct device_attribute *attr,
1684 const char *buf, size_t count)
1685 {
1686 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1687 if (buf[0] == '1')
1688 priv->config |= CFG_NET_STATS;
1689 else
1690 priv->config &= ~CFG_NET_STATS;
1691
1692 return count;
1693 }
1694
1695 static DEVICE_ATTR(net_stats, S_IWUSR | S_IRUGO,
1696 show_net_stats, store_net_stats);
1697
1698 static void notify_wx_assoc_event(struct ipw_priv *priv)
1699 {
1700 union iwreq_data wrqu;
1701 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
1702 if (priv->status & STATUS_ASSOCIATED)
1703 memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN);
1704 else
1705 memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
1706 wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
1707 }
1708
/*
 * Interrupt bottom half.  Gathers the pending INTA causes (hardware
 * register plus any bits cached in priv->isr_inta by the hard IRQ
 * handler), services each cause, complains about anything unhandled,
 * and finally re-enables interrupts.  Takes priv->lock for the
 * duration.
 */
static void ipw_irq_tasklet(struct ipw_priv *priv)
{
	u32 inta, inta_mask, handled = 0;
	unsigned long flags;
	int rc = 0;		/* NOTE(review): assigned below, never read */

	spin_lock_irqsave(&priv->lock, flags);

	inta = ipw_read32(priv, IPW_INTA_RW);
	inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
	inta &= (IPW_INTA_MASK_ALL & inta_mask);

	/* Add any cached INTA values that need to be handled */
	inta |= priv->isr_inta;

	/* handle all the justifications for the interrupt */
	if (inta & IPW_INTA_BIT_RX_TRANSFER) {
		ipw_rx(priv);
		handled |= IPW_INTA_BIT_RX_TRANSFER;
	}

	if (inta & IPW_INTA_BIT_TX_CMD_QUEUE) {
		IPW_DEBUG_HC("Command completed.\n");
		rc = ipw_queue_tx_reclaim(priv, &priv->txq_cmd, -1);
		priv->status &= ~STATUS_HCMD_ACTIVE;
		/* Wake __ipw_send_cmd(), which waits on this flag. */
		wake_up_interruptible(&priv->wait_command_queue);
		handled |= IPW_INTA_BIT_TX_CMD_QUEUE;
	}

	if (inta & IPW_INTA_BIT_TX_QUEUE_1) {
		IPW_DEBUG_TX("TX_QUEUE_1\n");
		rc = ipw_queue_tx_reclaim(priv, &priv->txq[0], 0);
		handled |= IPW_INTA_BIT_TX_QUEUE_1;
	}

	if (inta & IPW_INTA_BIT_TX_QUEUE_2) {
		IPW_DEBUG_TX("TX_QUEUE_2\n");
		rc = ipw_queue_tx_reclaim(priv, &priv->txq[1], 1);
		handled |= IPW_INTA_BIT_TX_QUEUE_2;
	}

	if (inta & IPW_INTA_BIT_TX_QUEUE_3) {
		IPW_DEBUG_TX("TX_QUEUE_3\n");
		rc = ipw_queue_tx_reclaim(priv, &priv->txq[2], 2);
		handled |= IPW_INTA_BIT_TX_QUEUE_3;
	}

	if (inta & IPW_INTA_BIT_TX_QUEUE_4) {
		IPW_DEBUG_TX("TX_QUEUE_4\n");
		rc = ipw_queue_tx_reclaim(priv, &priv->txq[3], 3);
		handled |= IPW_INTA_BIT_TX_QUEUE_4;
	}

	if (inta & IPW_INTA_BIT_STATUS_CHANGE) {
		IPW_WARNING("STATUS_CHANGE\n");
		handled |= IPW_INTA_BIT_STATUS_CHANGE;
	}

	if (inta & IPW_INTA_BIT_BEACON_PERIOD_EXPIRED) {
		IPW_WARNING("TX_PERIOD_EXPIRED\n");
		handled |= IPW_INTA_BIT_BEACON_PERIOD_EXPIRED;
	}

	if (inta & IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE) {
		IPW_WARNING("HOST_CMD_DONE\n");
		handled |= IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE;
	}

	if (inta & IPW_INTA_BIT_FW_INITIALIZATION_DONE) {
		IPW_WARNING("FW_INITIALIZATION_DONE\n");
		handled |= IPW_INTA_BIT_FW_INITIALIZATION_DONE;
	}

	if (inta & IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE) {
		IPW_WARNING("PHY_OFF_DONE\n");
		handled |= IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE;
	}

	if (inta & IPW_INTA_BIT_RF_KILL_DONE) {
		IPW_DEBUG_RF_KILL("RF_KILL_DONE\n");
		/* HW kill switch engaged: drop association state, stop
		 * scanning, and start polling for the switch release. */
		priv->status |= STATUS_RF_KILL_HW;
		wake_up_interruptible(&priv->wait_command_queue);
		priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING);
		cancel_delayed_work(&priv->request_scan);
		schedule_work(&priv->link_down);
		queue_delayed_work(priv->workqueue, &priv->rf_kill, 2 * HZ);
		handled |= IPW_INTA_BIT_RF_KILL_DONE;
	}

	if (inta & IPW_INTA_BIT_FATAL_ERROR) {
		IPW_WARNING("Firmware error detected. Restarting.\n");
		/* Capture (or at least dump) the firmware error log. */
		if (priv->error) {
			IPW_DEBUG_FW("Sysfs 'error' log already exists.\n");
#ifdef CONFIG_IPW2200_DEBUG
			if (ipw_debug_level & IPW_DL_FW_ERRORS) {
				struct ipw_fw_error *error =
				    ipw_alloc_error_log(priv);
				ipw_dump_error_log(priv, error);
				if (error)
					ipw_free_error_log(error);
			}
#endif
		} else {
			priv->error = ipw_alloc_error_log(priv);
			if (priv->error)
				IPW_DEBUG_FW("Sysfs 'error' log captured.\n");
			else
				IPW_DEBUG_FW("Error allocating sysfs 'error' "
					     "log.\n");
#ifdef CONFIG_IPW2200_DEBUG
			if (ipw_debug_level & IPW_DL_FW_ERRORS)
				ipw_dump_error_log(priv, priv->error);
#endif
		}

		/* XXX: If hardware encryption is for WPA/WPA2,
		 * we have to notify the supplicant. */
		if (priv->ieee->sec.encrypt) {
			priv->status &= ~STATUS_ASSOCIATED;
			notify_wx_assoc_event(priv);
		}

		/* Keep the restart process from trying to send host
		 * commands by clearing the INIT status bit */
		priv->status &= ~STATUS_INIT;

		/* Cancel currently queued command. */
		priv->status &= ~STATUS_HCMD_ACTIVE;
		wake_up_interruptible(&priv->wait_command_queue);

		queue_work(priv->workqueue, &priv->adapter_restart);
		handled |= IPW_INTA_BIT_FATAL_ERROR;
	}

	if (inta & IPW_INTA_BIT_PARITY_ERROR) {
		IPW_ERROR("Parity error\n");
		handled |= IPW_INTA_BIT_PARITY_ERROR;
	}

	if (handled != inta) {
		IPW_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
	}

	/* enable all interrupts */
	ipw_enable_interrupts(priv);

	spin_unlock_irqrestore(&priv->lock, flags);
}
1857
/* Expands to a switch case returning the stringified command name. */
#define IPW_CMD(x) case IPW_CMD_ ## x : return #x
/* Map a host-command opcode to its printable name for log messages. */
static char *get_cmd_string(u8 cmd)
{
	switch (cmd) {
		IPW_CMD(HOST_COMPLETE);
		IPW_CMD(POWER_DOWN);
		IPW_CMD(SYSTEM_CONFIG);
		IPW_CMD(MULTICAST_ADDRESS);
		IPW_CMD(SSID);
		IPW_CMD(ADAPTER_ADDRESS);
		IPW_CMD(PORT_TYPE);
		IPW_CMD(RTS_THRESHOLD);
		IPW_CMD(FRAG_THRESHOLD);
		IPW_CMD(POWER_MODE);
		IPW_CMD(WEP_KEY);
		IPW_CMD(TGI_TX_KEY);
		IPW_CMD(SCAN_REQUEST);
		IPW_CMD(SCAN_REQUEST_EXT);
		IPW_CMD(ASSOCIATE);
		IPW_CMD(SUPPORTED_RATES);
		IPW_CMD(SCAN_ABORT);
		IPW_CMD(TX_FLUSH);
		IPW_CMD(QOS_PARAMETERS);
		IPW_CMD(DINO_CONFIG);
		IPW_CMD(RSN_CAPABILITIES);
		IPW_CMD(RX_KEY);
		IPW_CMD(CARD_DISABLE);
		IPW_CMD(SEED_NUMBER);
		IPW_CMD(TX_POWER);
		IPW_CMD(COUNTRY_INFO);
		IPW_CMD(AIRONET_INFO);
		IPW_CMD(AP_TX_POWER);
		IPW_CMD(CCKM_INFO);
		IPW_CMD(CCX_VER_INFO);
		IPW_CMD(SET_CALIBRATION);
		IPW_CMD(SENSITIVITY_CALIB);
		IPW_CMD(RETRY_LIMIT);
		IPW_CMD(IPW_PRE_POWER_DOWN);
		IPW_CMD(VAP_BEACON_TEMPLATE);
		IPW_CMD(VAP_DTIM_PERIOD);
		IPW_CMD(EXT_SUPPORTED_RATES);
		IPW_CMD(VAP_LOCAL_TX_PWR_CONSTRAINT);
		IPW_CMD(VAP_QUIET_INTERVALS);
		IPW_CMD(VAP_CHANNEL_SWITCH);
		IPW_CMD(VAP_MANDATORY_CHANNELS);
		IPW_CMD(VAP_CELL_PWR_LIMIT);
		IPW_CMD(VAP_CF_PARAM_SET);
		IPW_CMD(VAP_SET_BEACONING_STATE);
		IPW_CMD(MEASUREMENT);
		IPW_CMD(POWER_CAPABILITY);
		IPW_CMD(SUPPORTED_CHANNELS);
		IPW_CMD(TPC_REPORT);
		IPW_CMD(WME_INFO);
		IPW_CMD(PRODUCTION_COMMAND);
	default:
		return "UNKNOWN";
	}
}
1916
#define HOST_COMPLETE_TIMEOUT HZ

/*
 * Send a host command to the firmware and wait up to
 * HOST_COMPLETE_TIMEOUT for the completion interrupt (the tasklet
 * clears STATUS_HCMD_ACTIVE and wakes wait_command_queue).  Only one
 * command may be in flight at a time.  Returns 0 on success, -EAGAIN
 * if a command is already pending, -EIO on timeout or RF-kill, or the
 * queueing error code.
 */
static int __ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
{
	int rc = 0;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	if (priv->status & STATUS_HCMD_ACTIVE) {
		IPW_ERROR("Failed to send %s: Already sending a command.\n",
			  get_cmd_string(cmd->cmd));
		spin_unlock_irqrestore(&priv->lock, flags);
		return -EAGAIN;
	}

	priv->status |= STATUS_HCMD_ACTIVE;

	/* Record the command in the ring buffer before sending it. */
	if (priv->cmdlog) {
		priv->cmdlog[priv->cmdlog_pos].jiffies = jiffies;
		priv->cmdlog[priv->cmdlog_pos].cmd.cmd = cmd->cmd;
		priv->cmdlog[priv->cmdlog_pos].cmd.len = cmd->len;
		memcpy(priv->cmdlog[priv->cmdlog_pos].cmd.param, cmd->param,
		       cmd->len);
		priv->cmdlog[priv->cmdlog_pos].retcode = -1;
	}

	IPW_DEBUG_HC("%s command (#%d) %d bytes: 0x%08X\n",
		     get_cmd_string(cmd->cmd), cmd->cmd, cmd->len,
		     priv->status);

#ifndef DEBUG_CMD_WEP_KEY
	/* Never dump WEP key material to the log. */
	if (cmd->cmd == IPW_CMD_WEP_KEY)
		IPW_DEBUG_HC("WEP_KEY command masked out for secure.\n");
	else
#endif
		printk_buf(IPW_DL_HOST_COMMAND, (u8 *) cmd->param, cmd->len);

	rc = ipw_queue_tx_hcmd(priv, cmd->cmd, cmd->param, cmd->len, 0);
	if (rc) {
		priv->status &= ~STATUS_HCMD_ACTIVE;
		IPW_ERROR("Failed to send %s: Reason %d\n",
			  get_cmd_string(cmd->cmd), rc);
		spin_unlock_irqrestore(&priv->lock, flags);
		goto exit;
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	rc = wait_event_interruptible_timeout(priv->wait_command_queue,
					      !(priv->
						status & STATUS_HCMD_ACTIVE),
					      HOST_COMPLETE_TIMEOUT);
	if (rc == 0) {
		/* Timed out: recheck under the lock to close the race
		 * with a completion that fired just as we gave up. */
		spin_lock_irqsave(&priv->lock, flags);
		if (priv->status & STATUS_HCMD_ACTIVE) {
			IPW_ERROR("Failed to send %s: Command timed out.\n",
				  get_cmd_string(cmd->cmd));
			priv->status &= ~STATUS_HCMD_ACTIVE;
			spin_unlock_irqrestore(&priv->lock, flags);
			rc = -EIO;
			goto exit;
		}
		spin_unlock_irqrestore(&priv->lock, flags);
	} else
		rc = 0;

	if (priv->status & STATUS_RF_KILL_HW) {
		IPW_ERROR("Failed to send %s: Aborted due to RF kill switch.\n",
			  get_cmd_string(cmd->cmd));
		rc = -EIO;
		goto exit;
	}

      exit:
	/* Record the final return code in the command log entry. */
	if (priv->cmdlog) {
		priv->cmdlog[priv->cmdlog_pos++].retcode = rc;
		priv->cmdlog_pos %= priv->cmdlog_len;
	}
	return rc;
}
1996
1997 static int ipw_send_cmd_simple(struct ipw_priv *priv, u8 command)
1998 {
1999 struct host_cmd cmd = {
2000 .cmd = command,
2001 };
2002
2003 return __ipw_send_cmd(priv, &cmd);
2004 }
2005
2006 static int ipw_send_cmd_pdu(struct ipw_priv *priv, u8 command, u8 len,
2007 void *data)
2008 {
2009 struct host_cmd cmd = {
2010 .cmd = command,
2011 .len = len,
2012 .param = data,
2013 };
2014
2015 return __ipw_send_cmd(priv, &cmd);
2016 }
2017
2018 static int ipw_send_host_complete(struct ipw_priv *priv)
2019 {
2020 if (!priv) {
2021 IPW_ERROR("Invalid args\n");
2022 return -1;
2023 }
2024
2025 return ipw_send_cmd_simple(priv, IPW_CMD_HOST_COMPLETE);
2026 }
2027
2028 static int ipw_send_system_config(struct ipw_priv *priv,
2029 struct ipw_sys_config *config)
2030 {
2031 if (!priv || !config) {
2032 IPW_ERROR("Invalid args\n");
2033 return -1;
2034 }
2035
2036 return ipw_send_cmd_pdu(priv, IPW_CMD_SYSTEM_CONFIG, sizeof(*config),
2037 config);
2038 }
2039
2040 static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len)
2041 {
2042 if (!priv || !ssid) {
2043 IPW_ERROR("Invalid args\n");
2044 return -1;
2045 }
2046
2047 return ipw_send_cmd_pdu(priv, IPW_CMD_SSID, min(len, IW_ESSID_MAX_SIZE),
2048 ssid);
2049 }
2050
2051 static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac)
2052 {
2053 if (!priv || !mac) {
2054 IPW_ERROR("Invalid args\n");
2055 return -1;
2056 }
2057
2058 IPW_DEBUG_INFO("%s: Setting MAC to " MAC_FMT "\n",
2059 priv->net_dev->name, MAC_ARG(mac));
2060
2061 return ipw_send_cmd_pdu(priv, IPW_CMD_ADAPTER_ADDRESS, ETH_ALEN, mac);
2062 }
2063
/*
 * NOTE: This must be executed from our workqueue as it results in udelay
 * being called which may corrupt the keyboard if executed on default
 * workqueue
 */
static void ipw_adapter_restart(void *adapter)
{
	struct ipw_priv *priv = adapter;

	/* No point restarting while the radio is killed. */
	if (priv->status & STATUS_RF_KILL_MASK)
		return;

	ipw_down(priv);

	/* Presumably dropping a stale IBSS entry so ipw_up() starts
	 * clean — confirm against ipw_remove_current_network(). */
	if (priv->assoc_network &&
	    (priv->assoc_network->capability & WLAN_CAPABILITY_IBSS))
		ipw_remove_current_network(priv);

	if (ipw_up(priv)) {
		IPW_ERROR("Failed to up device\n");
		return;
	}
}
2087
2088 static void ipw_bg_adapter_restart(void *data)
2089 {
2090 struct ipw_priv *priv = data;
2091 mutex_lock(&priv->mutex);
2092 ipw_adapter_restart(data);
2093 mutex_unlock(&priv->mutex);
2094 }
2095
2096 #define IPW_SCAN_CHECK_WATCHDOG (5 * HZ)
2097
2098 static void ipw_scan_check(void *data)
2099 {
2100 struct ipw_priv *priv = data;
2101 if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) {
2102 IPW_DEBUG_SCAN("Scan completion watchdog resetting "
2103 "adapter after (%dms).\n",
2104 jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
2105 queue_work(priv->workqueue, &priv->adapter_restart);
2106 }
2107 }
2108
2109 static void ipw_bg_scan_check(void *data)
2110 {
2111 struct ipw_priv *priv = data;
2112 mutex_lock(&priv->mutex);
2113 ipw_scan_check(data);
2114 mutex_unlock(&priv->mutex);
2115 }
2116
2117 static int ipw_send_scan_request_ext(struct ipw_priv *priv,
2118 struct ipw_scan_request_ext *request)
2119 {
2120 return ipw_send_cmd_pdu(priv, IPW_CMD_SCAN_REQUEST_EXT,
2121 sizeof(*request), request);
2122 }
2123
2124 static int ipw_send_scan_abort(struct ipw_priv *priv)
2125 {
2126 if (!priv) {
2127 IPW_ERROR("Invalid args\n");
2128 return -1;
2129 }
2130
2131 return ipw_send_cmd_simple(priv, IPW_CMD_SCAN_ABORT);
2132 }
2133
2134 static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens)
2135 {
2136 struct ipw_sensitivity_calib calib = {
2137 .beacon_rssi_raw = sens,
2138 };
2139
2140 return ipw_send_cmd_pdu(priv, IPW_CMD_SENSITIVITY_CALIB, sizeof(calib),
2141 &calib);
2142 }
2143
/*
 * Send an ASSOCIATE command to the firmware.
 *
 * Works on a local copy so the caller's structure is left untouched while
 * the multi-byte fields are converted to the device's little-endian
 * layout.  Returns the host-command result, or -1 on invalid arguments.
 */
static int ipw_send_associate(struct ipw_priv *priv,
			      struct ipw_associate *associate)
{
	struct ipw_associate tmp_associate;

	if (!priv || !associate) {
		IPW_ERROR("Invalid args\n");
		return -1;
	}

	/* Byte-swap every multi-byte field in the copy, not the original. */
	memcpy(&tmp_associate, associate, sizeof(*associate));
	tmp_associate.policy_support =
	    cpu_to_le16(tmp_associate.policy_support);
	tmp_associate.assoc_tsf_msw = cpu_to_le32(tmp_associate.assoc_tsf_msw);
	tmp_associate.assoc_tsf_lsw = cpu_to_le32(tmp_associate.assoc_tsf_lsw);
	tmp_associate.capability = cpu_to_le16(tmp_associate.capability);
	tmp_associate.listen_interval =
	    cpu_to_le16(tmp_associate.listen_interval);
	tmp_associate.beacon_interval =
	    cpu_to_le16(tmp_associate.beacon_interval);
	tmp_associate.atim_window = cpu_to_le16(tmp_associate.atim_window);

	return ipw_send_cmd_pdu(priv, IPW_CMD_ASSOCIATE, sizeof(tmp_associate),
				&tmp_associate);
}
2169
2170 static int ipw_send_supported_rates(struct ipw_priv *priv,
2171 struct ipw_supported_rates *rates)
2172 {
2173 if (!priv || !rates) {
2174 IPW_ERROR("Invalid args\n");
2175 return -1;
2176 }
2177
2178 return ipw_send_cmd_pdu(priv, IPW_CMD_SUPPORTED_RATES, sizeof(*rates),
2179 rates);
2180 }
2181
2182 static int ipw_set_random_seed(struct ipw_priv *priv)
2183 {
2184 u32 val;
2185
2186 if (!priv) {
2187 IPW_ERROR("Invalid args\n");
2188 return -1;
2189 }
2190
2191 get_random_bytes(&val, sizeof(val));
2192
2193 return ipw_send_cmd_pdu(priv, IPW_CMD_SEED_NUMBER, sizeof(val), &val);
2194 }
2195
2196 static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off)
2197 {
2198 if (!priv) {
2199 IPW_ERROR("Invalid args\n");
2200 return -1;
2201 }
2202
2203 return ipw_send_cmd_pdu(priv, IPW_CMD_CARD_DISABLE, sizeof(phy_off),
2204 &phy_off);
2205 }
2206
2207 static int ipw_send_tx_power(struct ipw_priv *priv, struct ipw_tx_power *power)
2208 {
2209 if (!priv || !power) {
2210 IPW_ERROR("Invalid args\n");
2211 return -1;
2212 }
2213
2214 return ipw_send_cmd_pdu(priv, IPW_CMD_TX_POWER, sizeof(*power), power);
2215 }
2216
/*
 * Program per-channel transmit power for every band the hardware supports.
 *
 * Each channel gets the regulatory maximum from the geography table
 * clamped to the user setting priv->tx_power; a max_power of 0 means no
 * limit is recorded and the user setting is used as-is.  Returns 0 on
 * success, -EIO if any TX_POWER command fails.
 */
static int ipw_set_tx_power(struct ipw_priv *priv)
{
	const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
	struct ipw_tx_power tx_power;
	s8 max_power;
	int i;

	memset(&tx_power, 0, sizeof(tx_power));

	/* configure device for 'G' band */
	tx_power.ieee_mode = IPW_G_MODE;
	tx_power.num_channels = geo->bg_channels;
	for (i = 0; i < geo->bg_channels; i++) {
		max_power = geo->bg[i].max_power;
		tx_power.channels_tx_power[i].channel_number =
		    geo->bg[i].channel;
		tx_power.channels_tx_power[i].tx_power = max_power ?
		    min(max_power, priv->tx_power) : priv->tx_power;
	}
	if (ipw_send_tx_power(priv, &tx_power))
		return -EIO;

	/* configure device to also handle 'B' band */
	/* (reuses the 'G' channel table; only the mode byte changes) */
	tx_power.ieee_mode = IPW_B_MODE;
	if (ipw_send_tx_power(priv, &tx_power))
		return -EIO;

	/* configure device to also handle 'A' band */
	if (priv->ieee->abg_true) {
		tx_power.ieee_mode = IPW_A_MODE;
		tx_power.num_channels = geo->a_channels;
		for (i = 0; i < tx_power.num_channels; i++) {
			max_power = geo->a[i].max_power;
			tx_power.channels_tx_power[i].channel_number =
			    geo->a[i].channel;
			tx_power.channels_tx_power[i].tx_power = max_power ?
			    min(max_power, priv->tx_power) : priv->tx_power;
		}
		if (ipw_send_tx_power(priv, &tx_power))
			return -EIO;
	}
	return 0;
}
2260
2261 static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts)
2262 {
2263 struct ipw_rts_threshold rts_threshold = {
2264 .rts_threshold = rts,
2265 };
2266
2267 if (!priv) {
2268 IPW_ERROR("Invalid args\n");
2269 return -1;
2270 }
2271
2272 return ipw_send_cmd_pdu(priv, IPW_CMD_RTS_THRESHOLD,
2273 sizeof(rts_threshold), &rts_threshold);
2274 }
2275
2276 static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag)
2277 {
2278 struct ipw_frag_threshold frag_threshold = {
2279 .frag_threshold = frag,
2280 };
2281
2282 if (!priv) {
2283 IPW_ERROR("Invalid args\n");
2284 return -1;
2285 }
2286
2287 return ipw_send_cmd_pdu(priv, IPW_CMD_FRAG_THRESHOLD,
2288 sizeof(frag_threshold), &frag_threshold);
2289 }
2290
2291 static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode)
2292 {
2293 u32 param;
2294
2295 if (!priv) {
2296 IPW_ERROR("Invalid args\n");
2297 return -1;
2298 }
2299
2300 /* If on battery, set to 3, if AC set to CAM, else user
2301 * level */
2302 switch (mode) {
2303 case IPW_POWER_BATTERY:
2304 param = IPW_POWER_INDEX_3;
2305 break;
2306 case IPW_POWER_AC:
2307 param = IPW_POWER_MODE_CAM;
2308 break;
2309 default:
2310 param = mode;
2311 break;
2312 }
2313
2314 return ipw_send_cmd_pdu(priv, IPW_CMD_POWER_MODE, sizeof(param),
2315 &param);
2316 }
2317
2318 static int ipw_send_retry_limit(struct ipw_priv *priv, u8 slimit, u8 llimit)
2319 {
2320 struct ipw_retry_limit retry_limit = {
2321 .short_retry_limit = slimit,
2322 .long_retry_limit = llimit
2323 };
2324
2325 if (!priv) {
2326 IPW_ERROR("Invalid args\n");
2327 return -1;
2328 }
2329
2330 return ipw_send_cmd_pdu(priv, IPW_CMD_RETRY_LIMIT, sizeof(retry_limit),
2331 &retry_limit);
2332 }
2333
2334 /*
2335 * The IPW device contains a Microwire compatible EEPROM that stores
2336 * various data like the MAC address. Usually the firmware has exclusive
2337 * access to the eeprom, but during device initialization (before the
2338 * device driver has sent the HostComplete command to the firmware) the
2339 * device driver has read access to the EEPROM by way of indirect addressing
2340 * through a couple of memory mapped registers.
2341 *
 * The following is a simplified implementation for pulling data out of
 * the eeprom, along with some helper functions to find information in
2344 * the per device private data's copy of the eeprom.
2345 *
2346 * NOTE: To better understand how these functions work (i.e what is a chip
2347 * select and why do have to keep driving the eeprom clock?), read
2348 * just about any data sheet for a Microwire compatible EEPROM.
2349 */
2350
2351 /* write a 32 bit value into the indirect accessor register */
2352 static inline void eeprom_write_reg(struct ipw_priv *p, u32 data)
2353 {
2354 ipw_write_reg32(p, FW_MEM_REG_EEPROM_ACCESS, data);
2355
2356 /* the eeprom requires some time to complete the operation */
2357 udelay(p->eeprom_delay);
2358
2359 return;
2360 }
2361
2362 /* perform a chip select operation */
static void eeprom_cs(struct ipw_priv *priv)
{
	/* Assert chip select: drop all lines, raise CS, then pulse the
	 * clock once with CS held -- the order of these writes matters. */
	eeprom_write_reg(priv, 0);
	eeprom_write_reg(priv, EEPROM_BIT_CS);
	eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
	eeprom_write_reg(priv, EEPROM_BIT_CS);
}
2370
/* release (de-assert) the chip select line */
static void eeprom_disable_cs(struct ipw_priv *priv)
{
	eeprom_write_reg(priv, EEPROM_BIT_CS);
	eeprom_write_reg(priv, 0);
	eeprom_write_reg(priv, EEPROM_BIT_SK);
}
2378
/* push a single bit down to the eeprom */
static inline void eeprom_write_bit(struct ipw_priv *p, u8 bit)
{
	/* Put the bit on the DI line, then clock it in with a rising SK
	 * edge while chip select stays asserted. */
	int d = (bit ? EEPROM_BIT_DI : 0);
	eeprom_write_reg(p, EEPROM_BIT_CS | d);
	eeprom_write_reg(p, EEPROM_BIT_CS | d | EEPROM_BIT_SK);
}
2386
/* push an opcode followed by an address down to the eeprom */
static void eeprom_op(struct ipw_priv *priv, u8 op, u8 addr)
{
	int i;

	eeprom_cs(priv);
	/* Start bit, then the two opcode bits (MSB first)... */
	eeprom_write_bit(priv, 1);
	eeprom_write_bit(priv, op & 2);
	eeprom_write_bit(priv, op & 1);
	/* ...then the 8-bit address, also MSB first. */
	for (i = 7; i >= 0; i--) {
		eeprom_write_bit(priv, addr & (1 << i));
	}
}
2400
/* pull 16 bits off the eeprom, one bit at a time */
static u16 eeprom_read_u16(struct ipw_priv *priv, u8 addr)
{
	int i;
	u16 r = 0;

	/* Send READ Opcode */
	eeprom_op(priv, EEPROM_CMD_READ, addr);

	/* Send dummy bit */
	eeprom_write_reg(priv, EEPROM_BIT_CS);

	/* Read the byte off the eeprom one bit at a time */
	for (i = 0; i < 16; i++) {
		u32 data = 0;
		/* Pulse the clock, then sample the DO line; bits arrive
		 * MSB first. */
		eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
		eeprom_write_reg(priv, EEPROM_BIT_CS);
		data = ipw_read_reg32(priv, FW_MEM_REG_EEPROM_ACCESS);
		r = (r << 1) | ((data & EEPROM_BIT_DO) ? 1 : 0);
	}

	/* Send another dummy bit */
	eeprom_write_reg(priv, 0);
	eeprom_disable_cs(priv);

	return r;
}
2428
2429 /* helper function for pulling the mac address out of the private */
2430 /* data's copy of the eeprom data */
2431 static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac)
2432 {
2433 memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], 6);
2434 }
2435
2436 /*
2437 * Either the device driver (i.e. the host) or the firmware can
2438 * load eeprom data into the designated region in SRAM. If neither
2439 * happens then the FW will shutdown with a fatal error.
2440 *
2441 * In order to signal the FW to load the EEPROM, the EEPROM_LOAD_DISABLE
 * bit in the designated region of shared SRAM needs to be non-zero.
2443 */
2444 static void ipw_eeprom_init_sram(struct ipw_priv *priv)
2445 {
2446 int i;
2447 u16 *eeprom = (u16 *) priv->eeprom;
2448
2449 IPW_DEBUG_TRACE(">>\n");
2450
2451 /* read entire contents of eeprom into private buffer */
2452 for (i = 0; i < 128; i++)
2453 eeprom[i] = le16_to_cpu(eeprom_read_u16(priv, (u8) i));
2454
2455 /*
2456 If the data looks correct, then copy it to our private
2457 copy. Otherwise let the firmware know to perform the operation
2458 on its own.
2459 */
2460 if (priv->eeprom[EEPROM_VERSION] != 0) {
2461 IPW_DEBUG_INFO("Writing EEPROM data into SRAM\n");
2462
2463 /* write the eeprom data to sram */
2464 for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
2465 ipw_write8(priv, IPW_EEPROM_DATA + i, priv->eeprom[i]);
2466
2467 /* Do not load eeprom data on fatal error or suspend */
2468 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
2469 } else {
2470 IPW_DEBUG_INFO("Enabling FW initializationg of SRAM\n");
2471
2472 /* Load eeprom data on fatal error or suspend */
2473 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 1);
2474 }
2475
2476 IPW_DEBUG_TRACE("<<\n");
2477 }
2478
2479 static void ipw_zero_memory(struct ipw_priv *priv, u32 start, u32 count)
2480 {
2481 count >>= 2;
2482 if (!count)
2483 return;
2484 _ipw_write32(priv, IPW_AUTOINC_ADDR, start);
2485 while (count--)
2486 _ipw_write32(priv, IPW_AUTOINC_DATA, 0);
2487 }
2488
static inline void ipw_fw_dma_reset_command_blocks(struct ipw_priv *priv)
{
	/* Clear the whole command-block array in shared SRAM so the DMA
	 * engine starts from a known-empty state. */
	ipw_zero_memory(priv, IPW_SHARED_SRAM_DMA_CONTROL,
			CB_NUMBER_OF_ELEMENTS_SMALL *
			sizeof(struct command_block));
}
2495
2496 static int ipw_fw_dma_enable(struct ipw_priv *priv)
2497 { /* start dma engine but no transfers yet */
2498
2499 IPW_DEBUG_FW(">> : \n");
2500
2501 /* Start the dma */
2502 ipw_fw_dma_reset_command_blocks(priv);
2503
2504 /* Write CB base address */
2505 ipw_write_reg32(priv, IPW_DMA_I_CB_BASE, IPW_SHARED_SRAM_DMA_CONTROL);
2506
2507 IPW_DEBUG_FW("<< : \n");
2508 return 0;
2509 }
2510
2511 static void ipw_fw_dma_abort(struct ipw_priv *priv)
2512 {
2513 u32 control = 0;
2514
2515 IPW_DEBUG_FW(">> :\n");
2516
2517 //set the Stop and Abort bit
2518 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_STOP_AND_ABORT;
2519 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2520 priv->sram_desc.last_cb_index = 0;
2521
2522 IPW_DEBUG_FW("<< \n");
2523 }
2524
2525 static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index,
2526 struct command_block *cb)
2527 {
2528 u32 address =
2529 IPW_SHARED_SRAM_DMA_CONTROL +
2530 (sizeof(struct command_block) * index);
2531 IPW_DEBUG_FW(">> :\n");
2532
2533 ipw_write_indirect(priv, address, (u8 *) cb,
2534 (int)sizeof(struct command_block));
2535
2536 IPW_DEBUG_FW("<< :\n");
2537 return 0;
2538
2539 }
2540
/*
 * Flush every queued command block to shared SRAM and start the DMA
 * engine.  Always returns 0.
 */
static int ipw_fw_dma_kick(struct ipw_priv *priv)
{
	u32 control = 0;
	u32 index = 0;

	IPW_DEBUG_FW(">> :\n");

	/* Copy each staged CB into its device-memory slot. */
	for (index = 0; index < priv->sram_desc.last_cb_index; index++)
		ipw_fw_dma_write_command_block(priv, index,
					       &priv->sram_desc.cb_list[index]);

	/* Enable the DMA in the CSR register */
	ipw_clear_bit(priv, IPW_RESET_REG,
		      IPW_RESET_REG_MASTER_DISABLED |
		      IPW_RESET_REG_STOP_MASTER);

	/* Set the Start bit. */
	control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_START;
	ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);

	IPW_DEBUG_FW("<< :\n");
	return 0;
}
2564
2565 static void ipw_fw_dma_dump_command_block(struct ipw_priv *priv)
2566 {
2567 u32 address;
2568 u32 register_value = 0;
2569 u32 cb_fields_address = 0;
2570
2571 IPW_DEBUG_FW(">> :\n");
2572 address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2573 IPW_DEBUG_FW_INFO("Current CB is 0x%x \n", address);
2574
2575 /* Read the DMA Controlor register */
2576 register_value = ipw_read_reg32(priv, IPW_DMA_I_DMA_CONTROL);
2577 IPW_DEBUG_FW_INFO("IPW_DMA_I_DMA_CONTROL is 0x%x \n", register_value);
2578
2579 /* Print the CB values */
2580 cb_fields_address = address;
2581 register_value = ipw_read_reg32(priv, cb_fields_address);
2582 IPW_DEBUG_FW_INFO("Current CB ControlField is 0x%x \n", register_value);
2583
2584 cb_fields_address += sizeof(u32);
2585 register_value = ipw_read_reg32(priv, cb_fields_address);
2586 IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x \n", register_value);
2587
2588 cb_fields_address += sizeof(u32);
2589 register_value = ipw_read_reg32(priv, cb_fields_address);
2590 IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x \n",
2591 register_value);
2592
2593 cb_fields_address += sizeof(u32);
2594 register_value = ipw_read_reg32(priv, cb_fields_address);
2595 IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x \n", register_value);
2596
2597 IPW_DEBUG_FW(">> :\n");
2598 }
2599
2600 static int ipw_fw_dma_command_block_index(struct ipw_priv *priv)
2601 {
2602 u32 current_cb_address = 0;
2603 u32 current_cb_index = 0;
2604
2605 IPW_DEBUG_FW("<< :\n");
2606 current_cb_address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2607
2608 current_cb_index = (current_cb_address - IPW_SHARED_SRAM_DMA_CONTROL) /
2609 sizeof(struct command_block);
2610
2611 IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X \n",
2612 current_cb_index, current_cb_address);
2613
2614 IPW_DEBUG_FW(">> :\n");
2615 return current_cb_index;
2616
2617 }
2618
/*
 * Stage one DMA command block in the in-memory descriptor list.
 *
 * Returns 0 on success, -1 if the fixed-size CB array is full.  The block
 * is only staged here; ipw_fw_dma_kick() later writes it to the device
 * and starts the transfer.
 */
static int ipw_fw_dma_add_command_block(struct ipw_priv *priv,
					u32 src_address,
					u32 dest_address,
					u32 length,
					int interrupt_enabled, int is_last)
{

	u32 control = CB_VALID | CB_SRC_LE | CB_DEST_LE | CB_SRC_AUTOINC |
	    CB_SRC_IO_GATED | CB_DEST_AUTOINC | CB_SRC_SIZE_LONG |
	    CB_DEST_SIZE_LONG;
	struct command_block *cb;
	u32 last_cb_element = 0;

	IPW_DEBUG_FW_INFO("src_address=0x%x dest_address=0x%x length=0x%x\n",
			  src_address, dest_address, length);

	/* No room left in the fixed-size command block array. */
	if (priv->sram_desc.last_cb_index >= CB_NUMBER_OF_ELEMENTS_SMALL)
		return -1;

	last_cb_element = priv->sram_desc.last_cb_index;
	cb = &priv->sram_desc.cb_list[last_cb_element];
	priv->sram_desc.last_cb_index++;

	/* Calculate the new CB control word */
	if (interrupt_enabled)
		control |= CB_INT_ENABLED;

	if (is_last)
		control |= CB_LAST_VALID;

	control |= length;

	/* Calculate the CB Element's checksum value */
	cb->status = control ^ src_address ^ dest_address;

	/* Copy the Source and Destination addresses */
	cb->dest_addr = dest_address;
	cb->source_addr = src_address;

	/* Copy the Control Word last */
	cb->control = control;

	return 0;
}
2663
/*
 * Stage DMA command blocks to copy 'length' bytes from host physical
 * memory at src_phys to device address dest_address, splitting the
 * transfer into CB_MAX_LENGTH-sized pieces.  Returns 0 on success, -1 if
 * the command block array fills up.
 */
static int ipw_fw_dma_add_buffer(struct ipw_priv *priv,
				 u32 src_phys, u32 dest_address, u32 length)
{
	u32 bytes_left = length;
	u32 src_offset = 0;
	u32 dest_offset = 0;
	int status = 0;
	IPW_DEBUG_FW(">> \n");
	IPW_DEBUG_FW_INFO("src_phys=0x%x dest_address=0x%x length=0x%x\n",
			  src_phys, dest_address, length);
	/* Full-sized chunks first... */
	while (bytes_left > CB_MAX_LENGTH) {
		status = ipw_fw_dma_add_command_block(priv,
						      src_phys + src_offset,
						      dest_address +
						      dest_offset,
						      CB_MAX_LENGTH, 0, 0);
		if (status) {
			IPW_DEBUG_FW_INFO(": Failed\n");
			return -1;
		} else
			IPW_DEBUG_FW_INFO(": Added new cb\n");

		src_offset += CB_MAX_LENGTH;
		dest_offset += CB_MAX_LENGTH;
		bytes_left -= CB_MAX_LENGTH;
	}

	/* add the buffer tail */
	if (bytes_left > 0) {
		status =
		    ipw_fw_dma_add_command_block(priv, src_phys + src_offset,
						 dest_address + dest_offset,
						 bytes_left, 0, 0);
		if (status) {
			IPW_DEBUG_FW_INFO(": Failed on the buffer tail\n");
			return -1;
		} else
			IPW_DEBUG_FW_INFO
			    (": Adding new cb - the buffer tail\n");
	}

	IPW_DEBUG_FW("<< \n");
	return 0;
}
2708
/*
 * Busy-wait (in 50us steps) until the DMA engine has consumed every
 * staged command block, then stop the engine.  Gives up and aborts the
 * DMA after 400 polls with no forward progress.  Returns 0 on success,
 * -1 on timeout.
 */
static int ipw_fw_dma_wait(struct ipw_priv *priv)
{
	u32 current_index = 0, previous_index;
	u32 watchdog = 0;

	IPW_DEBUG_FW(">> : \n");

	current_index = ipw_fw_dma_command_block_index(priv);
	IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%08X\n",
			  (int)priv->sram_desc.last_cb_index);

	while (current_index < priv->sram_desc.last_cb_index) {
		udelay(50);
		previous_index = current_index;
		current_index = ipw_fw_dma_command_block_index(priv);

		/* Any forward progress resets the watchdog. */
		if (previous_index < current_index) {
			watchdog = 0;
			continue;
		}
		if (++watchdog > 400) {
			IPW_DEBUG_FW_INFO("Timeout\n");
			ipw_fw_dma_dump_command_block(priv);
			ipw_fw_dma_abort(priv);
			return -1;
		}
	}

	ipw_fw_dma_abort(priv);

	/*Disable the DMA in the CSR register */
	ipw_set_bit(priv, IPW_RESET_REG,
		    IPW_RESET_REG_MASTER_DISABLED | IPW_RESET_REG_STOP_MASTER);

	IPW_DEBUG_FW("<< dmaWaitSync \n");
	return 0;
}
2746
/*
 * Move every scan-list entry whose BSSID matches the currently associated
 * BSSID back to the free list.  Runs under the ieee subsystem lock.
 */
static void ipw_remove_current_network(struct ipw_priv *priv)
{
	struct list_head *element, *safe;
	struct ieee80211_network *network = NULL;
	unsigned long flags;

	spin_lock_irqsave(&priv->ieee->lock, flags);
	/* _safe variant: entries are unlinked while walking the list. */
	list_for_each_safe(element, safe, &priv->ieee->network_list) {
		network = list_entry(element, struct ieee80211_network, list);
		if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
			list_del(element);
			list_add_tail(&network->list,
				      &priv->ieee->network_free_list);
		}
	}
	spin_unlock_irqrestore(&priv->ieee->lock, flags);
}
2764
2765 /**
2766 * Check that card is still alive.
2767 * Reads debug register from domain0.
2768 * If card is present, pre-defined value should
2769 * be found there.
2770 *
2771 * @param priv
2772 * @return 1 if card is present, 0 otherwise
2773 */
2774 static inline int ipw_alive(struct ipw_priv *priv)
2775 {
2776 return ipw_read32(priv, 0x90) == 0xd55555d5;
2777 }
2778
2779 /* timeout in msec, attempted in 10-msec quanta */
2780 static int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask,
2781 int timeout)
2782 {
2783 int i = 0;
2784
2785 do {
2786 if ((ipw_read32(priv, addr) & mask) == mask)
2787 return i;
2788 mdelay(10);
2789 i += 10;
2790 } while (i < timeout);
2791
2792 return -ETIME;
2793 }
2794
2795 /* These functions load the firmware and micro code for the operation of
2796 * the ipw hardware. It assumes the buffer has all the bits for the
2797 * image and the caller is handling the memory allocation and clean up.
2798 */
2799
2800 static int ipw_stop_master(struct ipw_priv *priv)
2801 {
2802 int rc;
2803
2804 IPW_DEBUG_TRACE(">> \n");
2805 /* stop master. typical delay - 0 */
2806 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
2807
2808 /* timeout is in msec, polled in 10-msec quanta */
2809 rc = ipw_poll_bit(priv, IPW_RESET_REG,
2810 IPW_RESET_REG_MASTER_DISABLED, 100);
2811 if (rc < 0) {
2812 IPW_ERROR("wait for stop master failed after 100ms\n");
2813 return -1;
2814 }
2815
2816 IPW_DEBUG_INFO("stop master %dms\n", rc);
2817
2818 return rc;
2819 }
2820
/* Release the ARC processor from (Princeton) reset. */
static void ipw_arc_release(struct ipw_priv *priv)
{
	IPW_DEBUG_TRACE(">> \n");
	mdelay(5);

	ipw_clear_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);

	/* no one knows timing, for safety add some delay */
	mdelay(5);
}
2831
/* Header preceding each payload chunk in the firmware image: the device
 * destination address and the payload length (stored little-endian --
 * the loader converts both with le32_to_cpu). */
struct fw_chunk {
	u32 address;
	u32 length;
};
2836
/*
 * Load the DINO microcode image and wait for its "alive" response.
 *
 * Stops the bus master, zeroes shared memory, resets the baseband
 * processor, clocks the image in 16 bits at a time, then polls up to
 * 100ms for the alive message.  Returns 0 on success, -EINVAL if the
 * response is malformed, -ETIME if none arrives, or the
 * ipw_stop_master() failure code.
 */
static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
{
	int rc = 0, i, addr;
	u8 cr = 0;
	u16 *image;

	image = (u16 *) data;

	IPW_DEBUG_TRACE(">> \n");

	rc = ipw_stop_master(priv);

	if (rc < 0)
		return rc;

//      spin_lock_irqsave(&priv->lock, flags);

	/* Zero the entire shared memory region. */
	for (addr = IPW_SHARED_LOWER_BOUND;
	     addr < IPW_REGISTER_DOMAIN1_END; addr += 4) {
		ipw_write32(priv, addr, 0);
	}

	/* no ucode (yet) */
	memset(&priv->dino_alive, 0, sizeof(priv->dino_alive));
	/* destroy DMA queues */
	/* reset sequence */

	ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_ON);
	ipw_arc_release(priv);
	ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_OFF);
	mdelay(1);

	/* reset PHY */
	ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, IPW_BASEBAND_POWER_DOWN);
	mdelay(1);

	ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, 0);
	mdelay(1);

	/* enable ucode store */
	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0x0);
	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_CS);
	mdelay(1);

	/* write ucode */
	/**
	 * @bug
	 * Do NOT set indirect address register once and then
	 * store data to indirect data register in the loop.
	 * It seems very reasonable, but in this case DINO do not
	 * accept ucode. It is essential to set address each time.
	 */
	/* load new ipw uCode */
	for (i = 0; i < len / 2; i++)
		ipw_write_reg16(priv, IPW_BASEBAND_CONTROL_STORE,
				cpu_to_le16(image[i]));

	/* enable DINO */
	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_SYSTEM);

	/* this is where the igx / win driver deviates from the VAP driver. */

	/* wait for alive response */
	for (i = 0; i < 100; i++) {
		/* poll for incoming data */
		cr = ipw_read_reg8(priv, IPW_BASEBAND_CONTROL_STATUS);
		if (cr & DINO_RXFIFO_DATA)
			break;
		mdelay(1);
	}

	if (cr & DINO_RXFIFO_DATA) {
		/* alive_command_responce size is NOT multiple of 4 */
		u32 response_buffer[(sizeof(priv->dino_alive) + 3) / 4];

		for (i = 0; i < ARRAY_SIZE(response_buffer); i++)
			response_buffer[i] =
			    le32_to_cpu(ipw_read_reg32(priv,
						       IPW_BASEBAND_RX_FIFO_READ));
		memcpy(&priv->dino_alive, response_buffer,
		       sizeof(priv->dino_alive));
		if (priv->dino_alive.alive_command == 1
		    && priv->dino_alive.ucode_valid == 1) {
			rc = 0;
			IPW_DEBUG_INFO
			    ("Microcode OK, rev. %d (0x%x) dev. %d (0x%x) "
			     "of %02d/%02d/%02d %02d:%02d\n",
			     priv->dino_alive.software_revision,
			     priv->dino_alive.software_revision,
			     priv->dino_alive.device_identifier,
			     priv->dino_alive.device_identifier,
			     priv->dino_alive.time_stamp[0],
			     priv->dino_alive.time_stamp[1],
			     priv->dino_alive.time_stamp[2],
			     priv->dino_alive.time_stamp[3],
			     priv->dino_alive.time_stamp[4]);
		} else {
			IPW_DEBUG_INFO("Microcode is not alive\n");
			rc = -EINVAL;
		}
	} else {
		IPW_DEBUG_INFO("No alive response from DINO\n");
		rc = -ETIME;
	}

	/* disable DINO, otherwise for some reason
	   firmware have problem getting alive resp. */
	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);

//      spin_unlock_irqrestore(&priv->lock, flags);

	return rc;
}
2951
2952 static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len)
2953 {
2954 int rc = -1;
2955 int offset = 0;
2956 struct fw_chunk *chunk;
2957 dma_addr_t shared_phys;
2958 u8 *shared_virt;
2959
2960 IPW_DEBUG_TRACE("<< : \n");
2961 shared_virt = pci_alloc_consistent(priv->pci_dev, len, &shared_phys);
2962
2963 if (!shared_virt)
2964 return -ENOMEM;
2965
2966 memmove(shared_virt, data, len);
2967
2968 /* Start the Dma */
2969 rc = ipw_fw_dma_enable(priv);
2970
2971 if (priv->sram_desc.last_cb_index > 0) {
2972 /* the DMA is already ready this would be a bug. */
2973 BUG();
2974 goto out;
2975 }
2976
2977 do {
2978 chunk = (struct fw_chunk *)(data + offset);
2979 offset += sizeof(struct fw_chunk);
2980 /* build DMA packet and queue up for sending */
2981 /* dma to chunk->address, the chunk->length bytes from data +
2982 * offeset*/
2983 /* Dma loading */
2984 rc = ipw_fw_dma_add_buffer(priv, shared_phys + offset,
2985 le32_to_cpu(chunk->address),
2986 le32_to_cpu(chunk->length));
2987 if (rc) {
2988 IPW_DEBUG_INFO("dmaAddBuffer Failed\n");
2989 goto out;
2990 }
2991
2992 offset += le32_to_cpu(chunk->length);
2993 } while (offset < len);
2994
2995 /* Run the DMA and wait for the answer */
2996 rc = ipw_fw_dma_kick(priv);
2997 if (rc) {
2998 IPW_ERROR("dmaKick Failed\n");
2999 goto out;
3000 }
3001
3002 rc = ipw_fw_dma_wait(priv);
3003 if (rc) {
3004 IPW_ERROR("dmaWaitSync Failed\n");
3005 goto out;
3006 }
3007 out:
3008 pci_free_consistent(priv->pci_dev, len, shared_virt, shared_phys);
3009 return rc;
3010 }
3011
3012 /* stop nic */
3013 static int ipw_stop_nic(struct ipw_priv *priv)
3014 {
3015 int rc = 0;
3016
3017 /* stop */
3018 ipw_write32(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
3019
3020 rc = ipw_poll_bit(priv, IPW_RESET_REG,
3021 IPW_RESET_REG_MASTER_DISABLED, 500);
3022 if (rc < 0) {
3023 IPW_ERROR("wait for reg master disabled failed after 500ms\n");
3024 return rc;
3025 }
3026
3027 ipw_set_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
3028
3029 return rc;
3030 }
3031
/* Release the NIC from reset and allow it to enter standby. */
static void ipw_start_nic(struct ipw_priv *priv)
{
	IPW_DEBUG_TRACE(">>\n");

	/* prvHwStartNic  release ARC */
	ipw_clear_bit(priv, IPW_RESET_REG,
		      IPW_RESET_REG_MASTER_DISABLED |
		      IPW_RESET_REG_STOP_MASTER |
		      CBD_RESET_REG_PRINCETON_RESET);

	/* enable power management */
	ipw_set_bit(priv, IPW_GP_CNTRL_RW,
		    IPW_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY);

	IPW_DEBUG_TRACE("<<\n");
}
3048
3049 static int ipw_init_nic(struct ipw_priv *priv)
3050 {
3051 int rc;
3052
3053 IPW_DEBUG_TRACE(">>\n");
3054 /* reset */
3055 /*prvHwInitNic */
3056 /* set "initialization complete" bit to move adapter to D0 state */
3057 ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3058
3059 /* low-level PLL activation */
3060 ipw_write32(priv, IPW_READ_INT_REGISTER,
3061 IPW_BIT_INT_HOST_SRAM_READ_INT_REGISTER);
3062
3063 /* wait for clock stabilization */
3064 rc = ipw_poll_bit(priv, IPW_GP_CNTRL_RW,
3065 IPW_GP_CNTRL_BIT_CLOCK_READY, 250);
3066 if (rc < 0)
3067 IPW_DEBUG_INFO("FAILED wait for clock stablization\n");
3068
3069 /* assert SW reset */
3070 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_SW_RESET);
3071
3072 udelay(10);
3073
3074 /* set "initialization complete" bit to move adapter to D0 state */
3075 ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3076
3077 IPW_DEBUG_TRACE(">>\n");
3078 return 0;
3079 }
3080
3081 /* Call this function from process context, it will sleep in request_firmware.
3082 * Probe is an ok place to call this from.
3083 */
3084 static int ipw_reset_nic(struct ipw_priv *priv)
3085 {
3086 int rc = 0;
3087 unsigned long flags;
3088
3089 IPW_DEBUG_TRACE(">>\n");
3090
3091 rc = ipw_init_nic(priv);
3092
3093 spin_lock_irqsave(&priv->lock, flags);
3094 /* Clear the 'host command active' bit... */
3095 priv->status &= ~STATUS_HCMD_ACTIVE;
3096 wake_up_interruptible(&priv->wait_command_queue);
3097 priv->status &= ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
3098 wake_up_interruptible(&priv->wait_state);
3099 spin_unlock_irqrestore(&priv->lock, flags);
3100
3101 IPW_DEBUG_TRACE("<<\n");
3102 return rc;
3103 }
3104
3105
/* Layout of the combined on-disk firmware file: header fields are stored
 * little-endian (callers convert with le32_to_cpu), followed by the boot,
 * ucode and runtime firmware images back to back in data[]. */
struct ipw_fw {
	u32 ver;		/* firmware version word */
	u32 boot_size;		/* bytes of boot image at data[0] */
	u32 ucode_size;		/* bytes of microcode after the boot image */
	u32 fw_size;		/* bytes of runtime firmware after the ucode */
	u8 data[0];		/* the three images, concatenated */
};
3113
/*
 * Fetch a firmware image via the firmware_class interface and sanity-check
 * that the header and the three advertised image sizes fit inside the file
 * that was actually loaded.  Returns 0 and sets *raw on success, or a
 * negative errno.
 *
 * NOTE(review): the -EINVAL paths return without release_firmware(*raw);
 * verify the callers' error handling releases it, otherwise this leaks.
 */
static int ipw_get_fw(struct ipw_priv *priv,
		      const struct firmware **raw, const char *name)
{
	struct ipw_fw *fw;
	int rc;

	/* ask firmware_class module to get the boot firmware off disk */
	rc = request_firmware(raw, name, &priv->pci_dev->dev);
	if (rc < 0) {
		IPW_ERROR("%s request_firmware failed: Reason %d\n", name, rc);
		return rc;
	}

	if ((*raw)->size < sizeof(*fw)) {
		IPW_ERROR("%s is too small (%zd)\n", name, (*raw)->size);
		return -EINVAL;
	}

	fw = (void *)(*raw)->data;

	/* make sure the advertised image sizes all fit inside the file */
	if ((*raw)->size < sizeof(*fw) +
	    fw->boot_size + fw->ucode_size + fw->fw_size) {
		IPW_ERROR("%s is too small or corrupt (%zd)\n",
			  name, (*raw)->size);
		return -EINVAL;
	}

	IPW_DEBUG_INFO("Read firmware '%s' image v%d.%d (%zd bytes)\n",
		       name,
		       le32_to_cpu(fw->ver) >> 16,
		       le32_to_cpu(fw->ver) & 0xff,
		       (*raw)->size - sizeof(*fw));
	return 0;
}
3148
3149 #define IPW_RX_BUF_SIZE (3000)
3150
/*
 * Return the Rx queue to its power-on state: unmap and free any skbs still
 * attached to the buffer pool and put every pool entry on the rx_used
 * list.  Runs under the queue spinlock.
 */
static void ipw_rx_queue_reset(struct ipw_priv *priv,
			       struct ipw_rx_queue *rxq)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&rxq->lock, flags);

	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);

	/* Fill the rx_used queue with _all_ of the Rx buffers */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		/* In the reset function, these buffers may have been allocated
		 * to an SKB, so we need to unmap and free potential storage */
		if (rxq->pool[i].skb != NULL) {
			pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
					 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(rxq->pool[i].skb);
			rxq->pool[i].skb = NULL;
		}
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
	}

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->processed = RX_QUEUE_SIZE - 1;
	rxq->free_count = 0;
	spin_unlock_irqrestore(&rxq->lock, flags);
}
3182
#ifdef CONFIG_PM
/* With PM, the firmware image is cached across suspend/resume in the
 * file-scope 'raw'/'fw_loaded' pair so resume does not need to hit the
 * filesystem (which may not be available yet). */
static int fw_loaded = 0;
static const struct firmware *raw = NULL;

/* Drop the cached firmware image, if one is held. */
static void free_firmware(void)
{
	if (fw_loaded) {
		release_firmware(raw);
		raw = NULL;
		fw_loaded = 0;
	}
}
#else
/* No caching without PM: nothing to free. */
#define free_firmware() do {} while (0)
#endif
3198
3199 static int ipw_load(struct ipw_priv *priv)
3200 {
3201 #ifndef CONFIG_PM
3202 const struct firmware *raw = NULL;
3203 #endif
3204 struct ipw_fw *fw;
3205 u8 *boot_img, *ucode_img, *fw_img;
3206 u8 *name = NULL;
3207 int rc = 0, retries = 3;
3208
3209 switch (priv->ieee->iw_mode) {
3210 case IW_MODE_ADHOC:
3211 name = "ipw2200-ibss.fw";
3212 break;
3213 #ifdef CONFIG_IPW2200_MONITOR
3214 case IW_MODE_MONITOR:
3215 name = "ipw2200-sniffer.fw";
3216 break;
3217 #endif
3218 case IW_MODE_INFRA:
3219 name = "ipw2200-bss.fw";
3220 break;
3221 }
3222
3223 if (!name) {
3224 rc = -EINVAL;
3225 goto error;
3226 }
3227
3228 #ifdef CONFIG_PM
3229 if (!fw_loaded) {
3230 #endif
3231 rc = ipw_get_fw(priv, &raw, name);
3232 if (rc < 0)
3233 goto error;
3234 #ifdef CONFIG_PM
3235 }
3236 #endif
3237
3238 fw = (void *)raw->data;
3239 boot_img = &fw->data[0];
3240 ucode_img = &fw->data[fw->boot_size];
3241 fw_img = &fw->data[fw->boot_size + fw->ucode_size];
3242
3243 if (rc < 0)
3244 goto error;
3245
3246 if (!priv->rxq)
3247 priv->rxq = ipw_rx_queue_alloc(priv);
3248 else
3249 ipw_rx_queue_reset(priv, priv->rxq);
3250 if (!priv->rxq) {
3251 IPW_ERROR("Unable to initialize Rx queue\n");
3252 goto error;
3253 }
3254
3255 retry:
3256 /* Ensure interrupts are disabled */
3257 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3258 priv->status &= ~STATUS_INT_ENABLED;
3259
3260 /* ack pending interrupts */
3261 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3262
3263 ipw_stop_nic(priv);
3264
3265 rc = ipw_reset_nic(priv);
3266 if (rc < 0) {
3267 IPW_ERROR("Unable to reset NIC\n");
3268 goto error;
3269 }
3270
3271 ipw_zero_memory(priv, IPW_NIC_SRAM_LOWER_BOUND,
3272 IPW_NIC_SRAM_UPPER_BOUND - IPW_NIC_SRAM_LOWER_BOUND);
3273
3274 /* DMA the initial boot firmware into the device */
3275 rc = ipw_load_firmware(priv, boot_img, fw->boot_size);
3276 if (rc < 0) {
3277 IPW_ERROR("Unable to load boot firmware: %d\n", rc);
3278 goto error;
3279 }
3280
3281 /* kick start the device */
3282 ipw_start_nic(priv);
3283
3284 /* wait for the device to finish its initial startup sequence */
3285 rc = ipw_poll_bit(priv, IPW_INTA_RW,
3286 IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3287 if (rc < 0) {
3288 IPW_ERROR("device failed to boot initial fw image\n");
3289 goto error;
3290 }
3291 IPW_DEBUG_INFO("initial device response after %dms\n", rc);
3292
3293 /* ack fw init done interrupt */
3294 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3295
3296 /* DMA the ucode into the device */
3297 rc = ipw_load_ucode(priv, ucode_img, fw->ucode_size);
3298 if (rc < 0) {
3299 IPW_ERROR("Unable to load ucode: %d\n", rc);
3300 goto error;
3301 }
3302
3303 /* stop nic */
3304 ipw_stop_nic(priv);
3305
3306 /* DMA bss firmware into the device */
3307 rc = ipw_load_firmware(priv, fw_img, fw->fw_size);
3308 if (rc < 0) {
3309 IPW_ERROR("Unable to load firmware: %d\n", rc);
3310 goto error;
3311 }
3312 #ifdef CONFIG_PM
3313 fw_loaded = 1;
3314 #endif
3315
3316 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
3317
3318 rc = ipw_queue_reset(priv);
3319 if (rc < 0) {
3320 IPW_ERROR("Unable to initialize queues\n");
3321 goto error;
3322 }
3323
3324 /* Ensure interrupts are disabled */
3325 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3326 /* ack pending interrupts */
3327 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3328
3329 /* kick start the device */
3330 ipw_start_nic(priv);
3331
3332 if (ipw_read32(priv, IPW_INTA_RW) & IPW_INTA_BIT_PARITY_ERROR) {
3333 if (retries > 0) {
3334 IPW_WARNING("Parity error. Retrying init.\n");
3335 retries--;
3336 goto retry;
3337 }
3338
3339 IPW_ERROR("TODO: Handle parity error -- schedule restart?\n");
3340 rc = -EIO;
3341 goto error;
3342 }
3343
3344 /* wait for the device */
3345 rc = ipw_poll_bit(priv, IPW_INTA_RW,
3346 IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3347 if (rc < 0) {
3348 IPW_ERROR("device failed to start within 500ms\n");
3349 goto error;
3350 }
3351 IPW_DEBUG_INFO("device response after %dms\n", rc);
3352
3353 /* ack fw init done interrupt */
3354 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3355
3356 /* read eeprom data and initialize the eeprom region of sram */
3357 priv->eeprom_delay = 1;
3358 ipw_eeprom_init_sram(priv);
3359
3360 /* enable interrupts */
3361 ipw_enable_interrupts(priv);
3362
3363 /* Ensure our queue has valid packets */
3364 ipw_rx_queue_replenish(priv);
3365
3366 ipw_write32(priv, IPW_RX_READ_INDEX, priv->rxq->read);
3367
3368 /* ack pending interrupts */
3369 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3370
3371 #ifndef CONFIG_PM
3372 release_firmware(raw);
3373 #endif
3374 return 0;
3375
3376 error:
3377 if (priv->rxq) {
3378 ipw_rx_queue_free(priv, priv->rxq);
3379 priv->rxq = NULL;
3380 }
3381 ipw_tx_queue_free(priv);
3382 if (raw)
3383 release_firmware(raw);
3384 #ifdef CONFIG_PM
3385 fw_loaded = 0;
3386 raw = NULL;
3387 #endif
3388
3389 return rc;
3390 }
3391
3392 /**
3393 * DMA services
3394 *
3395 * Theory of operation
3396 *
 * A queue is a circular buffer with 'Read' and 'Write' pointers.
 * Two empty entries are always kept in the buffer to protect against
 * overflow.
 *
 * For the Tx queue there are low-mark and high-mark limits.  If, after
 * queuing a packet for Tx, the free space drops below the low mark, the
 * Tx queue is stopped.  When reclaiming packets (on the 'tx done' IRQ),
 * the Tx queue is resumed once the free space rises above the high mark.
3404 *
3405 * The IPW operates with six queues, one receive queue in the device's
3406 * sram, one transmit queue for sending commands to the device firmware,
3407 * and four transmit queues for data.
3408 *
3409 * The four transmit queues allow for performing quality of service (qos)
3410 * transmissions as per the 802.11 protocol. Currently Linux does not
3411 * provide a mechanism to the user for utilizing prioritized queues, so
3412 * we only utilize the first data transmit queue (queue1).
3413 */
3414
3415 /**
3416 * Driver allocates buffers of this size for Rx
3417 */
3418
3419 static inline int ipw_queue_space(const struct clx2_queue *q)
3420 {
3421 int s = q->last_used - q->first_empty;
3422 if (s <= 0)
3423 s += q->n_bd;
3424 s -= 2; /* keep some reserve to not confuse empty and full situations */
3425 if (s < 0)
3426 s = 0;
3427 return s;
3428 }
3429
/* Advance a ring index by one, wrapping back to 0 at n_bd entries. */
static inline int ipw_queue_inc_wrap(int index, int n_bd)
{
	index++;
	if (index == n_bd)
		index = 0;
	return index;
}
3434
/**
 * Initialize common DMA queue structure and program the device's queue
 * registers with the ring's DMA address and geometry.
 *
 * @param priv driver private data (provides register access)
 * @param q queue to init
 * @param count Number of BD's to allocate.  Should be power of 2
 * @param read device register holding the queue's 'read' index
 * @param write device register holding the queue's 'write' index
 * @param base device register programmed with the ring's DMA base address
 * @param size device register programmed with the ring size
 */
static void ipw_queue_init(struct ipw_priv *priv, struct clx2_queue *q,
			   int count, u32 read, u32 write, u32 base, u32 size)
{
	q->n_bd = count;

	/* Flow-control watermarks: stop the queue when free space falls
	 * below low_mark, resume once it climbs back above high_mark */
	q->low_mark = q->n_bd / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_bd / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->first_empty = q->last_used = 0;
	q->reg_r = read;
	q->reg_w = write;

	ipw_write32(priv, base, q->dma_addr);
	ipw_write32(priv, size, count);
	ipw_write32(priv, read, 0);
	ipw_write32(priv, write, 0);

	/* NOTE(review): this read-back presumably flushes the posted
	 * writes above -- confirm against the register documentation */
	_ipw_read32(priv, 0x90);
}
3473
3474 static int ipw_queue_tx_init(struct ipw_priv *priv,
3475 struct clx2_tx_queue *q,
3476 int count, u32 read, u32 write, u32 base, u32 size)
3477 {
3478 struct pci_dev *dev = priv->pci_dev;
3479
3480 q->txb = kmalloc(sizeof(q->txb[0]) * count, GFP_KERNEL);
3481 if (!q->txb) {
3482 IPW_ERROR("vmalloc for auxilary BD structures failed\n");
3483 return -ENOMEM;
3484 }
3485
3486 q->bd =
3487 pci_alloc_consistent(dev, sizeof(q->bd[0]) * count, &q->q.dma_addr);
3488 if (!q->bd) {
3489 IPW_ERROR("pci_alloc_consistent(%zd) failed\n",
3490 sizeof(q->bd[0]) * count);
3491 kfree(q->txb);
3492 q->txb = NULL;
3493 return -ENOMEM;
3494 }
3495
3496 ipw_queue_init(priv, &q->q, count, read, write, base, size);
3497 return 0;
3498 }
3499
/**
 * Free one TFD, those at index [txq->q.last_used].
 * Do NOT advance any indexes
 *
 * Unmaps each DMA chunk of the descriptor and releases the associated
 * ieee80211 txb, if any.  Host-command descriptors carry no payload
 * buffers and are skipped entirely.
 *
 * @param priv driver private data
 * @param txq queue whose tail (last_used) descriptor is freed
 */
static void ipw_queue_tx_free_tfd(struct ipw_priv *priv,
				  struct clx2_tx_queue *txq)
{
	struct tfd_frame *bd = &txq->bd[txq->q.last_used];
	struct pci_dev *dev = priv->pci_dev;
	int i;

	/* classify bd */
	if (bd->control_flags.message_type == TX_HOST_COMMAND_TYPE)
		/* nothing to cleanup after for host commands */
		return;

	/* sanity check */
	if (le32_to_cpu(bd->u.data.num_chunks) > NUM_TFD_CHUNKS) {
		IPW_ERROR("Too many chunks: %i\n",
			  le32_to_cpu(bd->u.data.num_chunks));
		/** @todo issue fatal error, it is quite serious situation */
		return;
	}

	/* unmap chunks if any */
	for (i = 0; i < le32_to_cpu(bd->u.data.num_chunks); i++) {
		pci_unmap_single(dev, le32_to_cpu(bd->u.data.chunk_ptr[i]),
				 le16_to_cpu(bd->u.data.chunk_len[i]),
				 PCI_DMA_TODEVICE);
		/* the NULL guard makes the txb free idempotent even though
		 * this sits inside the per-chunk loop */
		if (txq->txb[txq->q.last_used]) {
			ieee80211_txb_free(txq->txb[txq->q.last_used]);
			txq->txb[txq->q.last_used] = NULL;
		}
	}
}
3538
/**
 * Deallocate DMA queue.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 *
 * A queue that was never initialized (n_bd == 0) is a no-op, which
 * makes this safe to call from error paths.
 *
 * @param priv driver private data
 * @param txq queue to destroy
 */
static void ipw_queue_tx_free(struct ipw_priv *priv, struct clx2_tx_queue *txq)
{
	struct clx2_queue *q = &txq->q;
	struct pci_dev *dev = priv->pci_dev;

	if (q->n_bd == 0)
		return;

	/* first, empty all BD's */
	for (; q->first_empty != q->last_used;
	     q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
		ipw_queue_tx_free_tfd(priv, txq);
	}

	/* free buffers belonging to queue itself */
	pci_free_consistent(dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd,
			    q->dma_addr);
	kfree(txq->txb);

	/* 0 fill whole structure (also clears n_bd, so a second call
	 * becomes a no-op) */
	memset(txq, 0, sizeof(*txq));
}
3570
3571 /**
3572 * Destroy all DMA queues and structures
3573 *
3574 * @param priv
3575 */
3576 static void ipw_tx_queue_free(struct ipw_priv *priv)
3577 {
3578 /* Tx CMD queue */
3579 ipw_queue_tx_free(priv, &priv->txq_cmd);
3580
3581 /* Tx queues */
3582 ipw_queue_tx_free(priv, &priv->txq[0]);
3583 ipw_queue_tx_free(priv, &priv->txq[1]);
3584 ipw_queue_tx_free(priv, &priv->txq[2]);
3585 ipw_queue_tx_free(priv, &priv->txq[3]);
3586 }
3587
3588 static void ipw_create_bssid(struct ipw_priv *priv, u8 * bssid)
3589 {
3590 /* First 3 bytes are manufacturer */
3591 bssid[0] = priv->mac_addr[0];
3592 bssid[1] = priv->mac_addr[1];
3593 bssid[2] = priv->mac_addr[2];
3594
3595 /* Last bytes are random */
3596 get_random_bytes(&bssid[3], ETH_ALEN - 3);
3597
3598 bssid[0] &= 0xfe; /* clear multicast bit */
3599 bssid[0] |= 0x02; /* set local assignment bit (IEEE802) */
3600 }
3601
/*
 * Find or add an AdHoc station entry for @bssid.
 *
 * Returns the station-table index, or IPW_INVALID_STATION when the
 * table is full.  For an already-known station, adhoc bookkeeping is
 * refreshed and the existing index returned.  New entries are mirrored
 * into the device's station table in SRAM.
 */
static u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid)
{
	struct ipw_station_entry entry;
	int i;

	for (i = 0; i < priv->num_stations; i++) {
		if (!memcmp(priv->stations[i], bssid, ETH_ALEN)) {
			/* Another node is active in network */
			priv->missed_adhoc_beacons = 0;
			if (!(priv->config & CFG_STATIC_CHANNEL))
				/* when other nodes drop out, we drop out */
				priv->config &= ~CFG_ADHOC_PERSIST;

			return i;
		}
	}

	/* here i == num_stations; reject only when the table is full */
	if (i == MAX_STATIONS)
		return IPW_INVALID_STATION;

	IPW_DEBUG_SCAN("Adding AdHoc station: " MAC_FMT "\n", MAC_ARG(bssid));

	entry.reserved = 0;
	entry.support_mode = 0;
	memcpy(entry.mac_addr, bssid, ETH_ALEN);
	memcpy(priv->stations[i], bssid, ETH_ALEN);
	/* keep the device's in-SRAM station table in sync */
	ipw_write_direct(priv, IPW_STATION_TABLE_LOWER + i * sizeof(entry),
			 &entry, sizeof(entry));
	priv->num_stations++;

	return i;
}
3634
3635 static u8 ipw_find_station(struct ipw_priv *priv, u8 * bssid)
3636 {
3637 int i;
3638
3639 for (i = 0; i < priv->num_stations; i++)
3640 if (!memcmp(priv->stations[i], bssid, ETH_ALEN))
3641 return i;
3642
3643 return IPW_INVALID_STATION;
3644 }
3645
/*
 * Ask the firmware to disassociate from the current BSS.
 *
 * If an association attempt is still in flight, the request is deferred
 * to the 'disassociate' work item; if we are not associated at all it is
 * a no-op.  @quiet selects HC_DISASSOC_QUIET (no frame exchange) over a
 * normal HC_DISASSOCIATE.
 */
static void ipw_send_disassociate(struct ipw_priv *priv, int quiet)
{
	int err;

	if (priv->status & STATUS_ASSOCIATING) {
		IPW_DEBUG_ASSOC("Disassociating while associating.\n");
		queue_work(priv->workqueue, &priv->disassociate);
		return;
	}

	if (!(priv->status & STATUS_ASSOCIATED)) {
		IPW_DEBUG_ASSOC("Disassociating while not associated.\n");
		return;
	}

	IPW_DEBUG_ASSOC("Disassocation attempt from " MAC_FMT " "
			"on channel %d.\n",
			MAC_ARG(priv->assoc_request.bssid),
			priv->assoc_request.channel);

	priv->status &= ~(STATUS_ASSOCIATING | STATUS_ASSOCIATED);
	priv->status |= STATUS_DISASSOCIATING;

	if (quiet)
		priv->assoc_request.assoc_type = HC_DISASSOC_QUIET;
	else
		priv->assoc_request.assoc_type = HC_DISASSOCIATE;

	/* the disassociate is sent through the same associate command */
	err = ipw_send_associate(priv, &priv->assoc_request);
	if (err) {
		IPW_DEBUG_HC("Attempt to send [dis]associate command "
			     "failed.\n");
		return;
	}

}
3682
3683 static int ipw_disassociate(void *data)
3684 {
3685 struct ipw_priv *priv = data;
3686 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)))
3687 return 0;
3688 ipw_send_disassociate(data, 0);
3689 return 1;
3690 }
3691
3692 static void ipw_bg_disassociate(void *data)
3693 {
3694 struct ipw_priv *priv = data;
3695 mutex_lock(&priv->mutex);
3696 ipw_disassociate(data);
3697 mutex_unlock(&priv->mutex);
3698 }
3699
3700 static void ipw_system_config(void *data)
3701 {
3702 struct ipw_priv *priv = data;
3703 ipw_send_system_config(priv, &priv->sys_config);
3704 }
3705
/* Maps an 802.11 status code to a human-readable reason string. */
struct ipw_status_code {
	u16 status;		/* 802.11 status code value */
	const char *reason;	/* textual description for debug output */
};
3710
/* 802.11 status codes and their textual reasons (derived from the
 * ethereal-0.10.6 tables credited in the file header).  Looked up by
 * ipw_get_status_code() for debug logging. */
static const struct ipw_status_code ipw_status_codes[] = {
	{0x00, "Successful"},
	{0x01, "Unspecified failure"},
	{0x0A, "Cannot support all requested capabilities in the "
	 "Capability information field"},
	{0x0B, "Reassociation denied due to inability to confirm that "
	 "association exists"},
	{0x0C, "Association denied due to reason outside the scope of this "
	 "standard"},
	{0x0D,
	 "Responding station does not support the specified authentication "
	 "algorithm"},
	{0x0E,
	 "Received an Authentication frame with authentication sequence "
	 "transaction sequence number out of expected sequence"},
	{0x0F, "Authentication rejected because of challenge failure"},
	{0x10, "Authentication rejected due to timeout waiting for next "
	 "frame in sequence"},
	{0x11, "Association denied because AP is unable to handle additional "
	 "associated stations"},
	{0x12,
	 "Association denied due to requesting station not supporting all "
	 "of the datarates in the BSSBasicServiceSet Parameter"},
	{0x13,
	 "Association denied due to requesting station not supporting "
	 "short preamble operation"},
	{0x14,
	 "Association denied due to requesting station not supporting "
	 "PBCC encoding"},
	{0x15,
	 "Association denied due to requesting station not supporting "
	 "channel agility"},
	{0x19,
	 "Association denied due to requesting station not supporting "
	 "short slot operation"},
	{0x1A,
	 "Association denied due to requesting station not supporting "
	 "DSSS-OFDM operation"},
	{0x28, "Invalid Information Element"},
	{0x29, "Group Cipher is not valid"},
	{0x2A, "Pairwise Cipher is not valid"},
	{0x2B, "AKMP is not valid"},
	{0x2C, "Unsupported RSN IE version"},
	{0x2D, "Invalid RSN IE Capabilities"},
	{0x2E, "Cipher suite is rejected per security policy"},
};
3757
#ifdef CONFIG_IPW2200_DEBUG
/* Look up the reason string for the low byte of an 802.11 status code. */
static const char *ipw_get_status_code(u16 status)
{
	u16 code = status & 0xff;
	int i;

	for (i = 0; i < ARRAY_SIZE(ipw_status_codes); i++) {
		if (ipw_status_codes[i].status == code)
			return ipw_status_codes[i].reason;
	}
	return "Unknown status value.";
}
#endif
3768
3769 static void inline average_init(struct average *avg)
3770 {
3771 memset(avg, 0, sizeof(*avg));
3772 }
3773
3774 static void average_add(struct average *avg, s16 val)
3775 {
3776 avg->sum -= avg->entries[avg->pos];
3777 avg->sum += val;
3778 avg->entries[avg->pos++] = val;
3779 if (unlikely(avg->pos == AVG_ENTRIES)) {
3780 avg->init = 1;
3781 avg->pos = 0;
3782 }
3783 }
3784
3785 static s16 average_value(struct average *avg)
3786 {
3787 if (!unlikely(avg->init)) {
3788 if (avg->pos)
3789 return avg->sum / avg->pos;
3790 return 0;
3791 }
3792
3793 return avg->sum / AVG_ENTRIES;
3794 }
3795
/*
 * Zero all link-quality statistics.  Counters the firmware manages
 * cannot be cleared from the host, so their current values are latched
 * as the new baseline for delta computation.
 */
static void ipw_reset_stats(struct ipw_priv *priv)
{
	u32 len = sizeof(u32);

	priv->quality = 0;

	average_init(&priv->average_missed_beacons);
	average_init(&priv->average_rssi);
	average_init(&priv->average_noise);

	priv->last_rate = 0;
	priv->last_missed_beacons = 0;
	priv->last_rx_packets = 0;
	priv->last_tx_packets = 0;
	priv->last_tx_failures = 0;

	/* Firmware managed, reset only when NIC is restarted, so we have to
	 * normalize on the current value */
	ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC,
			&priv->last_rx_err, &len);
	ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE,
			&priv->last_tx_failures, &len);

	/* Driver managed, reset with each association */
	priv->missed_adhoc_beacons = 0;
	priv->missed_beacons = 0;
	priv->tx_packets = 0;
	priv->rx_packets = 0;

}
3826
/*
 * Highest configured Tx rate, in bits per second.  Scans the rate mask
 * from the top bit downwards; when associated in B mode, only CCK rates
 * are considered.  Falls back to the mode's nominal maximum when no
 * known rate bit matches.
 */
static u32 ipw_get_max_rate(struct ipw_priv *priv)
{
	u32 i = 0x80000000;
	u32 mask = priv->rates_mask;
	/* If currently associated in B mode, restrict the maximum
	 * rate match to B rates */
	if (priv->assoc_request.ieee_mode == IPW_B_MODE)
		mask &= IEEE80211_CCK_RATES_MASK;

	/* TODO: Verify that the rate is supported by the current rates
	 * list. */

	/* find the highest set bit in the mask */
	while (i && !(mask & i))
		i >>= 1;
	switch (i) {
	case IEEE80211_CCK_RATE_1MB_MASK:
		return 1000000;
	case IEEE80211_CCK_RATE_2MB_MASK:
		return 2000000;
	case IEEE80211_CCK_RATE_5MB_MASK:
		return 5500000;
	case IEEE80211_OFDM_RATE_6MB_MASK:
		return 6000000;
	case IEEE80211_OFDM_RATE_9MB_MASK:
		return 9000000;
	case IEEE80211_CCK_RATE_11MB_MASK:
		return 11000000;
	case IEEE80211_OFDM_RATE_12MB_MASK:
		return 12000000;
	case IEEE80211_OFDM_RATE_18MB_MASK:
		return 18000000;
	case IEEE80211_OFDM_RATE_24MB_MASK:
		return 24000000;
	case IEEE80211_OFDM_RATE_36MB_MASK:
		return 36000000;
	case IEEE80211_OFDM_RATE_48MB_MASK:
		return 48000000;
	case IEEE80211_OFDM_RATE_54MB_MASK:
		return 54000000;
	}

	/* no recognized rate bit: assume the mode's nominal maximum */
	if (priv->ieee->mode == IEEE_B)
		return 11000000;
	else
		return 54000000;
}
3873
/*
 * Current Tx rate in bits per second, or 0 when not associated.  Until
 * enough packets have been sent for the firmware's rate ordinal to be
 * meaningful, the configured maximum rate is reported instead.
 */
static u32 ipw_get_current_rate(struct ipw_priv *priv)
{
	u32 rate, len = sizeof(rate);
	int err;

	if (!(priv->status & STATUS_ASSOCIATED))
		return 0;

	if (priv->tx_packets > IPW_REAL_RATE_RX_PACKET_THRESHOLD) {
		err = ipw_get_ordinal(priv, IPW_ORD_STAT_TX_CURR_RATE, &rate,
				      &len);
		if (err) {
			IPW_DEBUG_INFO("failed querying ordinals.\n");
			return 0;
		}
	} else
		return ipw_get_max_rate(priv);

	/* translate the firmware rate code to bits per second */
	switch (rate) {
	case IPW_TX_RATE_1MB:
		return 1000000;
	case IPW_TX_RATE_2MB:
		return 2000000;
	case IPW_TX_RATE_5MB:
		return 5500000;
	case IPW_TX_RATE_6MB:
		return 6000000;
	case IPW_TX_RATE_9MB:
		return 9000000;
	case IPW_TX_RATE_11MB:
		return 11000000;
	case IPW_TX_RATE_12MB:
		return 12000000;
	case IPW_TX_RATE_18MB:
		return 18000000;
	case IPW_TX_RATE_24MB:
		return 24000000;
	case IPW_TX_RATE_36MB:
		return 36000000;
	case IPW_TX_RATE_48MB:
		return 48000000;
	case IPW_TX_RATE_54MB:
		return 54000000;
	}

	/* unknown rate code */
	return 0;
}
3921
#define IPW_STATS_INTERVAL (2 * HZ)
/*
 * Periodic link-quality sampler.  Reads the firmware counters, computes
 * per-interval deltas against the cached 'last_*' values, derives five
 * quality metrics (beacons, rate, Rx, Tx, signal), stores the minimum in
 * priv->quality, and re-queues itself every IPW_STATS_INTERVAL while
 * associated.
 */
static void ipw_gather_stats(struct ipw_priv *priv)
{
	u32 rx_err, rx_err_delta, rx_packets_delta;
	u32 tx_failures, tx_failures_delta, tx_packets_delta;
	u32 missed_beacons_percent, missed_beacons_delta;
	u32 quality = 0;
	u32 len = sizeof(u32);
	s16 rssi;
	u32 beacon_quality, signal_quality, tx_quality, rx_quality,
	    rate_quality;
	u32 max_rate;

	if (!(priv->status & STATUS_ASSOCIATED)) {
		priv->quality = 0;
		return;
	}

	/* Update the statistics */
	ipw_get_ordinal(priv, IPW_ORD_STAT_MISSED_BEACONS,
			&priv->missed_beacons, &len);
	missed_beacons_delta = priv->missed_beacons - priv->last_missed_beacons;
	priv->last_missed_beacons = priv->missed_beacons;
	if (priv->assoc_request.beacon_interval) {
		missed_beacons_percent = missed_beacons_delta *
		    (HZ * priv->assoc_request.beacon_interval) /
		    (IPW_STATS_INTERVAL * 10);
	} else {
		missed_beacons_percent = 0;
	}
	average_add(&priv->average_missed_beacons, missed_beacons_percent);

	ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC, &rx_err, &len);
	rx_err_delta = rx_err - priv->last_rx_err;
	priv->last_rx_err = rx_err;

	ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE, &tx_failures, &len);
	tx_failures_delta = tx_failures - priv->last_tx_failures;
	priv->last_tx_failures = tx_failures;

	rx_packets_delta = priv->rx_packets - priv->last_rx_packets;
	priv->last_rx_packets = priv->rx_packets;

	tx_packets_delta = priv->tx_packets - priv->last_tx_packets;
	priv->last_tx_packets = priv->tx_packets;

	/* Calculate quality based on the following:
	 *
	 * Missed beacon: 100% = 0, 0% = 70% missed
	 * Rate: 60% = 1Mbs, 100% = Max
	 * Rx and Tx errors represent a straight % of total Rx/Tx
	 * RSSI: 100% = > -50, 0% = < -80
	 * Rx errors: 100% = 0, 0% = 50% missed
	 *
	 * The lowest computed quality is used.
	 *
	 */
#define BEACON_THRESHOLD 5
	beacon_quality = 100 - missed_beacons_percent;
	if (beacon_quality < BEACON_THRESHOLD)
		beacon_quality = 0;
	else
		beacon_quality = (beacon_quality - BEACON_THRESHOLD) * 100 /
		    (100 - BEACON_THRESHOLD);
	IPW_DEBUG_STATS("Missed beacon: %3d%% (%d%%)\n",
			beacon_quality, missed_beacons_percent);

	priv->last_rate = ipw_get_current_rate(priv);
	max_rate = ipw_get_max_rate(priv);
	rate_quality = priv->last_rate * 40 / max_rate + 60;
	IPW_DEBUG_STATS("Rate quality : %3d%% (%dMbs)\n",
			rate_quality, priv->last_rate / 1000000);

	if (rx_packets_delta > 100 && rx_packets_delta + rx_err_delta)
		rx_quality = 100 - (rx_err_delta * 100) /
		    (rx_packets_delta + rx_err_delta);
	else
		rx_quality = 100;
	IPW_DEBUG_STATS("Rx quality : %3d%% (%u errors, %u packets)\n",
			rx_quality, rx_err_delta, rx_packets_delta);

	if (tx_packets_delta > 100 && tx_packets_delta + tx_failures_delta)
		tx_quality = 100 - (tx_failures_delta * 100) /
		    (tx_packets_delta + tx_failures_delta);
	else
		tx_quality = 100;
	IPW_DEBUG_STATS("Tx quality : %3d%% (%u errors, %u packets)\n",
			tx_quality, tx_failures_delta, tx_packets_delta);

	/* quadratic mapping of RSSI onto 0..100 between worst_rssi and
	 * perfect_rssi, then clamped */
	rssi = average_value(&priv->average_rssi);
	signal_quality =
	    (100 *
	     (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
	     (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) -
	     (priv->ieee->perfect_rssi - rssi) *
	     (15 * (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) +
	      62 * (priv->ieee->perfect_rssi - rssi))) /
	    ((priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
	     (priv->ieee->perfect_rssi - priv->ieee->worst_rssi));
	if (signal_quality > 100)
		signal_quality = 100;
	else if (signal_quality < 1)
		signal_quality = 0;

	IPW_DEBUG_STATS("Signal level : %3d%% (%d dBm)\n",
			signal_quality, rssi);

	/* the overall quality is the worst of the five metrics */
	quality = min(beacon_quality,
		      min(rate_quality,
			  min(tx_quality, min(rx_quality, signal_quality))));
	if (quality == beacon_quality)
		IPW_DEBUG_STATS("Quality (%d%%): Clamped to missed beacons.\n",
				quality);
	if (quality == rate_quality)
		IPW_DEBUG_STATS("Quality (%d%%): Clamped to rate quality.\n",
				quality);
	if (quality == tx_quality)
		IPW_DEBUG_STATS("Quality (%d%%): Clamped to Tx quality.\n",
				quality);
	if (quality == rx_quality)
		IPW_DEBUG_STATS("Quality (%d%%): Clamped to Rx quality.\n",
				quality);
	if (quality == signal_quality)
		IPW_DEBUG_STATS("Quality (%d%%): Clamped to signal quality.\n",
				quality);

	priv->quality = quality;

	/* re-arm ourselves for the next sampling interval */
	queue_delayed_work(priv->workqueue, &priv->gather_stats,
			   IPW_STATS_INTERVAL);
}
4053
4054 static void ipw_bg_gather_stats(void *data)
4055 {
4056 struct ipw_priv *priv = data;
4057 mutex_lock(&priv->mutex);
4058 ipw_gather_stats(data);
4059 mutex_unlock(&priv->mutex);
4060 }
4061
/* Missed beacon behavior:
 * 1st missed -> roaming_threshold, just wait, don't do any scan/roam.
 * roaming_threshold -> disassociate_threshold, scan and roam for better signal.
 * Above disassociate threshold, give up and stop scanning.
 * Roaming is disabled if disassociate_threshold <= roaming_threshold */
static void ipw_handle_missed_beacon(struct ipw_priv *priv,
				     int missed_count)
{
	priv->notif_missed_beacons = missed_count;

	if (missed_count > priv->disassociate_threshold &&
	    priv->status & STATUS_ASSOCIATED) {
		/* If associated and we've hit the missed
		 * beacon threshold, disassociate, turn
		 * off roaming, and abort any active scans */
		IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
			  IPW_DL_STATE | IPW_DL_ASSOC,
			  "Missed beacon: %d - disassociate\n", missed_count);
		priv->status &= ~STATUS_ROAMING;
		if (priv->status & STATUS_SCANNING) {
			IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
				  IPW_DL_STATE,
				  "Aborting scan with missed beacon.\n");
			queue_work(priv->workqueue, &priv->abort_scan);
		}

		queue_work(priv->workqueue, &priv->disassociate);
		return;
	}

	if (priv->status & STATUS_ROAMING) {
		/* If we are currently roaming, then just
		 * print a debug statement... */
		IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
			  "Missed beacon: %d - roam in progress\n",
			  missed_count);
		return;
	}

	/* 'roaming' here is the module parameter enabling the feature */
	if (roaming &&
	    (missed_count > priv->roaming_threshold &&
	     missed_count <= priv->disassociate_threshold)) {
		/* If we are not already roaming, set the ROAM
		 * bit in the status and kick off a scan.
		 * This can happen several times before we reach
		 * disassociate_threshold. */
		IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
			  "Missed beacon: %d - initiate "
			  "roaming\n", missed_count);
		if (!(priv->status & STATUS_ROAMING)) {
			priv->status |= STATUS_ROAMING;
			if (!(priv->status & STATUS_SCANNING))
				queue_work(priv->workqueue,
					   &priv->request_scan);
		}
		return;
	}

	if (priv->status & STATUS_SCANNING) {
		/* Stop scan to keep fw from getting
		 * stuck (only if we aren't roaming --
		 * otherwise we'll never scan more than 2 or 3
		 * channels..) */
		IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | IPW_DL_STATE,
			  "Aborting scan with missed beacon.\n");
		queue_work(priv->workqueue, &priv->abort_scan);
	}

	IPW_DEBUG_NOTIF("Missed beacon: %d\n", missed_count);
}
4132
4133 /**
4134 * Handle host notification packet.
4135 * Called from interrupt routine
4136 */
4137 static void ipw_rx_notification(struct ipw_priv *priv,
4138 struct ipw_rx_notification *notif)
4139 {
4140 notif->size = le16_to_cpu(notif->size);
4141
4142 IPW_DEBUG_NOTIF("type = %i (%d bytes)\n", notif->subtype, notif->size);
4143
4144 switch (notif->subtype) {
4145 case HOST_NOTIFICATION_STATUS_ASSOCIATED:{
4146 struct notif_association *assoc = &notif->u.assoc;
4147
4148 switch (assoc->state) {
4149 case CMAS_ASSOCIATED:{
4150 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4151 IPW_DL_ASSOC,
4152 "associated: '%s' " MAC_FMT
4153 " \n",
4154 escape_essid(priv->essid,
4155 priv->essid_len),
4156 MAC_ARG(priv->bssid));
4157
4158 switch (priv->ieee->iw_mode) {
4159 case IW_MODE_INFRA:
4160 memcpy(priv->ieee->bssid,
4161 priv->bssid, ETH_ALEN);
4162 break;
4163
4164 case IW_MODE_ADHOC:
4165 memcpy(priv->ieee->bssid,
4166 priv->bssid, ETH_ALEN);
4167
4168 /* clear out the station table */
4169 priv->num_stations = 0;
4170
4171 IPW_DEBUG_ASSOC
4172 ("queueing adhoc check\n");
4173 queue_delayed_work(priv->
4174 workqueue,
4175 &priv->
4176 adhoc_check,
4177 priv->
4178 assoc_request.
4179 beacon_interval);
4180 break;
4181 }
4182
4183 priv->status &= ~STATUS_ASSOCIATING;
4184 priv->status |= STATUS_ASSOCIATED;
4185 queue_work(priv->workqueue,
4186 &priv->system_config);
4187
4188 #ifdef CONFIG_IPW_QOS
4189 #define IPW_GET_PACKET_STYPE(x) WLAN_FC_GET_STYPE( \
4190 le16_to_cpu(((struct ieee80211_hdr *)(x))->frame_ctl))
4191 if ((priv->status & STATUS_AUTH) &&
4192 (IPW_GET_PACKET_STYPE(&notif->u.raw)
4193 == IEEE80211_STYPE_ASSOC_RESP)) {
4194 if ((sizeof
4195 (struct
4196 ieee80211_assoc_response)
4197 <= notif->size)
4198 && (notif->size <= 2314)) {
4199 struct
4200 ieee80211_rx_stats
4201 stats = {
4202 .len =
4203 notif->
4204 size - 1,
4205 };
4206
4207 IPW_DEBUG_QOS
4208 ("QoS Associate "
4209 "size %d\n",
4210 notif->size);
4211 ieee80211_rx_mgt(priv->
4212 ieee,
4213 (struct
4214 ieee80211_hdr_4addr
4215 *)
4216 &notif->u.raw, &stats);
4217 }
4218 }
4219 #endif
4220
4221 schedule_work(&priv->link_up);
4222
4223 break;
4224 }
4225
4226 case CMAS_AUTHENTICATED:{
4227 if (priv->
4228 status & (STATUS_ASSOCIATED |
4229 STATUS_AUTH)) {
4230 #ifdef CONFIG_IPW2200_DEBUG
4231 struct notif_authenticate *auth
4232 = &notif->u.auth;
4233 IPW_DEBUG(IPW_DL_NOTIF |
4234 IPW_DL_STATE |
4235 IPW_DL_ASSOC,
4236 "deauthenticated: '%s' "
4237 MAC_FMT
4238 ": (0x%04X) - %s \n",
4239 escape_essid(priv->
4240 essid,
4241 priv->
4242 essid_len),
4243 MAC_ARG(priv->bssid),
4244 ntohs(auth->status),
4245 ipw_get_status_code
4246 (ntohs
4247 (auth->status)));
4248 #endif
4249
4250 priv->status &=
4251 ~(STATUS_ASSOCIATING |
4252 STATUS_AUTH |
4253 STATUS_ASSOCIATED);
4254
4255 schedule_work(&priv->link_down);
4256 break;
4257 }
4258
4259 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4260 IPW_DL_ASSOC,
4261 "authenticated: '%s' " MAC_FMT
4262 "\n",
4263 escape_essid(priv->essid,
4264 priv->essid_len),
4265 MAC_ARG(priv->bssid));
4266 break;
4267 }
4268
4269 case CMAS_INIT:{
4270 if (priv->status & STATUS_AUTH) {
4271 struct
4272 ieee80211_assoc_response
4273 *resp;
4274 resp =
4275 (struct
4276 ieee80211_assoc_response
4277 *)&notif->u.raw;
4278 IPW_DEBUG(IPW_DL_NOTIF |
4279 IPW_DL_STATE |
4280 IPW_DL_ASSOC,
4281 "association failed (0x%04X): %s\n",
4282 ntohs(resp->status),
4283 ipw_get_status_code
4284 (ntohs
4285 (resp->status)));
4286 }
4287
4288 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4289 IPW_DL_ASSOC,
4290 "disassociated: '%s' " MAC_FMT
4291 " \n",
4292 escape_essid(priv->essid,
4293 priv->essid_len),
4294 MAC_ARG(priv->bssid));
4295
4296 priv->status &=
4297 ~(STATUS_DISASSOCIATING |
4298 STATUS_ASSOCIATING |
4299 STATUS_ASSOCIATED | STATUS_AUTH);
4300 if (priv->assoc_network
4301 && (priv->assoc_network->
4302 capability &
4303 WLAN_CAPABILITY_IBSS))
4304 ipw_remove_current_network
4305 (priv);
4306
4307 schedule_work(&priv->link_down);
4308
4309 break;
4310 }
4311
4312 case CMAS_RX_ASSOC_RESP:
4313 break;
4314
4315 default:
4316 IPW_ERROR("assoc: unknown (%d)\n",
4317 assoc->state);
4318 break;
4319 }
4320
4321 break;
4322 }
4323
4324 case HOST_NOTIFICATION_STATUS_AUTHENTICATE:{
4325 struct notif_authenticate *auth = &notif->u.auth;
4326 switch (auth->state) {
4327 case CMAS_AUTHENTICATED:
4328 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4329 "authenticated: '%s' " MAC_FMT " \n",
4330 escape_essid(priv->essid,
4331 priv->essid_len),
4332 MAC_ARG(priv->bssid));
4333 priv->status |= STATUS_AUTH;
4334 break;
4335
4336 case CMAS_INIT:
4337 if (priv->status & STATUS_AUTH) {
4338 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4339 IPW_DL_ASSOC,
4340 "authentication failed (0x%04X): %s\n",
4341 ntohs(auth->status),
4342 ipw_get_status_code(ntohs
4343 (auth->
4344 status)));
4345 }
4346 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4347 IPW_DL_ASSOC,
4348 "deauthenticated: '%s' " MAC_FMT "\n",
4349 escape_essid(priv->essid,
4350 priv->essid_len),
4351 MAC_ARG(priv->bssid));
4352
4353 priv->status &= ~(STATUS_ASSOCIATING |
4354 STATUS_AUTH |
4355 STATUS_ASSOCIATED);
4356
4357 schedule_work(&priv->link_down);
4358 break;
4359
4360 case CMAS_TX_AUTH_SEQ_1:
4361 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4362 IPW_DL_ASSOC, "AUTH_SEQ_1\n");
4363 break;
4364 case CMAS_RX_AUTH_SEQ_2:
4365 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4366 IPW_DL_ASSOC, "AUTH_SEQ_2\n");
4367 break;
4368 case CMAS_AUTH_SEQ_1_PASS:
4369 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4370 IPW_DL_ASSOC, "AUTH_SEQ_1_PASS\n");
4371 break;
4372 case CMAS_AUTH_SEQ_1_FAIL:
4373 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4374 IPW_DL_ASSOC, "AUTH_SEQ_1_FAIL\n");
4375 break;
4376 case CMAS_TX_AUTH_SEQ_3:
4377 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4378 IPW_DL_ASSOC, "AUTH_SEQ_3\n");
4379 break;
4380 case CMAS_RX_AUTH_SEQ_4:
4381 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4382 IPW_DL_ASSOC, "RX_AUTH_SEQ_4\n");
4383 break;
4384 case CMAS_AUTH_SEQ_2_PASS:
4385 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4386 IPW_DL_ASSOC, "AUTH_SEQ_2_PASS\n");
4387 break;
4388 case CMAS_AUTH_SEQ_2_FAIL:
4389 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4390 IPW_DL_ASSOC, "AUT_SEQ_2_FAIL\n");
4391 break;
4392 case CMAS_TX_ASSOC:
4393 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4394 IPW_DL_ASSOC, "TX_ASSOC\n");
4395 break;
4396 case CMAS_RX_ASSOC_RESP:
4397 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4398 IPW_DL_ASSOC, "RX_ASSOC_RESP\n");
4399
4400 break;
4401 case CMAS_ASSOCIATED:
4402 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4403 IPW_DL_ASSOC, "ASSOCIATED\n");
4404 break;
4405 default:
4406 IPW_DEBUG_NOTIF("auth: failure - %d\n",
4407 auth->state);
4408 break;
4409 }
4410 break;
4411 }
4412
4413 case HOST_NOTIFICATION_STATUS_SCAN_CHANNEL_RESULT:{
4414 struct notif_channel_result *x =
4415 &notif->u.channel_result;
4416
4417 if (notif->size == sizeof(*x)) {
4418 IPW_DEBUG_SCAN("Scan result for channel %d\n",
4419 x->channel_num);
4420 } else {
4421 IPW_DEBUG_SCAN("Scan result of wrong size %d "
4422 "(should be %zd)\n",
4423 notif->size, sizeof(*x));
4424 }
4425 break;
4426 }
4427
4428 case HOST_NOTIFICATION_STATUS_SCAN_COMPLETED:{
4429 struct notif_scan_complete *x = &notif->u.scan_complete;
4430 if (notif->size == sizeof(*x)) {
4431 IPW_DEBUG_SCAN
4432 ("Scan completed: type %d, %d channels, "
4433 "%d status\n", x->scan_type,
4434 x->num_channels, x->status);
4435 } else {
4436 IPW_ERROR("Scan completed of wrong size %d "
4437 "(should be %zd)\n",
4438 notif->size, sizeof(*x));
4439 }
4440
4441 priv->status &=
4442 ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
4443
4444 wake_up_interruptible(&priv->wait_state);
4445 cancel_delayed_work(&priv->scan_check);
4446
4447 if (priv->status & STATUS_EXIT_PENDING)
4448 break;
4449
4450 priv->ieee->scans++;
4451
4452 #ifdef CONFIG_IPW2200_MONITOR
4453 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
4454 priv->status |= STATUS_SCAN_FORCED;
4455 queue_work(priv->workqueue,
4456 &priv->request_scan);
4457 break;
4458 }
4459 priv->status &= ~STATUS_SCAN_FORCED;
4460 #endif /* CONFIG_IPW2200_MONITOR */
4461
4462 if (!(priv->status & (STATUS_ASSOCIATED |
4463 STATUS_ASSOCIATING |
4464 STATUS_ROAMING |
4465 STATUS_DISASSOCIATING)))
4466 queue_work(priv->workqueue, &priv->associate);
4467 else if (priv->status & STATUS_ROAMING) {
4468 if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
4469 /* If a scan completed and we are in roam mode, then
4470 * the scan that completed was the one requested as a
4471 * result of entering roam... so, schedule the
4472 * roam work */
4473 queue_work(priv->workqueue,
4474 &priv->roam);
4475 else
4476 /* Don't schedule if we aborted the scan */
4477 priv->status &= ~STATUS_ROAMING;
4478 } else if (priv->status & STATUS_SCAN_PENDING)
4479 queue_work(priv->workqueue,
4480 &priv->request_scan);
4481 else if (priv->config & CFG_BACKGROUND_SCAN
4482 && priv->status & STATUS_ASSOCIATED)
4483 queue_delayed_work(priv->workqueue,
4484 &priv->request_scan, HZ);
4485 break;
4486 }
4487
4488 case HOST_NOTIFICATION_STATUS_FRAG_LENGTH:{
4489 struct notif_frag_length *x = &notif->u.frag_len;
4490
4491 if (notif->size == sizeof(*x))
4492 IPW_ERROR("Frag length: %d\n",
4493 le16_to_cpu(x->frag_length));
4494 else
4495 IPW_ERROR("Frag length of wrong size %d "
4496 "(should be %zd)\n",
4497 notif->size, sizeof(*x));
4498 break;
4499 }
4500
4501 case HOST_NOTIFICATION_STATUS_LINK_DETERIORATION:{
4502 struct notif_link_deterioration *x =
4503 &notif->u.link_deterioration;
4504
4505 if (notif->size == sizeof(*x)) {
4506 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4507 "link deterioration: type %d, cnt %d\n",
4508 x->silence_notification_type,
4509 x->silence_count);
4510 memcpy(&priv->last_link_deterioration, x,
4511 sizeof(*x));
4512 } else {
4513 IPW_ERROR("Link Deterioration of wrong size %d "
4514 "(should be %zd)\n",
4515 notif->size, sizeof(*x));
4516 }
4517 break;
4518 }
4519
4520 case HOST_NOTIFICATION_DINO_CONFIG_RESPONSE:{
4521 IPW_ERROR("Dino config\n");
4522 if (priv->hcmd
4523 && priv->hcmd->cmd != HOST_CMD_DINO_CONFIG)
4524 IPW_ERROR("Unexpected DINO_CONFIG_RESPONSE\n");
4525
4526 break;
4527 }
4528
4529 case HOST_NOTIFICATION_STATUS_BEACON_STATE:{
4530 struct notif_beacon_state *x = &notif->u.beacon_state;
4531 if (notif->size != sizeof(*x)) {
4532 IPW_ERROR
4533 ("Beacon state of wrong size %d (should "
4534 "be %zd)\n", notif->size, sizeof(*x));
4535 break;
4536 }
4537
4538 if (le32_to_cpu(x->state) ==
4539 HOST_NOTIFICATION_STATUS_BEACON_MISSING)
4540 ipw_handle_missed_beacon(priv,
4541 le32_to_cpu(x->
4542 number));
4543
4544 break;
4545 }
4546
4547 case HOST_NOTIFICATION_STATUS_TGI_TX_KEY:{
4548 struct notif_tgi_tx_key *x = &notif->u.tgi_tx_key;
4549 if (notif->size == sizeof(*x)) {
4550 IPW_ERROR("TGi Tx Key: state 0x%02x sec type "
4551 "0x%02x station %d\n",
4552 x->key_state, x->security_type,
4553 x->station_index);
4554 break;
4555 }
4556
4557 IPW_ERROR
4558 ("TGi Tx Key of wrong size %d (should be %zd)\n",
4559 notif->size, sizeof(*x));
4560 break;
4561 }
4562
4563 case HOST_NOTIFICATION_CALIB_KEEP_RESULTS:{
4564 struct notif_calibration *x = &notif->u.calibration;
4565
4566 if (notif->size == sizeof(*x)) {
4567 memcpy(&priv->calib, x, sizeof(*x));
4568 IPW_DEBUG_INFO("TODO: Calibration\n");
4569 break;
4570 }
4571
4572 IPW_ERROR
4573 ("Calibration of wrong size %d (should be %zd)\n",
4574 notif->size, sizeof(*x));
4575 break;
4576 }
4577
4578 case HOST_NOTIFICATION_NOISE_STATS:{
4579 if (notif->size == sizeof(u32)) {
4580 priv->last_noise =
4581 (u8) (le32_to_cpu(notif->u.noise.value) &
4582 0xff);
4583 average_add(&priv->average_noise,
4584 priv->last_noise);
4585 break;
4586 }
4587
4588 IPW_ERROR
4589 ("Noise stat is wrong size %d (should be %zd)\n",
4590 notif->size, sizeof(u32));
4591 break;
4592 }
4593
4594 default:
4595 IPW_DEBUG_NOTIF("Unknown notification: "
4596 "subtype=%d,flags=0x%2x,size=%d\n",
4597 notif->subtype, notif->flags, notif->size);
4598 }
4599 }
4600
4601 /**
4602 * Destroys all DMA structures and initialise them again
4603 *
4604 * @param priv
4605 * @return error code
4606 */
4607 static int ipw_queue_reset(struct ipw_priv *priv)
4608 {
4609 int rc = 0;
4610 /** @todo customize queue sizes */
4611 int nTx = 64, nTxCmd = 8;
4612 ipw_tx_queue_free(priv);
4613 /* Tx CMD queue */
4614 rc = ipw_queue_tx_init(priv, &priv->txq_cmd, nTxCmd,
4615 IPW_TX_CMD_QUEUE_READ_INDEX,
4616 IPW_TX_CMD_QUEUE_WRITE_INDEX,
4617 IPW_TX_CMD_QUEUE_BD_BASE,
4618 IPW_TX_CMD_QUEUE_BD_SIZE);
4619 if (rc) {
4620 IPW_ERROR("Tx Cmd queue init failed\n");
4621 goto error;
4622 }
4623 /* Tx queue(s) */
4624 rc = ipw_queue_tx_init(priv, &priv->txq[0], nTx,
4625 IPW_TX_QUEUE_0_READ_INDEX,
4626 IPW_TX_QUEUE_0_WRITE_INDEX,
4627 IPW_TX_QUEUE_0_BD_BASE, IPW_TX_QUEUE_0_BD_SIZE);
4628 if (rc) {
4629 IPW_ERROR("Tx 0 queue init failed\n");
4630 goto error;
4631 }
4632 rc = ipw_queue_tx_init(priv, &priv->txq[1], nTx,
4633 IPW_TX_QUEUE_1_READ_INDEX,
4634 IPW_TX_QUEUE_1_WRITE_INDEX,
4635 IPW_TX_QUEUE_1_BD_BASE, IPW_TX_QUEUE_1_BD_SIZE);
4636 if (rc) {
4637 IPW_ERROR("Tx 1 queue init failed\n");
4638 goto error;
4639 }
4640 rc = ipw_queue_tx_init(priv, &priv->txq[2], nTx,
4641 IPW_TX_QUEUE_2_READ_INDEX,
4642 IPW_TX_QUEUE_2_WRITE_INDEX,
4643 IPW_TX_QUEUE_2_BD_BASE, IPW_TX_QUEUE_2_BD_SIZE);
4644 if (rc) {
4645 IPW_ERROR("Tx 2 queue init failed\n");
4646 goto error;
4647 }
4648 rc = ipw_queue_tx_init(priv, &priv->txq[3], nTx,
4649 IPW_TX_QUEUE_3_READ_INDEX,
4650 IPW_TX_QUEUE_3_WRITE_INDEX,
4651 IPW_TX_QUEUE_3_BD_BASE, IPW_TX_QUEUE_3_BD_SIZE);
4652 if (rc) {
4653 IPW_ERROR("Tx 3 queue init failed\n");
4654 goto error;
4655 }
4656 /* statistics */
4657 priv->rx_bufs_min = 0;
4658 priv->rx_pend_max = 0;
4659 return rc;
4660
4661 error:
4662 ipw_tx_queue_free(priv);
4663 return rc;
4664 }
4665
4666 /**
4667 * Reclaim Tx queue entries no more used by NIC.
4668 *
 * When FW advances the 'R' index, all entries between old and
4670 * new 'R' index need to be reclaimed. As result, some free space
4671 * forms. If there is enough free space (> low mark), wake Tx queue.
4672 *
4673 * @note Need to protect against garbage in 'R' index
4674 * @param priv
4675 * @param txq
4676 * @param qindex
4677 * @return Number of used entries remains in the queue
4678 */
4679 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
4680 struct clx2_tx_queue *txq, int qindex)
4681 {
4682 u32 hw_tail;
4683 int used;
4684 struct clx2_queue *q = &txq->q;
4685
4686 hw_tail = ipw_read32(priv, q->reg_r);
4687 if (hw_tail >= q->n_bd) {
4688 IPW_ERROR
4689 ("Read index for DMA queue (%d) is out of range [0-%d)\n",
4690 hw_tail, q->n_bd);
4691 goto done;
4692 }
4693 for (; q->last_used != hw_tail;
4694 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
4695 ipw_queue_tx_free_tfd(priv, txq);
4696 priv->tx_packets++;
4697 }
4698 done:
4699 if ((ipw_queue_space(q) > q->low_mark) &&
4700 (qindex >= 0) &&
4701 (priv->status & STATUS_ASSOCIATED) && netif_running(priv->net_dev))
4702 netif_wake_queue(priv->net_dev);
4703 used = q->first_empty - q->last_used;
4704 if (used < 0)
4705 used += q->n_bd;
4706
4707 return used;
4708 }
4709
4710 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
4711 int len, int sync)
4712 {
4713 struct clx2_tx_queue *txq = &priv->txq_cmd;
4714 struct clx2_queue *q = &txq->q;
4715 struct tfd_frame *tfd;
4716
4717 if (ipw_queue_space(q) < (sync ? 1 : 2)) {
4718 IPW_ERROR("No space for Tx\n");
4719 return -EBUSY;
4720 }
4721
4722 tfd = &txq->bd[q->first_empty];
4723 txq->txb[q->first_empty] = NULL;
4724
4725 memset(tfd, 0, sizeof(*tfd));
4726 tfd->control_flags.message_type = TX_HOST_COMMAND_TYPE;
4727 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
4728 priv->hcmd_seq++;
4729 tfd->u.cmd.index = hcmd;
4730 tfd->u.cmd.length = len;
4731 memcpy(tfd->u.cmd.payload, buf, len);
4732 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
4733 ipw_write32(priv, q->reg_w, q->first_empty);
4734 _ipw_read32(priv, 0x90);
4735
4736 return 0;
4737 }
4738
4739 /*
4740 * Rx theory of operation
4741 *
4742 * The host allocates 32 DMA target addresses and passes the host address
4743 * to the firmware at register IPW_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
4744 * 0 to 31
4745 *
4746 * Rx Queue Indexes
4747 * The host/firmware share two index registers for managing the Rx buffers.
4748 *
4749 * The READ index maps to the first position that the firmware may be writing
4750 * to -- the driver can read up to (but not including) this position and get
4751 * good data.
4752 * The READ index is managed by the firmware once the card is enabled.
4753 *
4754 * The WRITE index maps to the last position the driver has read from -- the
4755 * position preceding WRITE is the last slot the firmware can place a packet.
4756 *
4757 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
4758 * WRITE = READ.
4759 *
4760 * During initialization the host sets up the READ queue position to the first
4761 * INDEX position, and WRITE to the last (READ - 1 wrapped)
4762 *
4763 * When the firmware places a packet in a buffer it will advance the READ index
4764 * and fire the RX interrupt. The driver can then query the READ index and
4765 * process as many packets as possible, moving the WRITE index forward as it
4766 * resets the Rx queue buffers with new memory.
4767 *
4768 * The management in the driver is as follows:
4769 * + A list of pre-allocated SKBs is stored in ipw->rxq->rx_free. When
4770 * ipw->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
 *   to replenish the ipw->rxq->rx_free.
4772 * + In ipw_rx_queue_replenish (scheduled) if 'processed' != 'read' then the
4773 * ipw->rxq is replenished and the READ INDEX is updated (updating the
4774 * 'processed' and 'read' driver indexes as well)
4775 * + A received packet is processed and handed to the kernel network stack,
4776 * detached from the ipw->rxq. The driver 'processed' index is updated.
4777 * + The Host/Firmware ipw->rxq is replenished at tasklet time from the rx_free
4778 * list. If there are no allocated buffers in ipw->rxq->rx_free, the READ
4779 * INDEX is not incremented and ipw->status(RX_STALLED) is set. If there
4780 * were enough free buffers and RX_STALLED is set it is cleared.
4781 *
4782 *
4783 * Driver sequence:
4784 *
4785 * ipw_rx_queue_alloc() Allocates rx_free
4786 * ipw_rx_queue_replenish() Replenishes rx_free list from rx_used, and calls
4787 * ipw_rx_queue_restock
4788 * ipw_rx_queue_restock() Moves available buffers from rx_free into Rx
4789 * queue, updates firmware pointers, and updates
4790 * the WRITE index. If insufficient rx_free buffers
4791 * are available, schedules ipw_rx_queue_replenish
4792 *
4793 * -- enable interrupts --
4794 * ISR - ipw_rx() Detach ipw_rx_mem_buffers from pool up to the
4795 * READ INDEX, detaching the SKB from the pool.
4796 * Moves the packet buffer from queue to rx_used.
4797 * Calls ipw_rx_queue_restock to refill any empty
4798 * slots.
4799 * ...
4800 *
4801 */
4802
4803 /*
4804 * If there are slots in the RX queue that need to be restocked,
4805 * and we have free pre-allocated buffers, fill the ranks as much
4806 * as we can pulling from rx_free.
4807 *
4808 * This moves the 'write' index forward to catch up with 'processed', and
4809 * also updates the memory address in the firmware to reference the new
4810 * target buffer.
4811 */
4812 static void ipw_rx_queue_restock(struct ipw_priv *priv)
4813 {
4814 struct ipw_rx_queue *rxq = priv->rxq;
4815 struct list_head *element;
4816 struct ipw_rx_mem_buffer *rxb;
4817 unsigned long flags;
4818 int write;
4819
4820 spin_lock_irqsave(&rxq->lock, flags);
4821 write = rxq->write;
4822 while ((rxq->write != rxq->processed) && (rxq->free_count)) {
4823 element = rxq->rx_free.next;
4824 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
4825 list_del(element);
4826
4827 ipw_write32(priv, IPW_RFDS_TABLE_LOWER + rxq->write * RFD_SIZE,
4828 rxb->dma_addr);
4829 rxq->queue[rxq->write] = rxb;
4830 rxq->write = (rxq->write + 1) % RX_QUEUE_SIZE;
4831 rxq->free_count--;
4832 }
4833 spin_unlock_irqrestore(&rxq->lock, flags);
4834
4835 /* If the pre-allocated buffer pool is dropping low, schedule to
4836 * refill it */
4837 if (rxq->free_count <= RX_LOW_WATERMARK)
4838 queue_work(priv->workqueue, &priv->rx_replenish);
4839
4840 /* If we've added more space for the firmware to place data, tell it */
4841 if (write != rxq->write)
4842 ipw_write32(priv, IPW_RX_WRITE_INDEX, rxq->write);
4843 }
4844
4845 /*
4846 * Move all used packet from rx_used to rx_free, allocating a new SKB for each.
4847 * Also restock the Rx queue via ipw_rx_queue_restock.
4848 *
 * This is called as a scheduled work item (except for during initialization)
4850 */
4851 static void ipw_rx_queue_replenish(void *data)
4852 {
4853 struct ipw_priv *priv = data;
4854 struct ipw_rx_queue *rxq = priv->rxq;
4855 struct list_head *element;
4856 struct ipw_rx_mem_buffer *rxb;
4857 unsigned long flags;
4858
4859 spin_lock_irqsave(&rxq->lock, flags);
4860 while (!list_empty(&rxq->rx_used)) {
4861 element = rxq->rx_used.next;
4862 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
4863 rxb->skb = alloc_skb(IPW_RX_BUF_SIZE, GFP_ATOMIC);
4864 if (!rxb->skb) {
4865 printk(KERN_CRIT "%s: Can not allocate SKB buffers.\n",
4866 priv->net_dev->name);
4867 /* We don't reschedule replenish work here -- we will
4868 * call the restock method and if it still needs
4869 * more buffers it will schedule replenish */
4870 break;
4871 }
4872 list_del(element);
4873
4874 rxb->rxb = (struct ipw_rx_buffer *)rxb->skb->data;
4875 rxb->dma_addr =
4876 pci_map_single(priv->pci_dev, rxb->skb->data,
4877 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
4878
4879 list_add_tail(&rxb->list, &rxq->rx_free);
4880 rxq->free_count++;
4881 }
4882 spin_unlock_irqrestore(&rxq->lock, flags);
4883
4884 ipw_rx_queue_restock(priv);
4885 }
4886
4887 static void ipw_bg_rx_queue_replenish(void *data)
4888 {
4889 struct ipw_priv *priv = data;
4890 mutex_lock(&priv->mutex);
4891 ipw_rx_queue_replenish(data);
4892 mutex_unlock(&priv->mutex);
4893 }
4894
4895 /* Assumes that the skb field of the buffers in 'pool' is kept accurate.
4896 * If an SKB has been detached, the POOL needs to have its SKB set to NULL
4897 * This free routine walks the list of POOL entries and if SKB is set to
4898 * non NULL it is unmapped and freed
4899 */
4900 static void ipw_rx_queue_free(struct ipw_priv *priv, struct ipw_rx_queue *rxq)
4901 {
4902 int i;
4903
4904 if (!rxq)
4905 return;
4906
4907 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
4908 if (rxq->pool[i].skb != NULL) {
4909 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
4910 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
4911 dev_kfree_skb(rxq->pool[i].skb);
4912 }
4913 }
4914
4915 kfree(rxq);
4916 }
4917
4918 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *priv)
4919 {
4920 struct ipw_rx_queue *rxq;
4921 int i;
4922
4923 rxq = kzalloc(sizeof(*rxq), GFP_KERNEL);
4924 if (unlikely(!rxq)) {
4925 IPW_ERROR("memory allocation failed\n");
4926 return NULL;
4927 }
4928 spin_lock_init(&rxq->lock);
4929 INIT_LIST_HEAD(&rxq->rx_free);
4930 INIT_LIST_HEAD(&rxq->rx_used);
4931
4932 /* Fill the rx_used queue with _all_ of the Rx buffers */
4933 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
4934 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
4935
4936 /* Set us so that we have processed and used all buffers, but have
4937 * not restocked the Rx queue with fresh buffers */
4938 rxq->read = rxq->write = 0;
4939 rxq->processed = RX_QUEUE_SIZE - 1;
4940 rxq->free_count = 0;
4941
4942 return rxq;
4943 }
4944
4945 static int ipw_is_rate_in_mask(struct ipw_priv *priv, int ieee_mode, u8 rate)
4946 {
4947 rate &= ~IEEE80211_BASIC_RATE_MASK;
4948 if (ieee_mode == IEEE_A) {
4949 switch (rate) {
4950 case IEEE80211_OFDM_RATE_6MB:
4951 return priv->rates_mask & IEEE80211_OFDM_RATE_6MB_MASK ?
4952 1 : 0;
4953 case IEEE80211_OFDM_RATE_9MB:
4954 return priv->rates_mask & IEEE80211_OFDM_RATE_9MB_MASK ?
4955 1 : 0;
4956 case IEEE80211_OFDM_RATE_12MB:
4957 return priv->
4958 rates_mask & IEEE80211_OFDM_RATE_12MB_MASK ? 1 : 0;
4959 case IEEE80211_OFDM_RATE_18MB:
4960 return priv->
4961 rates_mask & IEEE80211_OFDM_RATE_18MB_MASK ? 1 : 0;
4962 case IEEE80211_OFDM_RATE_24MB:
4963 return priv->
4964 rates_mask & IEEE80211_OFDM_RATE_24MB_MASK ? 1 : 0;
4965 case IEEE80211_OFDM_RATE_36MB:
4966 return priv->
4967 rates_mask & IEEE80211_OFDM_RATE_36MB_MASK ? 1 : 0;
4968 case IEEE80211_OFDM_RATE_48MB:
4969 return priv->
4970 rates_mask & IEEE80211_OFDM_RATE_48MB_MASK ? 1 : 0;
4971 case IEEE80211_OFDM_RATE_54MB:
4972 return priv->
4973 rates_mask & IEEE80211_OFDM_RATE_54MB_MASK ? 1 : 0;
4974 default:
4975 return 0;
4976 }
4977 }
4978
4979 /* B and G mixed */
4980 switch (rate) {
4981 case IEEE80211_CCK_RATE_1MB:
4982 return priv->rates_mask & IEEE80211_CCK_RATE_1MB_MASK ? 1 : 0;
4983 case IEEE80211_CCK_RATE_2MB:
4984 return priv->rates_mask & IEEE80211_CCK_RATE_2MB_MASK ? 1 : 0;
4985 case IEEE80211_CCK_RATE_5MB:
4986 return priv->rates_mask & IEEE80211_CCK_RATE_5MB_MASK ? 1 : 0;
4987 case IEEE80211_CCK_RATE_11MB:
4988 return priv->rates_mask & IEEE80211_CCK_RATE_11MB_MASK ? 1 : 0;
4989 }
4990
4991 /* If we are limited to B modulations, bail at this point */
4992 if (ieee_mode == IEEE_B)
4993 return 0;
4994
4995 /* G */
4996 switch (rate) {
4997 case IEEE80211_OFDM_RATE_6MB:
4998 return priv->rates_mask & IEEE80211_OFDM_RATE_6MB_MASK ? 1 : 0;
4999 case IEEE80211_OFDM_RATE_9MB:
5000 return priv->rates_mask & IEEE80211_OFDM_RATE_9MB_MASK ? 1 : 0;
5001 case IEEE80211_OFDM_RATE_12MB:
5002 return priv->rates_mask & IEEE80211_OFDM_RATE_12MB_MASK ? 1 : 0;
5003 case IEEE80211_OFDM_RATE_18MB:
5004 return priv->rates_mask & IEEE80211_OFDM_RATE_18MB_MASK ? 1 : 0;
5005 case IEEE80211_OFDM_RATE_24MB:
5006 return priv->rates_mask & IEEE80211_OFDM_RATE_24MB_MASK ? 1 : 0;
5007 case IEEE80211_OFDM_RATE_36MB:
5008 return priv->rates_mask & IEEE80211_OFDM_RATE_36MB_MASK ? 1 : 0;
5009 case IEEE80211_OFDM_RATE_48MB:
5010 return priv->rates_mask & IEEE80211_OFDM_RATE_48MB_MASK ? 1 : 0;
5011 case IEEE80211_OFDM_RATE_54MB:
5012 return priv->rates_mask & IEEE80211_OFDM_RATE_54MB_MASK ? 1 : 0;
5013 }
5014
5015 return 0;
5016 }
5017
5018 static int ipw_compatible_rates(struct ipw_priv *priv,
5019 const struct ieee80211_network *network,
5020 struct ipw_supported_rates *rates)
5021 {
5022 int num_rates, i;
5023
5024 memset(rates, 0, sizeof(*rates));
5025 num_rates = min(network->rates_len, (u8) IPW_MAX_RATES);
5026 rates->num_rates = 0;
5027 for (i = 0; i < num_rates; i++) {
5028 if (!ipw_is_rate_in_mask(priv, network->mode,
5029 network->rates[i])) {
5030
5031 if (network->rates[i] & IEEE80211_BASIC_RATE_MASK) {
5032 IPW_DEBUG_SCAN("Adding masked mandatory "
5033 "rate %02X\n",
5034 network->rates[i]);
5035 rates->supported_rates[rates->num_rates++] =
5036 network->rates[i];
5037 continue;
5038 }
5039
5040 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5041 network->rates[i], priv->rates_mask);
5042 continue;
5043 }
5044
5045 rates->supported_rates[rates->num_rates++] = network->rates[i];
5046 }
5047
5048 num_rates = min(network->rates_ex_len,
5049 (u8) (IPW_MAX_RATES - num_rates));
5050 for (i = 0; i < num_rates; i++) {
5051 if (!ipw_is_rate_in_mask(priv, network->mode,
5052 network->rates_ex[i])) {
5053 if (network->rates_ex[i] & IEEE80211_BASIC_RATE_MASK) {
5054 IPW_DEBUG_SCAN("Adding masked mandatory "
5055 "rate %02X\n",
5056 network->rates_ex[i]);
5057 rates->supported_rates[rates->num_rates++] =
5058 network->rates[i];
5059 continue;
5060 }
5061
5062 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5063 network->rates_ex[i], priv->rates_mask);
5064 continue;
5065 }
5066
5067 rates->supported_rates[rates->num_rates++] =
5068 network->rates_ex[i];
5069 }
5070
5071 return 1;
5072 }
5073
5074 static void ipw_copy_rates(struct ipw_supported_rates *dest,
5075 const struct ipw_supported_rates *src)
5076 {
5077 u8 i;
5078 for (i = 0; i < src->num_rates; i++)
5079 dest->supported_rates[i] = src->supported_rates[i];
5080 dest->num_rates = src->num_rates;
5081 }
5082
5083 /* TODO: Look at sniffed packets in the air to determine if the basic rate
5084 * mask should ever be used -- right now all callers to add the scan rates are
5085 * set with the modulation = CCK, so BASIC_RATE_MASK is never set... */
5086 static void ipw_add_cck_scan_rates(struct ipw_supported_rates *rates,
5087 u8 modulation, u32 rate_mask)
5088 {
5089 u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ?
5090 IEEE80211_BASIC_RATE_MASK : 0;
5091
5092 if (rate_mask & IEEE80211_CCK_RATE_1MB_MASK)
5093 rates->supported_rates[rates->num_rates++] =
5094 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB;
5095
5096 if (rate_mask & IEEE80211_CCK_RATE_2MB_MASK)
5097 rates->supported_rates[rates->num_rates++] =
5098 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB;
5099
5100 if (rate_mask & IEEE80211_CCK_RATE_5MB_MASK)
5101 rates->supported_rates[rates->num_rates++] = basic_mask |
5102 IEEE80211_CCK_RATE_5MB;
5103
5104 if (rate_mask & IEEE80211_CCK_RATE_11MB_MASK)
5105 rates->supported_rates[rates->num_rates++] = basic_mask |
5106 IEEE80211_CCK_RATE_11MB;
5107 }
5108
5109 static void ipw_add_ofdm_scan_rates(struct ipw_supported_rates *rates,
5110 u8 modulation, u32 rate_mask)
5111 {
5112 u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ?
5113 IEEE80211_BASIC_RATE_MASK : 0;
5114
5115 if (rate_mask & IEEE80211_OFDM_RATE_6MB_MASK)
5116 rates->supported_rates[rates->num_rates++] = basic_mask |
5117 IEEE80211_OFDM_RATE_6MB;
5118
5119 if (rate_mask & IEEE80211_OFDM_RATE_9MB_MASK)
5120 rates->supported_rates[rates->num_rates++] =
5121 IEEE80211_OFDM_RATE_9MB;
5122
5123 if (rate_mask & IEEE80211_OFDM_RATE_12MB_MASK)
5124 rates->supported_rates[rates->num_rates++] = basic_mask |
5125 IEEE80211_OFDM_RATE_12MB;
5126
5127 if (rate_mask & IEEE80211_OFDM_RATE_18MB_MASK)
5128 rates->supported_rates[rates->num_rates++] =
5129 IEEE80211_OFDM_RATE_18MB;
5130
5131 if (rate_mask & IEEE80211_OFDM_RATE_24MB_MASK)
5132 rates->supported_rates[rates->num_rates++] = basic_mask |
5133 IEEE80211_OFDM_RATE_24MB;
5134
5135 if (rate_mask & IEEE80211_OFDM_RATE_36MB_MASK)
5136 rates->supported_rates[rates->num_rates++] =
5137 IEEE80211_OFDM_RATE_36MB;
5138
5139 if (rate_mask & IEEE80211_OFDM_RATE_48MB_MASK)
5140 rates->supported_rates[rates->num_rates++] =
5141 IEEE80211_OFDM_RATE_48MB;
5142
5143 if (rate_mask & IEEE80211_OFDM_RATE_54MB_MASK)
5144 rates->supported_rates[rates->num_rates++] =
5145 IEEE80211_OFDM_RATE_54MB;
5146 }
5147
/* Best association/merge candidate found so far while scanning, together
 * with the rate set computed for it (see ipw_find_adhoc_network). */
struct ipw_network_match {
	/* Candidate network */
	struct ieee80211_network *network;
	/* Rates from the candidate that pass the configured rate mask */
	struct ipw_supported_rates rates;
};
5152
5153 static int ipw_find_adhoc_network(struct ipw_priv *priv,
5154 struct ipw_network_match *match,
5155 struct ieee80211_network *network,
5156 int roaming)
5157 {
5158 struct ipw_supported_rates rates;
5159
5160 /* Verify that this network's capability is compatible with the
5161 * current mode (AdHoc or Infrastructure) */
5162 if ((priv->ieee->iw_mode == IW_MODE_ADHOC &&
5163 !(network->capability & WLAN_CAPABILITY_IBSS))) {
5164 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded due to "
5165 "capability mismatch.\n",
5166 escape_essid(network->ssid, network->ssid_len),
5167 MAC_ARG(network->bssid));
5168 return 0;
5169 }
5170
5171 /* If we do not have an ESSID for this AP, we can not associate with
5172 * it */
5173 if (network->flags & NETWORK_EMPTY_ESSID) {
5174 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5175 "because of hidden ESSID.\n",
5176 escape_essid(network->ssid, network->ssid_len),
5177 MAC_ARG(network->bssid));
5178 return 0;
5179 }
5180
5181 if (unlikely(roaming)) {
5182 /* If we are roaming, then ensure check if this is a valid
5183 * network to try and roam to */
5184 if ((network->ssid_len != match->network->ssid_len) ||
5185 memcmp(network->ssid, match->network->ssid,
5186 network->ssid_len)) {
5187 IPW_DEBUG_MERGE("Netowrk '%s (" MAC_FMT ")' excluded "
5188 "because of non-network ESSID.\n",
5189 escape_essid(network->ssid,
5190 network->ssid_len),
5191 MAC_ARG(network->bssid));
5192 return 0;
5193 }
5194 } else {
5195 /* If an ESSID has been configured then compare the broadcast
5196 * ESSID to ours */
5197 if ((priv->config & CFG_STATIC_ESSID) &&
5198 ((network->ssid_len != priv->essid_len) ||
5199 memcmp(network->ssid, priv->essid,
5200 min(network->ssid_len, priv->essid_len)))) {
5201 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5202
5203 strncpy(escaped,
5204 escape_essid(network->ssid, network->ssid_len),
5205 sizeof(escaped));
5206 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5207 "because of ESSID mismatch: '%s'.\n",
5208 escaped, MAC_ARG(network->bssid),
5209 escape_essid(priv->essid,
5210 priv->essid_len));
5211 return 0;
5212 }
5213 }
5214
5215 /* If the old network rate is better than this one, don't bother
5216 * testing everything else. */
5217
5218 if (network->time_stamp[0] < match->network->time_stamp[0]) {
5219 IPW_DEBUG_MERGE("Network '%s excluded because newer than "
5220 "current network.\n",
5221 escape_essid(match->network->ssid,
5222 match->network->ssid_len));
5223 return 0;
5224 } else if (network->time_stamp[1] < match->network->time_stamp[1]) {
5225 IPW_DEBUG_MERGE("Network '%s excluded because newer than "
5226 "current network.\n",
5227 escape_essid(match->network->ssid,
5228 match->network->ssid_len));
5229 return 0;
5230 }
5231
5232 /* Now go through and see if the requested network is valid... */
5233 if (priv->ieee->scan_age != 0 &&
5234 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5235 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5236 "because of age: %ums.\n",
5237 escape_essid(network->ssid, network->ssid_len),
5238 MAC_ARG(network->bssid),
5239 jiffies_to_msecs(jiffies -
5240 network->last_scanned));
5241 return 0;
5242 }
5243
5244 if ((priv->config & CFG_STATIC_CHANNEL) &&
5245 (network->channel != priv->channel)) {
5246 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5247 "because of channel mismatch: %d != %d.\n",
5248 escape_essid(network->ssid, network->ssid_len),
5249 MAC_ARG(network->bssid),
5250 network->channel, priv->channel);
5251 return 0;
5252 }
5253
5254 /* Verify privacy compatability */
5255 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5256 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5257 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5258 "because of privacy mismatch: %s != %s.\n",
5259 escape_essid(network->ssid, network->ssid_len),
5260 MAC_ARG(network->bssid),
5261 priv->
5262 capability & CAP_PRIVACY_ON ? "on" : "off",
5263 network->
5264 capability & WLAN_CAPABILITY_PRIVACY ? "on" :
5265 "off");
5266 return 0;
5267 }
5268
5269 if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
5270 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5271 "because of the same BSSID match: " MAC_FMT
5272 ".\n", escape_essid(network->ssid,
5273 network->ssid_len),
5274 MAC_ARG(network->bssid), MAC_ARG(priv->bssid));
5275 return 0;
5276 }
5277
5278 /* Filter out any incompatible freq / mode combinations */
5279 if (!ieee80211_is_valid_mode(priv->ieee, network->mode)) {
5280 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5281 "because of invalid frequency/mode "
5282 "combination.\n",
5283 escape_essid(network->ssid, network->ssid_len),
5284 MAC_ARG(network->bssid));
5285 return 0;
5286 }
5287
5288 /* Ensure that the rates supported by the driver are compatible with
5289 * this AP, including verification of basic rates (mandatory) */
5290 if (!ipw_compatible_rates(priv, network, &rates)) {
5291 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5292 "because configured rate mask excludes "
5293 "AP mandatory rate.\n",
5294 escape_essid(network->ssid, network->ssid_len),
5295 MAC_ARG(network->bssid));
5296 return 0;
5297 }
5298
5299 if (rates.num_rates == 0) {
5300 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5301 "because of no compatible rates.\n",
5302 escape_essid(network->ssid, network->ssid_len),
5303 MAC_ARG(network->bssid));
5304 return 0;
5305 }
5306
5307 /* TODO: Perform any further minimal comparititive tests. We do not
5308 * want to put too much policy logic here; intelligent scan selection
5309 * should occur within a generic IEEE 802.11 user space tool. */
5310
5311 /* Set up 'new' AP to this network */
5312 ipw_copy_rates(&match->rates, &rates);
5313 match->network = network;
5314 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' is a viable match.\n",
5315 escape_essid(network->ssid, network->ssid_len),
5316 MAC_ARG(network->bssid));
5317
5318 return 1;
5319 }
5320
5321 static void ipw_merge_adhoc_network(void *data)
5322 {
5323 struct ipw_priv *priv = data;
5324 struct ieee80211_network *network = NULL;
5325 struct ipw_network_match match = {
5326 .network = priv->assoc_network
5327 };
5328
5329 if ((priv->status & STATUS_ASSOCIATED) &&
5330 (priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5331 /* First pass through ROAM process -- look for a better
5332 * network */
5333 unsigned long flags;
5334
5335 spin_lock_irqsave(&priv->ieee->lock, flags);
5336 list_for_each_entry(network, &priv->ieee->network_list, list) {
5337 if (network != priv->assoc_network)
5338 ipw_find_adhoc_network(priv, &match, network,
5339 1);
5340 }
5341 spin_unlock_irqrestore(&priv->ieee->lock, flags);
5342
5343 if (match.network == priv->assoc_network) {
5344 IPW_DEBUG_MERGE("No better ADHOC in this network to "
5345 "merge to.\n");
5346 return;
5347 }
5348
5349 mutex_lock(&priv->mutex);
5350 if ((priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5351 IPW_DEBUG_MERGE("remove network %s\n",
5352 escape_essid(priv->essid,
5353 priv->essid_len));
5354 ipw_remove_current_network(priv);
5355 }
5356
5357 ipw_disassociate(priv);
5358 priv->assoc_network = match.network;
5359 mutex_unlock(&priv->mutex);
5360 return;
5361 }
5362 }
5363
5364 static int ipw_best_network(struct ipw_priv *priv,
5365 struct ipw_network_match *match,
5366 struct ieee80211_network *network, int roaming)
5367 {
5368 struct ipw_supported_rates rates;
5369
5370 /* Verify that this network's capability is compatible with the
5371 * current mode (AdHoc or Infrastructure) */
5372 if ((priv->ieee->iw_mode == IW_MODE_INFRA &&
5373 !(network->capability & WLAN_CAPABILITY_ESS)) ||
5374 (priv->ieee->iw_mode == IW_MODE_ADHOC &&
5375 !(network->capability & WLAN_CAPABILITY_IBSS))) {
5376 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded due to "
5377 "capability mismatch.\n",
5378 escape_essid(network->ssid, network->ssid_len),
5379 MAC_ARG(network->bssid));
5380 return 0;
5381 }
5382
5383 /* If we do not have an ESSID for this AP, we can not associate with
5384 * it */
5385 if (network->flags & NETWORK_EMPTY_ESSID) {
5386 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5387 "because of hidden ESSID.\n",
5388 escape_essid(network->ssid, network->ssid_len),
5389 MAC_ARG(network->bssid));
5390 return 0;
5391 }
5392
5393 if (unlikely(roaming)) {
5394 /* If we are roaming, then ensure check if this is a valid
5395 * network to try and roam to */
5396 if ((network->ssid_len != match->network->ssid_len) ||
5397 memcmp(network->ssid, match->network->ssid,
5398 network->ssid_len)) {
5399 IPW_DEBUG_ASSOC("Netowrk '%s (" MAC_FMT ")' excluded "
5400 "because of non-network ESSID.\n",
5401 escape_essid(network->ssid,
5402 network->ssid_len),
5403 MAC_ARG(network->bssid));
5404 return 0;
5405 }
5406 } else {
5407 /* If an ESSID has been configured then compare the broadcast
5408 * ESSID to ours */
5409 if ((priv->config & CFG_STATIC_ESSID) &&
5410 ((network->ssid_len != priv->essid_len) ||
5411 memcmp(network->ssid, priv->essid,
5412 min(network->ssid_len, priv->essid_len)))) {
5413 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5414 strncpy(escaped,
5415 escape_essid(network->ssid, network->ssid_len),
5416 sizeof(escaped));
5417 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5418 "because of ESSID mismatch: '%s'.\n",
5419 escaped, MAC_ARG(network->bssid),
5420 escape_essid(priv->essid,
5421 priv->essid_len));
5422 return 0;
5423 }
5424 }
5425
5426 /* If the old network rate is better than this one, don't bother
5427 * testing everything else. */
5428 if (match->network && match->network->stats.rssi > network->stats.rssi) {
5429 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5430 strncpy(escaped,
5431 escape_essid(network->ssid, network->ssid_len),
5432 sizeof(escaped));
5433 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded because "
5434 "'%s (" MAC_FMT ")' has a stronger signal.\n",
5435 escaped, MAC_ARG(network->bssid),
5436 escape_essid(match->network->ssid,
5437 match->network->ssid_len),
5438 MAC_ARG(match->network->bssid));
5439 return 0;
5440 }
5441
5442 /* If this network has already had an association attempt within the
5443 * last 3 seconds, do not try and associate again... */
5444 if (network->last_associate &&
5445 time_after(network->last_associate + (HZ * 3UL), jiffies)) {
5446 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5447 "because of storming (%ums since last "
5448 "assoc attempt).\n",
5449 escape_essid(network->ssid, network->ssid_len),
5450 MAC_ARG(network->bssid),
5451 jiffies_to_msecs(jiffies -
5452 network->last_associate));
5453 return 0;
5454 }
5455
5456 /* Now go through and see if the requested network is valid... */
5457 if (priv->ieee->scan_age != 0 &&
5458 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5459 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5460 "because of age: %ums.\n",
5461 escape_essid(network->ssid, network->ssid_len),
5462 MAC_ARG(network->bssid),
5463 jiffies_to_msecs(jiffies -
5464 network->last_scanned));
5465 return 0;
5466 }
5467
5468 if ((priv->config & CFG_STATIC_CHANNEL) &&
5469 (network->channel != priv->channel)) {
5470 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5471 "because of channel mismatch: %d != %d.\n",
5472 escape_essid(network->ssid, network->ssid_len),
5473 MAC_ARG(network->bssid),
5474 network->channel, priv->channel);
5475 return 0;
5476 }
5477
5478 /* Verify privacy compatability */
5479 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5480 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5481 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5482 "because of privacy mismatch: %s != %s.\n",
5483 escape_essid(network->ssid, network->ssid_len),
5484 MAC_ARG(network->bssid),
5485 priv->capability & CAP_PRIVACY_ON ? "on" :
5486 "off",
5487 network->capability &
5488 WLAN_CAPABILITY_PRIVACY ? "on" : "off");
5489 return 0;
5490 }
5491
5492 if ((priv->config & CFG_STATIC_BSSID) &&
5493 memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
5494 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5495 "because of BSSID mismatch: " MAC_FMT ".\n",
5496 escape_essid(network->ssid, network->ssid_len),
5497 MAC_ARG(network->bssid), MAC_ARG(priv->bssid));
5498 return 0;
5499 }
5500
5501 /* Filter out any incompatible freq / mode combinations */
5502 if (!ieee80211_is_valid_mode(priv->ieee, network->mode)) {
5503 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5504 "because of invalid frequency/mode "
5505 "combination.\n",
5506 escape_essid(network->ssid, network->ssid_len),
5507 MAC_ARG(network->bssid));
5508 return 0;
5509 }
5510
5511 /* Filter out invalid channel in current GEO */
5512 if (!ieee80211_is_valid_channel(priv->ieee, network->channel)) {
5513 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5514 "because of invalid channel in current GEO\n",
5515 escape_essid(network->ssid, network->ssid_len),
5516 MAC_ARG(network->bssid));
5517 return 0;
5518 }
5519
5520 /* Ensure that the rates supported by the driver are compatible with
5521 * this AP, including verification of basic rates (mandatory) */
5522 if (!ipw_compatible_rates(priv, network, &rates)) {
5523 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5524 "because configured rate mask excludes "
5525 "AP mandatory rate.\n",
5526 escape_essid(network->ssid, network->ssid_len),
5527 MAC_ARG(network->bssid));
5528 return 0;
5529 }
5530
5531 if (rates.num_rates == 0) {
5532 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5533 "because of no compatible rates.\n",
5534 escape_essid(network->ssid, network->ssid_len),
5535 MAC_ARG(network->bssid));
5536 return 0;
5537 }
5538
5539 /* TODO: Perform any further minimal comparititive tests. We do not
5540 * want to put too much policy logic here; intelligent scan selection
5541 * should occur within a generic IEEE 802.11 user space tool. */
5542
5543 /* Set up 'new' AP to this network */
5544 ipw_copy_rates(&match->rates, &rates);
5545 match->network = network;
5546
5547 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' is a viable match.\n",
5548 escape_essid(network->ssid, network->ssid_len),
5549 MAC_ARG(network->bssid));
5550
5551 return 1;
5552 }
5553
/* Build a new IBSS (ad-hoc) network descriptor from the current
 * configuration, selecting a band/mode and a channel the firmware can
 * legally beacon on. */
static void ipw_adhoc_create(struct ipw_priv *priv,
			     struct ieee80211_network *network)
{
	const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
	int i;

	/*
	 * For the purposes of scanning, we can set our wireless mode
	 * to trigger scans across combinations of bands, but when it
	 * comes to creating a new ad-hoc network, we have to tell the
	 * FW exactly which band to use.
	 *
	 * We also have the possibility of an invalid channel for the
	 * chosen band.  Attempting to create a new ad-hoc network
	 * with an invalid channel for wireless mode will trigger a
	 * FW fatal error.
	 */
	switch (ieee80211_is_valid_channel(priv->ieee, priv->channel)) {
	case IEEE80211_52GHZ_BAND:
		network->mode = IEEE_A;
		i = ieee80211_channel_to_index(priv->ieee, priv->channel);
		/* The channel was just validated, so an index must exist. */
		BUG_ON(i == -1);
		if (geo->a[i].flags & IEEE80211_CH_PASSIVE_ONLY) {
			/* Passive-only channel: fall back to the first
			 * channel of the A band. */
			IPW_WARNING("Overriding invalid channel\n");
			priv->channel = geo->a[0].channel;
		}
		break;

	case IEEE80211_24GHZ_BAND:
		if (priv->ieee->mode & IEEE_G)
			network->mode = IEEE_G;
		else
			network->mode = IEEE_B;
		i = ieee80211_channel_to_index(priv->ieee, priv->channel);
		/* The channel was just validated, so an index must exist. */
		BUG_ON(i == -1);
		if (geo->bg[i].flags & IEEE80211_CH_PASSIVE_ONLY) {
			/* Passive-only channel: fall back to the first
			 * channel of the BG band. */
			IPW_WARNING("Overriding invalid channel\n");
			priv->channel = geo->bg[0].channel;
		}
		break;

	default:
		/* Current channel is not valid in the current GEO: pick a
		 * mode the hardware supports and that band's first channel. */
		IPW_WARNING("Overriding invalid channel\n");
		if (priv->ieee->mode & IEEE_A) {
			network->mode = IEEE_A;
			priv->channel = geo->a[0].channel;
		} else if (priv->ieee->mode & IEEE_G) {
			network->mode = IEEE_G;
			priv->channel = geo->bg[0].channel;
		} else {
			network->mode = IEEE_B;
			priv->channel = geo->bg[0].channel;
		}
		break;
	}

	/* Fill in the remainder of the descriptor from priv. */
	network->channel = priv->channel;
	priv->config |= CFG_ADHOC_PERSIST;
	ipw_create_bssid(priv, network->bssid);
	network->ssid_len = priv->essid_len;
	memcpy(network->ssid, priv->essid, priv->essid_len);
	memset(&network->stats, 0, sizeof(network->stats));
	network->capability = WLAN_CAPABILITY_IBSS;
	if (!(priv->config & CFG_PREAMBLE_LONG))
		network->capability |= WLAN_CAPABILITY_SHORT_PREAMBLE;
	if (priv->capability & CAP_PRIVACY_ON)
		network->capability |= WLAN_CAPABILITY_PRIVACY;
	/* Split the supported rates into the primary rates element and the
	 * extended rates element (whatever does not fit in the first). */
	network->rates_len = min(priv->rates.num_rates, MAX_RATES_LENGTH);
	memcpy(network->rates, priv->rates.supported_rates, network->rates_len);
	network->rates_ex_len = priv->rates.num_rates - network->rates_len;
	memcpy(network->rates_ex,
	       &priv->rates.supported_rates[network->rates_len],
	       network->rates_ex_len);
	network->last_scanned = 0;
	network->flags = 0;
	network->last_associate = 0;
	network->time_stamp[0] = 0;
	network->time_stamp[1] = 0;
	network->beacon_interval = 100;	/* Default */
	network->listen_interval = 10;	/* Default */
	network->atim_window = 0;	/* Default */
	network->wpa_ie_len = 0;
	network->rsn_ie_len = 0;
}
5639
5640 static void ipw_send_tgi_tx_key(struct ipw_priv *priv, int type, int index)
5641 {
5642 struct ipw_tgi_tx_key key;
5643
5644 if (!(priv->ieee->sec.flags & (1 << index)))
5645 return;
5646
5647 key.key_id = index;
5648 memcpy(key.key, priv->ieee->sec.keys[index], SCM_TEMPORAL_KEY_LENGTH);
5649 key.security_type = type;
5650 key.station_index = 0; /* always 0 for BSS */
5651 key.flags = 0;
5652 /* 0 for new key; previous value of counter (after fatal error) */
5653 key.tx_counter[0] = 0;
5654 key.tx_counter[1] = 0;
5655
5656 ipw_send_cmd_pdu(priv, IPW_CMD_TGI_TX_KEY, sizeof(key), &key);
5657 }
5658
5659 static void ipw_send_wep_keys(struct ipw_priv *priv, int type)
5660 {
5661 struct ipw_wep_key key;
5662 int i;
5663
5664 key.cmd_id = DINO_CMD_WEP_KEY;
5665 key.seq_num = 0;
5666
5667 /* Note: AES keys cannot be set for multiple times.
5668 * Only set it at the first time. */
5669 for (i = 0; i < 4; i++) {
5670 key.key_index = i | type;
5671 if (!(priv->ieee->sec.flags & (1 << i))) {
5672 key.key_size = 0;
5673 continue;
5674 }
5675
5676 key.key_size = priv->ieee->sec.key_sizes[i];
5677 memcpy(key.key, priv->ieee->sec.keys[i], key.key_size);
5678
5679 ipw_send_cmd_pdu(priv, IPW_CMD_WEP_KEY, sizeof(key), &key);
5680 }
5681 }
5682
5683 static void ipw_set_hw_decrypt_unicast(struct ipw_priv *priv, int level)
5684 {
5685 if (priv->ieee->host_encrypt)
5686 return;
5687
5688 switch (level) {
5689 case SEC_LEVEL_3:
5690 priv->sys_config.disable_unicast_decryption = 0;
5691 priv->ieee->host_decrypt = 0;
5692 break;
5693 case SEC_LEVEL_2:
5694 priv->sys_config.disable_unicast_decryption = 1;
5695 priv->ieee->host_decrypt = 1;
5696 break;
5697 case SEC_LEVEL_1:
5698 priv->sys_config.disable_unicast_decryption = 0;
5699 priv->ieee->host_decrypt = 0;
5700 break;
5701 case SEC_LEVEL_0:
5702 priv->sys_config.disable_unicast_decryption = 1;
5703 break;
5704 default:
5705 break;
5706 }
5707 }
5708
5709 static void ipw_set_hw_decrypt_multicast(struct ipw_priv *priv, int level)
5710 {
5711 if (priv->ieee->host_encrypt)
5712 return;
5713
5714 switch (level) {
5715 case SEC_LEVEL_3:
5716 priv->sys_config.disable_multicast_decryption = 0;
5717 break;
5718 case SEC_LEVEL_2:
5719 priv->sys_config.disable_multicast_decryption = 1;
5720 break;
5721 case SEC_LEVEL_1:
5722 priv->sys_config.disable_multicast_decryption = 0;
5723 break;
5724 case SEC_LEVEL_0:
5725 priv->sys_config.disable_multicast_decryption = 1;
5726 break;
5727 default:
5728 break;
5729 }
5730 }
5731
/* Program the firmware with the keys appropriate for the currently
 * configured security level (CCMP, TKIP, WEP or none). */
static void ipw_set_hwcrypto_keys(struct ipw_priv *priv)
{
	switch (priv->ieee->sec.level) {
	case SEC_LEVEL_3:
		/* CCMP: load the active transmit key... */
		if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
			ipw_send_tgi_tx_key(priv,
					    DCT_FLAG_EXT_SECURITY_CCM,
					    priv->ieee->sec.active_key);

		/* ...and the group keys, unless the host decrypts
		 * multicast itself. */
		if (!priv->ieee->host_mc_decrypt)
			ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_CCM);
		break;
	case SEC_LEVEL_2:
		/* TKIP: only the active transmit key goes to hardware. */
		if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
			ipw_send_tgi_tx_key(priv,
					    DCT_FLAG_EXT_SECURITY_TKIP,
					    priv->ieee->sec.active_key);
		break;
	case SEC_LEVEL_1:
		/* WEP: download all key slots and point HW decryption at
		 * this level for both unicast and multicast. */
		ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
		ipw_set_hw_decrypt_unicast(priv, priv->ieee->sec.level);
		ipw_set_hw_decrypt_multicast(priv, priv->ieee->sec.level);
		break;
	case SEC_LEVEL_0:
	default:
		break;
	}
}
5760
5761 static void ipw_adhoc_check(void *data)
5762 {
5763 struct ipw_priv *priv = data;
5764
5765 if (priv->missed_adhoc_beacons++ > priv->disassociate_threshold &&
5766 !(priv->config & CFG_ADHOC_PERSIST)) {
5767 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
5768 IPW_DL_STATE | IPW_DL_ASSOC,
5769 "Missed beacon: %d - disassociate\n",
5770 priv->missed_adhoc_beacons);
5771 ipw_remove_current_network(priv);
5772 ipw_disassociate(priv);
5773 return;
5774 }
5775
5776 queue_delayed_work(priv->workqueue, &priv->adhoc_check,
5777 priv->assoc_request.beacon_interval);
5778 }
5779
/* Workqueue wrapper for ipw_adhoc_check() that takes the driver mutex. */
static void ipw_bg_adhoc_check(void *data)
{
	struct ipw_priv *priv = data;
	mutex_lock(&priv->mutex);
	ipw_adhoc_check(data);
	mutex_unlock(&priv->mutex);
}
5787
5788 #ifdef CONFIG_IPW2200_DEBUG
/* Dump the association-related configuration (channel/ESSID/BSSID locks,
 * privacy state and rate mask) to the debug log after a scan finds no
 * matching AP. */
static void ipw_debug_config(struct ipw_priv *priv)
{
	IPW_DEBUG_INFO("Scan completed, no valid APs matched "
		       "[CFG 0x%08X]\n", priv->config);
	if (priv->config & CFG_STATIC_CHANNEL)
		IPW_DEBUG_INFO("Channel locked to %d\n", priv->channel);
	else
		IPW_DEBUG_INFO("Channel unlocked.\n");
	if (priv->config & CFG_STATIC_ESSID)
		IPW_DEBUG_INFO("ESSID locked to '%s'\n",
			       escape_essid(priv->essid, priv->essid_len));
	else
		IPW_DEBUG_INFO("ESSID unlocked.\n");
	if (priv->config & CFG_STATIC_BSSID)
		IPW_DEBUG_INFO("BSSID locked to " MAC_FMT "\n",
			       MAC_ARG(priv->bssid));
	else
		IPW_DEBUG_INFO("BSSID unlocked.\n");
	if (priv->capability & CAP_PRIVACY_ON)
		IPW_DEBUG_INFO("PRIVACY on\n");
	else
		IPW_DEBUG_INFO("PRIVACY off\n");
	IPW_DEBUG_INFO("RATE MASK: 0x%08X\n", priv->rates_mask);
}
5813 #else
5814 #define ipw_debug_config(x) do {} while (0)
5815 #endif
5816
/* Program the firmware's fixed Tx rate override from priv->rates_mask,
 * remapping the mask into the layout expected for the current band/mode. */
static void ipw_set_fixed_rate(struct ipw_priv *priv, int mode)
{
	/* TODO: Verify that this works... */
	struct ipw_fixed_rate fr = {
		.tx_rates = priv->rates_mask
	};
	u32 reg;
	u16 mask = 0;

	/* Identify 'current FW band' and match it with the fixed
	 * Tx rates */

	switch (priv->ieee->freq_band) {
	case IEEE80211_52GHZ_BAND:	/* A only */
		/* IEEE_A: only OFDM rate bits are valid */
		if (priv->rates_mask & ~IEEE80211_OFDM_RATES_MASK) {
			/* Invalid fixed rate mask */
			IPW_DEBUG_WX
			    ("invalid fixed rate mask in ipw_set_fixed_rate\n");
			fr.tx_rates = 0;
			break;
		}

		/* Shift the OFDM rates down to their A-band positions */
		fr.tx_rates >>= IEEE80211_OFDM_SHIFT_MASK_A;
		break;

	default:		/* 2.4Ghz or Mixed */
		/* IEEE_B: only CCK rate bits are valid */
		if (mode == IEEE_B) {
			if (fr.tx_rates & ~IEEE80211_CCK_RATES_MASK) {
				/* Invalid fixed rate mask */
				IPW_DEBUG_WX
				    ("invalid fixed rate mask in ipw_set_fixed_rate\n");
				fr.tx_rates = 0;
			}
			break;
		}

		/* IEEE_G: CCK and OFDM rate bits are both valid */
		if (fr.tx_rates & ~(IEEE80211_CCK_RATES_MASK |
				    IEEE80211_OFDM_RATES_MASK)) {
			/* Invalid fixed rate mask */
			IPW_DEBUG_WX
			    ("invalid fixed rate mask in ipw_set_fixed_rate\n");
			fr.tx_rates = 0;
			break;
		}

		/* Move the 6/9/12 Mb OFDM rate bits down one position --
		 * apparently the firmware's mixed-band layout packs them
		 * differently (TODO confirm against firmware docs). */
		if (IEEE80211_OFDM_RATE_6MB_MASK & fr.tx_rates) {
			mask |= (IEEE80211_OFDM_RATE_6MB_MASK >> 1);
			fr.tx_rates &= ~IEEE80211_OFDM_RATE_6MB_MASK;
		}

		if (IEEE80211_OFDM_RATE_9MB_MASK & fr.tx_rates) {
			mask |= (IEEE80211_OFDM_RATE_9MB_MASK >> 1);
			fr.tx_rates &= ~IEEE80211_OFDM_RATE_9MB_MASK;
		}

		if (IEEE80211_OFDM_RATE_12MB_MASK & fr.tx_rates) {
			mask |= (IEEE80211_OFDM_RATE_12MB_MASK >> 1);
			fr.tx_rates &= ~IEEE80211_OFDM_RATE_12MB_MASK;
		}

		fr.tx_rates |= mask;
		break;
	}

	/* The override's location is read indirectly from
	 * IPW_MEM_FIXED_OVERRIDE.  NOTE(review): the *(u32 *)&fr type-pun
	 * assumes struct ipw_fixed_rate is exactly 32 bits -- confirm
	 * against ipw2200.h. */
	reg = ipw_read32(priv, IPW_MEM_FIXED_OVERRIDE);
	ipw_write_reg32(priv, reg, *(u32 *) & fr);
}
5887
5888 static void ipw_abort_scan(struct ipw_priv *priv)
5889 {
5890 int err;
5891
5892 if (priv->status & STATUS_SCAN_ABORTING) {
5893 IPW_DEBUG_HC("Ignoring concurrent scan abort request.\n");
5894 return;
5895 }
5896 priv->status |= STATUS_SCAN_ABORTING;
5897
5898 err = ipw_send_scan_abort(priv);
5899 if (err)
5900 IPW_DEBUG_HC("Request to abort scan failed.\n");
5901 }
5902
/* Fill in scan->channels_list for the requested scan type.
 *
 * The list is organized in per-band groups: a header byte encoding the
 * band (IPW_A_MODE/IPW_B_MODE in the top bits) together with the count of
 * channels that follow, then one byte per channel.  Channel entries are
 * written starting at start+1; the header slot at 'start' is filled in
 * once the group is complete. */
static void ipw_add_scan_channels(struct ipw_priv *priv,
				  struct ipw_scan_request_ext *scan,
				  int scan_type)
{
	int channel_index = 0;
	const struct ieee80211_geo *geo;
	int i;

	geo = ieee80211_get_geo(priv->ieee);

	if (priv->ieee->freq_band & IEEE80211_52GHZ_BAND) {
		int start = channel_index;
		for (i = 0; i < geo->a_channels; i++) {
			/* Skip the channel we are currently associated on. */
			if ((priv->status & STATUS_ASSOCIATED) &&
			    geo->a[i].channel == priv->channel)
				continue;
			channel_index++;
			scan->channels_list[channel_index] = geo->a[i].channel;
			/* Passive-only channels must use a passive dwell. */
			ipw_set_scan_type(scan, channel_index,
					  geo->a[i].
					  flags & IEEE80211_CH_PASSIVE_ONLY ?
					  IPW_SCAN_PASSIVE_FULL_DWELL_SCAN :
					  scan_type);
		}

		if (start != channel_index) {
			scan->channels_list[start] = (u8) (IPW_A_MODE << 6) |
			    (channel_index - start);
			channel_index++;
		}
	}

	if (priv->ieee->freq_band & IEEE80211_24GHZ_BAND) {
		int start = channel_index;
		if (priv->config & CFG_SPEED_SCAN) {
			int index;
			/* Tracks which channels are already in this scan. */
			u8 channels[IEEE80211_24GHZ_CHANNELS] = {
				/* nop out the list */
				[0] = 0
			};

			u8 channel;
			while (channel_index < IPW_SCAN_CHANNELS) {
				channel =
				    priv->speed_scan[priv->speed_scan_pos];
				/* A zero entry marks the end of the speed
				 * scan list; wrap back to the start. */
				if (channel == 0) {
					priv->speed_scan_pos = 0;
					channel = priv->speed_scan[0];
				}
				/* Skip the channel we are associated on. */
				if ((priv->status & STATUS_ASSOCIATED) &&
				    channel == priv->channel) {
					priv->speed_scan_pos++;
					continue;
				}

				/* If this channel has already been
				 * added in scan, break from loop
				 * and this will be the first channel
				 * in the next scan.
				 */
				if (channels[channel - 1] != 0)
					break;

				channels[channel - 1] = 1;
				priv->speed_scan_pos++;
				channel_index++;
				scan->channels_list[channel_index] = channel;
				index =
				    ieee80211_channel_to_index(priv->ieee,
							       channel);
				ipw_set_scan_type(scan, channel_index,
						  geo->bg[index].
						  flags &
						  IEEE80211_CH_PASSIVE_ONLY ?
						  IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
						  : scan_type);
			}
		} else {
			for (i = 0; i < geo->bg_channels; i++) {
				/* Skip the channel we are associated on. */
				if ((priv->status & STATUS_ASSOCIATED) &&
				    geo->bg[i].channel == priv->channel)
					continue;
				channel_index++;
				scan->channels_list[channel_index] =
				    geo->bg[i].channel;
				ipw_set_scan_type(scan, channel_index,
						  geo->bg[i].
						  flags &
						  IEEE80211_CH_PASSIVE_ONLY ?
						  IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
						  : scan_type);
			}
		}

		if (start != channel_index) {
			scan->channels_list[start] = (u8) (IPW_B_MODE << 6) |
			    (channel_index - start);
		}
	}
}
6002
/* Build and issue a scan request to the firmware.
 *
 * Returns 0 on success (including when the request is merely deferred via
 * STATUS_SCAN_PENDING), or the error from sending the SSID/scan command. */
static int ipw_request_scan(struct ipw_priv *priv)
{
	struct ipw_scan_request_ext scan;
	int err = 0, scan_type;

	/* Nothing to do if the device is not initialized or is going away. */
	if (!(priv->status & STATUS_INIT) ||
	    (priv->status & STATUS_EXIT_PENDING))
		return 0;

	mutex_lock(&priv->mutex);

	/* Only one scan at a time; remember that one was requested. */
	if (priv->status & STATUS_SCANNING) {
		IPW_DEBUG_HC("Concurrent scan requested. Ignoring.\n");
		priv->status |= STATUS_SCAN_PENDING;
		goto done;
	}

	if (!(priv->status & STATUS_SCAN_FORCED) &&
	    priv->status & STATUS_SCAN_ABORTING) {
		IPW_DEBUG_HC("Scan request while abort pending. Queuing.\n");
		priv->status |= STATUS_SCAN_PENDING;
		goto done;
	}

	if (priv->status & STATUS_RF_KILL_MASK) {
		IPW_DEBUG_HC("Aborting scan due to RF Kill activation\n");
		priv->status |= STATUS_SCAN_PENDING;
		goto done;
	}

	memset(&scan, 0, sizeof(scan));

	/* Use a shorter active dwell when speed scanning is enabled. */
	if (priv->config & CFG_SPEED_SCAN)
		scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
		    cpu_to_le16(30);
	else
		scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
		    cpu_to_le16(20);

	scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] =
	    cpu_to_le16(20);
	scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = cpu_to_le16(120);

	scan.full_scan_index = cpu_to_le32(ieee80211_get_scans(priv->ieee));

#ifdef CONFIG_IPW2200_MONITOR
	if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
		u8 channel;
		u8 band = 0;

		/* In monitor mode, park on a single channel (header byte
		 * encodes the band and a channel count of 1). */
		switch (ieee80211_is_valid_channel(priv->ieee, priv->channel)) {
		case IEEE80211_52GHZ_BAND:
			band = (u8) (IPW_A_MODE << 6) | 1;
			channel = priv->channel;
			break;

		case IEEE80211_24GHZ_BAND:
			band = (u8) (IPW_B_MODE << 6) | 1;
			channel = priv->channel;
			break;

		default:
			/* Channel invalid for current GEO: fall back to
			 * channel 9 in the 2.4GHz band. */
			band = (u8) (IPW_B_MODE << 6) | 1;
			channel = 9;
			break;
		}

		scan.channels_list[0] = band;
		scan.channels_list[1] = channel;
		ipw_set_scan_type(&scan, 1, IPW_SCAN_PASSIVE_FULL_DWELL_SCAN);

		/* NOTE: The card will sit on this channel for this time
		 * period.  Scan aborts are timing sensitive and frequently
		 * result in firmware restarts.  As such, it is best to
		 * set a small dwell_time here and just keep re-issuing
		 * scans.  Otherwise fast channel hopping will not actually
		 * hop channels.
		 *
		 * TODO: Move SPEED SCAN support to all modes and bands */
		scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
		    cpu_to_le16(2000);
	} else {
#endif				/* CONFIG_IPW2200_MONITOR */

		/* If we are roaming, then make this a directed scan for the
		 * current network.  Otherwise, ensure that every other scan
		 * is a fast channel hop scan */
		if ((priv->status & STATUS_ROAMING)
		    || (!(priv->status & STATUS_ASSOCIATED)
			&& (priv->config & CFG_STATIC_ESSID)
			&& (le32_to_cpu(scan.full_scan_index) % 2))) {
			err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
			if (err) {
				IPW_DEBUG_HC("Attempt to send SSID command "
					     "failed.\n");
				goto done;
			}

			scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
		} else
			scan_type = IPW_SCAN_ACTIVE_BROADCAST_SCAN;

		ipw_add_scan_channels(priv, &scan, scan_type);
#ifdef CONFIG_IPW2200_MONITOR
	}
#endif

	err = ipw_send_scan_request_ext(priv, &scan);
	if (err) {
		IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
		goto done;
	}

	priv->status |= STATUS_SCANNING;
	priv->status &= ~STATUS_SCAN_PENDING;
	/* Watchdog in case the firmware never reports scan completion. */
	queue_delayed_work(priv->workqueue, &priv->scan_check,
			   IPW_SCAN_CHECK_WATCHDOG);
      done:
	mutex_unlock(&priv->mutex);
	return err;
}
6123
/* Workqueue wrapper for ipw_abort_scan() that takes the driver mutex. */
static void ipw_bg_abort_scan(void *data)
{
	struct ipw_priv *priv = data;
	mutex_lock(&priv->mutex);
	ipw_abort_scan(data);
	mutex_unlock(&priv->mutex);
}
6131
/* Record whether WPA is enabled.  Called when wpa_supplicant loads and
 * closes the driver interface.  Always returns 0. */
static int ipw_wpa_enable(struct ipw_priv *priv, int value)
{
	priv->ieee->wpa_enabled = value;
	return 0;
}
6139
6140 static int ipw_wpa_set_auth_algs(struct ipw_priv *priv, int value)
6141 {
6142 struct ieee80211_device *ieee = priv->ieee;
6143 struct ieee80211_security sec = {
6144 .flags = SEC_AUTH_MODE,
6145 };
6146 int ret = 0;
6147
6148 if (value & IW_AUTH_ALG_SHARED_KEY) {
6149 sec.auth_mode = WLAN_AUTH_SHARED_KEY;
6150 ieee->open_wep = 0;
6151 } else if (value & IW_AUTH_ALG_OPEN_SYSTEM) {
6152 sec.auth_mode = WLAN_AUTH_OPEN;
6153 ieee->open_wep = 1;
6154 } else if (value & IW_AUTH_ALG_LEAP) {
6155 sec.auth_mode = WLAN_AUTH_LEAP;
6156 ieee->open_wep = 1;
6157 } else
6158 return -EINVAL;
6159
6160 if (ieee->set_security)
6161 ieee->set_security(ieee->dev, &sec);
6162 else
6163 ret = -EOPNOTSUPP;
6164
6165 return ret;
6166 }
6167
/* Prepare for a WPA (re)association: enable WPA and drop the current
 * association.  The wpa_ie/wpa_ie_len arguments are currently unused. */
static void ipw_wpa_assoc_frame(struct ipw_priv *priv, char *wpa_ie,
				int wpa_ie_len)
{
	/* make sure WPA is enabled */
	ipw_wpa_enable(priv, 1);

	ipw_disassociate(priv);
}
6176
/* Send the RSN capabilities blob to the firmware.
 * Returns the ipw_send_cmd_pdu() result (0 on success). */
static int ipw_set_rsn_capa(struct ipw_priv *priv,
			    char *capabilities, int length)
{
	IPW_DEBUG_HC("HOST_CMD_RSN_CAPABILITIES\n");

	return ipw_send_cmd_pdu(priv, IPW_CMD_RSN_CAPABILITIES, length,
				capabilities);
}
6185
6186 /*
6187 * WE-18 support
6188 */
6189
6190 /* SIOCSIWGENIE */
6191 static int ipw_wx_set_genie(struct net_device *dev,
6192 struct iw_request_info *info,
6193 union iwreq_data *wrqu, char *extra)
6194 {
6195 struct ipw_priv *priv = ieee80211_priv(dev);
6196 struct ieee80211_device *ieee = priv->ieee;
6197 u8 *buf;
6198 int err = 0;
6199
6200 if (wrqu->data.length > MAX_WPA_IE_LEN ||
6201 (wrqu->data.length && extra == NULL))
6202 return -EINVAL;
6203
6204 //mutex_lock(&priv->mutex);
6205
6206 //if (!ieee->wpa_enabled) {
6207 // err = -EOPNOTSUPP;
6208 // goto out;
6209 //}
6210
6211 if (wrqu->data.length) {
6212 buf = kmalloc(wrqu->data.length, GFP_KERNEL);
6213 if (buf == NULL) {
6214 err = -ENOMEM;
6215 goto out;
6216 }
6217
6218 memcpy(buf, extra, wrqu->data.length);
6219 kfree(ieee->wpa_ie);
6220 ieee->wpa_ie = buf;
6221 ieee->wpa_ie_len = wrqu->data.length;
6222 } else {
6223 kfree(ieee->wpa_ie);
6224 ieee->wpa_ie = NULL;
6225 ieee->wpa_ie_len = 0;
6226 }
6227
6228 ipw_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len);
6229 out:
6230 //mutex_unlock(&priv->mutex);
6231 return err;
6232 }
6233
6234 /* SIOCGIWGENIE */
6235 static int ipw_wx_get_genie(struct net_device *dev,
6236 struct iw_request_info *info,
6237 union iwreq_data *wrqu, char *extra)
6238 {
6239 struct ipw_priv *priv = ieee80211_priv(dev);
6240 struct ieee80211_device *ieee = priv->ieee;
6241 int err = 0;
6242
6243 //mutex_lock(&priv->mutex);
6244
6245 //if (!ieee->wpa_enabled) {
6246 // err = -EOPNOTSUPP;
6247 // goto out;
6248 //}
6249
6250 if (ieee->wpa_ie_len == 0 || ieee->wpa_ie == NULL) {
6251 wrqu->data.length = 0;
6252 goto out;
6253 }
6254
6255 if (wrqu->data.length < ieee->wpa_ie_len) {
6256 err = -E2BIG;
6257 goto out;
6258 }
6259
6260 wrqu->data.length = ieee->wpa_ie_len;
6261 memcpy(extra, ieee->wpa_ie, ieee->wpa_ie_len);
6262
6263 out:
6264 //mutex_unlock(&priv->mutex);
6265 return err;
6266 }
6267
6268 static int wext_cipher2level(int cipher)
6269 {
6270 switch (cipher) {
6271 case IW_AUTH_CIPHER_NONE:
6272 return SEC_LEVEL_0;
6273 case IW_AUTH_CIPHER_WEP40:
6274 case IW_AUTH_CIPHER_WEP104:
6275 return SEC_LEVEL_1;
6276 case IW_AUTH_CIPHER_TKIP:
6277 return SEC_LEVEL_2;
6278 case IW_AUTH_CIPHER_CCMP:
6279 return SEC_LEVEL_3;
6280 default:
6281 return -1;
6282 }
6283 }
6284
6285 /* SIOCSIWAUTH */
/* SIOCSIWAUTH handler: apply a single WEXT authentication parameter. */
static int ipw_wx_set_auth(struct net_device *dev,
			   struct iw_request_info *info,
			   union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	struct ieee80211_device *ieee = priv->ieee;
	struct iw_param *param = &wrqu->param;
	struct ieee80211_crypt_data *crypt;
	unsigned long flags;
	int ret = 0;

	switch (param->flags & IW_AUTH_INDEX) {
	case IW_AUTH_WPA_VERSION:
		/* Accepted but nothing to configure here. */
		break;
	case IW_AUTH_CIPHER_PAIRWISE:
		ipw_set_hw_decrypt_unicast(priv,
					   wext_cipher2level(param->value));
		break;
	case IW_AUTH_CIPHER_GROUP:
		ipw_set_hw_decrypt_multicast(priv,
					     wext_cipher2level(param->value));
		break;
	case IW_AUTH_KEY_MGMT:
		/*
		 * ipw2200 does not use these parameters
		 */
		break;

	case IW_AUTH_TKIP_COUNTERMEASURES:
		/* Toggle the countermeasures flag on the active TX key's
		 * crypto ops; silently ignored when unsupported. */
		crypt = priv->ieee->crypt[priv->ieee->tx_keyidx];
		if (!crypt || !crypt->ops->set_flags || !crypt->ops->get_flags)
			break;

		flags = crypt->ops->get_flags(crypt->priv);

		if (param->value)
			flags |= IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
		else
			flags &= ~IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;

		crypt->ops->set_flags(flags, crypt->priv);

		break;

	case IW_AUTH_DROP_UNENCRYPTED:{
			/* HACK:
			 *
			 * wpa_supplicant calls set_wpa_enabled when the driver
			 * is loaded and unloaded, regardless of if WPA is being
			 * used.  No other calls are made which can be used to
			 * determine if encryption will be used or not prior to
			 * association being expected.  If encryption is not
			 * being used, drop_unencrypted is set to false, else
			 * true -- we can use this to determine if the
			 * CAP_PRIVACY_ON bit should be set.
			 */
			struct ieee80211_security sec = {
				.flags = SEC_ENABLED,
				.enabled = param->value,
			};
			priv->ieee->drop_unencrypted = param->value;
			/* We only change SEC_LEVEL for open mode. Others
			 * are set by ipw_wpa_set_encryption.
			 */
			if (!param->value) {
				sec.flags |= SEC_LEVEL;
				sec.level = SEC_LEVEL_0;
			} else {
				sec.flags |= SEC_LEVEL;
				sec.level = SEC_LEVEL_1;
			}
			if (priv->ieee->set_security)
				priv->ieee->set_security(priv->ieee->dev, &sec);
			break;
		}

	case IW_AUTH_80211_AUTH_ALG:
		ret = ipw_wpa_set_auth_algs(priv, param->value);
		break;

	case IW_AUTH_WPA_ENABLED:
		ret = ipw_wpa_enable(priv, param->value);
		break;

	case IW_AUTH_RX_UNENCRYPTED_EAPOL:
		ieee->ieee802_1x = param->value;
		break;

	/* IW_AUTH_ROAMING_CONTROL is intentionally not handled. */
	case IW_AUTH_PRIVACY_INVOKED:
		ieee->privacy_invoked = param->value;
		break;

	default:
		return -EOPNOTSUPP;
	}
	return ret;
}
6384
6385 /* SIOCGIWAUTH */
6386 static int ipw_wx_get_auth(struct net_device *dev,
6387 struct iw_request_info *info,
6388 union iwreq_data *wrqu, char *extra)
6389 {
6390 struct ipw_priv *priv = ieee80211_priv(dev);
6391 struct ieee80211_device *ieee = priv->ieee;
6392 struct ieee80211_crypt_data *crypt;
6393 struct iw_param *param = &wrqu->param;
6394 int ret = 0;
6395
6396 switch (param->flags & IW_AUTH_INDEX) {
6397 case IW_AUTH_WPA_VERSION:
6398 case IW_AUTH_CIPHER_PAIRWISE:
6399 case IW_AUTH_CIPHER_GROUP:
6400 case IW_AUTH_KEY_MGMT:
6401 /*
6402 * wpa_supplicant will control these internally
6403 */
6404 ret = -EOPNOTSUPP;
6405 break;
6406
6407 case IW_AUTH_TKIP_COUNTERMEASURES:
6408 crypt = priv->ieee->crypt[priv->ieee->tx_keyidx];
6409 if (!crypt || !crypt->ops->get_flags)
6410 break;
6411
6412 param->value = (crypt->ops->get_flags(crypt->priv) &
6413 IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) ? 1 : 0;
6414
6415 break;
6416
6417 case IW_AUTH_DROP_UNENCRYPTED:
6418 param->value = ieee->drop_unencrypted;
6419 break;
6420
6421 case IW_AUTH_80211_AUTH_ALG:
6422 param->value = ieee->sec.auth_mode;
6423 break;
6424
6425 case IW_AUTH_WPA_ENABLED:
6426 param->value = ieee->wpa_enabled;
6427 break;
6428
6429 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6430 param->value = ieee->ieee802_1x;
6431 break;
6432
6433 case IW_AUTH_ROAMING_CONTROL:
6434 case IW_AUTH_PRIVACY_INVOKED:
6435 param->value = ieee->privacy_invoked;
6436 break;
6437
6438 default:
6439 return -EOPNOTSUPP;
6440 }
6441 return 0;
6442 }
6443
6444 /* SIOCSIWENCODEEXT */
6445 static int ipw_wx_set_encodeext(struct net_device *dev,
6446 struct iw_request_info *info,
6447 union iwreq_data *wrqu, char *extra)
6448 {
6449 struct ipw_priv *priv = ieee80211_priv(dev);
6450 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
6451
6452 if (hwcrypto) {
6453 if (ext->alg == IW_ENCODE_ALG_TKIP) {
6454 /* IPW HW can't build TKIP MIC,
6455 host decryption still needed */
6456 if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY)
6457 priv->ieee->host_mc_decrypt = 1;
6458 else {
6459 priv->ieee->host_encrypt = 0;
6460 priv->ieee->host_encrypt_msdu = 1;
6461 priv->ieee->host_decrypt = 1;
6462 }
6463 } else {
6464 priv->ieee->host_encrypt = 0;
6465 priv->ieee->host_encrypt_msdu = 0;
6466 priv->ieee->host_decrypt = 0;
6467 priv->ieee->host_mc_decrypt = 0;
6468 }
6469 }
6470
6471 return ieee80211_wx_set_encodeext(priv->ieee, info, wrqu, extra);
6472 }
6473
6474 /* SIOCGIWENCODEEXT */
6475 static int ipw_wx_get_encodeext(struct net_device *dev,
6476 struct iw_request_info *info,
6477 union iwreq_data *wrqu, char *extra)
6478 {
6479 struct ipw_priv *priv = ieee80211_priv(dev);
6480 return ieee80211_wx_get_encodeext(priv->ieee, info, wrqu, extra);
6481 }
6482
6483 /* SIOCSIWMLME */
6484 static int ipw_wx_set_mlme(struct net_device *dev,
6485 struct iw_request_info *info,
6486 union iwreq_data *wrqu, char *extra)
6487 {
6488 struct ipw_priv *priv = ieee80211_priv(dev);
6489 struct iw_mlme *mlme = (struct iw_mlme *)extra;
6490 u16 reason;
6491
6492 reason = cpu_to_le16(mlme->reason_code);
6493
6494 switch (mlme->cmd) {
6495 case IW_MLME_DEAUTH:
6496 // silently ignore
6497 break;
6498
6499 case IW_MLME_DISASSOC:
6500 ipw_disassociate(priv);
6501 break;
6502
6503 default:
6504 return -EOPNOTSUPP;
6505 }
6506 return 0;
6507 }
6508
6509 #ifdef CONFIG_IPW_QOS
6510
6511 /* QoS */
6512 /*
6513 * get the modulation type of the current network or
6514 * the card current mode
6515 */
6516 static u8 ipw_qos_current_mode(struct ipw_priv * priv)
6517 {
6518 u8 mode = 0;
6519
6520 if (priv->status & STATUS_ASSOCIATED) {
6521 unsigned long flags;
6522
6523 spin_lock_irqsave(&priv->ieee->lock, flags);
6524 mode = priv->assoc_network->mode;
6525 spin_unlock_irqrestore(&priv->ieee->lock, flags);
6526 } else {
6527 mode = priv->ieee->mode;
6528 }
6529 IPW_DEBUG_QOS("QoS network/card mode %d \n", mode);
6530 return mode;
6531 }
6532
/*
 * Handle management frame beacon and probe response
 *
 * Refreshes the cached QoS state for @network from a received
 * beacon/probe response and, when the QoS parameter set changed (or QoS
 * disappeared) on the network we are using, schedules qos_activate so
 * the firmware gets reprogrammed.
 *
 * @active_network: non-zero when @network is the currently associated one.
 *
 * In Ad-Hoc mode this also queues a network merge when another IBSS
 * advertising our SSID (different BSSID) is seen.  Always returns 0.
 */
static int ipw_qos_handle_probe_response(struct ipw_priv *priv,
					 int active_network,
					 struct ieee80211_network *network)
{
	u32 size = sizeof(struct ieee80211_qos_parameters);

	/* IBSS peers advertise support; mirror it into the active state. */
	if (network->capability & WLAN_CAPABILITY_IBSS)
		network->qos_data.active = network->qos_data.supported;

	if (network->flags & NETWORK_HAS_QOS_MASK) {
		if (active_network &&
		    (network->flags & NETWORK_HAS_QOS_PARAMETERS))
			network->qos_data.active = network->qos_data.supported;

		/* Reprogram the firmware only when the QoS parameter-set
		 * count advanced since the last push. */
		if ((network->qos_data.active == 1) && (active_network == 1) &&
		    (network->flags & NETWORK_HAS_QOS_PARAMETERS) &&
		    (network->qos_data.old_param_count !=
		     network->qos_data.param_count)) {
			network->qos_data.old_param_count =
			    network->qos_data.param_count;
			schedule_work(&priv->qos_activate);
			IPW_DEBUG_QOS("QoS parameters change call "
				      "qos_activate\n");
		}
	} else {
		/* No QoS IEs: fall back to per-band default parameters. */
		if ((priv->ieee->mode == IEEE_B) || (network->mode == IEEE_B))
			memcpy(&network->qos_data.parameters,
			       &def_parameters_CCK, size);
		else
			memcpy(&network->qos_data.parameters,
			       &def_parameters_OFDM, size);

		/* QoS was on for our network but is gone now: reprogram. */
		if ((network->qos_data.active == 1) && (active_network == 1)) {
			IPW_DEBUG_QOS("QoS was disabled call qos_activate \n");
			schedule_work(&priv->qos_activate);
		}

		network->qos_data.active = 0;
		network->qos_data.supported = 0;
	}
	/* Ad-Hoc merge: another IBSS with our SSID but a different BSSID. */
	if ((priv->status & STATUS_ASSOCIATED) &&
	    (priv->ieee->iw_mode == IW_MODE_ADHOC) && (active_network == 0)) {
		if (memcmp(network->bssid, priv->bssid, ETH_ALEN))
			if ((network->capability & WLAN_CAPABILITY_IBSS) &&
			    !(network->flags & NETWORK_EMPTY_ESSID))
				if ((network->ssid_len ==
				     priv->assoc_network->ssid_len) &&
				    !memcmp(network->ssid,
					    priv->assoc_network->ssid,
					    network->ssid_len)) {
					queue_work(priv->workqueue,
						   &priv->merge_networks);
				}
	}

	return 0;
}
6593
/*
 * This function set up the firmware to support QoS. It sends
 * IPW_CMD_QOS_PARAMETERS (IPW_CMD_WME_INFO is sent separately by
 * ipw_qos_set_info_element).
 *
 * Builds all three QoS parameter sets (default CCK, default OFDM, and
 * the currently active set) and pushes them in one host command.  The
 * active set comes from @qos_network_data when available, otherwise
 * from the defaults matching the current mode.
 */
static int ipw_qos_activate(struct ipw_priv *priv,
			    struct ieee80211_qos_data *qos_network_data)
{
	int err;
	struct ieee80211_qos_parameters qos_parameters[QOS_QOS_SETS];
	struct ieee80211_qos_parameters *active_one = NULL;
	u32 size = sizeof(struct ieee80211_qos_parameters);
	u32 burst_duration;
	int i;
	u8 type;

	type = ipw_qos_current_mode(priv);

	/* Start from the configured per-modulation default sets. */
	active_one = &(qos_parameters[QOS_PARAM_SET_DEF_CCK]);
	memcpy(active_one, priv->qos_data.def_qos_parm_CCK, size);
	active_one = &(qos_parameters[QOS_PARAM_SET_DEF_OFDM]);
	memcpy(active_one, priv->qos_data.def_qos_parm_OFDM, size);

	if (qos_network_data == NULL) {
		/* No network QoS data: defaults for the current mode, with
		 * the configured burst duration as the TXOP limit. */
		if (type == IEEE_B) {
			IPW_DEBUG_QOS("QoS activate network mode %d\n", type);
			active_one = &def_parameters_CCK;
		} else
			active_one = &def_parameters_OFDM;

		memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
		burst_duration = ipw_qos_get_burst_duration(priv);
		for (i = 0; i < QOS_QUEUE_NUM; i++)
			qos_parameters[QOS_PARAM_SET_ACTIVE].tx_op_limit[i] =
			    (u16) burst_duration;
	} else if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
		/* IBSS: fixed defaults when QoS is disabled, otherwise the
		 * configured parameter sets. */
		if (type == IEEE_B) {
			IPW_DEBUG_QOS("QoS activate IBSS nework mode %d\n",
				      type);
			if (priv->qos_data.qos_enable == 0)
				active_one = &def_parameters_CCK;
			else
				active_one = priv->qos_data.def_qos_parm_CCK;
		} else {
			if (priv->qos_data.qos_enable == 0)
				active_one = &def_parameters_OFDM;
			else
				active_one = priv->qos_data.def_qos_parm_OFDM;
		}
		memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
	} else {
		/* Infrastructure: copy the network-provided parameters under
		 * the ieee lock, noting whether the network supports QoS. */
		unsigned long flags;
		int active;

		spin_lock_irqsave(&priv->ieee->lock, flags);
		active_one = &(qos_network_data->parameters);
		qos_network_data->old_param_count =
		    qos_network_data->param_count;
		memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
		active = qos_network_data->supported;
		spin_unlock_irqrestore(&priv->ieee->lock, flags);

		/* Network without QoS support: apply our burst duration. */
		if (active == 0) {
			burst_duration = ipw_qos_get_burst_duration(priv);
			for (i = 0; i < QOS_QUEUE_NUM; i++)
				qos_parameters[QOS_PARAM_SET_ACTIVE].
				    tx_op_limit[i] = (u16) burst_duration;
		}
	}

	IPW_DEBUG_QOS("QoS sending IPW_CMD_QOS_PARAMETERS\n");
	err = ipw_send_qos_params_command(priv,
					  (struct ieee80211_qos_parameters *)
					  &(qos_parameters[0]));
	if (err)
		IPW_DEBUG_QOS("QoS IPW_CMD_QOS_PARAMETERS failed\n");

	return err;
}
6672
6673 /*
6674 * send IPW_CMD_WME_INFO to the firmware
6675 */
6676 static int ipw_qos_set_info_element(struct ipw_priv *priv)
6677 {
6678 int ret = 0;
6679 struct ieee80211_qos_information_element qos_info;
6680
6681 if (priv == NULL)
6682 return -1;
6683
6684 qos_info.elementID = QOS_ELEMENT_ID;
6685 qos_info.length = sizeof(struct ieee80211_qos_information_element) - 2;
6686
6687 qos_info.version = QOS_VERSION_1;
6688 qos_info.ac_info = 0;
6689
6690 memcpy(qos_info.qui, qos_oui, QOS_OUI_LEN);
6691 qos_info.qui_type = QOS_OUI_TYPE;
6692 qos_info.qui_subtype = QOS_OUI_INFO_SUB_TYPE;
6693
6694 ret = ipw_send_qos_info_command(priv, &qos_info);
6695 if (ret != 0) {
6696 IPW_DEBUG_QOS("QoS error calling ipw_send_qos_info_command\n");
6697 }
6698 return ret;
6699 }
6700
6701 /*
6702 * Set the QoS parameter with the association request structure
6703 */
6704 static int ipw_qos_association(struct ipw_priv *priv,
6705 struct ieee80211_network *network)
6706 {
6707 int err = 0;
6708 struct ieee80211_qos_data *qos_data = NULL;
6709 struct ieee80211_qos_data ibss_data = {
6710 .supported = 1,
6711 .active = 1,
6712 };
6713
6714 switch (priv->ieee->iw_mode) {
6715 case IW_MODE_ADHOC:
6716 BUG_ON(!(network->capability & WLAN_CAPABILITY_IBSS));
6717
6718 qos_data = &ibss_data;
6719 break;
6720
6721 case IW_MODE_INFRA:
6722 qos_data = &network->qos_data;
6723 break;
6724
6725 default:
6726 BUG();
6727 break;
6728 }
6729
6730 err = ipw_qos_activate(priv, qos_data);
6731 if (err) {
6732 priv->assoc_request.policy_support &= ~HC_QOS_SUPPORT_ASSOC;
6733 return err;
6734 }
6735
6736 if (priv->qos_data.qos_enable && qos_data->supported) {
6737 IPW_DEBUG_QOS("QoS will be enabled for this association\n");
6738 priv->assoc_request.policy_support |= HC_QOS_SUPPORT_ASSOC;
6739 return ipw_qos_set_info_element(priv);
6740 }
6741
6742 return 0;
6743 }
6744
/*
 * handling the beaconing responses: if we get a QoS setting for the
 * network that differs from the associated setting, adjust the QoS
 * setting
 *
 * Copies updated QoS data from @network into the cached assoc_network
 * state under the ieee lock, and schedules qos_activate when the
 * parameter set changed or QoS was switched off.  Only acts in
 * infrastructure mode while associated.  Always returns 0.
 */
static int ipw_qos_association_resp(struct ipw_priv *priv,
				    struct ieee80211_network *network)
{
	int ret = 0;
	unsigned long flags;
	u32 size = sizeof(struct ieee80211_qos_parameters);
	int set_qos_param = 0;

	if ((priv == NULL) || (network == NULL) ||
	    (priv->assoc_network == NULL))
		return ret;

	if (!(priv->status & STATUS_ASSOCIATED))
		return ret;

	if ((priv->ieee->iw_mode != IW_MODE_INFRA))
		return ret;

	spin_lock_irqsave(&priv->ieee->lock, flags);
	if (network->flags & NETWORK_HAS_QOS_PARAMETERS) {
		memcpy(&priv->assoc_network->qos_data, &network->qos_data,
		       sizeof(struct ieee80211_qos_data));
		priv->assoc_network->qos_data.active = 1;
		/* Reprogram only when the parameter-set count advanced. */
		if ((network->qos_data.old_param_count !=
		     network->qos_data.param_count)) {
			set_qos_param = 1;
			network->qos_data.old_param_count =
			    network->qos_data.param_count;
		}

	} else {
		/* QoS IEs gone: revert to per-band defaults and disable. */
		if ((network->mode == IEEE_B) || (priv->ieee->mode == IEEE_B))
			memcpy(&priv->assoc_network->qos_data.parameters,
			       &def_parameters_CCK, size);
		else
			memcpy(&priv->assoc_network->qos_data.parameters,
			       &def_parameters_OFDM, size);
		priv->assoc_network->qos_data.active = 0;
		priv->assoc_network->qos_data.supported = 0;
		set_qos_param = 1;
	}

	spin_unlock_irqrestore(&priv->ieee->lock, flags);

	if (set_qos_param == 1)
		schedule_work(&priv->qos_activate);

	return ret;
}
6799
6800 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv)
6801 {
6802 u32 ret = 0;
6803
6804 if ((priv == NULL))
6805 return 0;
6806
6807 if (!(priv->ieee->modulation & IEEE80211_OFDM_MODULATION))
6808 ret = priv->qos_data.burst_duration_CCK;
6809 else
6810 ret = priv->qos_data.burst_duration_OFDM;
6811
6812 return ret;
6813 }
6814
6815 /*
6816 * Initialize the setting of QoS global
6817 */
6818 static void ipw_qos_init(struct ipw_priv *priv, int enable,
6819 int burst_enable, u32 burst_duration_CCK,
6820 u32 burst_duration_OFDM)
6821 {
6822 priv->qos_data.qos_enable = enable;
6823
6824 if (priv->qos_data.qos_enable) {
6825 priv->qos_data.def_qos_parm_CCK = &def_qos_parameters_CCK;
6826 priv->qos_data.def_qos_parm_OFDM = &def_qos_parameters_OFDM;
6827 IPW_DEBUG_QOS("QoS is enabled\n");
6828 } else {
6829 priv->qos_data.def_qos_parm_CCK = &def_parameters_CCK;
6830 priv->qos_data.def_qos_parm_OFDM = &def_parameters_OFDM;
6831 IPW_DEBUG_QOS("QoS is not enabled\n");
6832 }
6833
6834 priv->qos_data.burst_enable = burst_enable;
6835
6836 if (burst_enable) {
6837 priv->qos_data.burst_duration_CCK = burst_duration_CCK;
6838 priv->qos_data.burst_duration_OFDM = burst_duration_OFDM;
6839 } else {
6840 priv->qos_data.burst_duration_CCK = 0;
6841 priv->qos_data.burst_duration_OFDM = 0;
6842 }
6843 }
6844
6845 /*
6846 * map the packet priority to the right TX Queue
6847 */
6848 static int ipw_get_tx_queue_number(struct ipw_priv *priv, u16 priority)
6849 {
6850 if (priority > 7 || !priv->qos_data.qos_enable)
6851 priority = 0;
6852
6853 return from_priority_to_tx_queue[priority] - 1;
6854 }
6855
6856 /*
6857 * add QoS parameter to the TX command
6858 */
6859 static int ipw_qos_set_tx_queue_command(struct ipw_priv *priv,
6860 u16 priority,
6861 struct tfd_data *tfd, u8 unicast)
6862 {
6863 int ret = 0;
6864 int tx_queue_id = 0;
6865 struct ieee80211_qos_data *qos_data = NULL;
6866 int active, supported;
6867 unsigned long flags;
6868
6869 if (!(priv->status & STATUS_ASSOCIATED))
6870 return 0;
6871
6872 qos_data = &priv->assoc_network->qos_data;
6873
6874 spin_lock_irqsave(&priv->ieee->lock, flags);
6875
6876 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
6877 if (unicast == 0)
6878 qos_data->active = 0;
6879 else
6880 qos_data->active = qos_data->supported;
6881 }
6882
6883 active = qos_data->active;
6884 supported = qos_data->supported;
6885
6886 spin_unlock_irqrestore(&priv->ieee->lock, flags);
6887
6888 IPW_DEBUG_QOS("QoS %d network is QoS active %d supported %d "
6889 "unicast %d\n",
6890 priv->qos_data.qos_enable, active, supported, unicast);
6891 if (active && priv->qos_data.qos_enable) {
6892 ret = from_priority_to_tx_queue[priority];
6893 tx_queue_id = ret - 1;
6894 IPW_DEBUG_QOS("QoS packet priority is %d \n", priority);
6895 if (priority <= 7) {
6896 tfd->tx_flags_ext |= DCT_FLAG_EXT_QOS_ENABLED;
6897 tfd->tfd.tfd_26.mchdr.qos_ctrl = priority;
6898 tfd->tfd.tfd_26.mchdr.frame_ctl |=
6899 IEEE80211_STYPE_QOS_DATA;
6900
6901 if (priv->qos_data.qos_no_ack_mask &
6902 (1UL << tx_queue_id)) {
6903 tfd->tx_flags &= ~DCT_FLAG_ACK_REQD;
6904 tfd->tfd.tfd_26.mchdr.qos_ctrl |=
6905 CTRL_QOS_NO_ACK;
6906 }
6907 }
6908 }
6909
6910 return ret;
6911 }
6912
6913 /*
6914 * background support to run QoS activate functionality
6915 */
6916 static void ipw_bg_qos_activate(void *data)
6917 {
6918 struct ipw_priv *priv = data;
6919
6920 if (priv == NULL)
6921 return;
6922
6923 mutex_lock(&priv->mutex);
6924
6925 if (priv->status & STATUS_ASSOCIATED)
6926 ipw_qos_activate(priv, &(priv->assoc_network->qos_data));
6927
6928 mutex_unlock(&priv->mutex);
6929 }
6930
6931 static int ipw_handle_probe_response(struct net_device *dev,
6932 struct ieee80211_probe_response *resp,
6933 struct ieee80211_network *network)
6934 {
6935 struct ipw_priv *priv = ieee80211_priv(dev);
6936 int active_network = ((priv->status & STATUS_ASSOCIATED) &&
6937 (network == priv->assoc_network));
6938
6939 ipw_qos_handle_probe_response(priv, active_network, network);
6940
6941 return 0;
6942 }
6943
6944 static int ipw_handle_beacon(struct net_device *dev,
6945 struct ieee80211_beacon *resp,
6946 struct ieee80211_network *network)
6947 {
6948 struct ipw_priv *priv = ieee80211_priv(dev);
6949 int active_network = ((priv->status & STATUS_ASSOCIATED) &&
6950 (network == priv->assoc_network));
6951
6952 ipw_qos_handle_probe_response(priv, active_network, network);
6953
6954 return 0;
6955 }
6956
/*
 * ieee80211 callback for association responses: refresh cached QoS
 * state from the response.
 */
static int ipw_handle_assoc_response(struct net_device *dev,
				     struct ieee80211_assoc_response *resp,
				     struct ieee80211_network *network)
{
	struct ipw_priv *priv = ieee80211_priv(dev);

	ipw_qos_association_resp(priv, network);
	return 0;
}
6965
/* Push the QoS parameter sets to the firmware via the
 * IPW_CMD_QOS_PARAMETERS host command.  @qos_param must point to an
 * array of three parameter sets (default CCK, default OFDM, active --
 * see ipw_qos_activate), hence the sizeof * 3. */
static int ipw_send_qos_params_command(struct ipw_priv *priv, struct ieee80211_qos_parameters
				       *qos_param)
{
	return ipw_send_cmd_pdu(priv, IPW_CMD_QOS_PARAMETERS,
				sizeof(*qos_param) * 3, qos_param);
}
6972
/* Send the QoS/WME information element to the firmware via the
 * IPW_CMD_WME_INFO host command. */
static int ipw_send_qos_info_command(struct ipw_priv *priv, struct ieee80211_qos_information_element
				     *qos_param)
{
	return ipw_send_cmd_pdu(priv, IPW_CMD_WME_INFO, sizeof(*qos_param),
				qos_param);
}
6979
6980 #endif /* CONFIG_IPW_QOS */
6981
/*
 * Build the association request for @network/@rates and drive the
 * firmware through the association sequence: SSID, supported rates,
 * system config, sensitivity, and finally the associate (or
 * reassociate / IBSS-start) host command.
 *
 * @roaming: non-zero when this is a reassociation during roaming.
 *
 * Returns 0 on success or the error of the first failing host command.
 * On success, priv is left in STATUS_ASSOCIATING with assoc_network,
 * channel and bssid updated (before sending the associate command, in
 * case the association completes before ipw_send_associate returns).
 */
static int ipw_associate_network(struct ipw_priv *priv,
				 struct ieee80211_network *network,
				 struct ipw_supported_rates *rates, int roaming)
{
	int err;

	if (priv->config & CFG_FIXED_RATE)
		ipw_set_fixed_rate(priv, network->mode);

	/* Adopt the target network's ESSID unless one was forced. */
	if (!(priv->config & CFG_STATIC_ESSID)) {
		priv->essid_len = min(network->ssid_len,
				      (u8) IW_ESSID_MAX_SIZE);
		memcpy(priv->essid, network->ssid, priv->essid_len);
	}

	network->last_associate = jiffies;

	memset(&priv->assoc_request, 0, sizeof(priv->assoc_request));
	priv->assoc_request.channel = network->channel;
	priv->assoc_request.auth_key = 0;

	/* Select the 802.11 auth algorithm: shared key, LEAP or open. */
	if ((priv->capability & CAP_PRIVACY_ON) &&
	    (priv->ieee->sec.auth_mode == WLAN_AUTH_SHARED_KEY)) {
		priv->assoc_request.auth_type = AUTH_SHARED_KEY;
		priv->assoc_request.auth_key = priv->ieee->sec.active_key;

		if (priv->ieee->sec.level == SEC_LEVEL_1)
			ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);

	} else if ((priv->capability & CAP_PRIVACY_ON) &&
		   (priv->ieee->sec.auth_mode == WLAN_AUTH_LEAP))
		priv->assoc_request.auth_type = AUTH_LEAP;
	else
		priv->assoc_request.auth_type = AUTH_OPEN;

	if (priv->ieee->wpa_ie_len) {
		priv->assoc_request.policy_support = 0x02;	/* RSN active */
		ipw_set_rsn_capa(priv, priv->ieee->wpa_ie,
				 priv->ieee->wpa_ie_len);
	}

	/*
	 * It is valid for our ieee device to support multiple modes, but
	 * when it comes to associating to a given network we have to choose
	 * just one mode.
	 */
	if (network->mode & priv->ieee->mode & IEEE_A)
		priv->assoc_request.ieee_mode = IPW_A_MODE;
	else if (network->mode & priv->ieee->mode & IEEE_G)
		priv->assoc_request.ieee_mode = IPW_G_MODE;
	else if (network->mode & priv->ieee->mode & IEEE_B)
		priv->assoc_request.ieee_mode = IPW_B_MODE;

	priv->assoc_request.capability = network->capability;
	if ((network->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
	    && !(priv->config & CFG_PREAMBLE_LONG)) {
		priv->assoc_request.preamble_length = DCT_FLAG_SHORT_PREAMBLE;
	} else {
		priv->assoc_request.preamble_length = DCT_FLAG_LONG_PREAMBLE;

		/* Clear the short preamble if we won't be supporting it */
		priv->assoc_request.capability &=
		    ~WLAN_CAPABILITY_SHORT_PREAMBLE;
	}

	/* Clear capability bits that aren't used in Ad Hoc */
	if (priv->ieee->iw_mode == IW_MODE_ADHOC)
		priv->assoc_request.capability &=
		    ~WLAN_CAPABILITY_SHORT_SLOT_TIME;

	IPW_DEBUG_ASSOC("%sssocation attempt: '%s', channel %d, "
			"802.11%c [%d], %s[:%s], enc=%s%s%s%c%c\n",
			roaming ? "Rea" : "A",
			escape_essid(priv->essid, priv->essid_len),
			network->channel,
			ipw_modes[priv->assoc_request.ieee_mode],
			rates->num_rates,
			(priv->assoc_request.preamble_length ==
			 DCT_FLAG_LONG_PREAMBLE) ? "long" : "short",
			network->capability &
			WLAN_CAPABILITY_SHORT_PREAMBLE ? "short" : "long",
			priv->capability & CAP_PRIVACY_ON ? "on " : "off",
			priv->capability & CAP_PRIVACY_ON ?
			(priv->capability & CAP_SHARED_KEY ? "(shared)" :
			 "(open)") : "",
			priv->capability & CAP_PRIVACY_ON ? " key=" : "",
			priv->capability & CAP_PRIVACY_ON ?
			'1' + priv->ieee->sec.active_key : '.',
			priv->capability & CAP_PRIVACY_ON ? '.' : ' ');

	priv->assoc_request.beacon_interval = network->beacon_interval;
	/* An all-zero timestamp in Ad-Hoc selects HC_IBSS_START (create
	 * the IBSS ourselves rather than join an existing one). */
	if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
	    (network->time_stamp[0] == 0) && (network->time_stamp[1] == 0)) {
		priv->assoc_request.assoc_type = HC_IBSS_START;
		priv->assoc_request.assoc_tsf_msw = 0;
		priv->assoc_request.assoc_tsf_lsw = 0;
	} else {
		if (unlikely(roaming))
			priv->assoc_request.assoc_type = HC_REASSOCIATE;
		else
			priv->assoc_request.assoc_type = HC_ASSOCIATE;
		priv->assoc_request.assoc_tsf_msw = network->time_stamp[1];
		priv->assoc_request.assoc_tsf_lsw = network->time_stamp[0];
	}

	memcpy(priv->assoc_request.bssid, network->bssid, ETH_ALEN);

	/* Ad-Hoc uses a broadcast destination and the network's ATIM
	 * window; infrastructure targets the BSSID directly. */
	if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
		memset(&priv->assoc_request.dest, 0xFF, ETH_ALEN);
		priv->assoc_request.atim_window = network->atim_window;
	} else {
		memcpy(priv->assoc_request.dest, network->bssid, ETH_ALEN);
		priv->assoc_request.atim_window = 0;
	}

	priv->assoc_request.listen_interval = network->listen_interval;

	err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
	if (err) {
		IPW_DEBUG_HC("Attempt to send SSID command failed.\n");
		return err;
	}

	rates->ieee_mode = priv->assoc_request.ieee_mode;
	rates->purpose = IPW_RATE_CONNECT;
	ipw_send_supported_rates(priv, rates);

	if (priv->assoc_request.ieee_mode == IPW_G_MODE)
		priv->sys_config.dot11g_auto_detection = 1;
	else
		priv->sys_config.dot11g_auto_detection = 0;

	if (priv->ieee->iw_mode == IW_MODE_ADHOC)
		priv->sys_config.answer_broadcast_ssid_probe = 1;
	else
		priv->sys_config.answer_broadcast_ssid_probe = 0;

	err = ipw_send_system_config(priv, &priv->sys_config);
	if (err) {
		IPW_DEBUG_HC("Attempt to send sys config command failed.\n");
		return err;
	}

	IPW_DEBUG_ASSOC("Association sensitivity: %d\n", network->stats.rssi);
	err = ipw_set_sensitivity(priv, network->stats.rssi + IPW_RSSI_TO_DBM);
	if (err) {
		IPW_DEBUG_HC("Attempt to send associate command failed.\n");
		return err;
	}

	/*
	 * If preemption is enabled, it is possible for the association
	 * to complete before we return from ipw_send_associate.  Therefore
	 * we have to be sure and update our private data first.
	 */
	priv->channel = network->channel;
	memcpy(priv->bssid, network->bssid, ETH_ALEN);
	priv->status |= STATUS_ASSOCIATING;
	priv->status &= ~STATUS_SECURITY_UPDATED;

	priv->assoc_network = network;

#ifdef CONFIG_IPW_QOS
	ipw_qos_association(priv, network);
#endif

	err = ipw_send_associate(priv, &priv->assoc_request);
	if (err) {
		IPW_DEBUG_HC("Attempt to send associate command failed.\n");
		return err;
	}

	IPW_DEBUG(IPW_DL_STATE, "associating: '%s' " MAC_FMT " \n",
		  escape_essid(priv->essid, priv->essid_len),
		  MAC_ARG(priv->bssid));

	return 0;
}
7160
/*
 * Two-pass roaming work handler; see the numbered protocol comment in
 * the body.  Pass one (still associated) looks for a better AP and
 * disassociates; pass two (after disassociation) associates to the
 * network selected in pass one.
 */
static void ipw_roam(void *data)
{
	struct ipw_priv *priv = data;
	struct ieee80211_network *network = NULL;
	struct ipw_network_match match = {
		.network = priv->assoc_network
	};

	/* The roaming process is as follows:
	 *
	 * 1.  Missed beacon threshold triggers the roaming process by
	 *     setting the status ROAM bit and requesting a scan.
	 * 2.  When the scan completes, it schedules the ROAM work
	 * 3.  The ROAM work looks at all of the known networks for one that
	 *     is a better network than the currently associated.  If none
	 *     found, the ROAM process is over (ROAM bit cleared)
	 * 4.  If a better network is found, a disassociation request is
	 *     sent.
	 * 5.  When the disassociation completes, the roam work is again
	 *     scheduled.  The second time through, the driver is no longer
	 *     associated, and the newly selected network is sent an
	 *     association request.
	 * 6.  At this point, the roaming process is complete and the ROAM
	 *     status bit is cleared.
	 */

	/* If we are no longer associated, and the roaming bit is no longer
	 * set, then we are not actively roaming, so just return */
	if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ROAMING)))
		return;

	if (priv->status & STATUS_ASSOCIATED) {
		/* First pass through ROAM process -- look for a better
		 * network */
		unsigned long flags;
		u8 rssi = priv->assoc_network->stats.rssi;
		/* Temporarily floor our own network's RSSI so candidates
		 * are compared against the worst possible signal. */
		priv->assoc_network->stats.rssi = -128;
		spin_lock_irqsave(&priv->ieee->lock, flags);
		list_for_each_entry(network, &priv->ieee->network_list, list) {
			if (network != priv->assoc_network)
				ipw_best_network(priv, &match, network, 1);
		}
		spin_unlock_irqrestore(&priv->ieee->lock, flags);
		priv->assoc_network->stats.rssi = rssi;

		if (match.network == priv->assoc_network) {
			IPW_DEBUG_ASSOC("No better APs in this network to "
					"roam to.\n");
			priv->status &= ~STATUS_ROAMING;
			ipw_debug_config(priv);
			return;
		}

		ipw_send_disassociate(priv, 1);
		priv->assoc_network = match.network;

		return;
	}

	/* Second pass through ROAM process -- request association */
	ipw_compatible_rates(priv, priv->assoc_network, &match.rates);
	ipw_associate_network(priv, priv->assoc_network, &match.rates, 1);
	priv->status &= ~STATUS_ROAMING;
}
7225
7226 static void ipw_bg_roam(void *data)
7227 {
7228 struct ipw_priv *priv = data;
7229 mutex_lock(&priv->mutex);
7230 ipw_roam(data);
7231 mutex_unlock(&priv->mutex);
7232 }
7233
/*
 * Pick the best matching known network (or create an Ad-Hoc one when so
 * configured) and start associating to it.
 *
 * Returns 1 when an association attempt was started, 0 when association
 * is not attempted (monitor mode, already associating/associated, still
 * disassociating, scanning, not initialized, association disabled, or
 * no suitable network found -- in which case another scan is queued).
 */
static int ipw_associate(void *data)
{
	struct ipw_priv *priv = data;

	struct ieee80211_network *network = NULL;
	struct ipw_network_match match = {
		.network = NULL
	};
	struct ipw_supported_rates *rates;
	struct list_head *element;
	unsigned long flags;

	if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
		IPW_DEBUG_ASSOC("Not attempting association (monitor mode)\n");
		return 0;
	}

	if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
		IPW_DEBUG_ASSOC("Not attempting association (already in "
				"progress)\n");
		return 0;
	}

	if (priv->status & STATUS_DISASSOCIATING) {
		/* Disassociation still in flight: requeue ourselves. */
		IPW_DEBUG_ASSOC("Not attempting association (in "
				"disassociating)\n ");
		queue_work(priv->workqueue, &priv->associate);
		return 0;
	}

	if (!ipw_is_init(priv) || (priv->status & STATUS_SCANNING)) {
		IPW_DEBUG_ASSOC("Not attempting association (scanning or not "
				"initialized)\n");
		return 0;
	}

	if (!(priv->config & CFG_ASSOCIATE) &&
	    !(priv->config & (CFG_STATIC_ESSID |
			      CFG_STATIC_CHANNEL | CFG_STATIC_BSSID))) {
		IPW_DEBUG_ASSOC("Not attempting association (associate=0)\n");
		return 0;
	}

	/* Protect our use of the network_list */
	spin_lock_irqsave(&priv->ieee->lock, flags);
	list_for_each_entry(network, &priv->ieee->network_list, list)
	    ipw_best_network(priv, &match, network, 0);

	network = match.network;
	rates = &match.rates;

	/* No match: in Ad-Hoc mode with a static ESSID and channel we may
	 * create the network ourselves from the free list. */
	if (network == NULL &&
	    priv->ieee->iw_mode == IW_MODE_ADHOC &&
	    priv->config & CFG_ADHOC_CREATE &&
	    priv->config & CFG_STATIC_ESSID &&
	    priv->config & CFG_STATIC_CHANNEL &&
	    !list_empty(&priv->ieee->network_free_list)) {
		element = priv->ieee->network_free_list.next;
		network = list_entry(element, struct ieee80211_network, list);
		ipw_adhoc_create(priv, network);
		rates = &priv->rates;
		list_del(element);
		list_add_tail(&network->list, &priv->ieee->network_list);
	}
	spin_unlock_irqrestore(&priv->ieee->lock, flags);

	/* If we reached the end of the list, then we don't have any valid
	 * matching APs */
	if (!network) {
		ipw_debug_config(priv);

		/* Keep looking: schedule the next scan. */
		if (!(priv->status & STATUS_SCANNING)) {
			if (!(priv->config & CFG_SPEED_SCAN))
				queue_delayed_work(priv->workqueue,
						   &priv->request_scan,
						   SCAN_INTERVAL);
			else
				queue_work(priv->workqueue,
					   &priv->request_scan);
		}

		return 0;
	}

	ipw_associate_network(priv, network, rates, 0);

	return 1;
}
7322
7323 static void ipw_bg_associate(void *data)
7324 {
7325 struct ipw_priv *priv = data;
7326 mutex_lock(&priv->mutex);
7327 ipw_associate(data);
7328 mutex_unlock(&priv->mutex);
7329 }
7330
7331 static void ipw_rebuild_decrypted_skb(struct ipw_priv *priv,
7332 struct sk_buff *skb)
7333 {
7334 struct ieee80211_hdr *hdr;
7335 u16 fc;
7336
7337 hdr = (struct ieee80211_hdr *)skb->data;
7338 fc = le16_to_cpu(hdr->frame_ctl);
7339 if (!(fc & IEEE80211_FCTL_PROTECTED))
7340 return;
7341
7342 fc &= ~IEEE80211_FCTL_PROTECTED;
7343 hdr->frame_ctl = cpu_to_le16(fc);
7344 switch (priv->ieee->sec.level) {
7345 case SEC_LEVEL_3:
7346 /* Remove CCMP HDR */
7347 memmove(skb->data + IEEE80211_3ADDR_LEN,
7348 skb->data + IEEE80211_3ADDR_LEN + 8,
7349 skb->len - IEEE80211_3ADDR_LEN - 8);
7350 skb_trim(skb, skb->len - 16); /* CCMP_HDR_LEN + CCMP_MIC_LEN */
7351 break;
7352 case SEC_LEVEL_2:
7353 break;
7354 case SEC_LEVEL_1:
7355 /* Remove IV */
7356 memmove(skb->data + IEEE80211_3ADDR_LEN,
7357 skb->data + IEEE80211_3ADDR_LEN + 4,
7358 skb->len - IEEE80211_3ADDR_LEN - 4);
7359 skb_trim(skb, skb->len - 8); /* IV + ICV */
7360 break;
7361 case SEC_LEVEL_0:
7362 break;
7363 default:
7364 printk(KERN_ERR "Unknow security level %d\n",
7365 priv->ieee->sec.level);
7366 break;
7367 }
7368 }
7369
/*
 * Deliver a received data frame (an ipw_rx_packet inside rxb->skb) to
 * the ieee80211 layer: validate the frame length against the skb,
 * strip the ipw RX header, undo hardware-decryption leftovers, and hand
 * the skb to ieee80211_rx -- which takes ownership of it on success.
 */
static void ipw_handle_data_packet(struct ipw_priv *priv,
				   struct ipw_rx_mem_buffer *rxb,
				   struct ieee80211_rx_stats *stats)
{
	struct ieee80211_hdr_4addr *hdr;
	struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;

	/* We received data from the HW, so stop the watchdog */
	priv->net_dev->trans_start = jiffies;

	/* We only process data packets if the
	 * interface is open */
	if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
		     skb_tailroom(rxb->skb))) {
		/* Reported frame length cannot fit in the buffer. */
		priv->ieee->stats.rx_errors++;
		priv->wstats.discard.misc++;
		IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
		return;
	} else if (unlikely(!netif_running(priv->net_dev))) {
		priv->ieee->stats.rx_dropped++;
		priv->wstats.discard.misc++;
		IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
		return;
	}

	/* Advance skb->data to the start of the actual payload */
	skb_reserve(rxb->skb, offsetof(struct ipw_rx_packet, u.frame.data));

	/* Set the size of the skb to the size of the frame */
	skb_put(rxb->skb, le16_to_cpu(pkt->u.frame.length));

	IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);

	/* HW decrypt will not clear the WEP bit, MIC, PN, etc. */
	hdr = (struct ieee80211_hdr_4addr *)rxb->skb->data;
	if (priv->ieee->iw_mode != IW_MODE_MONITOR &&
	    (is_multicast_ether_addr(hdr->addr1) ?
	     !priv->ieee->host_mc_decrypt : !priv->ieee->host_decrypt))
		ipw_rebuild_decrypted_skb(priv, rxb->skb);

	if (!ieee80211_rx(priv->ieee, rxb->skb, stats))
		priv->ieee->stats.rx_errors++;
	else {			/* ieee80211_rx succeeded, so it now owns the SKB */
		rxb->skb = NULL;
		__ipw_led_activity_on(priv);
	}
}
7417
#ifdef CONFIG_IEEE80211_RADIOTAP
/*
 * Monitor-mode receive path: prepend a radiotap header (channel, rate,
 * signal, antenna, flags) to the raw 802.11 frame and hand the result
 * to ieee80211_rx().  On success the stack takes ownership of the skb
 * and rxb->skb is set to NULL so the caller will not free it.
 */
static void ipw_handle_data_packet_monitor(struct ipw_priv *priv,
					   struct ipw_rx_mem_buffer *rxb,
					   struct ieee80211_rx_stats *stats)
{
	struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
	struct ipw_rx_frame *frame = &pkt->u.frame;

	/* initial pull of some data */
	u16 received_channel = frame->received_channel;
	u8 antennaAndPhy = frame->antennaAndPhy;
	s8 antsignal = frame->rssi_dbm - IPW_RSSI_TO_DBM;	/* call it signed anyhow */
	u16 pktrate = frame->rate;

	/* Magic struct that slots into the radiotap header -- no reason
	 * to build this manually element by element, we can write it much
	 * more efficiently than we can parse it. ORDER MATTERS HERE */
	struct ipw_rt_hdr {
		struct ieee80211_radiotap_header rt_hdr;
		u8 rt_flags;	/* radiotap packet flags */
		u8 rt_rate;	/* rate in 500kb/s */
		u16 rt_channel;	/* channel in mhz */
		u16 rt_chbitmask;	/* channel bitfield */
		s8 rt_dbmsignal;	/* signal in dbM, kluged to signed */
		u8 rt_antenna;	/* antenna number */
	} *ipw_rt;

	short len = le16_to_cpu(pkt->u.frame.length);

	/* We received data from the HW, so stop the watchdog */
	priv->net_dev->trans_start = jiffies;

	/* We only process data packets if the
	 * interface is open */
	if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
		     skb_tailroom(rxb->skb))) {
		priv->ieee->stats.rx_errors++;
		priv->wstats.discard.misc++;
		IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
		return;
	} else if (unlikely(!netif_running(priv->net_dev))) {
		priv->ieee->stats.rx_dropped++;
		priv->wstats.discard.misc++;
		IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
		return;
	}

	/* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
	 * that now */
	if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
		/* FIXME: Should alloc bigger skb instead */
		priv->ieee->stats.rx_dropped++;
		priv->wstats.discard.misc++;
		IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
		return;
	}

	/* copy the frame itself, leaving room for our radiotap header
	 * where the firmware's RX header used to be */
	memmove(rxb->skb->data + sizeof(struct ipw_rt_hdr),
		rxb->skb->data + IPW_RX_FRAME_SIZE, len);

	ipw_rt = (struct ipw_rt_hdr *)rxb->skb->data;

	ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
	ipw_rt->rt_hdr.it_pad = 0;	/* always good to zero */
	ipw_rt->rt_hdr.it_len = sizeof(struct ipw_rt_hdr);	/* total header+data */

	/* Big bitfield of all the fields we provide in radiotap */
	ipw_rt->rt_hdr.it_present =
	    ((1 << IEEE80211_RADIOTAP_FLAGS) |
	     (1 << IEEE80211_RADIOTAP_RATE) |
	     (1 << IEEE80211_RADIOTAP_CHANNEL) |
	     (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
	     (1 << IEEE80211_RADIOTAP_ANTENNA));

	/* Zero the flags, we'll add to them as we go */
	ipw_rt->rt_flags = 0;

	/* Convert signal to DBM */
	ipw_rt->rt_dbmsignal = antsignal;

	/* Convert the channel data and set the flags */
	ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(received_channel));
	if (received_channel > 14) {	/* 802.11a */
		ipw_rt->rt_chbitmask =
		    cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
	} else if (antennaAndPhy & 32) {	/* 802.11b */
		ipw_rt->rt_chbitmask =
		    cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
	} else {		/* 802.11g */
		/* BUGFIX: this branch previously stored the bitmask in host
		 * byte order, unlike the 11a/11b branches above */
		ipw_rt->rt_chbitmask =
		    cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ));
	}

	/* set the rate in multiples of 500k/s */
	switch (pktrate) {
	case IPW_TX_RATE_1MB:
		ipw_rt->rt_rate = 2;
		break;
	case IPW_TX_RATE_2MB:
		ipw_rt->rt_rate = 4;
		break;
	case IPW_TX_RATE_5MB:
		ipw_rt->rt_rate = 10;
		break;
	case IPW_TX_RATE_6MB:
		ipw_rt->rt_rate = 12;
		break;
	case IPW_TX_RATE_9MB:
		ipw_rt->rt_rate = 18;
		break;
	case IPW_TX_RATE_11MB:
		ipw_rt->rt_rate = 22;
		break;
	case IPW_TX_RATE_12MB:
		ipw_rt->rt_rate = 24;
		break;
	case IPW_TX_RATE_18MB:
		ipw_rt->rt_rate = 36;
		break;
	case IPW_TX_RATE_24MB:
		ipw_rt->rt_rate = 48;
		break;
	case IPW_TX_RATE_36MB:
		ipw_rt->rt_rate = 72;
		break;
	case IPW_TX_RATE_48MB:
		ipw_rt->rt_rate = 96;
		break;
	case IPW_TX_RATE_54MB:
		ipw_rt->rt_rate = 108;
		break;
	default:
		ipw_rt->rt_rate = 0;
		break;
	}

	/* antenna number */
	ipw_rt->rt_antenna = (antennaAndPhy & 3);	/* Is this right? */

	/* set the preamble flag if we have it */
	if ((antennaAndPhy & 64))
		ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;

	/* Set the size of the skb to the size of the frame */
	skb_put(rxb->skb, len + sizeof(struct ipw_rt_hdr));

	IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);

	if (!ieee80211_rx(priv->ieee, rxb->skb, stats))
		priv->ieee->stats.rx_errors++;
	else {			/* ieee80211_rx succeeded, so it now owns the SKB */
		rxb->skb = NULL;
		/* no LED during capture */
	}
}
#endif
7584
7585 static int is_network_packet(struct ipw_priv *priv,
7586 struct ieee80211_hdr_4addr *header)
7587 {
7588 /* Filter incoming packets to determine if they are targetted toward
7589 * this network, discarding packets coming from ourselves */
7590 switch (priv->ieee->iw_mode) {
7591 case IW_MODE_ADHOC: /* Header: Dest. | Source | BSSID */
7592 /* packets from our adapter are dropped (echo) */
7593 if (!memcmp(header->addr2, priv->net_dev->dev_addr, ETH_ALEN))
7594 return 0;
7595
7596 /* {broad,multi}cast packets to our BSSID go through */
7597 if (is_multicast_ether_addr(header->addr1))
7598 return !memcmp(header->addr3, priv->bssid, ETH_ALEN);
7599
7600 /* packets to our adapter go through */
7601 return !memcmp(header->addr1, priv->net_dev->dev_addr,
7602 ETH_ALEN);
7603
7604 case IW_MODE_INFRA: /* Header: Dest. | BSSID | Source */
7605 /* packets from our adapter are dropped (echo) */
7606 if (!memcmp(header->addr3, priv->net_dev->dev_addr, ETH_ALEN))
7607 return 0;
7608
7609 /* {broad,multi}cast packets to our BSS go through */
7610 if (is_multicast_ether_addr(header->addr1))
7611 return !memcmp(header->addr2, priv->bssid, ETH_ALEN);
7612
7613 /* packets to our adapter go through */
7614 return !memcmp(header->addr1, priv->net_dev->dev_addr,
7615 ETH_ALEN);
7616 }
7617
7618 return 1;
7619 }
7620
7621 #define IPW_PACKET_RETRY_TIME HZ
7622
/*
 * Detect retransmitted duplicates by tracking the last seen 802.11
 * sequence/fragment numbers: per-sender (hashed by the low MAC byte)
 * in ad-hoc mode, globally (priv->last_*) in infrastructure mode.
 *
 * Returns 1 if the frame is a duplicate (or out-of-order fragment)
 * and should be dropped, 0 otherwise.
 */
static int is_duplicate_packet(struct ipw_priv *priv,
			       struct ieee80211_hdr_4addr *header)
{
	u16 sc = le16_to_cpu(header->seq_ctl);
	u16 seq = WLAN_GET_SEQ_SEQ(sc);
	u16 frag = WLAN_GET_SEQ_FRAG(sc);
	u16 *last_seq, *last_frag;	/* point into the chosen tracking state */
	unsigned long *last_time;

	switch (priv->ieee->iw_mode) {
	case IW_MODE_ADHOC:
		{
			struct list_head *p;
			struct ipw_ibss_seq *entry = NULL;
			u8 *mac = header->addr2;
			/* hash on the least significant MAC byte */
			int index = mac[5] % IPW_IBSS_MAC_HASH_SIZE;

			__list_for_each(p, &priv->ibss_mac_hash[index]) {
				entry =
				    list_entry(p, struct ipw_ibss_seq, list);
				if (!memcmp(entry->mac, mac, ETH_ALEN))
					break;
			}
			if (p == &priv->ibss_mac_hash[index]) {
				/* first frame from this sender: record it and
				 * report "not a duplicate" */
				entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
				if (!entry) {
					IPW_ERROR
					    ("Cannot malloc new mac entry\n");
					return 0;
				}
				memcpy(entry->mac, mac, ETH_ALEN);
				entry->seq_num = seq;
				entry->frag_num = frag;
				entry->packet_time = jiffies;
				list_add(&entry->list,
					 &priv->ibss_mac_hash[index]);
				return 0;
			}
			last_seq = &entry->seq_num;
			last_frag = &entry->frag_num;
			last_time = &entry->packet_time;
			break;
		}
	case IW_MODE_INFRA:
		last_seq = &priv->last_seq_num;
		last_frag = &priv->last_frag_num;
		last_time = &priv->last_packet_time;
		break;
	default:
		return 0;	/* no duplicate tracking in other modes */
	}
	/* same sequence number seen recently: either an exact duplicate
	 * fragment or a fragment arriving out of order -- drop both */
	if ((*last_seq == seq) &&
	    time_after(*last_time + IPW_PACKET_RETRY_TIME, jiffies)) {
		if (*last_frag == frag)
			goto drop;
		if (*last_frag + 1 != frag)
			/* out-of-order fragment */
			goto drop;
	} else
		*last_seq = seq;

	*last_frag = frag;
	*last_time = jiffies;
	return 0;

      drop:
	/* Comment this line now since we observed the card receives
	 * duplicate packets but the FCTL_RETRY bit is not set in the
	 * IBSS mode with fragmentation enabled.
	 BUG_ON(!(le16_to_cpu(header->frame_ctl) & IEEE80211_FCTL_RETRY)); */
	return 1;
}
7695
/*
 * Process a received 802.11 management frame.
 *
 * The frame is always handed to ieee80211_rx_mgt().  In ad-hoc mode,
 * beacons/probe responses for our BSSID additionally register the
 * sender as a station.  When CFG_NET_STATS is enabled, the frame is
 * also re-framed with the rx stats prepended and delivered up the
 * stack as an ETH_P_80211_STATS packet (the skb then belongs to the
 * network stack, so rxb->skb is cleared).
 */
static void ipw_handle_mgmt_packet(struct ipw_priv *priv,
				   struct ipw_rx_mem_buffer *rxb,
				   struct ieee80211_rx_stats *stats)
{
	struct sk_buff *skb = rxb->skb;
	struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)skb->data;
	struct ieee80211_hdr_4addr *header = (struct ieee80211_hdr_4addr *)
	    (skb->data + IPW_RX_FRAME_SIZE);

	ieee80211_rx_mgt(priv->ieee, header, stats);

	if (priv->ieee->iw_mode == IW_MODE_ADHOC &&
	    ((WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
	      IEEE80211_STYPE_PROBE_RESP) ||
	     (WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
	      IEEE80211_STYPE_BEACON))) {
		/* a peer in our IBSS announced itself -- track it */
		if (!memcmp(header->addr3, priv->bssid, ETH_ALEN))
			ipw_add_station(priv, header->addr2);
	}

	if (priv->config & CFG_NET_STATS) {
		IPW_DEBUG_HC("sending stat packet\n");

		/* Set the size of the skb to the size of the full
		 * ipw header and 802.11 frame */
		skb_put(skb, le16_to_cpu(pkt->u.frame.length) +
			IPW_RX_FRAME_SIZE);

		/* Advance past the ipw packet header to the 802.11 frame */
		skb_pull(skb, IPW_RX_FRAME_SIZE);

		/* Push the ieee80211_rx_stats before the 802.11 frame */
		memcpy(skb_push(skb, sizeof(*stats)), stats, sizeof(*stats));

		skb->dev = priv->ieee->dev;

		/* Point raw at the ieee80211_stats */
		skb->mac.raw = skb->data;

		skb->pkt_type = PACKET_OTHERHOST;
		skb->protocol = __constant_htons(ETH_P_80211_STATS);
		memset(skb->cb, 0, sizeof(rxb->skb->cb));
		netif_rx(skb);
		rxb->skb = NULL;	/* stack owns the skb now */
	}
}
7742
/*
 * Main entry function for receiving a packet with 802.11 headers.  This
 * should be called whenever the FW has notified us that there is a new
 * skb in the receive queue.
 */
/*
 * Drain the hardware RX ring: walk from the last processed slot up to
 * the firmware's read index, dispatch each packet by message type
 * (802.11 frame vs. host notification), then recycle the buffers and
 * restock the ring.
 */
static void ipw_rx(struct ipw_priv *priv)
{
	struct ipw_rx_mem_buffer *rxb;
	struct ipw_rx_packet *pkt;
	struct ieee80211_hdr_4addr *header;
	u32 r, w, i;
	u8 network_packet;

	/* firmware read/write positions in the circular RX queue */
	r = ipw_read32(priv, IPW_RX_READ_INDEX);
	w = ipw_read32(priv, IPW_RX_WRITE_INDEX);
	i = (priv->rxq->processed + 1) % RX_QUEUE_SIZE;

	while (i != r) {
		rxb = priv->rxq->queue[i];
		if (unlikely(rxb == NULL)) {
			printk(KERN_CRIT "Queue not allocated!\n");
			break;
		}
		priv->rxq->queue[i] = NULL;

		/* make the DMA'd buffer visible to the CPU before reading */
		pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
					    IPW_RX_BUF_SIZE,
					    PCI_DMA_FROMDEVICE);

		pkt = (struct ipw_rx_packet *)rxb->skb->data;
		IPW_DEBUG_RX("Packet: type=%02X seq=%02X bits=%02X\n",
			     pkt->header.message_type,
			     pkt->header.rx_seq_num, pkt->header.control_bits);

		switch (pkt->header.message_type) {
		case RX_FRAME_TYPE:	/* 802.11 frame */  {
				/* build the per-frame stats handed up the
				 * stack from the firmware's RX header */
				struct ieee80211_rx_stats stats = {
					.rssi =
					    le16_to_cpu(pkt->u.frame.rssi_dbm) -
					    IPW_RSSI_TO_DBM,
					.signal =
					    le16_to_cpu(pkt->u.frame.rssi_dbm) -
					    IPW_RSSI_TO_DBM + 0x100,
					.noise =
					    le16_to_cpu(pkt->u.frame.noise),
					.rate = pkt->u.frame.rate,
					.mac_time = jiffies,
					.received_channel =
					    pkt->u.frame.received_channel,
					.freq =
					    (pkt->u.frame.
					     control & (1 << 0)) ?
					    IEEE80211_24GHZ_BAND :
					    IEEE80211_52GHZ_BAND,
					.len = le16_to_cpu(pkt->u.frame.length),
				};

				/* only advertise fields that carry data */
				if (stats.rssi != 0)
					stats.mask |= IEEE80211_STATMASK_RSSI;
				if (stats.signal != 0)
					stats.mask |= IEEE80211_STATMASK_SIGNAL;
				if (stats.noise != 0)
					stats.mask |= IEEE80211_STATMASK_NOISE;
				if (stats.rate != 0)
					stats.mask |= IEEE80211_STATMASK_RATE;

				priv->rx_packets++;

#ifdef CONFIG_IPW2200_MONITOR
				/* in monitor mode every frame goes straight
				 * up, no filtering or duplicate detection */
				if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
#ifdef CONFIG_IEEE80211_RADIOTAP
					ipw_handle_data_packet_monitor(priv,
								       rxb,
								       &stats);
#else
					ipw_handle_data_packet(priv, rxb,
							       &stats);
#endif
					break;
				}
#endif

				header =
				    (struct ieee80211_hdr_4addr *)(rxb->skb->
								   data +
								   IPW_RX_FRAME_SIZE);
				/* TODO: Check Ad-Hoc dest/source and make sure
				 * that we are actually parsing these packets
				 * correctly -- we should probably use the
				 * frame control of the packet and disregard
				 * the current iw_mode */

				network_packet =
				    is_network_packet(priv, header);
				if (network_packet && priv->assoc_network) {
					/* keep a running RSSI picture of the
					 * network we are associated with */
					priv->assoc_network->stats.rssi =
					    stats.rssi;
					average_add(&priv->average_rssi,
						    stats.rssi);
					priv->last_rx_rssi = stats.rssi;
				}

				IPW_DEBUG_RX("Frame: len=%u\n",
					     le16_to_cpu(pkt->u.frame.length));

				/* frame must at least contain its own header */
				if (le16_to_cpu(pkt->u.frame.length) <
				    ieee80211_get_hdrlen(le16_to_cpu(
				    header->frame_ctl))) {
					IPW_DEBUG_DROP
					    ("Received packet is too small. "
					     "Dropping.\n");
					priv->ieee->stats.rx_errors++;
					priv->wstats.discard.misc++;
					break;
				}

				switch (WLAN_FC_GET_TYPE
					(le16_to_cpu(header->frame_ctl))) {

				case IEEE80211_FTYPE_MGMT:
					ipw_handle_mgmt_packet(priv, rxb,
							       &stats);
					break;

				case IEEE80211_FTYPE_CTL:
					break;	/* control frames are ignored */

				case IEEE80211_FTYPE_DATA:
					if (unlikely(!network_packet ||
						     is_duplicate_packet(priv,
									 header)))
					{
						IPW_DEBUG_DROP("Dropping: "
							       MAC_FMT ", "
							       MAC_FMT ", "
							       MAC_FMT "\n",
							       MAC_ARG(header->
								       addr1),
							       MAC_ARG(header->
								       addr2),
							       MAC_ARG(header->
								       addr3));
						break;
					}

					ipw_handle_data_packet(priv, rxb,
							       &stats);

					break;
				}
				break;
			}

		case RX_HOST_NOTIFICATION_TYPE:{
				IPW_DEBUG_RX
				    ("Notification: subtype=%02X flags=%02X size=%d\n",
				     pkt->u.notification.subtype,
				     pkt->u.notification.flags,
				     pkt->u.notification.size);
				ipw_rx_notification(priv, &pkt->u.notification);
				break;
			}

		default:
			IPW_DEBUG_RX("Bad Rx packet of type %d\n",
				     pkt->header.message_type);
			break;
		}

		/* For now we just don't re-use anything. We can tweak this
		 * later to try and re-use notification packets and SKBs that
		 * fail to Rx correctly */
		if (rxb->skb != NULL) {
			dev_kfree_skb_any(rxb->skb);
			rxb->skb = NULL;
		}

		pci_unmap_single(priv->pci_dev, rxb->dma_addr,
				 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
		list_add_tail(&rxb->list, &priv->rxq->rx_used);

		i = (i + 1) % RX_QUEUE_SIZE;
	}

	/* Backtrack one entry */
	priv->rxq->processed = (i ? i : RX_QUEUE_SIZE) - 1;

	ipw_rx_queue_restock(priv);
}
7932
7933 #define DEFAULT_RTS_THRESHOLD 2304U
7934 #define MIN_RTS_THRESHOLD 1U
7935 #define MAX_RTS_THRESHOLD 2304U
7936 #define DEFAULT_BEACON_INTERVAL 100U
7937 #define DEFAULT_SHORT_RETRY_LIMIT 7U
7938 #define DEFAULT_LONG_RETRY_LIMIT 4U
7939
/**
 * ipw_sw_reset
 * @option: options to control different reset behaviour
 * 	    0 = reset everything except the 'disable' module_param
 * 	    1 = reset everything and print out driver info (for probe only)
 * 	    2 = reset everything
 *
 * Re-initializes the soft state of the driver from the module
 * parameters (led, associate, auto_create, disable, channel, mode,
 * hwcrypto, ...) and the detected adapter type (2200BG vs 2915ABG).
 * Returns nonzero if the iw_mode is unchanged by the reset.
 */
static int ipw_sw_reset(struct ipw_priv *priv, int option)
{
	int band, modulation;
	int old_mode = priv->ieee->iw_mode;

	/* Initialize module parameter values here */
	priv->config = 0;

	/* We default to disabling the LED code as right now it causes
	 * too many systems to lock up... */
	if (!led)
		priv->config |= CFG_NO_LED;

	if (associate)
		priv->config |= CFG_ASSOCIATE;
	else
		IPW_DEBUG_INFO("Auto associate disabled.\n");

	if (auto_create)
		priv->config |= CFG_ADHOC_CREATE;
	else
		IPW_DEBUG_INFO("Auto adhoc creation disabled.\n");

	/* clear any static ESSID */
	priv->config &= ~CFG_STATIC_ESSID;
	priv->essid_len = 0;
	memset(priv->essid, 0, IW_ESSID_MAX_SIZE);

	/* 'disable' param only honored when option != 0 */
	if (disable && option) {
		priv->status |= STATUS_RF_KILL_SW;
		IPW_DEBUG_INFO("Radio disabled.\n");
	}

	if (channel != 0) {
		priv->config |= CFG_STATIC_CHANNEL;
		priv->channel = channel;
		IPW_DEBUG_INFO("Bind to static channel %d\n", channel);
		/* TODO: Validate that provided channel is in range */
	}
#ifdef CONFIG_IPW_QOS
	ipw_qos_init(priv, qos_enable, qos_burst_enable,
		     burst_duration_CCK, burst_duration_OFDM);
#endif				/* CONFIG_IPW_QOS */

	/* mode module param: 0=BSS, 1=IBSS, 2=monitor (if built in) */
	switch (mode) {
	case 1:
		priv->ieee->iw_mode = IW_MODE_ADHOC;
		priv->net_dev->type = ARPHRD_ETHER;

		break;
#ifdef CONFIG_IPW2200_MONITOR
	case 2:
		priv->ieee->iw_mode = IW_MODE_MONITOR;
#ifdef CONFIG_IEEE80211_RADIOTAP
		priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
#else
		priv->net_dev->type = ARPHRD_IEEE80211;
#endif
		break;
#endif
	default:
	case 0:
		priv->net_dev->type = ARPHRD_ETHER;
		priv->ieee->iw_mode = IW_MODE_INFRA;
		break;
	}

	if (hwcrypto) {
		/* let the firmware do all crypto work */
		priv->ieee->host_encrypt = 0;
		priv->ieee->host_encrypt_msdu = 0;
		priv->ieee->host_decrypt = 0;
		priv->ieee->host_mc_decrypt = 0;
	}
	IPW_DEBUG_INFO("Hardware crypto [%s]\n", hwcrypto ? "on" : "off");

	/* IPW2200/2915 is able to do hardware fragmentation. */
	priv->ieee->host_open_frag = 0;

	/* PCI device IDs 0x4223/0x4224 are the a/b/g 2915ABG parts */
	if ((priv->pci_dev->device == 0x4223) ||
	    (priv->pci_dev->device == 0x4224)) {
		if (option == 1)
			printk(KERN_INFO DRV_NAME
			       ": Detected Intel PRO/Wireless 2915ABG Network "
			       "Connection\n");
		priv->ieee->abg_true = 1;
		band = IEEE80211_52GHZ_BAND | IEEE80211_24GHZ_BAND;
		modulation = IEEE80211_OFDM_MODULATION |
		    IEEE80211_CCK_MODULATION;
		priv->adapter = IPW_2915ABG;
		priv->ieee->mode = IEEE_A | IEEE_G | IEEE_B;
	} else {
		if (option == 1)
			printk(KERN_INFO DRV_NAME
			       ": Detected Intel PRO/Wireless 2200BG Network "
			       "Connection\n");

		priv->ieee->abg_true = 0;
		band = IEEE80211_24GHZ_BAND;
		modulation = IEEE80211_OFDM_MODULATION |
		    IEEE80211_CCK_MODULATION;
		priv->adapter = IPW_2200BG;
		priv->ieee->mode = IEEE_G | IEEE_B;
	}

	priv->ieee->freq_band = band;
	priv->ieee->modulation = modulation;

	priv->rates_mask = IEEE80211_DEFAULT_RATES_MASK;

	priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
	priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;

	priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
	priv->short_retry_limit = DEFAULT_SHORT_RETRY_LIMIT;
	priv->long_retry_limit = DEFAULT_LONG_RETRY_LIMIT;

	/* If power management is turned on, default to AC mode */
	priv->power_mode = IPW_POWER_AC;
	priv->tx_power = IPW_TX_POWER_DEFAULT;

	/* nonzero => iw_mode survived the reset unchanged */
	return old_mode == priv->ieee->iw_mode;
}
8068
8069 /*
8070 * This file defines the Wireless Extension handlers. It does not
8071 * define any methods of hardware manipulation and relies on the
8072 * functions defined in ipw_main to provide the HW interaction.
8073 *
8074 * The exception to this is the use of the ipw_get_ordinal()
8075 * function used to poll the hardware vs. making unecessary calls.
8076 *
8077 */
8078
8079 static int ipw_wx_get_name(struct net_device *dev,
8080 struct iw_request_info *info,
8081 union iwreq_data *wrqu, char *extra)
8082 {
8083 struct ipw_priv *priv = ieee80211_priv(dev);
8084 mutex_lock(&priv->mutex);
8085 if (priv->status & STATUS_RF_KILL_MASK)
8086 strcpy(wrqu->name, "radio off");
8087 else if (!(priv->status & STATUS_ASSOCIATED))
8088 strcpy(wrqu->name, "unassociated");
8089 else
8090 snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11%c",
8091 ipw_modes[priv->assoc_request.ieee_mode]);
8092 IPW_DEBUG_WX("Name: %s\n", wrqu->name);
8093 mutex_unlock(&priv->mutex);
8094 return 0;
8095 }
8096
/*
 * Set (channel != 0) or clear (channel == 0) the static channel.
 *
 * Clearing triggers a fresh association attempt.  A changed channel in
 * monitor mode just aborts any scan in progress; in other modes it
 * forces a [re]association.  Callers hold priv->mutex (see the WX
 * handlers that wrap this function).
 */
static int ipw_set_channel(struct ipw_priv *priv, u8 channel)
{
	if (channel == 0) {
		IPW_DEBUG_INFO("Setting channel to ANY (0)\n");
		priv->config &= ~CFG_STATIC_CHANNEL;
		IPW_DEBUG_ASSOC("Attempting to associate with new "
				"parameters.\n");
		ipw_associate(priv);
		return 0;
	}

	priv->config |= CFG_STATIC_CHANNEL;

	if (priv->channel == channel) {
		/* no change -- nothing to do */
		IPW_DEBUG_INFO("Request to set channel to current value (%d)\n",
			       channel);
		return 0;
	}

	IPW_DEBUG_INFO("Setting channel to %i\n", (int)channel);
	priv->channel = channel;

#ifdef CONFIG_IPW2200_MONITOR
	if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
		int i;
		if (priv->status & STATUS_SCANNING) {
			IPW_DEBUG_SCAN("Scan abort triggered due to "
				       "channel change.\n");
			ipw_abort_scan(priv);
		}

		/* busy-wait up to ~10ms for the abort to take effect */
		for (i = 1000; i && (priv->status & STATUS_SCANNING); i--)
			udelay(10);

		if (priv->status & STATUS_SCANNING)
			IPW_DEBUG_SCAN("Still scanning...\n");
		else
			IPW_DEBUG_SCAN("Took %dms to abort current scan\n",
				       1000 - i);

		return 0;
	}
#endif				/* CONFIG_IPW2200_MONITOR */

	/* Network configuration changed -- force [re]association */
	IPW_DEBUG_ASSOC("[re]association triggered due to channel change.\n");
	if (!ipw_disassociate(priv))
		ipw_associate(priv);

	return 0;
}
8148
/*
 * SIOCSIWFREQ handler: accept either a raw channel number or a
 * frequency (e == 1), validate it against the geography tables, and
 * hand it to ipw_set_channel() under the driver mutex.  Ad-hoc mode
 * additionally rejects passive-scan-only channels.
 */
static int ipw_wx_set_freq(struct net_device *dev,
			   struct iw_request_info *info,
			   union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
	struct iw_freq *fwrq = &wrqu->freq;
	int ret = 0, i;
	u8 channel, flags;
	int band;

	/* m == 0 means "any channel" */
	if (fwrq->m == 0) {
		IPW_DEBUG_WX("SET Freq/Channel -> any\n");
		mutex_lock(&priv->mutex);
		ret = ipw_set_channel(priv, 0);
		mutex_unlock(&priv->mutex);
		return ret;
	}
	/* if setting by freq convert to channel */
	if (fwrq->e == 1) {
		channel = ieee80211_freq_to_channel(priv->ieee, fwrq->m);
		if (channel == 0)
			return -EINVAL;
	} else
		channel = fwrq->m;

	/* returns the band the channel belongs to, or 0 if invalid */
	if (!(band = ieee80211_is_valid_channel(priv->ieee, channel)))
		return -EINVAL;

	if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
		i = ieee80211_channel_to_index(priv->ieee, channel);
		if (i == -1)
			return -EINVAL;

		/* passive-only channels cannot host an IBSS */
		flags = (band == IEEE80211_24GHZ_BAND) ?
		    geo->bg[i].flags : geo->a[i].flags;
		if (flags & IEEE80211_CH_PASSIVE_ONLY) {
			IPW_DEBUG_WX("Invalid Ad-Hoc channel for 802.11a\n");
			return -EINVAL;
		}
	}

	IPW_DEBUG_WX("SET Freq/Channel -> %d \n", fwrq->m);
	mutex_lock(&priv->mutex);
	ret = ipw_set_channel(priv, channel);
	mutex_unlock(&priv->mutex);
	return ret;
}
8197
8198 static int ipw_wx_get_freq(struct net_device *dev,
8199 struct iw_request_info *info,
8200 union iwreq_data *wrqu, char *extra)
8201 {
8202 struct ipw_priv *priv = ieee80211_priv(dev);
8203
8204 wrqu->freq.e = 0;
8205
8206 /* If we are associated, trying to associate, or have a statically
8207 * configured CHANNEL then return that; otherwise return ANY */
8208 mutex_lock(&priv->mutex);
8209 if (priv->config & CFG_STATIC_CHANNEL ||
8210 priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED))
8211 wrqu->freq.m = priv->channel;
8212 else
8213 wrqu->freq.m = 0;
8214
8215 mutex_unlock(&priv->mutex);
8216 IPW_DEBUG_WX("GET Freq/Channel -> %d \n", priv->channel);
8217 return 0;
8218 }
8219
/*
 * SIOCSIWMODE handler: switch between infrastructure, ad-hoc and
 * (optionally) monitor mode.  A real mode change soft-resets the
 * driver configuration, adjusts the netdev ARP header type, drops the
 * cached firmware image and queues an adapter restart.
 */
static int ipw_wx_set_mode(struct net_device *dev,
			   struct iw_request_info *info,
			   union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	int err = 0;

	IPW_DEBUG_WX("Set MODE: %d\n", wrqu->mode);

	switch (wrqu->mode) {
#ifdef CONFIG_IPW2200_MONITOR
	case IW_MODE_MONITOR:
#endif
	case IW_MODE_ADHOC:
	case IW_MODE_INFRA:
		break;
	case IW_MODE_AUTO:
		/* AUTO is treated as infrastructure */
		wrqu->mode = IW_MODE_INFRA;
		break;
	default:
		return -EINVAL;
	}
	/* already in the requested mode -- nothing to do */
	if (wrqu->mode == priv->ieee->iw_mode)
		return 0;

	mutex_lock(&priv->mutex);

	/* re-read module params / adapter defaults for the new mode */
	ipw_sw_reset(priv, 0);

#ifdef CONFIG_IPW2200_MONITOR
	if (priv->ieee->iw_mode == IW_MODE_MONITOR)
		priv->net_dev->type = ARPHRD_ETHER;

	if (wrqu->mode == IW_MODE_MONITOR)
#ifdef CONFIG_IEEE80211_RADIOTAP
		priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
#else
		priv->net_dev->type = ARPHRD_IEEE80211;
#endif
#endif				/* CONFIG_IPW2200_MONITOR */

	/* Free the existing firmware and reset the fw_loaded
	 * flag so ipw_load() will bring in the new firmware */
	free_firmware();

	priv->ieee->iw_mode = wrqu->mode;

	/* restart the adapter asynchronously to apply the new mode */
	queue_work(priv->workqueue, &priv->adapter_restart);
	mutex_unlock(&priv->mutex);
	return err;
}
8271
8272 static int ipw_wx_get_mode(struct net_device *dev,
8273 struct iw_request_info *info,
8274 union iwreq_data *wrqu, char *extra)
8275 {
8276 struct ipw_priv *priv = ieee80211_priv(dev);
8277 mutex_lock(&priv->mutex);
8278 wrqu->mode = priv->ieee->iw_mode;
8279 IPW_DEBUG_WX("Get MODE -> %d\n", wrqu->mode);
8280 mutex_unlock(&priv->mutex);
8281 return 0;
8282 }
8283
/* Values are in microseconds.  One entry per power-save level; the two
 * tables are indexed together (timeout_duration[i] pairs with
 * period_duration[i]). */
static const s32 timeout_duration[] = {
	350000,
	250000,
	75000,
	37000,
	25000,
};

static const s32 period_duration[] = {
	400000,
	700000,
	1000000,
	1000000,
	1000000
};
8300
/*
 * SIOCGIWRANGE handler: fill in the static capabilities of the device
 * (bit rates, frag/RTS limits, WEP key sizes, supported frequencies
 * from the geography tables, WE event and encryption capabilities).
 */
static int ipw_wx_get_range(struct net_device *dev,
			    struct iw_request_info *info,
			    union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	struct iw_range *range = (struct iw_range *)extra;
	const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
	int i = 0, j;

	wrqu->data.length = sizeof(*range);
	memset(range, 0, sizeof(*range));

	/* 54Mbs == ~27 Mb/s real (802.11g) */
	range->throughput = 27 * 1000 * 1000;

	range->max_qual.qual = 100;
	/* TODO: Find real max RSSI and stick here */
	range->max_qual.level = 0;
	range->max_qual.noise = 0;
	range->max_qual.updated = 7;	/* Updated all three */

	range->avg_qual.qual = 70;
	/* TODO: Find real 'good' to 'bad' threshold value for RSSI */
	range->avg_qual.level = 0;	/* FIXME to real average level */
	range->avg_qual.noise = 0;
	range->avg_qual.updated = 7;	/* Updated all three */
	mutex_lock(&priv->mutex);
	range->num_bitrates = min(priv->rates.num_rates, (u8) IW_MAX_BITRATES);

	/* supported rates are stored in units of 500 kb/s */
	for (i = 0; i < range->num_bitrates; i++)
		range->bitrate[i] = (priv->rates.supported_rates[i] & 0x7F) *
		    500000;

	range->max_rts = DEFAULT_RTS_THRESHOLD;
	range->min_frag = MIN_FRAG_THRESHOLD;
	range->max_frag = MAX_FRAG_THRESHOLD;

	range->encoding_size[0] = 5;	/* 40-bit WEP */
	range->encoding_size[1] = 13;	/* 104-bit WEP */
	range->num_encoding_sizes = 2;
	range->max_encoding_tokens = WEP_KEYS;

	/* Set the Wireless Extension versions */
	range->we_version_compiled = WIRELESS_EXT;
	range->we_version_source = 18;

	/* enumerate usable channels; ad-hoc mode excludes passive-only
	 * channels since an IBSS cannot be started on them */
	i = 0;
	if (priv->ieee->mode & (IEEE_B | IEEE_G)) {
		for (j = 0; j < geo->bg_channels && i < IW_MAX_FREQUENCIES; j++) {
			if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
			    (geo->bg[j].flags & IEEE80211_CH_PASSIVE_ONLY))
				continue;

			range->freq[i].i = geo->bg[j].channel;
			range->freq[i].m = geo->bg[j].freq * 100000;
			range->freq[i].e = 1;
			i++;
		}
	}

	if (priv->ieee->mode & IEEE_A) {
		for (j = 0; j < geo->a_channels && i < IW_MAX_FREQUENCIES; j++) {
			if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
			    (geo->a[j].flags & IEEE80211_CH_PASSIVE_ONLY))
				continue;

			range->freq[i].i = geo->a[j].channel;
			range->freq[i].m = geo->a[j].freq * 100000;
			range->freq[i].e = 1;
			i++;
		}
	}

	range->num_channels = i;
	range->num_frequency = i;

	mutex_unlock(&priv->mutex);

	/* Event capability (kernel + driver) */
	range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
				IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) |
				IW_EVENT_CAPA_MASK(SIOCGIWAP));
	range->event_capa[1] = IW_EVENT_CAPA_K_1;

	range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
	    IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;

	IPW_DEBUG_WX("GET Range\n");
	return 0;
}
8391
8392 static int ipw_wx_set_wap(struct net_device *dev,
8393 struct iw_request_info *info,
8394 union iwreq_data *wrqu, char *extra)
8395 {
8396 struct ipw_priv *priv = ieee80211_priv(dev);
8397
8398 static const unsigned char any[] = {
8399 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
8400 };
8401 static const unsigned char off[] = {
8402 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
8403 };
8404
8405 if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
8406 return -EINVAL;
8407 mutex_lock(&priv->mutex);
8408 if (!memcmp(any, wrqu->ap_addr.sa_data, ETH_ALEN) ||
8409 !memcmp(off, wrqu->ap_addr.sa_data, ETH_ALEN)) {
8410 /* we disable mandatory BSSID association */
8411 IPW_DEBUG_WX("Setting AP BSSID to ANY\n");
8412 priv->config &= ~CFG_STATIC_BSSID;
8413 IPW_DEBUG_ASSOC("Attempting to associate with new "
8414 "parameters.\n");
8415 ipw_associate(priv);
8416 mutex_unlock(&priv->mutex);
8417 return 0;
8418 }
8419
8420 priv->config |= CFG_STATIC_BSSID;
8421 if (!memcmp(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN)) {
8422 IPW_DEBUG_WX("BSSID set to current BSSID.\n");
8423 mutex_unlock(&priv->mutex);
8424 return 0;
8425 }
8426
8427 IPW_DEBUG_WX("Setting mandatory BSSID to " MAC_FMT "\n",
8428 MAC_ARG(wrqu->ap_addr.sa_data));
8429
8430 memcpy(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN);
8431
8432 /* Network configuration changed -- force [re]association */
8433 IPW_DEBUG_ASSOC("[re]association triggered due to BSSID change.\n");
8434 if (!ipw_disassociate(priv))
8435 ipw_associate(priv);
8436
8437 mutex_unlock(&priv->mutex);
8438 return 0;
8439 }
8440
8441 static int ipw_wx_get_wap(struct net_device *dev,
8442 struct iw_request_info *info,
8443 union iwreq_data *wrqu, char *extra)
8444 {
8445 struct ipw_priv *priv = ieee80211_priv(dev);
8446 /* If we are associated, trying to associate, or have a statically
8447 * configured BSSID then return that; otherwise return ANY */
8448 mutex_lock(&priv->mutex);
8449 if (priv->config & CFG_STATIC_BSSID ||
8450 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
8451 wrqu->ap_addr.sa_family = ARPHRD_ETHER;
8452 memcpy(wrqu->ap_addr.sa_data, priv->bssid, ETH_ALEN);
8453 } else
8454 memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
8455
8456 IPW_DEBUG_WX("Getting WAP BSSID: " MAC_FMT "\n",
8457 MAC_ARG(wrqu->ap_addr.sa_data));
8458 mutex_unlock(&priv->mutex);
8459 return 0;
8460 }
8461
/* SIOCSIWESSID handler: set a static ESSID, or revert to ANY.
 * NOTE(review): the "length - 1" below assumes the pre-WE-21 convention
 * in which essid.length counted the trailing NUL -- confirm against the
 * wireless-extensions version this tree targets before changing. */
static int ipw_wx_set_essid(struct net_device *dev,
			    struct iw_request_info *info,
			    union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	char *essid = "";	/* ANY */
	int length = 0;
	mutex_lock(&priv->mutex);
	if (wrqu->essid.flags && wrqu->essid.length) {
		length = wrqu->essid.length - 1;
		essid = extra;
	}
	if (length == 0) {
		/* Empty request: drop any static ESSID and let the
		 * firmware associate with whatever it finds. */
		IPW_DEBUG_WX("Setting ESSID to ANY\n");
		if ((priv->config & CFG_STATIC_ESSID) &&
		    !(priv->status & (STATUS_ASSOCIATED |
				      STATUS_ASSOCIATING))) {
			IPW_DEBUG_ASSOC("Attempting to associate with new "
					"parameters.\n");
			priv->config &= ~CFG_STATIC_ESSID;
			ipw_associate(priv);
		}
		mutex_unlock(&priv->mutex);
		return 0;
	}

	length = min(length, IW_ESSID_MAX_SIZE);

	priv->config |= CFG_STATIC_ESSID;

	/* Same ESSID as already configured: no reassociation needed */
	if (priv->essid_len == length && !memcmp(priv->essid, extra, length)) {
		IPW_DEBUG_WX("ESSID set to current ESSID.\n");
		mutex_unlock(&priv->mutex);
		return 0;
	}

	IPW_DEBUG_WX("Setting ESSID: '%s' (%d)\n", escape_essid(essid, length),
		     length);

	priv->essid_len = length;
	memcpy(priv->essid, essid, priv->essid_len);

	/* Network configuration changed -- force [re]association */
	IPW_DEBUG_ASSOC("[re]association triggered due to ESSID change.\n");
	if (!ipw_disassociate(priv))
		ipw_associate(priv);

	mutex_unlock(&priv->mutex);
	return 0;
}
8512
8513 static int ipw_wx_get_essid(struct net_device *dev,
8514 struct iw_request_info *info,
8515 union iwreq_data *wrqu, char *extra)
8516 {
8517 struct ipw_priv *priv = ieee80211_priv(dev);
8518
8519 /* If we are associated, trying to associate, or have a statically
8520 * configured ESSID then return that; otherwise return ANY */
8521 mutex_lock(&priv->mutex);
8522 if (priv->config & CFG_STATIC_ESSID ||
8523 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
8524 IPW_DEBUG_WX("Getting essid: '%s'\n",
8525 escape_essid(priv->essid, priv->essid_len));
8526 memcpy(extra, priv->essid, priv->essid_len);
8527 wrqu->essid.length = priv->essid_len;
8528 wrqu->essid.flags = 1; /* active */
8529 } else {
8530 IPW_DEBUG_WX("Getting essid: ANY\n");
8531 wrqu->essid.length = 0;
8532 wrqu->essid.flags = 0; /* active */
8533 }
8534 mutex_unlock(&priv->mutex);
8535 return 0;
8536 }
8537
8538 static int ipw_wx_set_nick(struct net_device *dev,
8539 struct iw_request_info *info,
8540 union iwreq_data *wrqu, char *extra)
8541 {
8542 struct ipw_priv *priv = ieee80211_priv(dev);
8543
8544 IPW_DEBUG_WX("Setting nick to '%s'\n", extra);
8545 if (wrqu->data.length > IW_ESSID_MAX_SIZE)
8546 return -E2BIG;
8547 mutex_lock(&priv->mutex);
8548 wrqu->data.length = min((size_t) wrqu->data.length, sizeof(priv->nick));
8549 memset(priv->nick, 0, sizeof(priv->nick));
8550 memcpy(priv->nick, extra, wrqu->data.length);
8551 IPW_DEBUG_TRACE("<<\n");
8552 mutex_unlock(&priv->mutex);
8553 return 0;
8554
8555 }
8556
8557 static int ipw_wx_get_nick(struct net_device *dev,
8558 struct iw_request_info *info,
8559 union iwreq_data *wrqu, char *extra)
8560 {
8561 struct ipw_priv *priv = ieee80211_priv(dev);
8562 IPW_DEBUG_WX("Getting nick\n");
8563 mutex_lock(&priv->mutex);
8564 wrqu->data.length = strlen(priv->nick) + 1;
8565 memcpy(extra, priv->nick, wrqu->data.length);
8566 wrqu->data.flags = 1; /* active */
8567 mutex_unlock(&priv->mutex);
8568 return 0;
8569 }
8570
/* SIOCSIWSENS handler: set the roaming threshold; the disassociate
 * threshold is always kept at 3x the roaming threshold. */
static int ipw_wx_set_sens(struct net_device *dev,
			   struct iw_request_info *info,
			   union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	int err = 0;

	IPW_DEBUG_WX("Setting roaming threshold to %d\n", wrqu->sens.value);
	IPW_DEBUG_WX("Setting disassociate threshold to %d\n", 3*wrqu->sens.value);
	mutex_lock(&priv->mutex);

	/* fixed == 0 requests automatic mode: fall back to the defaults */
	if (wrqu->sens.fixed == 0)
	{
		priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
		priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
		goto out;
	}
	/* Only the roaming threshold is range-checked; the derived
	 * disassociate threshold follows from it. */
	if ((wrqu->sens.value > IPW_MB_ROAMING_THRESHOLD_MAX) ||
	    (wrqu->sens.value < IPW_MB_ROAMING_THRESHOLD_MIN)) {
		err = -EINVAL;
		goto out;
	}

	priv->roaming_threshold = wrqu->sens.value;
	priv->disassociate_threshold = 3*wrqu->sens.value;
      out:
	mutex_unlock(&priv->mutex);
	return err;
}
8600
8601 static int ipw_wx_get_sens(struct net_device *dev,
8602 struct iw_request_info *info,
8603 union iwreq_data *wrqu, char *extra)
8604 {
8605 struct ipw_priv *priv = ieee80211_priv(dev);
8606 mutex_lock(&priv->mutex);
8607 wrqu->sens.fixed = 1;
8608 wrqu->sens.value = priv->roaming_threshold;
8609 mutex_unlock(&priv->mutex);
8610
8611 IPW_DEBUG_WX("GET roaming threshold -> %s %d \n",
8612 wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
8613
8614 return 0;
8615 }
8616
8617 static int ipw_wx_set_rate(struct net_device *dev,
8618 struct iw_request_info *info,
8619 union iwreq_data *wrqu, char *extra)
8620 {
8621 /* TODO: We should use semaphores or locks for access to priv */
8622 struct ipw_priv *priv = ieee80211_priv(dev);
8623 u32 target_rate = wrqu->bitrate.value;
8624 u32 fixed, mask;
8625
8626 /* value = -1, fixed = 0 means auto only, so we should use all rates offered by AP */
8627 /* value = X, fixed = 1 means only rate X */
8628 /* value = X, fixed = 0 means all rates lower equal X */
8629
8630 if (target_rate == -1) {
8631 fixed = 0;
8632 mask = IEEE80211_DEFAULT_RATES_MASK;
8633 /* Now we should reassociate */
8634 goto apply;
8635 }
8636
8637 mask = 0;
8638 fixed = wrqu->bitrate.fixed;
8639
8640 if (target_rate == 1000000 || !fixed)
8641 mask |= IEEE80211_CCK_RATE_1MB_MASK;
8642 if (target_rate == 1000000)
8643 goto apply;
8644
8645 if (target_rate == 2000000 || !fixed)
8646 mask |= IEEE80211_CCK_RATE_2MB_MASK;
8647 if (target_rate == 2000000)
8648 goto apply;
8649
8650 if (target_rate == 5500000 || !fixed)
8651 mask |= IEEE80211_CCK_RATE_5MB_MASK;
8652 if (target_rate == 5500000)
8653 goto apply;
8654
8655 if (target_rate == 6000000 || !fixed)
8656 mask |= IEEE80211_OFDM_RATE_6MB_MASK;
8657 if (target_rate == 6000000)
8658 goto apply;
8659
8660 if (target_rate == 9000000 || !fixed)
8661 mask |= IEEE80211_OFDM_RATE_9MB_MASK;
8662 if (target_rate == 9000000)
8663 goto apply;
8664
8665 if (target_rate == 11000000 || !fixed)
8666 mask |= IEEE80211_CCK_RATE_11MB_MASK;
8667 if (target_rate == 11000000)
8668 goto apply;
8669
8670 if (target_rate == 12000000 || !fixed)
8671 mask |= IEEE80211_OFDM_RATE_12MB_MASK;
8672 if (target_rate == 12000000)
8673 goto apply;
8674
8675 if (target_rate == 18000000 || !fixed)
8676 mask |= IEEE80211_OFDM_RATE_18MB_MASK;
8677 if (target_rate == 18000000)
8678 goto apply;
8679
8680 if (target_rate == 24000000 || !fixed)
8681 mask |= IEEE80211_OFDM_RATE_24MB_MASK;
8682 if (target_rate == 24000000)
8683 goto apply;
8684
8685 if (target_rate == 36000000 || !fixed)
8686 mask |= IEEE80211_OFDM_RATE_36MB_MASK;
8687 if (target_rate == 36000000)
8688 goto apply;
8689
8690 if (target_rate == 48000000 || !fixed)
8691 mask |= IEEE80211_OFDM_RATE_48MB_MASK;
8692 if (target_rate == 48000000)
8693 goto apply;
8694
8695 if (target_rate == 54000000 || !fixed)
8696 mask |= IEEE80211_OFDM_RATE_54MB_MASK;
8697 if (target_rate == 54000000)
8698 goto apply;
8699
8700 IPW_DEBUG_WX("invalid rate specified, returning error\n");
8701 return -EINVAL;
8702
8703 apply:
8704 IPW_DEBUG_WX("Setting rate mask to 0x%08X [%s]\n",
8705 mask, fixed ? "fixed" : "sub-rates");
8706 mutex_lock(&priv->mutex);
8707 if (mask == IEEE80211_DEFAULT_RATES_MASK) {
8708 priv->config &= ~CFG_FIXED_RATE;
8709 ipw_set_fixed_rate(priv, priv->ieee->mode);
8710 } else
8711 priv->config |= CFG_FIXED_RATE;
8712
8713 if (priv->rates_mask == mask) {
8714 IPW_DEBUG_WX("Mask set to current mask.\n");
8715 mutex_unlock(&priv->mutex);
8716 return 0;
8717 }
8718
8719 priv->rates_mask = mask;
8720
8721 /* Network configuration changed -- force [re]association */
8722 IPW_DEBUG_ASSOC("[re]association triggered due to rates change.\n");
8723 if (!ipw_disassociate(priv))
8724 ipw_associate(priv);
8725
8726 mutex_unlock(&priv->mutex);
8727 return 0;
8728 }
8729
8730 static int ipw_wx_get_rate(struct net_device *dev,
8731 struct iw_request_info *info,
8732 union iwreq_data *wrqu, char *extra)
8733 {
8734 struct ipw_priv *priv = ieee80211_priv(dev);
8735 mutex_lock(&priv->mutex);
8736 wrqu->bitrate.value = priv->last_rate;
8737 mutex_unlock(&priv->mutex);
8738 IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value);
8739 return 0;
8740 }
8741
8742 static int ipw_wx_set_rts(struct net_device *dev,
8743 struct iw_request_info *info,
8744 union iwreq_data *wrqu, char *extra)
8745 {
8746 struct ipw_priv *priv = ieee80211_priv(dev);
8747 mutex_lock(&priv->mutex);
8748 if (wrqu->rts.disabled)
8749 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
8750 else {
8751 if (wrqu->rts.value < MIN_RTS_THRESHOLD ||
8752 wrqu->rts.value > MAX_RTS_THRESHOLD) {
8753 mutex_unlock(&priv->mutex);
8754 return -EINVAL;
8755 }
8756 priv->rts_threshold = wrqu->rts.value;
8757 }
8758
8759 ipw_send_rts_threshold(priv, priv->rts_threshold);
8760 mutex_unlock(&priv->mutex);
8761 IPW_DEBUG_WX("SET RTS Threshold -> %d \n", priv->rts_threshold);
8762 return 0;
8763 }
8764
8765 static int ipw_wx_get_rts(struct net_device *dev,
8766 struct iw_request_info *info,
8767 union iwreq_data *wrqu, char *extra)
8768 {
8769 struct ipw_priv *priv = ieee80211_priv(dev);
8770 mutex_lock(&priv->mutex);
8771 wrqu->rts.value = priv->rts_threshold;
8772 wrqu->rts.fixed = 0; /* no auto select */
8773 wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD);
8774 mutex_unlock(&priv->mutex);
8775 IPW_DEBUG_WX("GET RTS Threshold -> %d \n", wrqu->rts.value);
8776 return 0;
8777 }
8778
/* SIOCSIWTXPOW handler: set TX power (dBm only).  power.disabled also
 * drives the software RF-kill switch, so that side effect happens
 * before any parameter validation. */
static int ipw_wx_set_txpow(struct net_device *dev,
			    struct iw_request_info *info,
			    union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	int err = 0;

	mutex_lock(&priv->mutex);
	/* toggle SW RF kill from power.disabled; a transition still in
	 * progress means we cannot program TX power yet */
	if (ipw_radio_kill_sw(priv, wrqu->power.disabled)) {
		err = -EINPROGRESS;
		goto out;
	}

	/* "auto" (not fixed) selects the driver default level */
	if (!wrqu->power.fixed)
		wrqu->power.value = IPW_TX_POWER_DEFAULT;

	/* only dBm units are supported */
	if (wrqu->power.flags != IW_TXPOW_DBM) {
		err = -EINVAL;
		goto out;
	}

	if ((wrqu->power.value > IPW_TX_POWER_MAX) ||
	    (wrqu->power.value < IPW_TX_POWER_MIN)) {
		err = -EINVAL;
		goto out;
	}

	priv->tx_power = wrqu->power.value;
	err = ipw_set_tx_power(priv);
      out:
	mutex_unlock(&priv->mutex);
	return err;
}
8812
8813 static int ipw_wx_get_txpow(struct net_device *dev,
8814 struct iw_request_info *info,
8815 union iwreq_data *wrqu, char *extra)
8816 {
8817 struct ipw_priv *priv = ieee80211_priv(dev);
8818 mutex_lock(&priv->mutex);
8819 wrqu->power.value = priv->tx_power;
8820 wrqu->power.fixed = 1;
8821 wrqu->power.flags = IW_TXPOW_DBM;
8822 wrqu->power.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0;
8823 mutex_unlock(&priv->mutex);
8824
8825 IPW_DEBUG_WX("GET TX Power -> %s %d \n",
8826 wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
8827
8828 return 0;
8829 }
8830
/* SIOCSIWFRAG handler: set the fragmentation threshold.
 * NOTE(review): the cached priv->ieee->fts has the low bit masked off
 * (thresholds kept even) but the un-masked wrqu->frag.value is what is
 * sent to the firmware below -- confirm this asymmetry is intended. */
static int ipw_wx_set_frag(struct net_device *dev,
			   struct iw_request_info *info,
			   union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	mutex_lock(&priv->mutex);
	if (wrqu->frag.disabled)
		priv->ieee->fts = DEFAULT_FTS;
	else {
		if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
		    wrqu->frag.value > MAX_FRAG_THRESHOLD) {
			mutex_unlock(&priv->mutex);
			return -EINVAL;
		}

		priv->ieee->fts = wrqu->frag.value & ~0x1;
	}

	ipw_send_frag_threshold(priv, wrqu->frag.value);
	mutex_unlock(&priv->mutex);
	IPW_DEBUG_WX("SET Frag Threshold -> %d \n", wrqu->frag.value);
	return 0;
}
8854
8855 static int ipw_wx_get_frag(struct net_device *dev,
8856 struct iw_request_info *info,
8857 union iwreq_data *wrqu, char *extra)
8858 {
8859 struct ipw_priv *priv = ieee80211_priv(dev);
8860 mutex_lock(&priv->mutex);
8861 wrqu->frag.value = priv->ieee->fts;
8862 wrqu->frag.fixed = 0; /* no auto select */
8863 wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FTS);
8864 mutex_unlock(&priv->mutex);
8865 IPW_DEBUG_WX("GET Frag Threshold -> %d \n", wrqu->frag.value);
8866
8867 return 0;
8868 }
8869
/* SIOCSIWRETRY handler: set the short and/or long retry limits.
 * Lifetime-based retry and "disabled" are rejected; a request without
 * IW_RETRY_LIMIT is deliberately a silent no-op. */
static int ipw_wx_set_retry(struct net_device *dev,
			    struct iw_request_info *info,
			    union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = ieee80211_priv(dev);

	if (wrqu->retry.flags & IW_RETRY_LIFETIME || wrqu->retry.disabled)
		return -EINVAL;

	if (!(wrqu->retry.flags & IW_RETRY_LIMIT))
		return 0;

	/* limits are programmed as u8, so clamp the accepted range */
	if (wrqu->retry.value < 0 || wrqu->retry.value > 255)
		return -EINVAL;

	mutex_lock(&priv->mutex);
	/* MIN selects the short limit, MAX the long one; with neither
	 * flag, both limits get the same value */
	if (wrqu->retry.flags & IW_RETRY_MIN)
		priv->short_retry_limit = (u8) wrqu->retry.value;
	else if (wrqu->retry.flags & IW_RETRY_MAX)
		priv->long_retry_limit = (u8) wrqu->retry.value;
	else {
		priv->short_retry_limit = (u8) wrqu->retry.value;
		priv->long_retry_limit = (u8) wrqu->retry.value;
	}

	ipw_send_retry_limit(priv, priv->short_retry_limit,
			     priv->long_retry_limit);
	mutex_unlock(&priv->mutex);
	IPW_DEBUG_WX("SET retry limit -> short:%d long:%d\n",
		     priv->short_retry_limit, priv->long_retry_limit);
	return 0;
}
8902
8903 static int ipw_wx_get_retry(struct net_device *dev,
8904 struct iw_request_info *info,
8905 union iwreq_data *wrqu, char *extra)
8906 {
8907 struct ipw_priv *priv = ieee80211_priv(dev);
8908
8909 mutex_lock(&priv->mutex);
8910 wrqu->retry.disabled = 0;
8911
8912 if ((wrqu->retry.flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
8913 mutex_unlock(&priv->mutex);
8914 return -EINVAL;
8915 }
8916
8917 if (wrqu->retry.flags & IW_RETRY_MAX) {
8918 wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_MAX;
8919 wrqu->retry.value = priv->long_retry_limit;
8920 } else if (wrqu->retry.flags & IW_RETRY_MIN) {
8921 wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_MIN;
8922 wrqu->retry.value = priv->short_retry_limit;
8923 } else {
8924 wrqu->retry.flags = IW_RETRY_LIMIT;
8925 wrqu->retry.value = priv->short_retry_limit;
8926 }
8927 mutex_unlock(&priv->mutex);
8928
8929 IPW_DEBUG_WX("GET retry -> %d \n", wrqu->retry.value);
8930
8931 return 0;
8932 }
8933
/* Kick off an active scan directed at a specific ESSID.  Reached from
 * the SIOCSIWSCAN path, which holds rtnl_lock, so this must not sleep
 * waiting for an in-progress scan; it fails with -EAGAIN instead. */
static int ipw_request_direct_scan(struct ipw_priv *priv, char *essid,
				   int essid_len)
{
	struct ipw_scan_request_ext scan;
	int err = 0, scan_type;

	/* Nothing to do if the device is not up or is being torn down */
	if (!(priv->status & STATUS_INIT) ||
	    (priv->status & STATUS_EXIT_PENDING))
		return 0;

	mutex_lock(&priv->mutex);

	if (priv->status & STATUS_RF_KILL_MASK) {
		IPW_DEBUG_HC("Aborting scan due to RF kill activation\n");
		/* remember the request so it can fire once RF kill clears */
		priv->status |= STATUS_SCAN_PENDING;
		goto done;
	}

	IPW_DEBUG_HC("starting request direct scan!\n");

	if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) {
		/* We should not sleep here; otherwise we will block most
		 * of the system (for instance, we hold rtnl_lock when we
		 * get here).
		 */
		err = -EAGAIN;
		goto done;
	}
	memset(&scan, 0, sizeof(scan));

	/* Per-scan-type dwell times; shorter broadcast dwell when speed
	 * scanning is configured (units per firmware interface --
	 * presumably milliseconds, TODO confirm) */
	if (priv->config & CFG_SPEED_SCAN)
		scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
		    cpu_to_le16(30);
	else
		scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
		    cpu_to_le16(20);

	scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] =
	    cpu_to_le16(20);
	scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = cpu_to_le16(120);
	scan.dwell_time[IPW_SCAN_ACTIVE_DIRECT_SCAN] = cpu_to_le16(20);

	scan.full_scan_index = cpu_to_le32(ieee80211_get_scans(priv->ieee));

	/* program the target ESSID before issuing the scan command */
	err = ipw_send_ssid(priv, essid, essid_len);
	if (err) {
		IPW_DEBUG_HC("Attempt to send SSID command failed\n");
		goto done;
	}
	scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;

	ipw_add_scan_channels(priv, &scan, scan_type);

	err = ipw_send_scan_request_ext(priv, &scan);
	if (err) {
		IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
		goto done;
	}

	priv->status |= STATUS_SCANNING;

      done:
	mutex_unlock(&priv->mutex);
	return err;
}
8999
9000 static int ipw_wx_set_scan(struct net_device *dev,
9001 struct iw_request_info *info,
9002 union iwreq_data *wrqu, char *extra)
9003 {
9004 struct ipw_priv *priv = ieee80211_priv(dev);
9005 struct iw_scan_req *req = NULL;
9006 if (wrqu->data.length
9007 && wrqu->data.length == sizeof(struct iw_scan_req)) {
9008 req = (struct iw_scan_req *)extra;
9009 if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
9010 ipw_request_direct_scan(priv, req->essid,
9011 req->essid_len);
9012 return 0;
9013 }
9014 }
9015
9016 IPW_DEBUG_WX("Start scan\n");
9017
9018 queue_work(priv->workqueue, &priv->request_scan);
9019
9020 return 0;
9021 }
9022
/* SIOCGIWSCAN handler: scan results are cached by the ieee80211 layer,
 * so simply delegate. */
static int ipw_wx_get_scan(struct net_device *dev,
			   struct iw_request_info *info,
			   union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	return ieee80211_wx_get_scan(priv->ieee, info, wrqu, extra);
}
9030
/* SIOCSIWENCODE handler: delegate WEP/key configuration to the
 * ieee80211 layer.  In IBSS mode a resulting capability change (e.g.
 * the privacy bit) must be reflected in our beacon, so we force a
 * disassociation to regenerate it. */
static int ipw_wx_set_encode(struct net_device *dev,
			     struct iw_request_info *info,
			     union iwreq_data *wrqu, char *key)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	int ret;
	u32 cap = priv->capability;	/* snapshot to detect changes */

	mutex_lock(&priv->mutex);
	ret = ieee80211_wx_set_encode(priv->ieee, info, wrqu, key);

	/* In IBSS mode, we need to notify the firmware to update
	 * the beacon info after we changed the capability. */
	if (cap != priv->capability &&
	    priv->ieee->iw_mode == IW_MODE_ADHOC &&
	    priv->status & STATUS_ASSOCIATED)
		ipw_disassociate(priv);

	mutex_unlock(&priv->mutex);
	return ret;
}
9052
/* SIOCGIWENCODE handler: key state lives in the ieee80211 layer;
 * delegate. */
static int ipw_wx_get_encode(struct net_device *dev,
			     struct iw_request_info *info,
			     union iwreq_data *wrqu, char *key)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	return ieee80211_wx_get_encode(priv->ieee, info, wrqu, key);
}
9060
/* SIOCSIWPOWER handler: enable/disable power management.  Disabling
 * sends the firmware CAM (constantly-awake) mode; enabling keeps the
 * previously selected level, defaulting to BATTERY if none was set. */
static int ipw_wx_set_power(struct net_device *dev,
			    struct iw_request_info *info,
			    union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	int err;
	mutex_lock(&priv->mutex);
	if (wrqu->power.disabled) {
		/* remember the bare level (ENABLED bit stripped) so a
		 * later re-enable can restore it */
		priv->power_mode = IPW_POWER_LEVEL(priv->power_mode);
		err = ipw_send_power_mode(priv, IPW_POWER_MODE_CAM);
		if (err) {
			IPW_DEBUG_WX("failed setting power mode.\n");
			mutex_unlock(&priv->mutex);
			return err;
		}
		IPW_DEBUG_WX("SET Power Management Mode -> off\n");
		mutex_unlock(&priv->mutex);
		return 0;
	}

	switch (wrqu->power.flags & IW_POWER_MODE) {
	case IW_POWER_ON:	/* If not specified */
	case IW_POWER_MODE:	/* If set all mask */
	case IW_POWER_ALL_R:	/* If explicitely state all */
		break;
	default:		/* Otherwise we don't support it */
		IPW_DEBUG_WX("SET PM Mode: %X not supported.\n",
			     wrqu->power.flags);
		mutex_unlock(&priv->mutex);
		return -EOPNOTSUPP;
	}

	/* If the user hasn't specified a power management mode yet, default
	 * to BATTERY */
	if (IPW_POWER_LEVEL(priv->power_mode) == IPW_POWER_AC)
		priv->power_mode = IPW_POWER_ENABLED | IPW_POWER_BATTERY;
	else
		priv->power_mode = IPW_POWER_ENABLED | priv->power_mode;
	err = ipw_send_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode));
	if (err) {
		IPW_DEBUG_WX("failed setting power mode.\n");
		mutex_unlock(&priv->mutex);
		return err;
	}

	IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", priv->power_mode);
	mutex_unlock(&priv->mutex);
	return 0;
}
9110
9111 static int ipw_wx_get_power(struct net_device *dev,
9112 struct iw_request_info *info,
9113 union iwreq_data *wrqu, char *extra)
9114 {
9115 struct ipw_priv *priv = ieee80211_priv(dev);
9116 mutex_lock(&priv->mutex);
9117 if (!(priv->power_mode & IPW_POWER_ENABLED))
9118 wrqu->power.disabled = 1;
9119 else
9120 wrqu->power.disabled = 0;
9121
9122 mutex_unlock(&priv->mutex);
9123 IPW_DEBUG_WX("GET Power Management Mode -> %02X\n", priv->power_mode);
9124
9125 return 0;
9126 }
9127
9128 static int ipw_wx_set_powermode(struct net_device *dev,
9129 struct iw_request_info *info,
9130 union iwreq_data *wrqu, char *extra)
9131 {
9132 struct ipw_priv *priv = ieee80211_priv(dev);
9133 int mode = *(int *)extra;
9134 int err;
9135 mutex_lock(&priv->mutex);
9136 if ((mode < 1) || (mode > IPW_POWER_LIMIT)) {
9137 mode = IPW_POWER_AC;
9138 priv->power_mode = mode;
9139 } else {
9140 priv->power_mode = IPW_POWER_ENABLED | mode;
9141 }
9142
9143 if (priv->power_mode != mode) {
9144 err = ipw_send_power_mode(priv, mode);
9145
9146 if (err) {
9147 IPW_DEBUG_WX("failed setting power mode.\n");
9148 mutex_unlock(&priv->mutex);
9149 return err;
9150 }
9151 }
9152 mutex_unlock(&priv->mutex);
9153 return 0;
9154 }
9155
#define MAX_WX_STRING 80
/* Private "get_power" handler: render the current power-save level as
 * a human-readable string in extra.
 * NOTE(review): the p += snprintf(...) accumulation would pass a huge
 * size_t to later calls if a write ever truncated (snprintf returns
 * the would-be length); harmless today since every string fits well
 * inside MAX_WX_STRING, but worth tightening if strings grow. */
static int ipw_wx_get_powermode(struct net_device *dev,
				struct iw_request_info *info,
				union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	int level = IPW_POWER_LEVEL(priv->power_mode);
	char *p = extra;

	p += snprintf(p, MAX_WX_STRING, "Power save level: %d ", level);

	switch (level) {
	case IPW_POWER_AC:
		p += snprintf(p, MAX_WX_STRING - (p - extra), "(AC)");
		break;
	case IPW_POWER_BATTERY:
		p += snprintf(p, MAX_WX_STRING - (p - extra), "(BATTERY)");
		break;
	default:
		/* numbered levels index the firmware timeout/period tables */
		p += snprintf(p, MAX_WX_STRING - (p - extra),
			      "(Timeout %dms, Period %dms)",
			      timeout_duration[level - 1] / 1000,
			      period_duration[level - 1] / 1000);
	}

	if (!(priv->power_mode & IPW_POWER_ENABLED))
		p += snprintf(p, MAX_WX_STRING - (p - extra), " OFF");

	wrqu->data.length = p - extra + 1;

	return 0;
}
9188
/* Private "set_mode" handler: select 802.11 band/modulation support
 * from an a/b/g bitmask and force a reassociation.
 * NOTE(review): the repeated `else priv->ieee->abg_true = 0;` clauses
 * clear abg_true whenever *any* of B or G is absent -- looks
 * suspicious, preserved as-is; confirm the intended semantics. */
static int ipw_wx_set_wireless_mode(struct net_device *dev,
				    struct iw_request_info *info,
				    union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	int mode = *(int *)extra;
	u8 band = 0, modulation = 0;

	if (mode == 0 || mode & ~IEEE_MODE_MASK) {
		IPW_WARNING("Attempt to set invalid wireless mode: %d\n", mode);
		return -EINVAL;
	}
	mutex_lock(&priv->mutex);
	/* 802.11a is only possible on 2915ABG hardware */
	if (priv->adapter == IPW_2915ABG) {
		priv->ieee->abg_true = 1;
		if (mode & IEEE_A) {
			band |= IEEE80211_52GHZ_BAND;
			modulation |= IEEE80211_OFDM_MODULATION;
		} else
			priv->ieee->abg_true = 0;
	} else {
		if (mode & IEEE_A) {
			IPW_WARNING("Attempt to set 2200BG into "
				    "802.11a mode\n");
			mutex_unlock(&priv->mutex);
			return -EINVAL;
		}

		priv->ieee->abg_true = 0;
	}

	if (mode & IEEE_B) {
		band |= IEEE80211_24GHZ_BAND;
		modulation |= IEEE80211_CCK_MODULATION;
	} else
		priv->ieee->abg_true = 0;

	if (mode & IEEE_G) {
		band |= IEEE80211_24GHZ_BAND;
		modulation |= IEEE80211_OFDM_MODULATION;
	} else
		priv->ieee->abg_true = 0;

	priv->ieee->mode = mode;
	priv->ieee->freq_band = band;
	priv->ieee->modulation = modulation;
	init_supported_rates(priv, &priv->rates);

	/* Network configuration changed -- force [re]association */
	IPW_DEBUG_ASSOC("[re]association triggered due to mode change.\n");
	if (!ipw_disassociate(priv)) {
		ipw_send_supported_rates(priv, &priv->rates);
		ipw_associate(priv);
	}

	/* Update the band LEDs */
	ipw_led_band_on(priv);

	IPW_DEBUG_WX("PRIV SET MODE: %c%c%c\n",
		     mode & IEEE_A ? 'a' : '.',
		     mode & IEEE_B ? 'b' : '.', mode & IEEE_G ? 'g' : '.');
	mutex_unlock(&priv->mutex);
	return 0;
}
9253
9254 static int ipw_wx_get_wireless_mode(struct net_device *dev,
9255 struct iw_request_info *info,
9256 union iwreq_data *wrqu, char *extra)
9257 {
9258 struct ipw_priv *priv = ieee80211_priv(dev);
9259 mutex_lock(&priv->mutex);
9260 switch (priv->ieee->mode) {
9261 case IEEE_A:
9262 strncpy(extra, "802.11a (1)", MAX_WX_STRING);
9263 break;
9264 case IEEE_B:
9265 strncpy(extra, "802.11b (2)", MAX_WX_STRING);
9266 break;
9267 case IEEE_A | IEEE_B:
9268 strncpy(extra, "802.11ab (3)", MAX_WX_STRING);
9269 break;
9270 case IEEE_G:
9271 strncpy(extra, "802.11g (4)", MAX_WX_STRING);
9272 break;
9273 case IEEE_A | IEEE_G:
9274 strncpy(extra, "802.11ag (5)", MAX_WX_STRING);
9275 break;
9276 case IEEE_B | IEEE_G:
9277 strncpy(extra, "802.11bg (6)", MAX_WX_STRING);
9278 break;
9279 case IEEE_A | IEEE_B | IEEE_G:
9280 strncpy(extra, "802.11abg (7)", MAX_WX_STRING);
9281 break;
9282 default:
9283 strncpy(extra, "unknown", MAX_WX_STRING);
9284 break;
9285 }
9286
9287 IPW_DEBUG_WX("PRIV GET MODE: %s\n", extra);
9288
9289 wrqu->data.length = strlen(extra) + 1;
9290 mutex_unlock(&priv->mutex);
9291
9292 return 0;
9293 }
9294
9295 static int ipw_wx_set_preamble(struct net_device *dev,
9296 struct iw_request_info *info,
9297 union iwreq_data *wrqu, char *extra)
9298 {
9299 struct ipw_priv *priv = ieee80211_priv(dev);
9300 int mode = *(int *)extra;
9301 mutex_lock(&priv->mutex);
9302 /* Switching from SHORT -> LONG requires a disassociation */
9303 if (mode == 1) {
9304 if (!(priv->config & CFG_PREAMBLE_LONG)) {
9305 priv->config |= CFG_PREAMBLE_LONG;
9306
9307 /* Network configuration changed -- force [re]association */
9308 IPW_DEBUG_ASSOC
9309 ("[re]association triggered due to preamble change.\n");
9310 if (!ipw_disassociate(priv))
9311 ipw_associate(priv);
9312 }
9313 goto done;
9314 }
9315
9316 if (mode == 0) {
9317 priv->config &= ~CFG_PREAMBLE_LONG;
9318 goto done;
9319 }
9320 mutex_unlock(&priv->mutex);
9321 return -EINVAL;
9322
9323 done:
9324 mutex_unlock(&priv->mutex);
9325 return 0;
9326 }
9327
9328 static int ipw_wx_get_preamble(struct net_device *dev,
9329 struct iw_request_info *info,
9330 union iwreq_data *wrqu, char *extra)
9331 {
9332 struct ipw_priv *priv = ieee80211_priv(dev);
9333 mutex_lock(&priv->mutex);
9334 if (priv->config & CFG_PREAMBLE_LONG)
9335 snprintf(wrqu->name, IFNAMSIZ, "long (1)");
9336 else
9337 snprintf(wrqu->name, IFNAMSIZ, "auto (0)");
9338 mutex_unlock(&priv->mutex);
9339 return 0;
9340 }
9341
9342 #ifdef CONFIG_IPW2200_MONITOR
/* Private "monitor" handler: parms[0] != 0 enables monitor mode on
 * channel parms[1]; parms[0] == 0 restores managed (Ethernet) mode.
 * Changing the link type requires a full adapter restart, which is
 * deferred to the driver workqueue. */
static int ipw_wx_set_monitor(struct net_device *dev,
			      struct iw_request_info *info,
			      union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	int *parms = (int *)extra;
	int enable = (parms[0] > 0);
	mutex_lock(&priv->mutex);
	IPW_DEBUG_WX("SET MONITOR: %d %d\n", enable, parms[1]);
	if (enable) {
		if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
			/* link-layer header format depends on kernel config */
#ifdef CONFIG_IEEE80211_RADIOTAP
			priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
#else
			priv->net_dev->type = ARPHRD_IEEE80211;
#endif
			queue_work(priv->workqueue, &priv->adapter_restart);
		}

		ipw_set_channel(priv, parms[1]);
	} else {
		/* already out of monitor mode: nothing to undo */
		if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
			mutex_unlock(&priv->mutex);
			return 0;
		}
		priv->net_dev->type = ARPHRD_ETHER;
		queue_work(priv->workqueue, &priv->adapter_restart);
	}
	mutex_unlock(&priv->mutex);
	return 0;
}
9374
9375 #endif // CONFIG_IPW2200_MONITOR
9376
/* Private "reset" handler: queue an adapter restart.  The actual
 * firmware reload runs from the workqueue so the ioctl path does not
 * block for its duration. */
static int ipw_wx_reset(struct net_device *dev,
			struct iw_request_info *info,
			union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	IPW_DEBUG_WX("RESET\n");
	queue_work(priv->workqueue, &priv->adapter_restart);
	return 0;
}
9386
/* Private "sw_reset" handler: reset driver configuration to defaults,
 * drop any encryption keys, and reassociate if the radio is usable. */
static int ipw_wx_sw_reset(struct net_device *dev,
			   struct iw_request_info *info,
			   union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	/* synthetic request used to clear the encryption settings */
	union iwreq_data wrqu_sec = {
		.encoding = {
			     .flags = IW_ENCODE_DISABLED,
			     },
	};
	int ret;

	IPW_DEBUG_WX("SW_RESET\n");

	mutex_lock(&priv->mutex);

	ret = ipw_sw_reset(priv, 2);
	if (!ret) {
		free_firmware();
		ipw_adapter_restart(priv);
	}

	/* The SW reset bit might have been toggled on by the 'disable'
	 * module parameter, so take appropriate action */
	ipw_radio_kill_sw(priv, priv->status & STATUS_RF_KILL_SW);

	/* NOTE(review): the mutex is dropped around this call --
	 * presumably ieee80211_wx_set_encode acquires it (directly or
	 * via our callbacks); confirm before restructuring the locking */
	mutex_unlock(&priv->mutex);
	ieee80211_wx_set_encode(priv->ieee, info, &wrqu_sec, NULL);
	mutex_lock(&priv->mutex);

	if (!(priv->status & STATUS_RF_KILL_MASK)) {
		/* Configuration likely changed -- force [re]association */
		IPW_DEBUG_ASSOC("[re]association triggered due to sw "
				"reset.\n");
		if (!ipw_disassociate(priv))
			ipw_associate(priv);
	}

	mutex_unlock(&priv->mutex);

	return 0;
}
9429
/* Rebase the WE IOCTLs to zero for the handler array */
#define IW_IOCTL(x) [(x)-SIOCSIWCOMMIT]
/* Standard wireless-extensions dispatch table, indexed by ioctl number
 * rebased to SIOCSIWCOMMIT; gaps are NULL (unsupported ioctls). */
static iw_handler ipw_wx_handlers[] = {
	IW_IOCTL(SIOCGIWNAME) = ipw_wx_get_name,
	IW_IOCTL(SIOCSIWFREQ) = ipw_wx_set_freq,
	IW_IOCTL(SIOCGIWFREQ) = ipw_wx_get_freq,
	IW_IOCTL(SIOCSIWMODE) = ipw_wx_set_mode,
	IW_IOCTL(SIOCGIWMODE) = ipw_wx_get_mode,
	IW_IOCTL(SIOCSIWSENS) = ipw_wx_set_sens,
	IW_IOCTL(SIOCGIWSENS) = ipw_wx_get_sens,
	IW_IOCTL(SIOCGIWRANGE) = ipw_wx_get_range,
	IW_IOCTL(SIOCSIWAP) = ipw_wx_set_wap,
	IW_IOCTL(SIOCGIWAP) = ipw_wx_get_wap,
	IW_IOCTL(SIOCSIWSCAN) = ipw_wx_set_scan,
	IW_IOCTL(SIOCGIWSCAN) = ipw_wx_get_scan,
	IW_IOCTL(SIOCSIWESSID) = ipw_wx_set_essid,
	IW_IOCTL(SIOCGIWESSID) = ipw_wx_get_essid,
	IW_IOCTL(SIOCSIWNICKN) = ipw_wx_set_nick,
	IW_IOCTL(SIOCGIWNICKN) = ipw_wx_get_nick,
	IW_IOCTL(SIOCSIWRATE) = ipw_wx_set_rate,
	IW_IOCTL(SIOCGIWRATE) = ipw_wx_get_rate,
	IW_IOCTL(SIOCSIWRTS) = ipw_wx_set_rts,
	IW_IOCTL(SIOCGIWRTS) = ipw_wx_get_rts,
	IW_IOCTL(SIOCSIWFRAG) = ipw_wx_set_frag,
	IW_IOCTL(SIOCGIWFRAG) = ipw_wx_get_frag,
	IW_IOCTL(SIOCSIWTXPOW) = ipw_wx_set_txpow,
	IW_IOCTL(SIOCGIWTXPOW) = ipw_wx_get_txpow,
	IW_IOCTL(SIOCSIWRETRY) = ipw_wx_set_retry,
	IW_IOCTL(SIOCGIWRETRY) = ipw_wx_get_retry,
	IW_IOCTL(SIOCSIWENCODE) = ipw_wx_set_encode,
	IW_IOCTL(SIOCGIWENCODE) = ipw_wx_get_encode,
	IW_IOCTL(SIOCSIWPOWER) = ipw_wx_set_power,
	IW_IOCTL(SIOCGIWPOWER) = ipw_wx_get_power,
	/* spy support is implemented generically by the WE core */
	IW_IOCTL(SIOCSIWSPY) = iw_handler_set_spy,
	IW_IOCTL(SIOCGIWSPY) = iw_handler_get_spy,
	IW_IOCTL(SIOCSIWTHRSPY) = iw_handler_set_thrspy,
	IW_IOCTL(SIOCGIWTHRSPY) = iw_handler_get_thrspy,
	IW_IOCTL(SIOCSIWGENIE) = ipw_wx_set_genie,
	IW_IOCTL(SIOCGIWGENIE) = ipw_wx_get_genie,
	IW_IOCTL(SIOCSIWMLME) = ipw_wx_set_mlme,
	IW_IOCTL(SIOCSIWAUTH) = ipw_wx_set_auth,
	IW_IOCTL(SIOCGIWAUTH) = ipw_wx_get_auth,
	IW_IOCTL(SIOCSIWENCODEEXT) = ipw_wx_set_encodeext,
	IW_IOCTL(SIOCGIWENCODEEXT) = ipw_wx_get_encodeext,
};
9475
/* Private ioctl numbers.  Order matters: handler index in
 * ipw_priv_handler[] is (cmd - SIOCIWFIRSTPRIV). */
enum {
	IPW_PRIV_SET_POWER = SIOCIWFIRSTPRIV,
	IPW_PRIV_GET_POWER,
	IPW_PRIV_SET_MODE,
	IPW_PRIV_GET_MODE,
	IPW_PRIV_SET_PREAMBLE,
	IPW_PRIV_GET_PREAMBLE,
	IPW_PRIV_RESET,
	IPW_PRIV_SW_RESET,
#ifdef CONFIG_IPW2200_MONITOR
	IPW_PRIV_SET_MONITOR,
#endif
};
9489
9490 static struct iw_priv_args ipw_priv_args[] = {
9491 {
9492 .cmd = IPW_PRIV_SET_POWER,
9493 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
9494 .name = "set_power"},
9495 {
9496 .cmd = IPW_PRIV_GET_POWER,
9497 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
9498 .name = "get_power"},
9499 {
9500 .cmd = IPW_PRIV_SET_MODE,
9501 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
9502 .name = "set_mode"},
9503 {
9504 .cmd = IPW_PRIV_GET_MODE,
9505 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
9506 .name = "get_mode"},
9507 {
9508 .cmd = IPW_PRIV_SET_PREAMBLE,
9509 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
9510 .name = "set_preamble"},
9511 {
9512 .cmd = IPW_PRIV_GET_PREAMBLE,
9513 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | IFNAMSIZ,
9514 .name = "get_preamble"},
9515 {
9516 IPW_PRIV_RESET,
9517 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "reset"},
9518 {
9519 IPW_PRIV_SW_RESET,
9520 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "sw_reset"},
9521 #ifdef CONFIG_IPW2200_MONITOR
9522 {
9523 IPW_PRIV_SET_MONITOR,
9524 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "monitor"},
9525 #endif /* CONFIG_IPW2200_MONITOR */
9526 };
9527
/* Handler functions for the private ioctls.  The WE core indexes this
 * array by (cmd - SIOCIWFIRSTPRIV), so the order here must match the
 * IPW_PRIV_* enum exactly, including the CONFIG_IPW2200_MONITOR gap. */
static iw_handler ipw_priv_handler[] = {
        ipw_wx_set_powermode,
        ipw_wx_get_powermode,
        ipw_wx_set_wireless_mode,
        ipw_wx_get_wireless_mode,
        ipw_wx_set_preamble,
        ipw_wx_get_preamble,
        ipw_wx_reset,
        ipw_wx_sw_reset,
#ifdef CONFIG_IPW2200_MONITOR
        ipw_wx_set_monitor,
#endif
};
9541
/* Wireless Extensions dispatch table registered with the net core:
 * the standard WE handlers plus the driver-private ioctls and their
 * argument descriptors, and the /proc/net/wireless stats callback. */
static struct iw_handler_def ipw_wx_handler_def = {
        .standard = ipw_wx_handlers,
        .num_standard = ARRAY_SIZE(ipw_wx_handlers),
        .num_private = ARRAY_SIZE(ipw_priv_handler),
        .num_private_args = ARRAY_SIZE(ipw_priv_args),
        .private = ipw_priv_handler,
        .private_args = ipw_priv_args,
        .get_wireless_stats = ipw_get_wireless_stats,
};
9551
9552 /*
9553 * Get wireless statistics.
9554 * Called by /proc/net/wireless
9555 * Also called by SIOCGIWSTATS
9556 */
9557 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev)
9558 {
9559 struct ipw_priv *priv = ieee80211_priv(dev);
9560 struct iw_statistics *wstats;
9561
9562 wstats = &priv->wstats;
9563
9564 /* if hw is disabled, then ipw_get_ordinal() can't be called.
9565 * netdev->get_wireless_stats seems to be called before fw is
9566 * initialized. STATUS_ASSOCIATED will only be set if the hw is up
9567 * and associated; if not associcated, the values are all meaningless
9568 * anyway, so set them all to NULL and INVALID */
9569 if (!(priv->status & STATUS_ASSOCIATED)) {
9570 wstats->miss.beacon = 0;
9571 wstats->discard.retries = 0;
9572 wstats->qual.qual = 0;
9573 wstats->qual.level = 0;
9574 wstats->qual.noise = 0;
9575 wstats->qual.updated = 7;
9576 wstats->qual.updated |= IW_QUAL_NOISE_INVALID |
9577 IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID;
9578 return wstats;
9579 }
9580
9581 wstats->qual.qual = priv->quality;
9582 wstats->qual.level = average_value(&priv->average_rssi);
9583 wstats->qual.noise = average_value(&priv->average_noise);
9584 wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED |
9585 IW_QUAL_NOISE_UPDATED | IW_QUAL_DBM;
9586
9587 wstats->miss.beacon = average_value(&priv->average_missed_beacons);
9588 wstats->discard.retries = priv->last_tx_failures;
9589 wstats->discard.code = priv->ieee->ieee_stats.rx_discards_undecryptable;
9590
9591 /* if (ipw_get_ordinal(priv, IPW_ORD_STAT_TX_RETRY, &tx_retry, &len))
9592 goto fail_get_ordinal;
9593 wstats->discard.retries += tx_retry; */
9594
9595 return wstats;
9596 }
9597
9598 /* net device stuff */
9599
9600 static void init_sys_config(struct ipw_sys_config *sys_config)
9601 {
9602 memset(sys_config, 0, sizeof(struct ipw_sys_config));
9603 sys_config->bt_coexistence = 0;
9604 sys_config->answer_broadcast_ssid_probe = 0;
9605 sys_config->accept_all_data_frames = 0;
9606 sys_config->accept_non_directed_frames = 1;
9607 sys_config->exclude_unicast_unencrypted = 0;
9608 sys_config->disable_unicast_decryption = 1;
9609 sys_config->exclude_multicast_unencrypted = 0;
9610 sys_config->disable_multicast_decryption = 1;
9611 sys_config->antenna_diversity = CFG_SYS_ANTENNA_SLOW_DIV;
9612 sys_config->pass_crc_to_host = 0; /* TODO: See if 1 gives us FCS */
9613 sys_config->dot11g_auto_detection = 0;
9614 sys_config->enable_cts_to_self = 0;
9615 sys_config->bt_coexist_collision_thr = 0;
9616 sys_config->pass_noise_stats_to_host = 1; //1 -- fix for 256
9617 sys_config->silence_threshold = 0x1e;
9618 }
9619
9620 static int ipw_net_open(struct net_device *dev)
9621 {
9622 struct ipw_priv *priv = ieee80211_priv(dev);
9623 IPW_DEBUG_INFO("dev->open\n");
9624 /* we should be verifying the device is ready to be opened */
9625 mutex_lock(&priv->mutex);
9626 if (!(priv->status & STATUS_RF_KILL_MASK) &&
9627 (priv->status & STATUS_ASSOCIATED))
9628 netif_start_queue(dev);
9629 mutex_unlock(&priv->mutex);
9630 return 0;
9631 }
9632
/* net_device stop() hook: halt the TX queue; firmware/interface
 * teardown is handled elsewhere.  Always succeeds. */
static int ipw_net_stop(struct net_device *dev)
{
        IPW_DEBUG_INFO("dev->close\n");
        netif_stop_queue(dev);
        return 0;
}
9639
9640 /*
9641 todo:
9642
9643 modify to send one tfd per fragment instead of using chunking. otherwise
9644 we need to heavily modify the ieee80211_skb_to_txb.
9645 */
9646
9647 static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb,
9648 int pri)
9649 {
9650 struct ieee80211_hdr_3addr *hdr = (struct ieee80211_hdr_3addr *)
9651 txb->fragments[0]->data;
9652 int i = 0;
9653 struct tfd_frame *tfd;
9654 #ifdef CONFIG_IPW_QOS
9655 int tx_id = ipw_get_tx_queue_number(priv, pri);
9656 struct clx2_tx_queue *txq = &priv->txq[tx_id];
9657 #else
9658 struct clx2_tx_queue *txq = &priv->txq[0];
9659 #endif
9660 struct clx2_queue *q = &txq->q;
9661 u8 id, hdr_len, unicast;
9662 u16 remaining_bytes;
9663 int fc;
9664
9665 switch (priv->ieee->iw_mode) {
9666 case IW_MODE_ADHOC:
9667 hdr_len = IEEE80211_3ADDR_LEN;
9668 unicast = !is_multicast_ether_addr(hdr->addr1);
9669 id = ipw_find_station(priv, hdr->addr1);
9670 if (id == IPW_INVALID_STATION) {
9671 id = ipw_add_station(priv, hdr->addr1);
9672 if (id == IPW_INVALID_STATION) {
9673 IPW_WARNING("Attempt to send data to "
9674 "invalid cell: " MAC_FMT "\n",
9675 MAC_ARG(hdr->addr1));
9676 goto drop;
9677 }
9678 }
9679 break;
9680
9681 case IW_MODE_INFRA:
9682 default:
9683 unicast = !is_multicast_ether_addr(hdr->addr3);
9684 hdr_len = IEEE80211_3ADDR_LEN;
9685 id = 0;
9686 break;
9687 }
9688
9689 tfd = &txq->bd[q->first_empty];
9690 txq->txb[q->first_empty] = txb;
9691 memset(tfd, 0, sizeof(*tfd));
9692 tfd->u.data.station_number = id;
9693
9694 tfd->control_flags.message_type = TX_FRAME_TYPE;
9695 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
9696
9697 tfd->u.data.cmd_id = DINO_CMD_TX;
9698 tfd->u.data.len = cpu_to_le16(txb->payload_size);
9699 remaining_bytes = txb->payload_size;
9700
9701 if (priv->assoc_request.ieee_mode == IPW_B_MODE)
9702 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_CCK;
9703 else
9704 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_OFDM;
9705
9706 if (priv->assoc_request.preamble_length == DCT_FLAG_SHORT_PREAMBLE)
9707 tfd->u.data.tx_flags |= DCT_FLAG_SHORT_PREAMBLE;
9708
9709 fc = le16_to_cpu(hdr->frame_ctl);
9710 hdr->frame_ctl = cpu_to_le16(fc & ~IEEE80211_FCTL_MOREFRAGS);
9711
9712 memcpy(&tfd->u.data.tfd.tfd_24.mchdr, hdr, hdr_len);
9713
9714 if (likely(unicast))
9715 tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
9716
9717 if (txb->encrypted && !priv->ieee->host_encrypt) {
9718 switch (priv->ieee->sec.level) {
9719 case SEC_LEVEL_3:
9720 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
9721 IEEE80211_FCTL_PROTECTED;
9722 /* XXX: ACK flag must be set for CCMP even if it
9723 * is a multicast/broadcast packet, because CCMP
9724 * group communication encrypted by GTK is
9725 * actually done by the AP. */
9726 if (!unicast)
9727 tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
9728
9729 tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
9730 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_CCM;
9731 tfd->u.data.key_index = 0;
9732 tfd->u.data.key_index |= DCT_WEP_INDEX_USE_IMMEDIATE;
9733 break;
9734 case SEC_LEVEL_2:
9735 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
9736 IEEE80211_FCTL_PROTECTED;
9737 tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
9738 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_TKIP;
9739 tfd->u.data.key_index = DCT_WEP_INDEX_USE_IMMEDIATE;
9740 break;
9741 case SEC_LEVEL_1:
9742 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
9743 IEEE80211_FCTL_PROTECTED;
9744 tfd->u.data.key_index = priv->ieee->tx_keyidx;
9745 if (priv->ieee->sec.key_sizes[priv->ieee->tx_keyidx] <=
9746 40)
9747 tfd->u.data.key_index |= DCT_WEP_KEY_64Bit;
9748 else
9749 tfd->u.data.key_index |= DCT_WEP_KEY_128Bit;
9750 break;
9751 case SEC_LEVEL_0:
9752 break;
9753 default:
9754 printk(KERN_ERR "Unknow security level %d\n",
9755 priv->ieee->sec.level);
9756 break;
9757 }
9758 } else
9759 /* No hardware encryption */
9760 tfd->u.data.tx_flags |= DCT_FLAG_NO_WEP;
9761
9762 #ifdef CONFIG_IPW_QOS
9763 ipw_qos_set_tx_queue_command(priv, pri, &(tfd->u.data), unicast);
9764 #endif /* CONFIG_IPW_QOS */
9765
9766 /* payload */
9767 tfd->u.data.num_chunks = cpu_to_le32(min((u8) (NUM_TFD_CHUNKS - 2),
9768 txb->nr_frags));
9769 IPW_DEBUG_FRAG("%i fragments being sent as %i chunks.\n",
9770 txb->nr_frags, le32_to_cpu(tfd->u.data.num_chunks));
9771 for (i = 0; i < le32_to_cpu(tfd->u.data.num_chunks); i++) {
9772 IPW_DEBUG_FRAG("Adding fragment %i of %i (%d bytes).\n",
9773 i, le32_to_cpu(tfd->u.data.num_chunks),
9774 txb->fragments[i]->len - hdr_len);
9775 IPW_DEBUG_TX("Dumping TX packet frag %i of %i (%d bytes):\n",
9776 i, tfd->u.data.num_chunks,
9777 txb->fragments[i]->len - hdr_len);
9778 printk_buf(IPW_DL_TX, txb->fragments[i]->data + hdr_len,
9779 txb->fragments[i]->len - hdr_len);
9780
9781 tfd->u.data.chunk_ptr[i] =
9782 cpu_to_le32(pci_map_single
9783 (priv->pci_dev,
9784 txb->fragments[i]->data + hdr_len,
9785 txb->fragments[i]->len - hdr_len,
9786 PCI_DMA_TODEVICE));
9787 tfd->u.data.chunk_len[i] =
9788 cpu_to_le16(txb->fragments[i]->len - hdr_len);
9789 }
9790
9791 if (i != txb->nr_frags) {
9792 struct sk_buff *skb;
9793 u16 remaining_bytes = 0;
9794 int j;
9795
9796 for (j = i; j < txb->nr_frags; j++)
9797 remaining_bytes += txb->fragments[j]->len - hdr_len;
9798
9799 printk(KERN_INFO "Trying to reallocate for %d bytes\n",
9800 remaining_bytes);
9801 skb = alloc_skb(remaining_bytes, GFP_ATOMIC);
9802 if (skb != NULL) {
9803 tfd->u.data.chunk_len[i] = cpu_to_le16(remaining_bytes);
9804 for (j = i; j < txb->nr_frags; j++) {
9805 int size = txb->fragments[j]->len - hdr_len;
9806
9807 printk(KERN_INFO "Adding frag %d %d...\n",
9808 j, size);
9809 memcpy(skb_put(skb, size),
9810 txb->fragments[j]->data + hdr_len, size);
9811 }
9812 dev_kfree_skb_any(txb->fragments[i]);
9813 txb->fragments[i] = skb;
9814 tfd->u.data.chunk_ptr[i] =
9815 cpu_to_le32(pci_map_single
9816 (priv->pci_dev, skb->data,
9817 tfd->u.data.chunk_len[i],
9818 PCI_DMA_TODEVICE));
9819
9820 tfd->u.data.num_chunks =
9821 cpu_to_le32(le32_to_cpu(tfd->u.data.num_chunks) +
9822 1);
9823 }
9824 }
9825
9826 /* kick DMA */
9827 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
9828 ipw_write32(priv, q->reg_w, q->first_empty);
9829
9830 if (ipw_queue_space(q) < q->high_mark)
9831 netif_stop_queue(priv->net_dev);
9832
9833 return NETDEV_TX_OK;
9834
9835 drop:
9836 IPW_DEBUG_DROP("Silently dropping Tx packet.\n");
9837 ieee80211_txb_free(txb);
9838 return NETDEV_TX_OK;
9839 }
9840
9841 static int ipw_net_is_queue_full(struct net_device *dev, int pri)
9842 {
9843 struct ipw_priv *priv = ieee80211_priv(dev);
9844 #ifdef CONFIG_IPW_QOS
9845 int tx_id = ipw_get_tx_queue_number(priv, pri);
9846 struct clx2_tx_queue *txq = &priv->txq[tx_id];
9847 #else
9848 struct clx2_tx_queue *txq = &priv->txq[0];
9849 #endif /* CONFIG_IPW_QOS */
9850
9851 if (ipw_queue_space(&txq->q) < txq->q.high_mark)
9852 return 1;
9853
9854 return 0;
9855 }
9856
/*
 * TX entry point (via the ieee80211 layer).
 *
 * If we are not associated the frame is refused: the carrier-error
 * counter is bumped, the queue stopped, and 1 is returned -- the txb
 * is NOT freed on this path (presumably the caller retains ownership;
 * NOTE(review): confirm against the ieee80211 xmit contract).
 * Otherwise the txb is handed to ipw_tx_skb() under the IRQ spinlock
 * and the activity LED blinked on success.
 */
static int ipw_net_hard_start_xmit(struct ieee80211_txb *txb,
                                   struct net_device *dev, int pri)
{
        struct ipw_priv *priv = ieee80211_priv(dev);
        unsigned long flags;
        int ret;

        IPW_DEBUG_TX("dev->xmit(%d bytes)\n", txb->payload_size);
        spin_lock_irqsave(&priv->lock, flags);

        if (!(priv->status & STATUS_ASSOCIATED)) {
                IPW_DEBUG_INFO("Tx attempt while not associated.\n");
                priv->ieee->stats.tx_carrier_errors++;
                netif_stop_queue(dev);
                goto fail_unlock;
        }

        ret = ipw_tx_skb(priv, txb, pri);
        if (ret == NETDEV_TX_OK)
                __ipw_led_activity_on(priv);
        spin_unlock_irqrestore(&priv->lock, flags);

        return ret;

      fail_unlock:
        spin_unlock_irqrestore(&priv->lock, flags);
        return 1;
}
9885
9886 static struct net_device_stats *ipw_net_get_stats(struct net_device *dev)
9887 {
9888 struct ipw_priv *priv = ieee80211_priv(dev);
9889
9890 priv->ieee->stats.tx_packets = priv->tx_packets;
9891 priv->ieee->stats.rx_packets = priv->rx_packets;
9892 return &priv->ieee->stats;
9893 }
9894
/* net_device set_multicast_list hook: deliberately a no-op -- the
 * driver does not program hardware multicast filters here. */
static void ipw_net_set_multicast_list(struct net_device *dev)
{

}
9899
/* net_device set_mac_address hook: validate and record a user-supplied
 * MAC address, then schedule an adapter restart so the firmware starts
 * using it (the address cannot take effect without a restart).
 * Returns 0 on success, -EADDRNOTAVAIL for an invalid address. */
static int ipw_net_set_mac_address(struct net_device *dev, void *p)
{
        struct ipw_priv *priv = ieee80211_priv(dev);
        struct sockaddr *addr = p;
        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;
        mutex_lock(&priv->mutex);
        priv->config |= CFG_CUSTOM_MAC; /* restart path reads priv->mac_addr */
        memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
        printk(KERN_INFO "%s: Setting MAC to " MAC_FMT "\n",
               priv->net_dev->name, MAC_ARG(priv->mac_addr));
        queue_work(priv->workqueue, &priv->adapter_restart);
        mutex_unlock(&priv->mutex);
        return 0;
}
9915
9916 static void ipw_ethtool_get_drvinfo(struct net_device *dev,
9917 struct ethtool_drvinfo *info)
9918 {
9919 struct ipw_priv *p = ieee80211_priv(dev);
9920 char vers[64];
9921 char date[32];
9922 u32 len;
9923
9924 strcpy(info->driver, DRV_NAME);
9925 strcpy(info->version, DRV_VERSION);
9926
9927 len = sizeof(vers);
9928 ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len);
9929 len = sizeof(date);
9930 ipw_get_ordinal(p, IPW_ORD_STAT_FW_DATE, date, &len);
9931
9932 snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)",
9933 vers, date);
9934 strcpy(info->bus_info, pci_name(p->pci_dev));
9935 info->eedump_len = IPW_EEPROM_IMAGE_SIZE;
9936 }
9937
9938 static u32 ipw_ethtool_get_link(struct net_device *dev)
9939 {
9940 struct ipw_priv *priv = ieee80211_priv(dev);
9941 return (priv->status & STATUS_ASSOCIATED) != 0;
9942 }
9943
/* ethtool get_eeprom_len: the ipw2200 EEPROM image has a fixed size. */
static int ipw_ethtool_get_eeprom_len(struct net_device *dev)
{
        return IPW_EEPROM_IMAGE_SIZE;
}
9948
/* ethtool get_eeprom: copy the requested range out of the driver's
 * cached EEPROM image (priv->eeprom) without touching the hardware.
 * Returns 0, or -EINVAL if the range exceeds the image.
 * NOTE(review): offset + len could wrap if both are large 32-bit
 * values; the ethtool core presumably sanity-checks them -- confirm. */
static int ipw_ethtool_get_eeprom(struct net_device *dev,
                                  struct ethtool_eeprom *eeprom, u8 * bytes)
{
        struct ipw_priv *p = ieee80211_priv(dev);

        if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
                return -EINVAL;
        mutex_lock(&p->mutex);
        memcpy(bytes, &p->eeprom[eeprom->offset], eeprom->len);
        mutex_unlock(&p->mutex);
        return 0;
}
9961
/* ethtool set_eeprom: patch the requested range into the cached image,
 * then write the ENTIRE image back to the adapter byte-by-byte.
 * Returns 0, or -EINVAL if the range exceeds the image.
 * NOTE(review): rewriting all IPW_EEPROM_IMAGE_SIZE bytes on every
 * update looks wasteful since only [offset, offset+len) changed --
 * confirm whether the hardware requires a full-image write. */
static int ipw_ethtool_set_eeprom(struct net_device *dev,
                                  struct ethtool_eeprom *eeprom, u8 * bytes)
{
        struct ipw_priv *p = ieee80211_priv(dev);
        int i;

        if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
                return -EINVAL;
        mutex_lock(&p->mutex);
        memcpy(&p->eeprom[eeprom->offset], bytes, eeprom->len);
        for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
                ipw_write8(p, i + IPW_EEPROM_DATA, p->eeprom[i]);
        mutex_unlock(&p->mutex);
        return 0;
}
9977
9978 static struct ethtool_ops ipw_ethtool_ops = {
9979 .get_link = ipw_ethtool_get_link,
9980 .get_drvinfo = ipw_ethtool_get_drvinfo,
9981 .get_eeprom_len = ipw_ethtool_get_eeprom_len,
9982 .get_eeprom = ipw_ethtool_get_eeprom,
9983 .set_eeprom = ipw_ethtool_set_eeprom,
9984 };
9985
/*
 * Primary interrupt handler (hard IRQ context, device spinlock held
 * throughout).  All real processing is deferred to the IRQ tasklet:
 * here we only verify the interrupt is ours, ack and mask it, and
 * cache the INTA bits for the tasklet.
 * NOTE(review): interrupts are disabled here and presumably re-enabled
 * by the tasklet when it finishes -- not visible in this function.
 */
static irqreturn_t ipw_isr(int irq, void *data, struct pt_regs *regs)
{
        struct ipw_priv *priv = data;
        u32 inta, inta_mask;

        if (!priv)
                return IRQ_NONE;

        spin_lock(&priv->lock);

        if (!(priv->status & STATUS_INT_ENABLED)) {
                /* Shared IRQ */
                goto none;
        }

        inta = ipw_read32(priv, IPW_INTA_RW);
        inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);

        if (inta == 0xFFFFFFFF) {
                /* Hardware disappeared */
                IPW_WARNING("IRQ INTA == 0xFFFFFFFF\n");
                goto none;
        }

        if (!(inta & (IPW_INTA_MASK_ALL & inta_mask))) {
                /* Shared interrupt */
                goto none;
        }

        /* tell the device to stop sending interrupts */
        ipw_disable_interrupts(priv);

        /* ack current interrupts */
        inta &= (IPW_INTA_MASK_ALL & inta_mask);
        ipw_write32(priv, IPW_INTA_RW, inta);

        /* Cache INTA value for our tasklet */
        priv->isr_inta = inta;

        tasklet_schedule(&priv->irq_tasklet);

        spin_unlock(&priv->lock);

        return IRQ_HANDLED;
      none:
        spin_unlock(&priv->lock);
        return IRQ_NONE;
}
10034
/*
 * Periodic RF-kill poll (work item, runs under priv->lock here and
 * under priv->mutex via ipw_bg_rf_kill).  While the hardware kill
 * switch is active, re-arm this check every 2 seconds; once it clears
 * -- and no software kill bit remains -- schedule an adapter restart
 * to bring the device back up (a restart can't run under this lock).
 */
static void ipw_rf_kill(void *adapter)
{
        struct ipw_priv *priv = adapter;
        unsigned long flags;

        spin_lock_irqsave(&priv->lock, flags);

        if (rf_kill_active(priv)) {
                IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
                if (priv->workqueue)
                        queue_delayed_work(priv->workqueue,
                                           &priv->rf_kill, 2 * HZ);
                goto exit_unlock;
        }

        /* RF Kill is now disabled, so bring the device back up */

        if (!(priv->status & STATUS_RF_KILL_MASK)) {
                IPW_DEBUG_RF_KILL("HW RF Kill no longer active, restarting "
                                  "device\n");

                /* we can not do an adapter restart while inside an irq lock */
                queue_work(priv->workqueue, &priv->adapter_restart);
        } else
                IPW_DEBUG_RF_KILL("HW RF Kill deactivated. SW RF Kill still "
                                  "enabled\n");

      exit_unlock:
        spin_unlock_irqrestore(&priv->lock, flags);
}
10065
10066 static void ipw_bg_rf_kill(void *data)
10067 {
10068 struct ipw_priv *priv = data;
10069 mutex_lock(&priv->mutex);
10070 ipw_rf_kill(data);
10071 mutex_unlock(&priv->mutex);
10072 }
10073
/*
 * Transition to the "link up" state after a successful association:
 * reset de-dup tracking, turn the carrier on and (re)start the TX
 * queue, stop scanning, refresh rate/statistics state, light the link
 * LED and notify user space of the association.  Optionally re-arms a
 * background scan one second out.
 */
static void ipw_link_up(struct ipw_priv *priv)
{
        /* reset duplicate-frame detection state for the new link */
        priv->last_seq_num = -1;
        priv->last_frag_num = -1;
        priv->last_packet_time = 0;

        netif_carrier_on(priv->net_dev);
        if (netif_queue_stopped(priv->net_dev)) {
                IPW_DEBUG_NOTIF("waking queue\n");
                netif_wake_queue(priv->net_dev);
        } else {
                IPW_DEBUG_NOTIF("starting queue\n");
                netif_start_queue(priv->net_dev);
        }

        cancel_delayed_work(&priv->request_scan);
        ipw_reset_stats(priv);
        /* Ensure the rate is updated immediately */
        priv->last_rate = ipw_get_current_rate(priv);
        ipw_gather_stats(priv);
        ipw_led_link_up(priv);
        notify_wx_assoc_event(priv);

        if (priv->config & CFG_BACKGROUND_SCAN)
                queue_delayed_work(priv->workqueue, &priv->request_scan, HZ);
}
10100
10101 static void ipw_bg_link_up(void *data)
10102 {
10103 struct ipw_priv *priv = data;
10104 mutex_lock(&priv->mutex);
10105 ipw_link_up(data);
10106 mutex_unlock(&priv->mutex);
10107 }
10108
/*
 * Transition to the "link down" state: LED off, carrier off, TX queue
 * stopped, user space notified, all pending scan/adhoc/stats work
 * cancelled and statistics reset.  Unless the driver is shutting down
 * (STATUS_EXIT_PENDING), immediately queue a fresh scan to look for a
 * new network.
 */
static void ipw_link_down(struct ipw_priv *priv)
{
        ipw_led_link_down(priv);
        netif_carrier_off(priv->net_dev);
        netif_stop_queue(priv->net_dev);
        notify_wx_assoc_event(priv);

        /* Cancel any queued work ... */
        cancel_delayed_work(&priv->request_scan);
        cancel_delayed_work(&priv->adhoc_check);
        cancel_delayed_work(&priv->gather_stats);

        ipw_reset_stats(priv);

        if (!(priv->status & STATUS_EXIT_PENDING)) {
                /* Queue up another scan... */
                queue_work(priv->workqueue, &priv->request_scan);
        }
}
10128
10129 static void ipw_bg_link_down(void *data)
10130 {
10131 struct ipw_priv *priv = data;
10132 mutex_lock(&priv->mutex);
10133 ipw_link_down(data);
10134 mutex_unlock(&priv->mutex);
10135 }
10136
/*
 * Create the driver's private workqueue, wait queues, all deferred
 * work items, and the IRQ tasklet.  Always returns 0.
 * NOTE(review): the create_workqueue() result is not checked for NULL;
 * a failure here would only surface as an oops at the first
 * queue_work() -- confirm whether callers could act on an error.
 */
static int ipw_setup_deferred_work(struct ipw_priv *priv)
{
        int ret = 0;

        priv->workqueue = create_workqueue(DRV_NAME);
        init_waitqueue_head(&priv->wait_command_queue);
        init_waitqueue_head(&priv->wait_state);

        /* Work items using the old 3-argument INIT_WORK API; the casts
         * adapt handlers declared with typed argument signatures. */
        INIT_WORK(&priv->adhoc_check, ipw_bg_adhoc_check, priv);
        INIT_WORK(&priv->associate, ipw_bg_associate, priv);
        INIT_WORK(&priv->disassociate, ipw_bg_disassociate, priv);
        INIT_WORK(&priv->system_config, ipw_system_config, priv);
        INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish, priv);
        INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart, priv);
        INIT_WORK(&priv->rf_kill, ipw_bg_rf_kill, priv);
        INIT_WORK(&priv->up, (void (*)(void *))ipw_bg_up, priv);
        INIT_WORK(&priv->down, (void (*)(void *))ipw_bg_down, priv);
        INIT_WORK(&priv->request_scan,
                  (void (*)(void *))ipw_request_scan, priv);
        INIT_WORK(&priv->gather_stats,
                  (void (*)(void *))ipw_bg_gather_stats, priv);
        INIT_WORK(&priv->abort_scan, (void (*)(void *))ipw_bg_abort_scan, priv);
        INIT_WORK(&priv->roam, ipw_bg_roam, priv);
        INIT_WORK(&priv->scan_check, ipw_bg_scan_check, priv);
        INIT_WORK(&priv->link_up, (void (*)(void *))ipw_bg_link_up, priv);
        INIT_WORK(&priv->link_down, (void (*)(void *))ipw_bg_link_down, priv);
        INIT_WORK(&priv->led_link_on, (void (*)(void *))ipw_bg_led_link_on,
                  priv);
        INIT_WORK(&priv->led_link_off, (void (*)(void *))ipw_bg_led_link_off,
                  priv);
        INIT_WORK(&priv->led_act_off, (void (*)(void *))ipw_bg_led_activity_off,
                  priv);
        INIT_WORK(&priv->merge_networks,
                  (void (*)(void *))ipw_merge_adhoc_network, priv);

#ifdef CONFIG_IPW_QOS
        INIT_WORK(&priv->qos_activate, (void (*)(void *))ipw_bg_qos_activate,
                  priv);
#endif                          /* CONFIG_IPW_QOS */

        /* Bottom half for ipw_isr(); consumes the cached INTA bits. */
        tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
                     ipw_irq_tasklet, (unsigned long)priv);

        return ret;
}
10182
/*
 * ieee80211 set_security callback: merge the supplied settings into
 * priv->ieee->sec, raising STATUS_SECURITY_UPDATED for every field
 * that actually changed.  When host encryption is off, pushes key
 * material straight to the hardware.
 */
static void shim__set_security(struct net_device *dev,
                               struct ieee80211_security *sec)
{
        struct ipw_priv *priv = ieee80211_priv(dev);
        int i;
        /* Per-key update: copy each key whose flag bit is set; a
         * zero-length key clears that key's flag instead. */
        for (i = 0; i < 4; i++) {
                if (sec->flags & (1 << i)) {
                        priv->ieee->sec.encode_alg[i] = sec->encode_alg[i];
                        priv->ieee->sec.key_sizes[i] = sec->key_sizes[i];
                        if (sec->key_sizes[i] == 0)
                                priv->ieee->sec.flags &= ~(1 << i);
                        else {
                                memcpy(priv->ieee->sec.keys[i], sec->keys[i],
                                       sec->key_sizes[i]);
                                priv->ieee->sec.flags |= (1 << i);
                        }
                        priv->status |= STATUS_SECURITY_UPDATED;
                } else if (sec->level != SEC_LEVEL_1)
                        /* WEP (level 1) keeps untouched keys; other levels
                         * drop them */
                        priv->ieee->sec.flags &= ~(1 << i);
        }

        /* Active (default TX) key index; only indices 0..3 are valid */
        if (sec->flags & SEC_ACTIVE_KEY) {
                if (sec->active_key <= 3) {
                        priv->ieee->sec.active_key = sec->active_key;
                        priv->ieee->sec.flags |= SEC_ACTIVE_KEY;
                } else
                        priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
                priv->status |= STATUS_SECURITY_UPDATED;
        } else
                priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;

        /* Authentication mode: shared-key auth also sets the driver's
         * CAP_SHARED_KEY capability bit */
        if ((sec->flags & SEC_AUTH_MODE) &&
            (priv->ieee->sec.auth_mode != sec->auth_mode)) {
                priv->ieee->sec.auth_mode = sec->auth_mode;
                priv->ieee->sec.flags |= SEC_AUTH_MODE;
                if (sec->auth_mode == WLAN_AUTH_SHARED_KEY)
                        priv->capability |= CAP_SHARED_KEY;
                else
                        priv->capability &= ~CAP_SHARED_KEY;
                priv->status |= STATUS_SECURITY_UPDATED;
        }

        /* Privacy on/off tracks the enabled flag */
        if (sec->flags & SEC_ENABLED && priv->ieee->sec.enabled != sec->enabled) {
                priv->ieee->sec.flags |= SEC_ENABLED;
                priv->ieee->sec.enabled = sec->enabled;
                priv->status |= STATUS_SECURITY_UPDATED;
                if (sec->enabled)
                        priv->capability |= CAP_PRIVACY_ON;
                else
                        priv->capability &= ~CAP_PRIVACY_ON;
        }

        if (sec->flags & SEC_ENCRYPT)
                priv->ieee->sec.encrypt = sec->encrypt;

        if (sec->flags & SEC_LEVEL && priv->ieee->sec.level != sec->level) {
                priv->ieee->sec.level = sec->level;
                priv->ieee->sec.flags |= SEC_LEVEL;
                priv->status |= STATUS_SECURITY_UPDATED;
        }

        if (!priv->ieee->host_encrypt && (sec->flags & SEC_ENCRYPT))
                ipw_set_hwcrypto_keys(priv);

        /* To match current functionality of ipw2100 (which works well w/
         * various supplicants, we don't force a disassociate if the
         * privacy capability changes ... */
#if 0
        if ((priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) &&
            (((priv->assoc_request.capability &
               WLAN_CAPABILITY_PRIVACY) && !sec->enabled) ||
             (!(priv->assoc_request.capability &
                WLAN_CAPABILITY_PRIVACY) && sec->enabled))) {
                IPW_DEBUG_ASSOC("Disassociating due to capability "
                                "change.\n");
                ipw_disassociate(priv);
        }
#endif
}
10262
/*
 * Populate the rate set advertised to the firmware according to the
 * configured frequency band: OFDM-only for 5.2GHz (802.11a), CCK plus
 * optional OFDM for 2.4GHz/mixed.  Always returns 0.
 * NOTE(review): both helpers are passed IEEE80211_CCK_MODULATION as
 * the modulation argument, even for the OFDM rate set -- looks odd;
 * confirm against the helper definitions.
 */
static int init_supported_rates(struct ipw_priv *priv,
                                struct ipw_supported_rates *rates)
{
        /* TODO: Mask out rates based on priv->rates_mask */

        memset(rates, 0, sizeof(*rates));
        /* configure supported rates */
        switch (priv->ieee->freq_band) {
        case IEEE80211_52GHZ_BAND:
                rates->ieee_mode = IPW_A_MODE;
                rates->purpose = IPW_RATE_CAPABILITIES;
                ipw_add_ofdm_scan_rates(rates, IEEE80211_CCK_MODULATION,
                                        IEEE80211_OFDM_DEFAULT_RATES_MASK);
                break;

        default:                /* Mixed or 2.4Ghz */
                rates->ieee_mode = IPW_G_MODE;
                rates->purpose = IPW_RATE_CAPABILITIES;
                ipw_add_cck_scan_rates(rates, IEEE80211_CCK_MODULATION,
                                       IEEE80211_CCK_DEFAULT_RATES_MASK);
                if (priv->ieee->modulation & IEEE80211_OFDM_MODULATION) {
                        ipw_add_ofdm_scan_rates(rates, IEEE80211_CCK_MODULATION,
                                                IEEE80211_OFDM_DEFAULT_RATES_MASK);
                }
                break;
        }

        return 0;
}
10292
/*
 * Post-firmware-load device configuration, called from ipw_up: TX
 * power, MAC address, system config (incl. optional BT coexistence
 * from EEPROM capabilities), supported rates, RTS threshold, QoS,
 * RNG seed, then the host-complete command that moves the firmware
 * into the RUN state.  Finally sets STATUS_INIT, initializes LEDs and
 * programs any configured hardware WEP keys.
 * Returns 0 on success, -EIO if any firmware command fails.
 */
static int ipw_config(struct ipw_priv *priv)
{
        /* This is only called from ipw_up, which resets/reloads the firmware
           so, we don't need to first disable the card before we configure
           it */
        if (ipw_set_tx_power(priv))
                goto error;

        /* initialize adapter address */
        if (ipw_send_adapter_address(priv, priv->net_dev->dev_addr))
                goto error;

        /* set basic system config settings */
        init_sys_config(&priv->sys_config);

        /* Support Bluetooth if we have BT h/w on board, and user wants to.
         * Does not support BT priority yet (don't abort or defer our Tx) */
        if (bt_coexist) {
                unsigned char bt_caps = priv->eeprom[EEPROM_SKU_CAPABILITY];

                if (bt_caps & EEPROM_SKU_CAP_BT_CHANNEL_SIG)
                        priv->sys_config.bt_coexistence
                            |= CFG_BT_COEXISTENCE_SIGNAL_CHNL;
                if (bt_caps & EEPROM_SKU_CAP_BT_OOB)
                        priv->sys_config.bt_coexistence
                            |= CFG_BT_COEXISTENCE_OOB;
        }

        /* Only answer broadcast-SSID probes when acting as an adhoc cell */
        if (priv->ieee->iw_mode == IW_MODE_ADHOC)
                priv->sys_config.answer_broadcast_ssid_probe = 1;
        else
                priv->sys_config.answer_broadcast_ssid_probe = 0;

        if (ipw_send_system_config(priv, &priv->sys_config))
                goto error;

        init_supported_rates(priv, &priv->rates);
        if (ipw_send_supported_rates(priv, &priv->rates))
                goto error;

        /* Set request-to-send threshold */
        if (priv->rts_threshold) {
                if (ipw_send_rts_threshold(priv, priv->rts_threshold))
                        goto error;
        }
#ifdef CONFIG_IPW_QOS
        IPW_DEBUG_QOS("QoS: call ipw_qos_activate\n");
        ipw_qos_activate(priv, NULL);
#endif                          /* CONFIG_IPW_QOS */

        if (ipw_set_random_seed(priv))
                goto error;

        /* final state transition to the RUN state */
        if (ipw_send_host_complete(priv))
                goto error;

        priv->status |= STATUS_INIT;

        ipw_led_init(priv);
        ipw_led_radio_on(priv);
        priv->notif_missed_beacons = 0;

        /* Set hardware WEP key if it is configured. */
        if ((priv->capability & CAP_PRIVACY_ON) &&
            (priv->ieee->sec.level == SEC_LEVEL_1) &&
            !(priv->ieee->host_encrypt || priv->ieee->host_decrypt))
                ipw_set_hwcrypto_keys(priv);

        return 0;

      error:
        return -EIO;
}
10367
10368 /*
10369 * NOTE:
10370 *
10371 * These tables have been tested in conjunction with the
10372 * Intel PRO/Wireless 2200BG and 2915ABG Network Connection Adapters.
10373 *
10374  * Altering these values, using them on other hardware, or in geographies
10375 * not intended for resale of the above mentioned Intel adapters has
10376 * not been tested.
10377 *
10378 * Remember to update the table in README.ipw2200 when changing this
10379 * table.
10380 *
10381 */
10382 static const struct ieee80211_geo ipw_geos[] = {
10383 { /* Restricted */
10384 "---",
10385 .bg_channels = 11,
10386 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10387 {2427, 4}, {2432, 5}, {2437, 6},
10388 {2442, 7}, {2447, 8}, {2452, 9},
10389 {2457, 10}, {2462, 11}},
10390 },
10391
10392 { /* Custom US/Canada */
10393 "ZZF",
10394 .bg_channels = 11,
10395 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10396 {2427, 4}, {2432, 5}, {2437, 6},
10397 {2442, 7}, {2447, 8}, {2452, 9},
10398 {2457, 10}, {2462, 11}},
10399 .a_channels = 8,
10400 .a = {{5180, 36},
10401 {5200, 40},
10402 {5220, 44},
10403 {5240, 48},
10404 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
10405 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
10406 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
10407 {5320, 64, IEEE80211_CH_PASSIVE_ONLY}},
10408 },
10409
10410 { /* Rest of World */
10411 "ZZD",
10412 .bg_channels = 13,
10413 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10414 {2427, 4}, {2432, 5}, {2437, 6},
10415 {2442, 7}, {2447, 8}, {2452, 9},
10416 {2457, 10}, {2462, 11}, {2467, 12},
10417 {2472, 13}},
10418 },
10419
10420 { /* Custom USA & Europe & High */
10421 "ZZA",
10422 .bg_channels = 11,
10423 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10424 {2427, 4}, {2432, 5}, {2437, 6},
10425 {2442, 7}, {2447, 8}, {2452, 9},
10426 {2457, 10}, {2462, 11}},
10427 .a_channels = 13,
10428 .a = {{5180, 36},
10429 {5200, 40},
10430 {5220, 44},
10431 {5240, 48},
10432 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
10433 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
10434 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
10435 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
10436 {5745, 149},
10437 {5765, 153},
10438 {5785, 157},
10439 {5805, 161},
10440 {5825, 165}},
10441 },
10442
10443 { /* Custom NA & Europe */
10444 "ZZB",
10445 .bg_channels = 11,
10446 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10447 {2427, 4}, {2432, 5}, {2437, 6},
10448 {2442, 7}, {2447, 8}, {2452, 9},
10449 {2457, 10}, {2462, 11}},
10450 .a_channels = 13,
10451 .a = {{5180, 36},
10452 {5200, 40},
10453 {5220, 44},
10454 {5240, 48},
10455 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
10456 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
10457 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
10458 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
10459 {5745, 149, IEEE80211_CH_PASSIVE_ONLY},
10460 {5765, 153, IEEE80211_CH_PASSIVE_ONLY},
10461 {5785, 157, IEEE80211_CH_PASSIVE_ONLY},
10462 {5805, 161, IEEE80211_CH_PASSIVE_ONLY},
10463 {5825, 165, IEEE80211_CH_PASSIVE_ONLY}},
10464 },
10465
10466 { /* Custom Japan */
10467 "ZZC",
10468 .bg_channels = 11,
10469 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10470 {2427, 4}, {2432, 5}, {2437, 6},
10471 {2442, 7}, {2447, 8}, {2452, 9},
10472 {2457, 10}, {2462, 11}},
10473 .a_channels = 4,
10474 .a = {{5170, 34}, {5190, 38},
10475 {5210, 42}, {5230, 46}},
10476 },
10477
10478 { /* Custom */
10479 "ZZM",
10480 .bg_channels = 11,
10481 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10482 {2427, 4}, {2432, 5}, {2437, 6},
10483 {2442, 7}, {2447, 8}, {2452, 9},
10484 {2457, 10}, {2462, 11}},
10485 },
10486
10487 { /* Europe */
10488 "ZZE",
10489 .bg_channels = 13,
10490 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10491 {2427, 4}, {2432, 5}, {2437, 6},
10492 {2442, 7}, {2447, 8}, {2452, 9},
10493 {2457, 10}, {2462, 11}, {2467, 12},
10494 {2472, 13}},
10495 .a_channels = 19,
10496 .a = {{5180, 36},
10497 {5200, 40},
10498 {5220, 44},
10499 {5240, 48},
10500 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
10501 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
10502 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
10503 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
10504 {5500, 100, IEEE80211_CH_PASSIVE_ONLY},
10505 {5520, 104, IEEE80211_CH_PASSIVE_ONLY},
10506 {5540, 108, IEEE80211_CH_PASSIVE_ONLY},
10507 {5560, 112, IEEE80211_CH_PASSIVE_ONLY},
10508 {5580, 116, IEEE80211_CH_PASSIVE_ONLY},
10509 {5600, 120, IEEE80211_CH_PASSIVE_ONLY},
10510 {5620, 124, IEEE80211_CH_PASSIVE_ONLY},
10511 {5640, 128, IEEE80211_CH_PASSIVE_ONLY},
10512 {5660, 132, IEEE80211_CH_PASSIVE_ONLY},
10513 {5680, 136, IEEE80211_CH_PASSIVE_ONLY},
10514 {5700, 140, IEEE80211_CH_PASSIVE_ONLY}},
10515 },
10516
10517 { /* Custom Japan */
10518 "ZZJ",
10519 .bg_channels = 14,
10520 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10521 {2427, 4}, {2432, 5}, {2437, 6},
10522 {2442, 7}, {2447, 8}, {2452, 9},
10523 {2457, 10}, {2462, 11}, {2467, 12},
10524 {2472, 13}, {2484, 14, IEEE80211_CH_B_ONLY}},
10525 .a_channels = 4,
10526 .a = {{5170, 34}, {5190, 38},
10527 {5210, 42}, {5230, 46}},
10528 },
10529
10530 { /* Rest of World */
10531 "ZZR",
10532 .bg_channels = 14,
10533 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10534 {2427, 4}, {2432, 5}, {2437, 6},
10535 {2442, 7}, {2447, 8}, {2452, 9},
10536 {2457, 10}, {2462, 11}, {2467, 12},
10537 {2472, 13}, {2484, 14, IEEE80211_CH_B_ONLY |
10538 IEEE80211_CH_PASSIVE_ONLY}},
10539 },
10540
10541 { /* High Band */
10542 "ZZH",
10543 .bg_channels = 13,
10544 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10545 {2427, 4}, {2432, 5}, {2437, 6},
10546 {2442, 7}, {2447, 8}, {2452, 9},
10547 {2457, 10}, {2462, 11},
10548 {2467, 12, IEEE80211_CH_PASSIVE_ONLY},
10549 {2472, 13, IEEE80211_CH_PASSIVE_ONLY}},
10550 .a_channels = 4,
10551 .a = {{5745, 149}, {5765, 153},
10552 {5785, 157}, {5805, 161}},
10553 },
10554
10555 { /* Custom Europe */
10556 "ZZG",
10557 .bg_channels = 13,
10558 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10559 {2427, 4}, {2432, 5}, {2437, 6},
10560 {2442, 7}, {2447, 8}, {2452, 9},
10561 {2457, 10}, {2462, 11},
10562 {2467, 12}, {2472, 13}},
10563 .a_channels = 4,
10564 .a = {{5180, 36}, {5200, 40},
10565 {5220, 44}, {5240, 48}},
10566 },
10567
10568 { /* Europe */
10569 "ZZK",
10570 .bg_channels = 13,
10571 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10572 {2427, 4}, {2432, 5}, {2437, 6},
10573 {2442, 7}, {2447, 8}, {2452, 9},
10574 {2457, 10}, {2462, 11},
10575 {2467, 12, IEEE80211_CH_PASSIVE_ONLY},
10576 {2472, 13, IEEE80211_CH_PASSIVE_ONLY}},
10577 .a_channels = 24,
10578 .a = {{5180, 36, IEEE80211_CH_PASSIVE_ONLY},
10579 {5200, 40, IEEE80211_CH_PASSIVE_ONLY},
10580 {5220, 44, IEEE80211_CH_PASSIVE_ONLY},
10581 {5240, 48, IEEE80211_CH_PASSIVE_ONLY},
10582 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
10583 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
10584 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
10585 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
10586 {5500, 100, IEEE80211_CH_PASSIVE_ONLY},
10587 {5520, 104, IEEE80211_CH_PASSIVE_ONLY},
10588 {5540, 108, IEEE80211_CH_PASSIVE_ONLY},
10589 {5560, 112, IEEE80211_CH_PASSIVE_ONLY},
10590 {5580, 116, IEEE80211_CH_PASSIVE_ONLY},
10591 {5600, 120, IEEE80211_CH_PASSIVE_ONLY},
10592 {5620, 124, IEEE80211_CH_PASSIVE_ONLY},
10593 {5640, 128, IEEE80211_CH_PASSIVE_ONLY},
10594 {5660, 132, IEEE80211_CH_PASSIVE_ONLY},
10595 {5680, 136, IEEE80211_CH_PASSIVE_ONLY},
10596 {5700, 140, IEEE80211_CH_PASSIVE_ONLY},
10597 {5745, 149, IEEE80211_CH_PASSIVE_ONLY},
10598 {5765, 153, IEEE80211_CH_PASSIVE_ONLY},
10599 {5785, 157, IEEE80211_CH_PASSIVE_ONLY},
10600 {5805, 161, IEEE80211_CH_PASSIVE_ONLY},
10601 {5825, 165, IEEE80211_CH_PASSIVE_ONLY}},
10602 },
10603
10604 { /* Europe */
10605 "ZZL",
10606 .bg_channels = 11,
10607 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10608 {2427, 4}, {2432, 5}, {2437, 6},
10609 {2442, 7}, {2447, 8}, {2452, 9},
10610 {2457, 10}, {2462, 11}},
10611 .a_channels = 13,
10612 .a = {{5180, 36, IEEE80211_CH_PASSIVE_ONLY},
10613 {5200, 40, IEEE80211_CH_PASSIVE_ONLY},
10614 {5220, 44, IEEE80211_CH_PASSIVE_ONLY},
10615 {5240, 48, IEEE80211_CH_PASSIVE_ONLY},
10616 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
10617 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
10618 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
10619 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
10620 {5745, 149, IEEE80211_CH_PASSIVE_ONLY},
10621 {5765, 153, IEEE80211_CH_PASSIVE_ONLY},
10622 {5785, 157, IEEE80211_CH_PASSIVE_ONLY},
10623 {5805, 161, IEEE80211_CH_PASSIVE_ONLY},
10624 {5825, 165, IEEE80211_CH_PASSIVE_ONLY}},
10625 }
10626 };
10627
10628 #define MAX_HW_RESTARTS 5
10629 static int ipw_up(struct ipw_priv *priv)
10630 {
10631 int rc, i, j;
10632
10633 if (priv->status & STATUS_EXIT_PENDING)
10634 return -EIO;
10635
10636 if (cmdlog && !priv->cmdlog) {
10637 priv->cmdlog = kmalloc(sizeof(*priv->cmdlog) * cmdlog,
10638 GFP_KERNEL);
10639 if (priv->cmdlog == NULL) {
10640 IPW_ERROR("Error allocating %d command log entries.\n",
10641 cmdlog);
10642 } else {
10643 memset(priv->cmdlog, 0, sizeof(*priv->cmdlog) * cmdlog);
10644 priv->cmdlog_len = cmdlog;
10645 }
10646 }
10647
10648 for (i = 0; i < MAX_HW_RESTARTS; i++) {
10649 /* Load the microcode, firmware, and eeprom.
10650 * Also start the clocks. */
10651 rc = ipw_load(priv);
10652 if (rc) {
10653 IPW_ERROR("Unable to load firmware: %d\n", rc);
10654 return rc;
10655 }
10656
10657 ipw_init_ordinals(priv);
10658 if (!(priv->config & CFG_CUSTOM_MAC))
10659 eeprom_parse_mac(priv, priv->mac_addr);
10660 memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
10661
10662 for (j = 0; j < ARRAY_SIZE(ipw_geos); j++) {
10663 if (!memcmp(&priv->eeprom[EEPROM_COUNTRY_CODE],
10664 ipw_geos[j].name, 3))
10665 break;
10666 }
10667 if (j == ARRAY_SIZE(ipw_geos)) {
10668 IPW_WARNING("SKU [%c%c%c] not recognized.\n",
10669 priv->eeprom[EEPROM_COUNTRY_CODE + 0],
10670 priv->eeprom[EEPROM_COUNTRY_CODE + 1],
10671 priv->eeprom[EEPROM_COUNTRY_CODE + 2]);
10672 j = 0;
10673 }
10674 if (ieee80211_set_geo(priv->ieee, &ipw_geos[j])) {
10675 IPW_WARNING("Could not set geography.");
10676 return 0;
10677 }
10678
10679 if (priv->status & STATUS_RF_KILL_SW) {
10680 IPW_WARNING("Radio disabled by module parameter.\n");
10681 return 0;
10682 } else if (rf_kill_active(priv)) {
10683 IPW_WARNING("Radio Frequency Kill Switch is On:\n"
10684 "Kill switch must be turned off for "
10685 "wireless networking to work.\n");
10686 queue_delayed_work(priv->workqueue, &priv->rf_kill,
10687 2 * HZ);
10688 return 0;
10689 }
10690
10691 rc = ipw_config(priv);
10692 if (!rc) {
10693 IPW_DEBUG_INFO("Configured device on count %i\n", i);
10694
10695 /* If configure to try and auto-associate, kick
10696 * off a scan. */
10697 queue_work(priv->workqueue, &priv->request_scan);
10698
10699 return 0;
10700 }
10701
10702 IPW_DEBUG_INFO("Device configuration failed: 0x%08X\n", rc);
10703 IPW_DEBUG_INFO("Failed to config device on retry %d of %d\n",
10704 i, MAX_HW_RESTARTS);
10705
10706 /* We had an error bringing up the hardware, so take it
10707 * all the way back down so we can try again */
10708 ipw_down(priv);
10709 }
10710
10711 /* tried to restart and config the device for as long as our
10712 * patience could withstand */
10713 IPW_ERROR("Unable to initialize device after %d attempts.\n", i);
10714
10715 return -EIO;
10716 }
10717
10718 static void ipw_bg_up(void *data)
10719 {
10720 struct ipw_priv *priv = data;
10721 mutex_lock(&priv->mutex);
10722 ipw_up(data);
10723 mutex_unlock(&priv->mutex);
10724 }
10725
/* Quiesce the device prior to shutdown: abort any scan in progress,
 * disassociate from the current network, wait briefly for those
 * operations to complete, then ask the firmware to disable the card.
 * Clears STATUS_INIT on exit.  Caller must hold priv->mutex. */
static void ipw_deinit(struct ipw_priv *priv)
{
	int i;

	if (priv->status & STATUS_SCANNING) {
		IPW_DEBUG_INFO("Aborting scan during shutdown.\n");
		ipw_abort_scan(priv);
	}

	if (priv->status & STATUS_ASSOCIATED) {
		IPW_DEBUG_INFO("Disassociating during shutdown.\n");
		ipw_disassociate(priv);
	}

	ipw_led_shutdown(priv);

	/* Poll for the device to leave the disassociating/associated/
	 * scanning states (disassociation can take a while for a full
	 * 802.11 exchange).  NOTE(review): 1000 iterations of udelay(10)
	 * is ~10 ms, not the 1 s the original comment claimed, and the
	 * "Took %dms" message below prints the iteration count rather
	 * than milliseconds — confirm the intended timeout. */
	for (i = 1000; i && (priv->status &
			     (STATUS_DISASSOCIATING |
			      STATUS_ASSOCIATED | STATUS_SCANNING)); i--)
		udelay(10);

	if (priv->status & (STATUS_DISASSOCIATING |
			    STATUS_ASSOCIATED | STATUS_SCANNING))
		IPW_DEBUG_INFO("Still associated or scanning...\n");
	else
		IPW_DEBUG_INFO("Took %dms to de-init\n", 1000 - i);

	/* Attempt to disable the card */
	ipw_send_card_disable(priv, 0);

	priv->status &= ~STATUS_INIT;
}
10761
/* Take the device all the way down: de-init the firmware state if the
 * card was initialized, mask interrupts, stop the network queue and
 * the NIC, and turn the radio LED off.  Preserves the rf-kill status
 * bits (and EXIT_PENDING when the module is actually unloading) so a
 * later ipw_up() behaves correctly.  Caller must hold priv->mutex. */
static void ipw_down(struct ipw_priv *priv)
{
	/* Remember whether a module exit was already in flight so we can
	 * restore the flag's meaning below. */
	int exit_pending = priv->status & STATUS_EXIT_PENDING;

	priv->status |= STATUS_EXIT_PENDING;

	if (ipw_is_init(priv))
		ipw_deinit(priv);

	/* Wipe out the EXIT_PENDING status bit if we are not actually
	 * exiting the module */
	if (!exit_pending)
		priv->status &= ~STATUS_EXIT_PENDING;

	/* tell the device to stop sending interrupts */
	ipw_disable_interrupts(priv);

	/* Clear all bits but the RF Kill */
	priv->status &= STATUS_RF_KILL_MASK | STATUS_EXIT_PENDING;
	netif_carrier_off(priv->net_dev);
	netif_stop_queue(priv->net_dev);

	ipw_stop_nic(priv);

	ipw_led_radio_off(priv);
}
10788
10789 static void ipw_bg_down(void *data)
10790 {
10791 struct ipw_priv *priv = data;
10792 mutex_lock(&priv->mutex);
10793 ipw_down(data);
10794 mutex_unlock(&priv->mutex);
10795 }
10796
10797 /* Called by register_netdev() */
10798 static int ipw_net_init(struct net_device *dev)
10799 {
10800 struct ipw_priv *priv = ieee80211_priv(dev);
10801 mutex_lock(&priv->mutex);
10802
10803 if (ipw_up(priv)) {
10804 mutex_unlock(&priv->mutex);
10805 return -EIO;
10806 }
10807
10808 mutex_unlock(&priv->mutex);
10809 return 0;
10810 }
10811
/* PCI driver stuff */
/* PCI IDs claimed by this driver: Intel 2200BG/2915ABG variants.
 * Device 0x1043 entries are matched by subsystem IDs; 0x4220/0x4221
 * (BG) and 0x4223/0x4224 (ABG) match any subsystem. */
static struct pci_device_id card_ids[] = {
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2701, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2702, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2711, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2712, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2721, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2722, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2731, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2732, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2741, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x103c, 0x2741, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2742, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2751, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2752, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2753, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2754, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2761, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2762, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x104f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x4220, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},	/* BG */
	{PCI_VENDOR_ID_INTEL, 0x4221, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},	/* BG */
	{PCI_VENDOR_ID_INTEL, 0x4223, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},	/* ABG */
	{PCI_VENDOR_ID_INTEL, 0x4224, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},	/* ABG */

	/* required last entry */
	{0,}
};
10840
10841 MODULE_DEVICE_TABLE(pci, card_ids);
10842
/* sysfs attributes exported per device (debug knobs, status readouts,
 * rf-kill control).  NULL-terminated, referenced by
 * ipw_attribute_group below. */
static struct attribute *ipw_sysfs_entries[] = {
	&dev_attr_rf_kill.attr,
	&dev_attr_direct_dword.attr,
	&dev_attr_indirect_byte.attr,
	&dev_attr_indirect_dword.attr,
	&dev_attr_mem_gpio_reg.attr,
	&dev_attr_command_event_reg.attr,
	&dev_attr_nic_type.attr,
	&dev_attr_status.attr,
	&dev_attr_cfg.attr,
	&dev_attr_error.attr,
	&dev_attr_event_log.attr,
	&dev_attr_cmd_log.attr,
	&dev_attr_eeprom_delay.attr,
	&dev_attr_ucode_version.attr,
	&dev_attr_rtc.attr,
	&dev_attr_scan_age.attr,
	&dev_attr_led.attr,
	&dev_attr_speed_scan.attr,
	&dev_attr_net_stats.attr,
	NULL
};
10865
/* Attribute group registered in ipw_pci_probe() and removed in
 * ipw_pci_remove(). */
static struct attribute_group ipw_attribute_group = {
	.name = NULL,		/* put in device directory */
	.attrs = ipw_sysfs_entries,
};
10870
/* PCI probe hook: allocate the ieee80211/ipw_priv device, enable and
 * map the PCI resources, wire up the net_device callbacks, register
 * sysfs attributes and finally the network device (whose ->init hook,
 * ipw_net_init, brings the hardware up).  Returns 0 on success or a
 * negative errno, unwinding via the goto-cleanup ladder at the end. */
static int ipw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int err = 0;
	struct net_device *net_dev;
	void __iomem *base;
	u32 length, val;
	struct ipw_priv *priv;
	int i;

	net_dev = alloc_ieee80211(sizeof(struct ipw_priv));
	if (net_dev == NULL) {
		err = -ENOMEM;
		goto out;
	}

	priv = ieee80211_priv(net_dev);
	priv->ieee = netdev_priv(net_dev);

	priv->net_dev = net_dev;
	priv->pci_dev = pdev;
#ifdef CONFIG_IPW2200_DEBUG
	ipw_debug_level = debug;
#endif
	spin_lock_init(&priv->lock);
	for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++)
		INIT_LIST_HEAD(&priv->ibss_mac_hash[i]);

	mutex_init(&priv->mutex);
	if (pci_enable_device(pdev)) {
		err = -ENODEV;
		goto out_free_ieee80211;
	}

	pci_set_master(pdev);

	/* The device only supports 32-bit DMA addressing. */
	err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
	if (err) {
		printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n");
		goto out_pci_disable_device;
	}

	pci_set_drvdata(pdev, priv);

	err = pci_request_regions(pdev, DRV_NAME);
	if (err)
		goto out_pci_disable_device;

	/* We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_read_config_dword(pdev, 0x40, &val);
	if ((val & 0x0000ff00) != 0)
		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);

	/* Map BAR 0 (the device's register window). */
	length = pci_resource_len(pdev, 0);
	priv->hw_len = length;

	base = ioremap_nocache(pci_resource_start(pdev, 0), length);
	if (!base) {
		err = -ENODEV;
		goto out_pci_release_regions;
	}

	priv->hw_base = base;
	IPW_DEBUG_INFO("pci_resource_len = 0x%08x\n", length);
	IPW_DEBUG_INFO("pci_resource_base = %p\n", base);

	err = ipw_setup_deferred_work(priv);
	if (err) {
		IPW_ERROR("Unable to setup deferred work\n");
		goto out_iounmap;
	}

	ipw_sw_reset(priv, 1);

	err = request_irq(pdev->irq, ipw_isr, SA_SHIRQ, DRV_NAME, priv);
	if (err) {
		IPW_ERROR("Error allocating IRQ %d\n", pdev->irq);
		goto out_destroy_workqueue;
	}

	SET_MODULE_OWNER(net_dev);
	SET_NETDEV_DEV(net_dev, &pdev->dev);

	mutex_lock(&priv->mutex);

	/* Hook the ieee80211 stack's callbacks into this driver. */
	priv->ieee->hard_start_xmit = ipw_net_hard_start_xmit;
	priv->ieee->set_security = shim__set_security;
	priv->ieee->is_queue_full = ipw_net_is_queue_full;

#ifdef CONFIG_IPW_QOS
	/* NOTE(review): beacon and probe-response handlers appear
	 * cross-assigned (handle_probe_response = ipw_handle_beacon and
	 * vice versa) — confirm against the handler implementations
	 * whether this pairing is intentional. */
	priv->ieee->handle_probe_response = ipw_handle_beacon;
	priv->ieee->handle_beacon = ipw_handle_probe_response;
	priv->ieee->handle_assoc_response = ipw_handle_assoc_response;
#endif				/* CONFIG_IPW_QOS */

	/* Signal-quality bounds used for reporting (dBm). */
	priv->ieee->perfect_rssi = -20;
	priv->ieee->worst_rssi = -85;

	net_dev->open = ipw_net_open;
	net_dev->stop = ipw_net_stop;
	net_dev->init = ipw_net_init;
	net_dev->get_stats = ipw_net_get_stats;
	net_dev->set_multicast_list = ipw_net_set_multicast_list;
	net_dev->set_mac_address = ipw_net_set_mac_address;
	priv->wireless_data.spy_data = &priv->ieee->spy_data;
	net_dev->wireless_data = &priv->wireless_data;
	net_dev->wireless_handlers = &ipw_wx_handler_def;
	net_dev->ethtool_ops = &ipw_ethtool_ops;
	net_dev->irq = pdev->irq;
	net_dev->base_addr = (unsigned long)priv->hw_base;
	net_dev->mem_start = pci_resource_start(pdev, 0);
	net_dev->mem_end = net_dev->mem_start + pci_resource_len(pdev, 0) - 1;

	err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group);
	if (err) {
		IPW_ERROR("failed to create sysfs device attributes\n");
		mutex_unlock(&priv->mutex);
		goto out_release_irq;
	}

	mutex_unlock(&priv->mutex);
	err = register_netdev(net_dev);
	if (err) {
		IPW_ERROR("failed to register network device\n");
		goto out_remove_sysfs;
	}

	printk(KERN_INFO DRV_NAME ": Detected geography %s (%d 802.11bg "
	       "channels, %d 802.11a channels)\n",
	       priv->ieee->geo.name, priv->ieee->geo.bg_channels,
	       priv->ieee->geo.a_channels);

	return 0;

      out_remove_sysfs:
	sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
      out_release_irq:
	free_irq(pdev->irq, priv);
      out_destroy_workqueue:
	destroy_workqueue(priv->workqueue);
	priv->workqueue = NULL;
      out_iounmap:
	iounmap(priv->hw_base);
      out_pci_release_regions:
	pci_release_regions(pdev);
      out_pci_disable_device:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
      out_free_ieee80211:
	free_ieee80211(priv->net_dev);
      out:
	return err;
}
11026
11027 static void ipw_pci_remove(struct pci_dev *pdev)
11028 {
11029 struct ipw_priv *priv = pci_get_drvdata(pdev);
11030 struct list_head *p, *q;
11031 int i;
11032
11033 if (!priv)
11034 return;
11035
11036 mutex_lock(&priv->mutex);
11037
11038 priv->status |= STATUS_EXIT_PENDING;
11039 ipw_down(priv);
11040 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11041
11042 mutex_unlock(&priv->mutex);
11043
11044 unregister_netdev(priv->net_dev);
11045
11046 if (priv->rxq) {
11047 ipw_rx_queue_free(priv, priv->rxq);
11048 priv->rxq = NULL;
11049 }
11050 ipw_tx_queue_free(priv);
11051
11052 if (priv->cmdlog) {
11053 kfree(priv->cmdlog);
11054 priv->cmdlog = NULL;
11055 }
11056 /* ipw_down will ensure that there is no more pending work
11057 * in the workqueue's, so we can safely remove them now. */
11058 cancel_delayed_work(&priv->adhoc_check);
11059 cancel_delayed_work(&priv->gather_stats);
11060 cancel_delayed_work(&priv->request_scan);
11061 cancel_delayed_work(&priv->rf_kill);
11062 cancel_delayed_work(&priv->scan_check);
11063 destroy_workqueue(priv->workqueue);
11064 priv->workqueue = NULL;
11065
11066 /* Free MAC hash list for ADHOC */
11067 for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) {
11068 list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) {
11069 list_del(p);
11070 kfree(list_entry(p, struct ipw_ibss_seq, list));
11071 }
11072 }
11073
11074 if (priv->error) {
11075 ipw_free_error_log(priv->error);
11076 priv->error = NULL;
11077 }
11078
11079 free_irq(pdev->irq, priv);
11080 iounmap(priv->hw_base);
11081 pci_release_regions(pdev);
11082 pci_disable_device(pdev);
11083 pci_set_drvdata(pdev, NULL);
11084 free_ieee80211(priv->net_dev);
11085 free_firmware();
11086 }
11087
11088 #ifdef CONFIG_PM
11089 static int ipw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
11090 {
11091 struct ipw_priv *priv = pci_get_drvdata(pdev);
11092 struct net_device *dev = priv->net_dev;
11093
11094 printk(KERN_INFO "%s: Going into suspend...\n", dev->name);
11095
11096 /* Take down the device; powers it off, etc. */
11097 ipw_down(priv);
11098
11099 /* Remove the PRESENT state of the device */
11100 netif_device_detach(dev);
11101
11102 pci_save_state(pdev);
11103 pci_disable_device(pdev);
11104 pci_set_power_state(pdev, pci_choose_state(pdev, state));
11105
11106 return 0;
11107 }
11108
11109 static int ipw_pci_resume(struct pci_dev *pdev)
11110 {
11111 struct ipw_priv *priv = pci_get_drvdata(pdev);
11112 struct net_device *dev = priv->net_dev;
11113 u32 val;
11114
11115 printk(KERN_INFO "%s: Coming out of suspend...\n", dev->name);
11116
11117 pci_set_power_state(pdev, PCI_D0);
11118 pci_enable_device(pdev);
11119 pci_restore_state(pdev);
11120
11121 /*
11122 * Suspend/Resume resets the PCI configuration space, so we have to
11123 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
11124 * from interfering with C3 CPU state. pci_restore_state won't help
11125 * here since it only restores the first 64 bytes pci config header.
11126 */
11127 pci_read_config_dword(pdev, 0x40, &val);
11128 if ((val & 0x0000ff00) != 0)
11129 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
11130
11131 /* Set the device back into the PRESENT state; this will also wake
11132 * the queue of needed */
11133 netif_device_attach(dev);
11134
11135 /* Bring the device back up */
11136 queue_work(priv->workqueue, &priv->up);
11137
11138 return 0;
11139 }
11140 #endif
11141
/* driver initialization stuff */
/* PCI driver descriptor; registered from ipw_init().  Suspend/resume
 * hooks are compiled in only when CONFIG_PM is set. */
static struct pci_driver ipw_driver = {
	.name = DRV_NAME,
	.id_table = card_ids,
	.probe = ipw_pci_probe,
	.remove = __devexit_p(ipw_pci_remove),
#ifdef CONFIG_PM
	.suspend = ipw_pci_suspend,
	.resume = ipw_pci_resume,
#endif
};
11153
11154 static int __init ipw_init(void)
11155 {
11156 int ret;
11157
11158 printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
11159 printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");
11160
11161 ret = pci_module_init(&ipw_driver);
11162 if (ret) {
11163 IPW_ERROR("Unable to initialize PCI module\n");
11164 return ret;
11165 }
11166
11167 ret = driver_create_file(&ipw_driver.driver, &driver_attr_debug_level);
11168 if (ret) {
11169 IPW_ERROR("Unable to create driver sysfs file\n");
11170 pci_unregister_driver(&ipw_driver);
11171 return ret;
11172 }
11173
11174 return ret;
11175 }
11176
/* Module exit point: remove the sysfs debug_level file before
 * unregistering the PCI driver (reverse order of ipw_init()). */
static void __exit ipw_exit(void)
{
	driver_remove_file(&ipw_driver.driver, &driver_attr_debug_level);
	pci_unregister_driver(&ipw_driver);
}
11182
11183 module_param(disable, int, 0444);
11184 MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
11185
11186 module_param(associate, int, 0444);
11187 MODULE_PARM_DESC(associate, "auto associate when scanning (default on)");
11188
11189 module_param(auto_create, int, 0444);
11190 MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)");
11191
11192 module_param(led, int, 0444);
11193 MODULE_PARM_DESC(led, "enable led control on some systems (default 0 off)\n");
11194
11195 #ifdef CONFIG_IPW2200_DEBUG
11196 module_param(debug, int, 0444);
11197 MODULE_PARM_DESC(debug, "debug output mask");
11198 #endif
11199
11200 module_param(channel, int, 0444);
11201 MODULE_PARM_DESC(channel, "channel to limit associate to (default 0 [ANY])");
11202
11203 #ifdef CONFIG_IPW_QOS
11204 module_param(qos_enable, int, 0444);
11205 MODULE_PARM_DESC(qos_enable, "enable all QoS functionalitis");
11206
11207 module_param(qos_burst_enable, int, 0444);
11208 MODULE_PARM_DESC(qos_burst_enable, "enable QoS burst mode");
11209
11210 module_param(qos_no_ack_mask, int, 0444);
11211 MODULE_PARM_DESC(qos_no_ack_mask, "mask Tx_Queue to no ack");
11212
11213 module_param(burst_duration_CCK, int, 0444);
11214 MODULE_PARM_DESC(burst_duration_CCK, "set CCK burst value");
11215
11216 module_param(burst_duration_OFDM, int, 0444);
11217 MODULE_PARM_DESC(burst_duration_OFDM, "set OFDM burst value");
11218 #endif /* CONFIG_IPW_QOS */
11219
11220 #ifdef CONFIG_IPW2200_MONITOR
11221 module_param(mode, int, 0444);
11222 MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS,2=Monitor)");
11223 #else
11224 module_param(mode, int, 0444);
11225 MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS)");
11226 #endif
11227
11228 module_param(bt_coexist, int, 0444);
11229 MODULE_PARM_DESC(bt_coexist, "enable bluetooth coexistence (default off)");
11230
11231 module_param(hwcrypto, int, 0444);
11232 MODULE_PARM_DESC(hwcrypto, "enable hardware crypto (default off)");
11233
11234 module_param(cmdlog, int, 0444);
11235 MODULE_PARM_DESC(cmdlog,
11236 "allocate a ring buffer for logging firmware commands");
11237
11238 module_param(roaming, int, 0444);
11239 MODULE_PARM_DESC(roaming, "enable roaming support (default on)");
11240
11241 module_exit(ipw_exit);
11242 module_init(ipw_init);
This page took 0.550727 seconds and 5 git commands to generate.