[PATCH] ipw2200: Fix kernel panic caused by "iwspy ethx off"
drivers/net/wireless/ipw2200.c
1 /******************************************************************************
2
3 Copyright(c) 2003 - 2005 Intel Corporation. All rights reserved.
4
5 802.11 status code portion of this file from ethereal-0.10.6:
6 Copyright 2000, Axis Communications AB
7 Ethereal - Network traffic analyzer
8 By Gerald Combs <gerald@ethereal.com>
9 Copyright 1998 Gerald Combs
10
11 This program is free software; you can redistribute it and/or modify it
12 under the terms of version 2 of the GNU General Public License as
13 published by the Free Software Foundation.
14
15 This program is distributed in the hope that it will be useful, but WITHOUT
16 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 more details.
19
20 You should have received a copy of the GNU General Public License along with
21 this program; if not, write to the Free Software Foundation, Inc., 59
22 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23
24 The full GNU General Public License is included in this distribution in the
25 file called LICENSE.
26
27 Contact Information:
28 James P. Ketrenos <ipw2100-admin@linux.intel.com>
29 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30
31 ******************************************************************************/
32
33 #include "ipw2200.h"
34 #include <linux/version.h>
35
36 #define IPW2200_VERSION "git-1.0.8"
37 #define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2200/2915 Network Driver"
38 #define DRV_COPYRIGHT "Copyright(c) 2003-2005 Intel Corporation"
39 #define DRV_VERSION IPW2200_VERSION
40
41 #define ETH_P_80211_STATS (ETH_P_80211_RAW + 1)
42
43 MODULE_DESCRIPTION(DRV_DESCRIPTION);
44 MODULE_VERSION(DRV_VERSION);
45 MODULE_AUTHOR(DRV_COPYRIGHT);
46 MODULE_LICENSE("GPL");
47
48 static int cmdlog = 0;
49 static int debug = 0;
50 static int channel = 0;
51 static int mode = 0;
52
53 static u32 ipw_debug_level;
54 static int associate = 1;
55 static int auto_create = 1;
56 static int led = 0;
57 static int disable = 0;
58 static int hwcrypto = 1;
59 static const char ipw_modes[] = {
60 'a', 'b', 'g', '?'
61 };
62
63 #ifdef CONFIG_IPW_QOS
64 static int qos_enable = 0;
65 static int qos_burst_enable = 0;
66 static int qos_no_ack_mask = 0;
67 static int burst_duration_CCK = 0;
68 static int burst_duration_OFDM = 0;
69
70 static struct ieee80211_qos_parameters def_qos_parameters_OFDM = {
71 {QOS_TX0_CW_MIN_OFDM, QOS_TX1_CW_MIN_OFDM, QOS_TX2_CW_MIN_OFDM,
72 QOS_TX3_CW_MIN_OFDM},
73 {QOS_TX0_CW_MAX_OFDM, QOS_TX1_CW_MAX_OFDM, QOS_TX2_CW_MAX_OFDM,
74 QOS_TX3_CW_MAX_OFDM},
75 {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
76 {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
77 {QOS_TX0_TXOP_LIMIT_OFDM, QOS_TX1_TXOP_LIMIT_OFDM,
78 QOS_TX2_TXOP_LIMIT_OFDM, QOS_TX3_TXOP_LIMIT_OFDM}
79 };
80
81 static struct ieee80211_qos_parameters def_qos_parameters_CCK = {
82 {QOS_TX0_CW_MIN_CCK, QOS_TX1_CW_MIN_CCK, QOS_TX2_CW_MIN_CCK,
83 QOS_TX3_CW_MIN_CCK},
84 {QOS_TX0_CW_MAX_CCK, QOS_TX1_CW_MAX_CCK, QOS_TX2_CW_MAX_CCK,
85 QOS_TX3_CW_MAX_CCK},
86 {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
87 {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
88 {QOS_TX0_TXOP_LIMIT_CCK, QOS_TX1_TXOP_LIMIT_CCK, QOS_TX2_TXOP_LIMIT_CCK,
89 QOS_TX3_TXOP_LIMIT_CCK}
90 };
91
92 static struct ieee80211_qos_parameters def_parameters_OFDM = {
93 {DEF_TX0_CW_MIN_OFDM, DEF_TX1_CW_MIN_OFDM, DEF_TX2_CW_MIN_OFDM,
94 DEF_TX3_CW_MIN_OFDM},
95 {DEF_TX0_CW_MAX_OFDM, DEF_TX1_CW_MAX_OFDM, DEF_TX2_CW_MAX_OFDM,
96 DEF_TX3_CW_MAX_OFDM},
97 {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
98 {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
99 {DEF_TX0_TXOP_LIMIT_OFDM, DEF_TX1_TXOP_LIMIT_OFDM,
100 DEF_TX2_TXOP_LIMIT_OFDM, DEF_TX3_TXOP_LIMIT_OFDM}
101 };
102
103 static struct ieee80211_qos_parameters def_parameters_CCK = {
104 {DEF_TX0_CW_MIN_CCK, DEF_TX1_CW_MIN_CCK, DEF_TX2_CW_MIN_CCK,
105 DEF_TX3_CW_MIN_CCK},
106 {DEF_TX0_CW_MAX_CCK, DEF_TX1_CW_MAX_CCK, DEF_TX2_CW_MAX_CCK,
107 DEF_TX3_CW_MAX_CCK},
108 {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
109 {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
110 {DEF_TX0_TXOP_LIMIT_CCK, DEF_TX1_TXOP_LIMIT_CCK, DEF_TX2_TXOP_LIMIT_CCK,
111 DEF_TX3_TXOP_LIMIT_CCK}
112 };
113
114 static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 };
115
116 static int from_priority_to_tx_queue[] = {
117 IPW_TX_QUEUE_1, IPW_TX_QUEUE_2, IPW_TX_QUEUE_2, IPW_TX_QUEUE_1,
118 IPW_TX_QUEUE_3, IPW_TX_QUEUE_3, IPW_TX_QUEUE_4, IPW_TX_QUEUE_4
119 };
120
121 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv);
122
123 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct ieee80211_qos_parameters
124 *qos_param);
125 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct ieee80211_qos_information_element
126 *qos_param);
127 #endif /* CONFIG_IPW_QOS */
128
129 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev);
130 static void ipw_remove_current_network(struct ipw_priv *priv);
131 static void ipw_rx(struct ipw_priv *priv);
132 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
133 struct clx2_tx_queue *txq, int qindex);
134 static int ipw_queue_reset(struct ipw_priv *priv);
135
136 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
137 int len, int sync);
138
139 static void ipw_tx_queue_free(struct ipw_priv *);
140
141 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *);
142 static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *);
143 static void ipw_rx_queue_replenish(void *);
144 static int ipw_up(struct ipw_priv *);
145 static void ipw_bg_up(void *);
146 static void ipw_down(struct ipw_priv *);
147 static void ipw_bg_down(void *);
148 static int ipw_config(struct ipw_priv *);
149 static int init_supported_rates(struct ipw_priv *priv,
150 struct ipw_supported_rates *prates);
151 static void ipw_set_hwcrypto_keys(struct ipw_priv *);
152 static void ipw_send_wep_keys(struct ipw_priv *, int);
153
154 static int ipw_is_valid_channel(struct ieee80211_device *, u8);
155 static int ipw_channel_to_index(struct ieee80211_device *, u8);
156 static u8 ipw_freq_to_channel(struct ieee80211_device *, u32);
157 static int ipw_set_geo(struct ieee80211_device *, const struct ieee80211_geo *);
158 static const struct ieee80211_geo *ipw_get_geo(struct ieee80211_device *);
159
160 static int snprint_line(char *buf, size_t count,
161 const u8 * data, u32 len, u32 ofs)
162 {
163 int out, i, j, l;
164 char c;
165
166 out = snprintf(buf, count, "%08X", ofs);
167
168 for (l = 0, i = 0; i < 2; i++) {
169 out += snprintf(buf + out, count - out, " ");
170 for (j = 0; j < 8 && l < len; j++, l++)
171 out += snprintf(buf + out, count - out, "%02X ",
172 data[(i * 8 + j)]);
173 for (; j < 8; j++)
174 out += snprintf(buf + out, count - out, " ");
175 }
176
177 out += snprintf(buf + out, count - out, " ");
178 for (l = 0, i = 0; i < 2; i++) {
179 out += snprintf(buf + out, count - out, " ");
180 for (j = 0; j < 8 && l < len; j++, l++) {
181 c = data[(i * 8 + j)];
182 if (!isascii(c) || !isprint(c))
183 c = '.';
184
185 out += snprintf(buf + out, count - out, "%c", c);
186 }
187
188 for (; j < 8; j++)
189 out += snprintf(buf + out, count - out, " ");
190 }
191
192 return out;
193 }
194
195 static void printk_buf(int level, const u8 * data, u32 len)
196 {
197 char line[81];
198 u32 ofs = 0;
199 if (!(ipw_debug_level & level))
200 return;
201
202 while (len) {
203 snprint_line(line, sizeof(line), &data[ofs],
204 min(len, 16U), ofs);
205 printk(KERN_DEBUG "%s\n", line);
206 ofs += 16;
207 len -= min(len, 16U);
208 }
209 }
210
211 static int snprintk_buf(u8 * output, size_t size, const u8 * data, size_t len)
212 {
213 size_t out = size;
214 u32 ofs = 0;
215 int total = 0;
216
217 while (size && len) {
218 out = snprint_line(output, size, &data[ofs],
219 min_t(size_t, len, 16U), ofs);
220
221 ofs += 16;
222 output += out;
223 size -= out;
224 len -= min_t(size_t, len, 16U);
225 total += out;
226 }
227 return total;
228 }
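
/*
 * Example (illustrative only): dumping a host command payload at the
 * IPW_DL_HOST_COMMAND debug level, as done in ipw_send_cmd() below:
 *
 *	printk_buf(IPW_DL_HOST_COMMAND, (u8 *) cmd->param, cmd->len);
 *
 * For sixteen printable bytes starting at offset 0, snprint_line()
 * produces a single line roughly of the form:
 *
 *	00000000 41 42 43 44 45 46 47 48  49 4A 4B 4C 4D 4E 4F 50   ABCDEFGH IJKLMNOP
 */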
229
230 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg);
231 #define ipw_read_reg32(a, b) _ipw_read_reg32(a, b)
232
233 static u8 _ipw_read_reg8(struct ipw_priv *ipw, u32 reg);
234 #define ipw_read_reg8(a, b) _ipw_read_reg8(a, b)
235
236 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value);
237 static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c)
238 {
239 IPW_DEBUG_IO("%s %d: write_indirect8(0x%08X, 0x%08X)\n", __FILE__,
240 __LINE__, (u32) (b), (u32) (c));
241 _ipw_write_reg8(a, b, c);
242 }
243
244 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value);
245 static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c)
246 {
247 IPW_DEBUG_IO("%s %d: write_indirect16(0x%08X, 0x%08X)\n", __FILE__,
248 __LINE__, (u32) (b), (u32) (c));
249 _ipw_write_reg16(a, b, c);
250 }
251
252 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value);
253 static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c)
254 {
255 IPW_DEBUG_IO("%s %d: write_indirect32(0x%08X, 0x%08X)\n", __FILE__,
256 __LINE__, (u32) (b), (u32) (c));
257 _ipw_write_reg32(a, b, c);
258 }
259
260 #define _ipw_write8(ipw, ofs, val) writeb((val), (ipw)->hw_base + (ofs))
261 #define ipw_write8(ipw, ofs, val) \
262 IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
263 _ipw_write8(ipw, ofs, val)
264
265 #define _ipw_write16(ipw, ofs, val) writew((val), (ipw)->hw_base + (ofs))
266 #define ipw_write16(ipw, ofs, val) \
267 IPW_DEBUG_IO("%s %d: write_direct16(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
268 _ipw_write16(ipw, ofs, val)
269
270 #define _ipw_write32(ipw, ofs, val) writel((val), (ipw)->hw_base + (ofs))
271 #define ipw_write32(ipw, ofs, val) \
272 IPW_DEBUG_IO("%s %d: write_direct32(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
273 _ipw_write32(ipw, ofs, val)
274
275 #define _ipw_read8(ipw, ofs) readb((ipw)->hw_base + (ofs))
276 static inline u8 __ipw_read8(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
277 {
278 IPW_DEBUG_IO("%s %d: read_direct8(0x%08X)\n", f, l, (u32) (ofs));
279 return _ipw_read8(ipw, ofs);
280 }
281
282 #define ipw_read8(ipw, ofs) __ipw_read8(__FILE__, __LINE__, ipw, ofs)
283
284 #define _ipw_read16(ipw, ofs) readw((ipw)->hw_base + (ofs))
285 static inline u16 __ipw_read16(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
286 {
287 IPW_DEBUG_IO("%s %d: read_direct16(0x%08X)\n", f, l, (u32) (ofs));
288 return _ipw_read16(ipw, ofs);
289 }
290
291 #define ipw_read16(ipw, ofs) __ipw_read16(__FILE__, __LINE__, ipw, ofs)
292
293 #define _ipw_read32(ipw, ofs) readl((ipw)->hw_base + (ofs))
294 static inline u32 __ipw_read32(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
295 {
296 IPW_DEBUG_IO("%s %d: read_direct32(0x%08X)\n", f, l, (u32) (ofs));
297 return _ipw_read32(ipw, ofs);
298 }
299
300 #define ipw_read32(ipw, ofs) __ipw_read32(__FILE__, __LINE__, ipw, ofs)
301
302 static void _ipw_read_indirect(struct ipw_priv *, u32, u8 *, int);
303 static inline void __ipw_read_indirect(const char *f, int l,
304 struct ipw_priv *a, u32 b, u8 * c, int d)
305 {
306 IPW_DEBUG_IO("%s %d: read_indirect(0x%08X) %d bytes\n", f, l, (u32) (b),
307 d);
308 _ipw_read_indirect(a, b, c, d);
309 }
310
311 #define ipw_read_indirect(a, b, c, d) __ipw_read_indirect(__FILE__, __LINE__, a, b, c, d)
312
313 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * data,
314 int num);
315 #define ipw_write_indirect(a, b, c, d) \
316 IPW_DEBUG_IO("%s %d: write_indirect(0x%08X) %d bytes\n", __FILE__, __LINE__, (u32)(b), d); \
317 _ipw_write_indirect(a, b, c, d)
318
319 /* indirect writes */
320 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value)
321 {
322 IPW_DEBUG_IO(" %p : reg = 0x%8X : value = 0x%8X\n", priv, reg, value);
323 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
324 _ipw_write32(priv, IPW_INDIRECT_DATA, value);
325 }
326
327 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value)
328 {
329 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
330 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg & IPW_INDIRECT_ADDR_MASK);
331 _ipw_write8(priv, IPW_INDIRECT_DATA, value);
332 }
333
334 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value)
335 {
336 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
337 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg & IPW_INDIRECT_ADDR_MASK);
338 _ipw_write16(priv, IPW_INDIRECT_DATA, value);
339 }
340
341 /* indirect reads */
342
343 static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg)
344 {
345 u32 word;
346 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg & IPW_INDIRECT_ADDR_MASK);
347 IPW_DEBUG_IO(" reg = 0x%8X : \n", reg);
348 word = _ipw_read32(priv, IPW_INDIRECT_DATA);
349 return (word >> ((reg & 0x3) * 8)) & 0xff;
350 }
351
352 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg)
353 {
354 u32 value;
355
356 IPW_DEBUG_IO("%p : reg = 0x%08x\n", priv, reg);
357
358 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
359 value = _ipw_read32(priv, IPW_INDIRECT_DATA);
360 IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x \n", reg, value);
361 return value;
362 }
363
364 /* iterative/auto-increment 32 bit reads and writes */
365 static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
366 int num)
367 {
368 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK;
369 u32 dif_len = addr - aligned_addr;
370 u32 i;
371
372 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
373
374 if (num <= 0) {
375 return;
376 }
377
378 /* Read the first dword (or its unaligned trailing bytes) byte by byte */
379 if (unlikely(dif_len)) {
380 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
381 /* Start reading at aligned_addr + dif_len */
382 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--)
383 *buf++ = _ipw_read8(priv, IPW_INDIRECT_DATA + i);
384 aligned_addr += 4;
385 }
386
387 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
388 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
389 *(u32 *) buf = _ipw_read32(priv, IPW_AUTOINC_DATA);
390
391 /* Read the last dword (or remaining bytes) byte by byte */
392 if (unlikely(num)) {
393 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
394 for (i = 0; num > 0; i++, num--)
395 *buf++ = ipw_read8(priv, IPW_INDIRECT_DATA + i);
396 }
397 }
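
/*
 * Worked example (hypothetical values): an indirect read of num = 9 bytes
 * from addr = 0x00000802 is split by _ipw_read_indirect() into
 *
 *	- 2 leading bytes (offsets 2 and 3 of the dword at 0x00000800),
 *	  read one at a time through IPW_INDIRECT_DATA,
 *	- 1 full dword at 0x00000804, read through IPW_AUTOINC_DATA, and
 *	- 3 trailing bytes at 0x00000808, again read one at a time.
 *
 * _ipw_write_indirect() below splits an unaligned write the same way.
 */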
398
399 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
400 int num)
401 {
402 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK;
403 u32 dif_len = addr - aligned_addr;
404 u32 i;
405
406 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
407
408 if (num <= 0) {
409 return;
410 }
411
412 /* Write the first dword (or its unaligned trailing bytes) byte by byte */
413 if (unlikely(dif_len)) {
414 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
415 /* Start writing at aligned_addr + dif_len */
416 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--, buf++)
417 _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
418 aligned_addr += 4;
419 }
420
421 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
422 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
423 _ipw_write32(priv, IPW_AUTOINC_DATA, *(u32 *) buf);
424
425 /* Write the last dword (or remaining bytes) byte by byte */
426 if (unlikely(num)) {
427 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
428 for (i = 0; num > 0; i++, num--, buf++)
429 _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
430 }
431 }
432
433 static void ipw_write_direct(struct ipw_priv *priv, u32 addr, void *buf,
434 int num)
435 {
436 memcpy_toio((priv->hw_base + addr), buf, num);
437 }
438
439 static inline void ipw_set_bit(struct ipw_priv *priv, u32 reg, u32 mask)
440 {
441 ipw_write32(priv, reg, ipw_read32(priv, reg) | mask);
442 }
443
444 static inline void ipw_clear_bit(struct ipw_priv *priv, u32 reg, u32 mask)
445 {
446 ipw_write32(priv, reg, ipw_read32(priv, reg) & ~mask);
447 }
448
449 static inline void ipw_enable_interrupts(struct ipw_priv *priv)
450 {
451 if (priv->status & STATUS_INT_ENABLED)
452 return;
453 priv->status |= STATUS_INT_ENABLED;
454 ipw_write32(priv, IPW_INTA_MASK_R, IPW_INTA_MASK_ALL);
455 }
456
457 static inline void ipw_disable_interrupts(struct ipw_priv *priv)
458 {
459 if (!(priv->status & STATUS_INT_ENABLED))
460 return;
461 priv->status &= ~STATUS_INT_ENABLED;
462 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
463 }
464
465 #ifdef CONFIG_IPW2200_DEBUG
466 static char *ipw_error_desc(u32 val)
467 {
468 switch (val) {
469 case IPW_FW_ERROR_OK:
470 return "ERROR_OK";
471 case IPW_FW_ERROR_FAIL:
472 return "ERROR_FAIL";
473 case IPW_FW_ERROR_MEMORY_UNDERFLOW:
474 return "MEMORY_UNDERFLOW";
475 case IPW_FW_ERROR_MEMORY_OVERFLOW:
476 return "MEMORY_OVERFLOW";
477 case IPW_FW_ERROR_BAD_PARAM:
478 return "BAD_PARAM";
479 case IPW_FW_ERROR_BAD_CHECKSUM:
480 return "BAD_CHECKSUM";
481 case IPW_FW_ERROR_NMI_INTERRUPT:
482 return "NMI_INTERRUPT";
483 case IPW_FW_ERROR_BAD_DATABASE:
484 return "BAD_DATABASE";
485 case IPW_FW_ERROR_ALLOC_FAIL:
486 return "ALLOC_FAIL";
487 case IPW_FW_ERROR_DMA_UNDERRUN:
488 return "DMA_UNDERRUN";
489 case IPW_FW_ERROR_DMA_STATUS:
490 return "DMA_STATUS";
491 case IPW_FW_ERROR_DINO_ERROR:
492 return "DINO_ERROR";
493 case IPW_FW_ERROR_EEPROM_ERROR:
494 return "EEPROM_ERROR";
495 case IPW_FW_ERROR_SYSASSERT:
496 return "SYSASSERT";
497 case IPW_FW_ERROR_FATAL_ERROR:
498 return "FATAL_ERROR";
499 default:
500 return "UNKNOWN_ERROR";
501 }
502 }
503
504 static void ipw_dump_error_log(struct ipw_priv *priv,
505 struct ipw_fw_error *error)
506 {
507 u32 i;
508
509 if (!error) {
510 IPW_ERROR("Error allocating and capturing error log. "
511 "Nothing to dump.\n");
512 return;
513 }
514
515 IPW_ERROR("Start IPW Error Log Dump:\n");
516 IPW_ERROR("Status: 0x%08X, Config: %08X\n",
517 error->status, error->config);
518
519 for (i = 0; i < error->elem_len; i++)
520 IPW_ERROR("%s %i 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
521 ipw_error_desc(error->elem[i].desc),
522 error->elem[i].time,
523 error->elem[i].blink1,
524 error->elem[i].blink2,
525 error->elem[i].link1,
526 error->elem[i].link2, error->elem[i].data);
527 for (i = 0; i < error->log_len; i++)
528 IPW_ERROR("%i\t0x%08x\t%i\n",
529 error->log[i].time,
530 error->log[i].data, error->log[i].event);
531 }
532 #endif
533
534 static inline int ipw_is_init(struct ipw_priv *priv)
535 {
536 return (priv->status & STATUS_INIT) ? 1 : 0;
537 }
538
539 static int ipw_get_ordinal(struct ipw_priv *priv, u32 ord, void *val, u32 * len)
540 {
541 u32 addr, field_info, field_len, field_count, total_len;
542
543 IPW_DEBUG_ORD("ordinal = %i\n", ord);
544
545 if (!priv || !val || !len) {
546 IPW_DEBUG_ORD("Invalid argument\n");
547 return -EINVAL;
548 }
549
550 /* verify device ordinal tables have been initialized */
551 if (!priv->table0_addr || !priv->table1_addr || !priv->table2_addr) {
552 IPW_DEBUG_ORD("Access ordinals before initialization\n");
553 return -EINVAL;
554 }
555
556 switch (IPW_ORD_TABLE_ID_MASK & ord) {
557 case IPW_ORD_TABLE_0_MASK:
558 /*
559 * TABLE 0: Direct access to a table of 32 bit values
560 *
561 * This is a very simple table: each 32-bit value is read
562 * directly from the table at table0_addr + (ord << 2)
563 */
564
565 /* remove the table id from the ordinal */
566 ord &= IPW_ORD_TABLE_VALUE_MASK;
567
568 /* boundary check */
569 if (ord > priv->table0_len) {
570 IPW_DEBUG_ORD("ordinal value (%i) longer then "
571 "max (%i)\n", ord, priv->table0_len);
572 return -EINVAL;
573 }
574
575 /* verify we have enough room to store the value */
576 if (*len < sizeof(u32)) {
577 IPW_DEBUG_ORD("ordinal buffer length too small, "
578 "need %zd\n", sizeof(u32));
579 return -EINVAL;
580 }
581
582 IPW_DEBUG_ORD("Reading TABLE0[%i] from offset 0x%08x\n",
583 ord, priv->table0_addr + (ord << 2));
584
585 *len = sizeof(u32);
586 ord <<= 2;
587 *((u32 *) val) = ipw_read32(priv, priv->table0_addr + ord);
588 break;
589
590 case IPW_ORD_TABLE_1_MASK:
591 /*
592 * TABLE 1: Indirect access to a table of 32 bit values
593 *
594 * This is a fairly large table of u32 values, each
595 * representing the starting addr for the data (which is
596 * also a u32)
597 */
598
599 /* remove the table id from the ordinal */
600 ord &= IPW_ORD_TABLE_VALUE_MASK;
601
602 /* boundary check */
603 if (ord > priv->table1_len) {
604 IPW_DEBUG_ORD("ordinal value too long\n");
605 return -EINVAL;
606 }
607
608 /* verify we have enough room to store the value */
609 if (*len < sizeof(u32)) {
610 IPW_DEBUG_ORD("ordinal buffer length too small, "
611 "need %zd\n", sizeof(u32));
612 return -EINVAL;
613 }
614
615 *((u32 *) val) =
616 ipw_read_reg32(priv, (priv->table1_addr + (ord << 2)));
617 *len = sizeof(u32);
618 break;
619
620 case IPW_ORD_TABLE_2_MASK:
621 /*
622 * TABLE 2: Indirect access to a table of variable sized values
623 *
624 * This table consists of six entries, each containing
625 * - a dword holding the starting offset of the data
626 * - a dword holding the length in the first 16 bits
627 * and the count in the second 16 bits
628 */
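
/*
 * Worked example (hypothetical values, little-endian host assumed): if
 * field_info reads back as 0x00040010, then field_len = 0x0010 (16 bytes
 * per entry) and field_count = 0x0004 (4 entries), so total_len = 64
 * bytes must fit in the caller's buffer.
 */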
629
630 /* remove the table id from the ordinal */
631 ord &= IPW_ORD_TABLE_VALUE_MASK;
632
633 /* boundary check */
634 if (ord > priv->table2_len) {
635 IPW_DEBUG_ORD("ordinal value too long\n");
636 return -EINVAL;
637 }
638
639 /* get the address of statistic */
640 addr = ipw_read_reg32(priv, priv->table2_addr + (ord << 3));
641
642 /* get the second DW of statistics;
643 * two 16-bit words - first is length, second is count */
644 field_info =
645 ipw_read_reg32(priv,
646 priv->table2_addr + (ord << 3) +
647 sizeof(u32));
648
649 /* get each entry length */
650 field_len = *((u16 *) & field_info);
651
652 /* get number of entries */
653 field_count = *(((u16 *) & field_info) + 1);
654
655 /* abort if not enough memory */
656 total_len = field_len * field_count;
657 if (total_len > *len) {
658 *len = total_len;
659 return -EINVAL;
660 }
661
662 *len = total_len;
663 if (!total_len)
664 return 0;
665
666 IPW_DEBUG_ORD("addr = 0x%08x, total_len = %i, "
667 "field_info = 0x%08x\n",
668 addr, total_len, field_info);
669 ipw_read_indirect(priv, addr, val, total_len);
670 break;
671
672 default:
673 IPW_DEBUG_ORD("Invalid ordinal!\n");
674 return -EINVAL;
675
676 }
677
678 return 0;
679 }
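
/*
 * Usage sketch (mirrors show_ucode_version() further below): given a
 * struct ipw_priv *priv, read a single 32-bit ordinal into a local:
 *
 *	u32 tmp = 0;
 *	u32 len = sizeof(u32);
 *
 *	if (ipw_get_ordinal(priv, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len) == 0)
 *		IPW_DEBUG_ORD("uCode version: 0x%08x\n", tmp);
 */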
680
681 static void ipw_init_ordinals(struct ipw_priv *priv)
682 {
683 priv->table0_addr = IPW_ORDINALS_TABLE_LOWER;
684 priv->table0_len = ipw_read32(priv, priv->table0_addr);
685
686 IPW_DEBUG_ORD("table 0 offset at 0x%08x, len = %i\n",
687 priv->table0_addr, priv->table0_len);
688
689 priv->table1_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_1);
690 priv->table1_len = ipw_read_reg32(priv, priv->table1_addr);
691
692 IPW_DEBUG_ORD("table 1 offset at 0x%08x, len = %i\n",
693 priv->table1_addr, priv->table1_len);
694
695 priv->table2_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_2);
696 priv->table2_len = ipw_read_reg32(priv, priv->table2_addr);
697 priv->table2_len &= 0x0000ffff; /* use first two bytes */
698
699 IPW_DEBUG_ORD("table 2 offset at 0x%08x, len = %i\n",
700 priv->table2_addr, priv->table2_len);
701
702 }
703
704 u32 ipw_register_toggle(u32 reg)
705 {
706 reg &= ~IPW_START_STANDBY;
707 if (reg & IPW_GATE_ODMA)
708 reg &= ~IPW_GATE_ODMA;
709 if (reg & IPW_GATE_IDMA)
710 reg &= ~IPW_GATE_IDMA;
711 if (reg & IPW_GATE_ADMA)
712 reg &= ~IPW_GATE_ADMA;
713 return reg;
714 }
715
716 /*
717 * LED behavior:
718 * - On radio ON, turn on any LEDs that need to be on during start
719 * - On initialization, start unassociated blink
720 * - On association, disable unassociated blink
721 * - On disassociation, start unassociated blink
722 * - On radio OFF, turn off any LEDs started during radio on
723 *
724 */
725 #define LD_TIME_LINK_ON 300
726 #define LD_TIME_LINK_OFF 2700
727 #define LD_TIME_ACT_ON 250
728
729 void ipw_led_link_on(struct ipw_priv *priv)
730 {
731 unsigned long flags;
732 u32 led;
733
734 /* If configured not to use LEDs, or nic_type is 1,
735 * then we don't toggle the LINK LED */
736 if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
737 return;
738
739 spin_lock_irqsave(&priv->lock, flags);
740
741 if (!(priv->status & STATUS_RF_KILL_MASK) &&
742 !(priv->status & STATUS_LED_LINK_ON)) {
743 IPW_DEBUG_LED("Link LED On\n");
744 led = ipw_read_reg32(priv, IPW_EVENT_REG);
745 led |= priv->led_association_on;
746
747 led = ipw_register_toggle(led);
748
749 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
750 ipw_write_reg32(priv, IPW_EVENT_REG, led);
751
752 priv->status |= STATUS_LED_LINK_ON;
753
754 /* If we aren't associated, schedule turning the LED off */
755 if (!(priv->status & STATUS_ASSOCIATED))
756 queue_delayed_work(priv->workqueue,
757 &priv->led_link_off,
758 LD_TIME_LINK_ON);
759 }
760
761 spin_unlock_irqrestore(&priv->lock, flags);
762 }
763
764 static void ipw_bg_led_link_on(void *data)
765 {
766 struct ipw_priv *priv = data;
767 down(&priv->sem);
768 ipw_led_link_on(data);
769 up(&priv->sem);
770 }
771
772 void ipw_led_link_off(struct ipw_priv *priv)
773 {
774 unsigned long flags;
775 u32 led;
776
777 /* If configured not to use LEDs, or nic type is 1,
778 * then we don't toggle the LINK LED. */
779 if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
780 return;
781
782 spin_lock_irqsave(&priv->lock, flags);
783
784 if (priv->status & STATUS_LED_LINK_ON) {
785 led = ipw_read_reg32(priv, IPW_EVENT_REG);
786 led &= priv->led_association_off;
787 led = ipw_register_toggle(led);
788
789 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
790 ipw_write_reg32(priv, IPW_EVENT_REG, led);
791
792 IPW_DEBUG_LED("Link LED Off\n");
793
794 priv->status &= ~STATUS_LED_LINK_ON;
795
796 /* If we aren't associated and the radio is on, schedule
797 * turning the LED on (blink while unassociated) */
798 if (!(priv->status & STATUS_RF_KILL_MASK) &&
799 !(priv->status & STATUS_ASSOCIATED))
800 queue_delayed_work(priv->workqueue, &priv->led_link_on,
801 LD_TIME_LINK_OFF);
802
803 }
804
805 spin_unlock_irqrestore(&priv->lock, flags);
806 }
807
808 static void ipw_bg_led_link_off(void *data)
809 {
810 struct ipw_priv *priv = data;
811 down(&priv->sem);
812 ipw_led_link_off(data);
813 up(&priv->sem);
814 }
815
816 static void __ipw_led_activity_on(struct ipw_priv *priv)
817 {
818 u32 led;
819
820 if (priv->config & CFG_NO_LED)
821 return;
822
823 if (priv->status & STATUS_RF_KILL_MASK)
824 return;
825
826 if (!(priv->status & STATUS_LED_ACT_ON)) {
827 led = ipw_read_reg32(priv, IPW_EVENT_REG);
828 led |= priv->led_activity_on;
829
830 led = ipw_register_toggle(led);
831
832 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
833 ipw_write_reg32(priv, IPW_EVENT_REG, led);
834
835 IPW_DEBUG_LED("Activity LED On\n");
836
837 priv->status |= STATUS_LED_ACT_ON;
838
839 cancel_delayed_work(&priv->led_act_off);
840 queue_delayed_work(priv->workqueue, &priv->led_act_off,
841 LD_TIME_ACT_ON);
842 } else {
843 /* Reschedule LED off for full time period */
844 cancel_delayed_work(&priv->led_act_off);
845 queue_delayed_work(priv->workqueue, &priv->led_act_off,
846 LD_TIME_ACT_ON);
847 }
848 }
849
850 void ipw_led_activity_on(struct ipw_priv *priv)
851 {
852 unsigned long flags;
853 spin_lock_irqsave(&priv->lock, flags);
854 __ipw_led_activity_on(priv);
855 spin_unlock_irqrestore(&priv->lock, flags);
856 }
857
858 void ipw_led_activity_off(struct ipw_priv *priv)
859 {
860 unsigned long flags;
861 u32 led;
862
863 if (priv->config & CFG_NO_LED)
864 return;
865
866 spin_lock_irqsave(&priv->lock, flags);
867
868 if (priv->status & STATUS_LED_ACT_ON) {
869 led = ipw_read_reg32(priv, IPW_EVENT_REG);
870 led &= priv->led_activity_off;
871
872 led = ipw_register_toggle(led);
873
874 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
875 ipw_write_reg32(priv, IPW_EVENT_REG, led);
876
877 IPW_DEBUG_LED("Activity LED Off\n");
878
879 priv->status &= ~STATUS_LED_ACT_ON;
880 }
881
882 spin_unlock_irqrestore(&priv->lock, flags);
883 }
884
885 static void ipw_bg_led_activity_off(void *data)
886 {
887 struct ipw_priv *priv = data;
888 down(&priv->sem);
889 ipw_led_activity_off(data);
890 up(&priv->sem);
891 }
892
893 void ipw_led_band_on(struct ipw_priv *priv)
894 {
895 unsigned long flags;
896 u32 led;
897
898 /* Only nic type 1 supports mode LEDs */
899 if (priv->config & CFG_NO_LED ||
900 priv->nic_type != EEPROM_NIC_TYPE_1 || !priv->assoc_network)
901 return;
902
903 spin_lock_irqsave(&priv->lock, flags);
904
905 led = ipw_read_reg32(priv, IPW_EVENT_REG);
906 if (priv->assoc_network->mode == IEEE_A) {
907 led |= priv->led_ofdm_on;
908 led &= priv->led_association_off;
909 IPW_DEBUG_LED("Mode LED On: 802.11a\n");
910 } else if (priv->assoc_network->mode == IEEE_G) {
911 led |= priv->led_ofdm_on;
912 led |= priv->led_association_on;
913 IPW_DEBUG_LED("Mode LED On: 802.11g\n");
914 } else {
915 led &= priv->led_ofdm_off;
916 led |= priv->led_association_on;
917 IPW_DEBUG_LED("Mode LED On: 802.11b\n");
918 }
919
920 led = ipw_register_toggle(led);
921
922 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
923 ipw_write_reg32(priv, IPW_EVENT_REG, led);
924
925 spin_unlock_irqrestore(&priv->lock, flags);
926 }
927
928 void ipw_led_band_off(struct ipw_priv *priv)
929 {
930 unsigned long flags;
931 u32 led;
932
933 /* Only nic type 1 supports mode LEDs */
934 if (priv->config & CFG_NO_LED || priv->nic_type != EEPROM_NIC_TYPE_1)
935 return;
936
937 spin_lock_irqsave(&priv->lock, flags);
938
939 led = ipw_read_reg32(priv, IPW_EVENT_REG);
940 led &= priv->led_ofdm_off;
941 led &= priv->led_association_off;
942
943 led = ipw_register_toggle(led);
944
945 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
946 ipw_write_reg32(priv, IPW_EVENT_REG, led);
947
948 spin_unlock_irqrestore(&priv->lock, flags);
949 }
950
951 void ipw_led_radio_on(struct ipw_priv *priv)
952 {
953 ipw_led_link_on(priv);
954 }
955
956 void ipw_led_radio_off(struct ipw_priv *priv)
957 {
958 ipw_led_activity_off(priv);
959 ipw_led_link_off(priv);
960 }
961
962 void ipw_led_link_up(struct ipw_priv *priv)
963 {
964 /* Set the Link Led on for all nic types */
965 ipw_led_link_on(priv);
966 }
967
968 void ipw_led_link_down(struct ipw_priv *priv)
969 {
970 ipw_led_activity_off(priv);
971 ipw_led_link_off(priv);
972
973 if (priv->status & STATUS_RF_KILL_MASK)
974 ipw_led_radio_off(priv);
975 }
976
977 void ipw_led_init(struct ipw_priv *priv)
978 {
979 priv->nic_type = priv->eeprom[EEPROM_NIC_TYPE];
980
981 /* Set the default PINs for the link and activity leds */
982 priv->led_activity_on = IPW_ACTIVITY_LED;
983 priv->led_activity_off = ~(IPW_ACTIVITY_LED);
984
985 priv->led_association_on = IPW_ASSOCIATED_LED;
986 priv->led_association_off = ~(IPW_ASSOCIATED_LED);
987
988 /* Set the default PINs for the OFDM leds */
989 priv->led_ofdm_on = IPW_OFDM_LED;
990 priv->led_ofdm_off = ~(IPW_OFDM_LED);
991
992 switch (priv->nic_type) {
993 case EEPROM_NIC_TYPE_1:
994 /* In this NIC type, the LEDs are reversed.... */
995 priv->led_activity_on = IPW_ASSOCIATED_LED;
996 priv->led_activity_off = ~(IPW_ASSOCIATED_LED);
997 priv->led_association_on = IPW_ACTIVITY_LED;
998 priv->led_association_off = ~(IPW_ACTIVITY_LED);
999
1000 if (!(priv->config & CFG_NO_LED))
1001 ipw_led_band_on(priv);
1002
1003 /* And we don't blink link LEDs for this nic, so
1004 * just return here */
1005 return;
1006
1007 case EEPROM_NIC_TYPE_3:
1008 case EEPROM_NIC_TYPE_2:
1009 case EEPROM_NIC_TYPE_4:
1010 case EEPROM_NIC_TYPE_0:
1011 break;
1012
1013 default:
1014 IPW_DEBUG_INFO("Unknown NIC type from EEPROM: %d\n",
1015 priv->nic_type);
1016 priv->nic_type = EEPROM_NIC_TYPE_0;
1017 break;
1018 }
1019
1020 if (!(priv->config & CFG_NO_LED)) {
1021 if (priv->status & STATUS_ASSOCIATED)
1022 ipw_led_link_on(priv);
1023 else
1024 ipw_led_link_off(priv);
1025 }
1026 }
1027
1028 void ipw_led_shutdown(struct ipw_priv *priv)
1029 {
1030 ipw_led_activity_off(priv);
1031 ipw_led_link_off(priv);
1032 ipw_led_band_off(priv);
1033 cancel_delayed_work(&priv->led_link_on);
1034 cancel_delayed_work(&priv->led_link_off);
1035 cancel_delayed_work(&priv->led_act_off);
1036 }
1037
1038 /*
1039 * The following adds a new attribute to the sysfs representation
1040 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/ipw/)
1041 * used for controlling the debug level.
1042 *
1043 * See the level definitions in ipw for details.
1044 */
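
/*
 * Usage sketch (assuming the usual sysfs mount point): the value can be
 * read back or changed at runtime from userspace, e.g.
 *
 *	cat /sys/bus/pci/drivers/ipw/debug_level
 *	echo 0x1 > /sys/bus/pci/drivers/ipw/debug_level
 *
 * Both hexadecimal (leading "x" or "0x") and decimal input are accepted
 * by store_debug_level() below.
 */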
1045 static ssize_t show_debug_level(struct device_driver *d, char *buf)
1046 {
1047 return sprintf(buf, "0x%08X\n", ipw_debug_level);
1048 }
1049
1050 static ssize_t store_debug_level(struct device_driver *d, const char *buf,
1051 size_t count)
1052 {
1053 char *p = (char *)buf;
1054 u32 val;
1055
1056 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1057 p++;
1058 if (p[0] == 'x' || p[0] == 'X')
1059 p++;
1060 val = simple_strtoul(p, &p, 16);
1061 } else
1062 val = simple_strtoul(p, &p, 10);
1063 if (p == buf)
1064 printk(KERN_INFO DRV_NAME
1065 ": %s is not in hex or decimal form.\n", buf);
1066 else
1067 ipw_debug_level = val;
1068
1069 return strnlen(buf, count);
1070 }
1071
1072 static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO,
1073 show_debug_level, store_debug_level);
1074
1075 static inline u32 ipw_get_event_log_len(struct ipw_priv *priv)
1076 {
1077 return ipw_read_reg32(priv, ipw_read32(priv, IPW_EVENT_LOG));
1078 }
1079
1080 static void ipw_capture_event_log(struct ipw_priv *priv,
1081 u32 log_len, struct ipw_event *log)
1082 {
1083 u32 base;
1084
1085 if (log_len) {
1086 base = ipw_read32(priv, IPW_EVENT_LOG);
1087 ipw_read_indirect(priv, base + sizeof(base) + sizeof(u32),
1088 (u8 *) log, sizeof(*log) * log_len);
1089 }
1090 }
1091
1092 static struct ipw_fw_error *ipw_alloc_error_log(struct ipw_priv *priv)
1093 {
1094 struct ipw_fw_error *error;
1095 u32 log_len = ipw_get_event_log_len(priv);
1096 u32 base = ipw_read32(priv, IPW_ERROR_LOG);
1097 u32 elem_len = ipw_read_reg32(priv, base);
1098
1099 error = kmalloc(sizeof(*error) +
1100 sizeof(*error->elem) * elem_len +
1101 sizeof(*error->log) * log_len, GFP_ATOMIC);
1102 if (!error) {
1103 IPW_ERROR("Memory allocation for firmware error log "
1104 "failed.\n");
1105 return NULL;
1106 }
1107 error->jiffies = jiffies;
1108 error->status = priv->status;
1109 error->config = priv->config;
1110 error->elem_len = elem_len;
1111 error->log_len = log_len;
1112 error->elem = (struct ipw_error_elem *)error->payload;
1113 error->log = (struct ipw_event *)(error->elem + elem_len);
1114
1115 ipw_capture_event_log(priv, log_len, error->log);
1116
1117 if (elem_len)
1118 ipw_read_indirect(priv, base + sizeof(base), (u8 *) error->elem,
1119 sizeof(*error->elem) * elem_len);
1120
1121 return error;
1122 }
1123
1124 static void ipw_free_error_log(struct ipw_fw_error *error)
1125 {
1126 if (error)
1127 kfree(error);
1128 }
1129
1130 static ssize_t show_event_log(struct device *d,
1131 struct device_attribute *attr, char *buf)
1132 {
1133 struct ipw_priv *priv = dev_get_drvdata(d);
1134 u32 log_len = ipw_get_event_log_len(priv);
1135 struct ipw_event log[log_len];
1136 u32 len = 0, i;
1137
1138 ipw_capture_event_log(priv, log_len, log);
1139
1140 len += snprintf(buf + len, PAGE_SIZE - len, "%08X", log_len);
1141 for (i = 0; i < log_len; i++)
1142 len += snprintf(buf + len, PAGE_SIZE - len,
1143 "\n%08X%08X%08X",
1144 log[i].time, log[i].event, log[i].data);
1145 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1146 return len;
1147 }
1148
1149 static DEVICE_ATTR(event_log, S_IRUGO, show_event_log, NULL);
1150
1151 static ssize_t show_error(struct device *d,
1152 struct device_attribute *attr, char *buf)
1153 {
1154 struct ipw_priv *priv = dev_get_drvdata(d);
1155 u32 len = 0, i;
1156 if (!priv->error)
1157 return 0;
1158 len += snprintf(buf + len, PAGE_SIZE - len,
1159 "%08lX%08X%08X%08X",
1160 priv->error->jiffies,
1161 priv->error->status,
1162 priv->error->config, priv->error->elem_len);
1163 for (i = 0; i < priv->error->elem_len; i++)
1164 len += snprintf(buf + len, PAGE_SIZE - len,
1165 "\n%08X%08X%08X%08X%08X%08X%08X",
1166 priv->error->elem[i].time,
1167 priv->error->elem[i].desc,
1168 priv->error->elem[i].blink1,
1169 priv->error->elem[i].blink2,
1170 priv->error->elem[i].link1,
1171 priv->error->elem[i].link2,
1172 priv->error->elem[i].data);
1173
1174 len += snprintf(buf + len, PAGE_SIZE - len,
1175 "\n%08X", priv->error->log_len);
1176 for (i = 0; i < priv->error->log_len; i++)
1177 len += snprintf(buf + len, PAGE_SIZE - len,
1178 "\n%08X%08X%08X",
1179 priv->error->log[i].time,
1180 priv->error->log[i].event,
1181 priv->error->log[i].data);
1182 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1183 return len;
1184 }
1185
1186 static ssize_t clear_error(struct device *d,
1187 struct device_attribute *attr,
1188 const char *buf, size_t count)
1189 {
1190 struct ipw_priv *priv = dev_get_drvdata(d);
1191 if (priv->error) {
1192 ipw_free_error_log(priv->error);
1193 priv->error = NULL;
1194 }
1195 return count;
1196 }
1197
1198 static DEVICE_ATTR(error, S_IRUGO | S_IWUSR, show_error, clear_error);
1199
1200 static ssize_t show_cmd_log(struct device *d,
1201 struct device_attribute *attr, char *buf)
1202 {
1203 struct ipw_priv *priv = dev_get_drvdata(d);
1204 u32 len = 0, i;
1205 if (!priv->cmdlog)
1206 return 0;
1207 for (i = (priv->cmdlog_pos + 1) % priv->cmdlog_len;
1208 (i != priv->cmdlog_pos) && (PAGE_SIZE - len);
1209 i = (i + 1) % priv->cmdlog_len) {
1210 len +=
1211 snprintf(buf + len, PAGE_SIZE - len,
1212 "\n%08lX%08X%08X%08X\n", priv->cmdlog[i].jiffies,
1213 priv->cmdlog[i].retcode, priv->cmdlog[i].cmd.cmd,
1214 priv->cmdlog[i].cmd.len);
1215 len +=
1216 snprintk_buf(buf + len, PAGE_SIZE - len,
1217 (u8 *) priv->cmdlog[i].cmd.param,
1218 priv->cmdlog[i].cmd.len);
1219 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1220 }
1221 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1222 return len;
1223 }
1224
1225 static DEVICE_ATTR(cmd_log, S_IRUGO, show_cmd_log, NULL);
1226
1227 static ssize_t show_scan_age(struct device *d, struct device_attribute *attr,
1228 char *buf)
1229 {
1230 struct ipw_priv *priv = dev_get_drvdata(d);
1231 return sprintf(buf, "%d\n", priv->ieee->scan_age);
1232 }
1233
1234 static ssize_t store_scan_age(struct device *d, struct device_attribute *attr,
1235 const char *buf, size_t count)
1236 {
1237 struct ipw_priv *priv = dev_get_drvdata(d);
1238 #ifdef CONFIG_IPW2200_DEBUG
1239 struct net_device *dev = priv->net_dev;
1240 #endif
1241 char buffer[] = "00000000";
1242 unsigned long len =
1243 (sizeof(buffer) - 1) > count ? count : sizeof(buffer) - 1;
1244 unsigned long val;
1245 char *p = buffer;
1246
1247 IPW_DEBUG_INFO("enter\n");
1248
1249 strncpy(buffer, buf, len);
1250 buffer[len] = 0;
1251
1252 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1253 p++;
1254 if (p[0] == 'x' || p[0] == 'X')
1255 p++;
1256 val = simple_strtoul(p, &p, 16);
1257 } else
1258 val = simple_strtoul(p, &p, 10);
1259 if (p == buffer) {
1260 IPW_DEBUG_INFO("%s: user supplied invalid value.\n", dev->name);
1261 } else {
1262 priv->ieee->scan_age = val;
1263 IPW_DEBUG_INFO("set scan_age = %u\n", priv->ieee->scan_age);
1264 }
1265
1266 IPW_DEBUG_INFO("exit\n");
1267 return len;
1268 }
1269
1270 static DEVICE_ATTR(scan_age, S_IWUSR | S_IRUGO, show_scan_age, store_scan_age);
1271
1272 static ssize_t show_led(struct device *d, struct device_attribute *attr,
1273 char *buf)
1274 {
1275 struct ipw_priv *priv = dev_get_drvdata(d);
1276 return sprintf(buf, "%d\n", (priv->config & CFG_NO_LED) ? 0 : 1);
1277 }
1278
1279 static ssize_t store_led(struct device *d, struct device_attribute *attr,
1280 const char *buf, size_t count)
1281 {
1282 struct ipw_priv *priv = dev_get_drvdata(d);
1283
1284 IPW_DEBUG_INFO("enter\n");
1285
1286 if (count == 0)
1287 return 0;
1288
1289 if (*buf == 0) {
1290 IPW_DEBUG_LED("Disabling LED control.\n");
1291 priv->config |= CFG_NO_LED;
1292 ipw_led_shutdown(priv);
1293 } else {
1294 IPW_DEBUG_LED("Enabling LED control.\n");
1295 priv->config &= ~CFG_NO_LED;
1296 ipw_led_init(priv);
1297 }
1298
1299 IPW_DEBUG_INFO("exit\n");
1300 return count;
1301 }
1302
1303 static DEVICE_ATTR(led, S_IWUSR | S_IRUGO, show_led, store_led);
1304
1305 static ssize_t show_status(struct device *d,
1306 struct device_attribute *attr, char *buf)
1307 {
1308 struct ipw_priv *p = d->driver_data;
1309 return sprintf(buf, "0x%08x\n", (int)p->status);
1310 }
1311
1312 static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
1313
1314 static ssize_t show_cfg(struct device *d, struct device_attribute *attr,
1315 char *buf)
1316 {
1317 struct ipw_priv *p = d->driver_data;
1318 return sprintf(buf, "0x%08x\n", (int)p->config);
1319 }
1320
1321 static DEVICE_ATTR(cfg, S_IRUGO, show_cfg, NULL);
1322
1323 static ssize_t show_nic_type(struct device *d,
1324 struct device_attribute *attr, char *buf)
1325 {
1326 struct ipw_priv *priv = d->driver_data;
1327 return sprintf(buf, "TYPE: %d\n", priv->nic_type);
1328 }
1329
1330 static DEVICE_ATTR(nic_type, S_IRUGO, show_nic_type, NULL);
1331
1332 static ssize_t show_ucode_version(struct device *d,
1333 struct device_attribute *attr, char *buf)
1334 {
1335 u32 len = sizeof(u32), tmp = 0;
1336 struct ipw_priv *p = d->driver_data;
1337
1338 if (ipw_get_ordinal(p, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len))
1339 return 0;
1340
1341 return sprintf(buf, "0x%08x\n", tmp);
1342 }
1343
1344 static DEVICE_ATTR(ucode_version, S_IWUSR | S_IRUGO, show_ucode_version, NULL);
1345
1346 static ssize_t show_rtc(struct device *d, struct device_attribute *attr,
1347 char *buf)
1348 {
1349 u32 len = sizeof(u32), tmp = 0;
1350 struct ipw_priv *p = d->driver_data;
1351
1352 if (ipw_get_ordinal(p, IPW_ORD_STAT_RTC, &tmp, &len))
1353 return 0;
1354
1355 return sprintf(buf, "0x%08x\n", tmp);
1356 }
1357
1358 static DEVICE_ATTR(rtc, S_IWUSR | S_IRUGO, show_rtc, NULL);
1359
1360 /*
1361 * Add a device attribute to view/control the delay between eeprom
1362 * operations.
1363 */
1364 static ssize_t show_eeprom_delay(struct device *d,
1365 struct device_attribute *attr, char *buf)
1366 {
1367 int n = ((struct ipw_priv *)d->driver_data)->eeprom_delay;
1368 return sprintf(buf, "%i\n", n);
1369 }
1370 static ssize_t store_eeprom_delay(struct device *d,
1371 struct device_attribute *attr,
1372 const char *buf, size_t count)
1373 {
1374 struct ipw_priv *p = d->driver_data;
1375 sscanf(buf, "%i", &p->eeprom_delay);
1376 return strnlen(buf, count);
1377 }
1378
1379 static DEVICE_ATTR(eeprom_delay, S_IWUSR | S_IRUGO,
1380 show_eeprom_delay, store_eeprom_delay);
1381
1382 static ssize_t show_command_event_reg(struct device *d,
1383 struct device_attribute *attr, char *buf)
1384 {
1385 u32 reg = 0;
1386 struct ipw_priv *p = d->driver_data;
1387
1388 reg = ipw_read_reg32(p, IPW_INTERNAL_CMD_EVENT);
1389 return sprintf(buf, "0x%08x\n", reg);
1390 }
1391 static ssize_t store_command_event_reg(struct device *d,
1392 struct device_attribute *attr,
1393 const char *buf, size_t count)
1394 {
1395 u32 reg;
1396 struct ipw_priv *p = d->driver_data;
1397
1398 sscanf(buf, "%x", &reg);
1399 ipw_write_reg32(p, IPW_INTERNAL_CMD_EVENT, reg);
1400 return strnlen(buf, count);
1401 }
1402
1403 static DEVICE_ATTR(command_event_reg, S_IWUSR | S_IRUGO,
1404 show_command_event_reg, store_command_event_reg);
1405
1406 static ssize_t show_mem_gpio_reg(struct device *d,
1407 struct device_attribute *attr, char *buf)
1408 {
1409 u32 reg = 0;
1410 struct ipw_priv *p = d->driver_data;
1411
1412 reg = ipw_read_reg32(p, 0x301100);
1413 return sprintf(buf, "0x%08x\n", reg);
1414 }
1415 static ssize_t store_mem_gpio_reg(struct device *d,
1416 struct device_attribute *attr,
1417 const char *buf, size_t count)
1418 {
1419 u32 reg;
1420 struct ipw_priv *p = d->driver_data;
1421
1422 sscanf(buf, "%x", &reg);
1423 ipw_write_reg32(p, 0x301100, reg);
1424 return strnlen(buf, count);
1425 }
1426
1427 static DEVICE_ATTR(mem_gpio_reg, S_IWUSR | S_IRUGO,
1428 show_mem_gpio_reg, store_mem_gpio_reg);
1429
1430 static ssize_t show_indirect_dword(struct device *d,
1431 struct device_attribute *attr, char *buf)
1432 {
1433 u32 reg = 0;
1434 struct ipw_priv *priv = d->driver_data;
1435
1436 if (priv->status & STATUS_INDIRECT_DWORD)
1437 reg = ipw_read_reg32(priv, priv->indirect_dword);
1438 else
1439 reg = 0;
1440
1441 return sprintf(buf, "0x%08x\n", reg);
1442 }
1443 static ssize_t store_indirect_dword(struct device *d,
1444 struct device_attribute *attr,
1445 const char *buf, size_t count)
1446 {
1447 struct ipw_priv *priv = d->driver_data;
1448
1449 sscanf(buf, "%x", &priv->indirect_dword);
1450 priv->status |= STATUS_INDIRECT_DWORD;
1451 return strnlen(buf, count);
1452 }
1453
1454 static DEVICE_ATTR(indirect_dword, S_IWUSR | S_IRUGO,
1455 show_indirect_dword, store_indirect_dword);
1456
1457 static ssize_t show_indirect_byte(struct device *d,
1458 struct device_attribute *attr, char *buf)
1459 {
1460 u8 reg = 0;
1461 struct ipw_priv *priv = d->driver_data;
1462
1463 if (priv->status & STATUS_INDIRECT_BYTE)
1464 reg = ipw_read_reg8(priv, priv->indirect_byte);
1465 else
1466 reg = 0;
1467
1468 return sprintf(buf, "0x%02x\n", reg);
1469 }
1470 static ssize_t store_indirect_byte(struct device *d,
1471 struct device_attribute *attr,
1472 const char *buf, size_t count)
1473 {
1474 struct ipw_priv *priv = d->driver_data;
1475
1476 sscanf(buf, "%x", &priv->indirect_byte);
1477 priv->status |= STATUS_INDIRECT_BYTE;
1478 return strnlen(buf, count);
1479 }
1480
1481 static DEVICE_ATTR(indirect_byte, S_IWUSR | S_IRUGO,
1482 show_indirect_byte, store_indirect_byte);
1483
1484 static ssize_t show_direct_dword(struct device *d,
1485 struct device_attribute *attr, char *buf)
1486 {
1487 u32 reg = 0;
1488 struct ipw_priv *priv = d->driver_data;
1489
1490 if (priv->status & STATUS_DIRECT_DWORD)
1491 reg = ipw_read32(priv, priv->direct_dword);
1492 else
1493 reg = 0;
1494
1495 return sprintf(buf, "0x%08x\n", reg);
1496 }
1497 static ssize_t store_direct_dword(struct device *d,
1498 struct device_attribute *attr,
1499 const char *buf, size_t count)
1500 {
1501 struct ipw_priv *priv = d->driver_data;
1502
1503 sscanf(buf, "%x", &priv->direct_dword);
1504 priv->status |= STATUS_DIRECT_DWORD;
1505 return strnlen(buf, count);
1506 }
1507
1508 static DEVICE_ATTR(direct_dword, S_IWUSR | S_IRUGO,
1509 show_direct_dword, store_direct_dword);
1510
1511 static int rf_kill_active(struct ipw_priv *priv)
1512 {
1513 if (0 == (ipw_read32(priv, 0x30) & 0x10000))
1514 priv->status |= STATUS_RF_KILL_HW;
1515 else
1516 priv->status &= ~STATUS_RF_KILL_HW;
1517
1518 return (priv->status & STATUS_RF_KILL_HW) ? 1 : 0;
1519 }
1520
1521 static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr,
1522 char *buf)
1523 {
1524 /* 0 - RF kill not enabled
1525 1 - SW based RF kill active (sysfs)
1526 2 - HW based RF kill active
1527 3 - Both HW and SW based RF kill active */
1528 struct ipw_priv *priv = d->driver_data;
1529 int val = ((priv->status & STATUS_RF_KILL_SW) ? 0x1 : 0x0) |
1530 (rf_kill_active(priv) ? 0x2 : 0x0);
1531 return sprintf(buf, "%i\n", val);
1532 }
1533
1534 static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
1535 {
1536 if ((disable_radio ? 1 : 0) ==
1537 ((priv->status & STATUS_RF_KILL_SW) ? 1 : 0))
1538 return 0;
1539
1540 IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO %s\n",
1541 disable_radio ? "OFF" : "ON");
1542
1543 if (disable_radio) {
1544 priv->status |= STATUS_RF_KILL_SW;
1545
1546 if (priv->workqueue)
1547 cancel_delayed_work(&priv->request_scan);
1548 queue_work(priv->workqueue, &priv->down);
1549 } else {
1550 priv->status &= ~STATUS_RF_KILL_SW;
1551 if (rf_kill_active(priv)) {
1552 IPW_DEBUG_RF_KILL("Can not turn radio back on - "
1553 "disabled by HW switch\n");
1554 /* Make sure the RF_KILL check timer is running */
1555 cancel_delayed_work(&priv->rf_kill);
1556 queue_delayed_work(priv->workqueue, &priv->rf_kill,
1557 2 * HZ);
1558 } else
1559 queue_work(priv->workqueue, &priv->up);
1560 }
1561
1562 return 1;
1563 }
1564
1565 static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr,
1566 const char *buf, size_t count)
1567 {
1568 struct ipw_priv *priv = d->driver_data;
1569
1570 ipw_radio_kill_sw(priv, buf[0] == '1');
1571
1572 return count;
1573 }
1574
1575 static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill);
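
/*
 * Usage sketch (path is illustrative; the attribute lives in the PCI
 * device's sysfs directory): query the combined SW/HW rf_kill state and
 * assert the software kill switch:
 *
 *	cat /sys/bus/pci/devices/<bdf>/rf_kill
 *	echo 1 > /sys/bus/pci/devices/<bdf>/rf_kill
 *
 * store_rf_kill() treats a leading '1' as "disable the radio" and any
 * other input as "enable it", per ipw_radio_kill_sw() above.
 */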
1576
1577 static ssize_t show_speed_scan(struct device *d, struct device_attribute *attr,
1578 char *buf)
1579 {
1580 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1581 int pos = 0, len = 0;
1582 if (priv->config & CFG_SPEED_SCAN) {
1583 while (priv->speed_scan[pos] != 0)
1584 len += sprintf(&buf[len], "%d ",
1585 priv->speed_scan[pos++]);
1586 return len + sprintf(&buf[len], "\n");
1587 }
1588
1589 return sprintf(buf, "0\n");
1590 }
1591
1592 static ssize_t store_speed_scan(struct device *d, struct device_attribute *attr,
1593 const char *buf, size_t count)
1594 {
1595 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1596 int channel, pos = 0;
1597 const char *p = buf;
1598
1599 /* list of space-separated channels to scan, optionally ending with 0 */
1600 while ((channel = simple_strtol(p, NULL, 0))) {
1601 if (pos == MAX_SPEED_SCAN - 1) {
1602 priv->speed_scan[pos] = 0;
1603 break;
1604 }
1605
1606 if (ipw_is_valid_channel(priv->ieee, channel))
1607 priv->speed_scan[pos++] = channel;
1608 else
1609 IPW_WARNING("Skipping invalid channel request: %d\n",
1610 channel);
1611 p = strchr(p, ' ');
1612 if (!p)
1613 break;
1614 while (*p == ' ' || *p == '\t')
1615 p++;
1616 }
1617
1618 if (pos == 0)
1619 priv->config &= ~CFG_SPEED_SCAN;
1620 else {
1621 priv->speed_scan_pos = 0;
1622 priv->config |= CFG_SPEED_SCAN;
1623 }
1624
1625 return count;
1626 }
1627
1628 static DEVICE_ATTR(speed_scan, S_IWUSR | S_IRUGO, show_speed_scan,
1629 store_speed_scan);
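
/*
 * Usage sketch (path is illustrative): restrict scanning to a short,
 * space-separated channel list, or write "0" to clear the list:
 *
 *	echo "1 6 11" > /sys/bus/pci/devices/<bdf>/speed_scan
 *	echo "0" > /sys/bus/pci/devices/<bdf>/speed_scan
 *
 * Invalid channels are skipped with a warning; an empty list clears
 * CFG_SPEED_SCAN, per store_speed_scan() above.
 */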
1630
1631 static ssize_t show_net_stats(struct device *d, struct device_attribute *attr,
1632 char *buf)
1633 {
1634 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1635 return sprintf(buf, "%c\n", (priv->config & CFG_NET_STATS) ? '1' : '0');
1636 }
1637
1638 static ssize_t store_net_stats(struct device *d, struct device_attribute *attr,
1639 const char *buf, size_t count)
1640 {
1641 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1642 if (buf[0] == '1')
1643 priv->config |= CFG_NET_STATS;
1644 else
1645 priv->config &= ~CFG_NET_STATS;
1646
1647 return count;
1648 }
1649
1650 static DEVICE_ATTR(net_stats, S_IWUSR | S_IRUGO,
1651 show_net_stats, store_net_stats);
1652
1653 static void notify_wx_assoc_event(struct ipw_priv *priv)
1654 {
1655 union iwreq_data wrqu;
1656 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
1657 if (priv->status & STATUS_ASSOCIATED)
1658 memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN);
1659 else
1660 memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
1661 wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
1662 }
1663
1664 static void ipw_irq_tasklet(struct ipw_priv *priv)
1665 {
1666 u32 inta, inta_mask, handled = 0;
1667 unsigned long flags;
1668 int rc = 0;
1669
1670 spin_lock_irqsave(&priv->lock, flags);
1671
1672 inta = ipw_read32(priv, IPW_INTA_RW);
1673 inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
1674 inta &= (IPW_INTA_MASK_ALL & inta_mask);
1675
1676 /* Add any cached INTA values that need to be handled */
1677 inta |= priv->isr_inta;
1678
1679 /* handle each of the reasons the interrupt was raised */
1680 if (inta & IPW_INTA_BIT_RX_TRANSFER) {
1681 ipw_rx(priv);
1682 handled |= IPW_INTA_BIT_RX_TRANSFER;
1683 }
1684
1685 if (inta & IPW_INTA_BIT_TX_CMD_QUEUE) {
1686 IPW_DEBUG_HC("Command completed.\n");
1687 rc = ipw_queue_tx_reclaim(priv, &priv->txq_cmd, -1);
1688 priv->status &= ~STATUS_HCMD_ACTIVE;
1689 wake_up_interruptible(&priv->wait_command_queue);
1690 handled |= IPW_INTA_BIT_TX_CMD_QUEUE;
1691 }
1692
1693 if (inta & IPW_INTA_BIT_TX_QUEUE_1) {
1694 IPW_DEBUG_TX("TX_QUEUE_1\n");
1695 rc = ipw_queue_tx_reclaim(priv, &priv->txq[0], 0);
1696 handled |= IPW_INTA_BIT_TX_QUEUE_1;
1697 }
1698
1699 if (inta & IPW_INTA_BIT_TX_QUEUE_2) {
1700 IPW_DEBUG_TX("TX_QUEUE_2\n");
1701 rc = ipw_queue_tx_reclaim(priv, &priv->txq[1], 1);
1702 handled |= IPW_INTA_BIT_TX_QUEUE_2;
1703 }
1704
1705 if (inta & IPW_INTA_BIT_TX_QUEUE_3) {
1706 IPW_DEBUG_TX("TX_QUEUE_3\n");
1707 rc = ipw_queue_tx_reclaim(priv, &priv->txq[2], 2);
1708 handled |= IPW_INTA_BIT_TX_QUEUE_3;
1709 }
1710
1711 if (inta & IPW_INTA_BIT_TX_QUEUE_4) {
1712 IPW_DEBUG_TX("TX_QUEUE_4\n");
1713 rc = ipw_queue_tx_reclaim(priv, &priv->txq[3], 3);
1714 handled |= IPW_INTA_BIT_TX_QUEUE_4;
1715 }
1716
1717 if (inta & IPW_INTA_BIT_STATUS_CHANGE) {
1718 IPW_WARNING("STATUS_CHANGE\n");
1719 handled |= IPW_INTA_BIT_STATUS_CHANGE;
1720 }
1721
1722 if (inta & IPW_INTA_BIT_BEACON_PERIOD_EXPIRED) {
1723 IPW_WARNING("TX_PERIOD_EXPIRED\n");
1724 handled |= IPW_INTA_BIT_BEACON_PERIOD_EXPIRED;
1725 }
1726
1727 if (inta & IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE) {
1728 IPW_WARNING("HOST_CMD_DONE\n");
1729 handled |= IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE;
1730 }
1731
1732 if (inta & IPW_INTA_BIT_FW_INITIALIZATION_DONE) {
1733 IPW_WARNING("FW_INITIALIZATION_DONE\n");
1734 handled |= IPW_INTA_BIT_FW_INITIALIZATION_DONE;
1735 }
1736
1737 if (inta & IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE) {
1738 IPW_WARNING("PHY_OFF_DONE\n");
1739 handled |= IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE;
1740 }
1741
1742 if (inta & IPW_INTA_BIT_RF_KILL_DONE) {
1743 IPW_DEBUG_RF_KILL("RF_KILL_DONE\n");
1744 priv->status |= STATUS_RF_KILL_HW;
1745 wake_up_interruptible(&priv->wait_command_queue);
1746 priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING);
1747 cancel_delayed_work(&priv->request_scan);
1748 schedule_work(&priv->link_down);
1749 queue_delayed_work(priv->workqueue, &priv->rf_kill, 2 * HZ);
1750 handled |= IPW_INTA_BIT_RF_KILL_DONE;
1751 }
1752
1753 if (inta & IPW_INTA_BIT_FATAL_ERROR) {
1754 IPW_ERROR("Firmware error detected. Restarting.\n");
1755 if (priv->error) {
1756 IPW_ERROR("Sysfs 'error' log already exists.\n");
1757 #ifdef CONFIG_IPW2200_DEBUG
1758 if (ipw_debug_level & IPW_DL_FW_ERRORS) {
1759 struct ipw_fw_error *error =
1760 ipw_alloc_error_log(priv);
1761 ipw_dump_error_log(priv, error);
1762 if (error)
1763 ipw_free_error_log(error);
1764 }
1765 #endif
1766 } else {
1767 priv->error = ipw_alloc_error_log(priv);
1768 if (priv->error)
1769 IPW_ERROR("Sysfs 'error' log captured.\n");
1770 else
1771 IPW_ERROR("Error allocating sysfs 'error' "
1772 "log.\n");
1773 #ifdef CONFIG_IPW2200_DEBUG
1774 if (ipw_debug_level & IPW_DL_FW_ERRORS)
1775 ipw_dump_error_log(priv, priv->error);
1776 #endif
1777 }
1778
1779 /* XXX: If hardware encryption is for WPA/WPA2,
1780 * we have to notify the supplicant. */
1781 if (priv->ieee->sec.encrypt) {
1782 priv->status &= ~STATUS_ASSOCIATED;
1783 notify_wx_assoc_event(priv);
1784 }
1785
1786 /* Keep the restart process from trying to send host
1787 * commands by clearing the INIT status bit */
1788 priv->status &= ~STATUS_INIT;
1789
1790 /* Cancel currently queued command. */
1791 priv->status &= ~STATUS_HCMD_ACTIVE;
1792 wake_up_interruptible(&priv->wait_command_queue);
1793
1794 queue_work(priv->workqueue, &priv->adapter_restart);
1795 handled |= IPW_INTA_BIT_FATAL_ERROR;
1796 }
1797
1798 if (inta & IPW_INTA_BIT_PARITY_ERROR) {
1799 IPW_ERROR("Parity error\n");
1800 handled |= IPW_INTA_BIT_PARITY_ERROR;
1801 }
1802
1803 if (handled != inta) {
1804 IPW_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
1805 }
1806
1807 /* enable all interrupts */
1808 ipw_enable_interrupts(priv);
1809
1810 spin_unlock_irqrestore(&priv->lock, flags);
1811 }
1812
1813 #define IPW_CMD(x) case IPW_CMD_ ## x : return #x
1814 static char *get_cmd_string(u8 cmd)
1815 {
1816 switch (cmd) {
1817 IPW_CMD(HOST_COMPLETE);
1818 IPW_CMD(POWER_DOWN);
1819 IPW_CMD(SYSTEM_CONFIG);
1820 IPW_CMD(MULTICAST_ADDRESS);
1821 IPW_CMD(SSID);
1822 IPW_CMD(ADAPTER_ADDRESS);
1823 IPW_CMD(PORT_TYPE);
1824 IPW_CMD(RTS_THRESHOLD);
1825 IPW_CMD(FRAG_THRESHOLD);
1826 IPW_CMD(POWER_MODE);
1827 IPW_CMD(WEP_KEY);
1828 IPW_CMD(TGI_TX_KEY);
1829 IPW_CMD(SCAN_REQUEST);
1830 IPW_CMD(SCAN_REQUEST_EXT);
1831 IPW_CMD(ASSOCIATE);
1832 IPW_CMD(SUPPORTED_RATES);
1833 IPW_CMD(SCAN_ABORT);
1834 IPW_CMD(TX_FLUSH);
1835 IPW_CMD(QOS_PARAMETERS);
1836 IPW_CMD(DINO_CONFIG);
1837 IPW_CMD(RSN_CAPABILITIES);
1838 IPW_CMD(RX_KEY);
1839 IPW_CMD(CARD_DISABLE);
1840 IPW_CMD(SEED_NUMBER);
1841 IPW_CMD(TX_POWER);
1842 IPW_CMD(COUNTRY_INFO);
1843 IPW_CMD(AIRONET_INFO);
1844 IPW_CMD(AP_TX_POWER);
1845 IPW_CMD(CCKM_INFO);
1846 IPW_CMD(CCX_VER_INFO);
1847 IPW_CMD(SET_CALIBRATION);
1848 IPW_CMD(SENSITIVITY_CALIB);
1849 IPW_CMD(RETRY_LIMIT);
1850 IPW_CMD(IPW_PRE_POWER_DOWN);
1851 IPW_CMD(VAP_BEACON_TEMPLATE);
1852 IPW_CMD(VAP_DTIM_PERIOD);
1853 IPW_CMD(EXT_SUPPORTED_RATES);
1854 IPW_CMD(VAP_LOCAL_TX_PWR_CONSTRAINT);
1855 IPW_CMD(VAP_QUIET_INTERVALS);
1856 IPW_CMD(VAP_CHANNEL_SWITCH);
1857 IPW_CMD(VAP_MANDATORY_CHANNELS);
1858 IPW_CMD(VAP_CELL_PWR_LIMIT);
1859 IPW_CMD(VAP_CF_PARAM_SET);
1860 IPW_CMD(VAP_SET_BEACONING_STATE);
1861 IPW_CMD(MEASUREMENT);
1862 IPW_CMD(POWER_CAPABILITY);
1863 IPW_CMD(SUPPORTED_CHANNELS);
1864 IPW_CMD(TPC_REPORT);
1865 IPW_CMD(WME_INFO);
1866 IPW_CMD(PRODUCTION_COMMAND);
1867 default:
1868 return "UNKNOWN";
1869 }
1870 }
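/* For reference, each IPW_CMD() entry above expands to a case label plus the
 * stringified command name, e.g. IPW_CMD(SSID) becomes:
 *
 *	case IPW_CMD_SSID: return "SSID";
 */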
1871
1872 #define HOST_COMPLETE_TIMEOUT HZ
1873 static int ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
1874 {
1875 int rc = 0;
1876 unsigned long flags;
1877
1878 spin_lock_irqsave(&priv->lock, flags);
1879 if (priv->status & STATUS_HCMD_ACTIVE) {
1880 IPW_ERROR("Failed to send %s: Already sending a command.\n",
1881 get_cmd_string(cmd->cmd));
1882 spin_unlock_irqrestore(&priv->lock, flags);
1883 return -EAGAIN;
1884 }
1885
1886 priv->status |= STATUS_HCMD_ACTIVE;
1887
1888 if (priv->cmdlog) {
1889 priv->cmdlog[priv->cmdlog_pos].jiffies = jiffies;
1890 priv->cmdlog[priv->cmdlog_pos].cmd.cmd = cmd->cmd;
1891 priv->cmdlog[priv->cmdlog_pos].cmd.len = cmd->len;
1892 memcpy(priv->cmdlog[priv->cmdlog_pos].cmd.param, cmd->param,
1893 cmd->len);
1894 priv->cmdlog[priv->cmdlog_pos].retcode = -1;
1895 }
1896
1897 IPW_DEBUG_HC("%s command (#%d) %d bytes: 0x%08X\n",
1898 get_cmd_string(cmd->cmd), cmd->cmd, cmd->len,
1899 priv->status);
1900 printk_buf(IPW_DL_HOST_COMMAND, (u8 *) cmd->param, cmd->len);
1901
1902 rc = ipw_queue_tx_hcmd(priv, cmd->cmd, &cmd->param, cmd->len, 0);
1903 if (rc) {
1904 priv->status &= ~STATUS_HCMD_ACTIVE;
1905 IPW_ERROR("Failed to send %s: Reason %d\n",
1906 get_cmd_string(cmd->cmd), rc);
1907 spin_unlock_irqrestore(&priv->lock, flags);
1908 goto exit;
1909 }
1910 spin_unlock_irqrestore(&priv->lock, flags);
1911
1912 rc = wait_event_interruptible_timeout(priv->wait_command_queue,
1913 !(priv->
1914 status & STATUS_HCMD_ACTIVE),
1915 HOST_COMPLETE_TIMEOUT);
1916 if (rc == 0) {
1917 spin_lock_irqsave(&priv->lock, flags);
1918 if (priv->status & STATUS_HCMD_ACTIVE) {
1919 IPW_ERROR("Failed to send %s: Command timed out.\n",
1920 get_cmd_string(cmd->cmd));
1921 priv->status &= ~STATUS_HCMD_ACTIVE;
1922 spin_unlock_irqrestore(&priv->lock, flags);
1923 rc = -EIO;
1924 goto exit;
1925 }
1926 spin_unlock_irqrestore(&priv->lock, flags);
1927 } else
1928 rc = 0;
1929
1930 if (priv->status & STATUS_RF_KILL_HW) {
1931 IPW_ERROR("Failed to send %s: Aborted due to RF kill switch.\n",
1932 get_cmd_string(cmd->cmd));
1933 rc = -EIO;
1934 goto exit;
1935 }
1936
1937 exit:
1938 if (priv->cmdlog) {
1939 priv->cmdlog[priv->cmdlog_pos++].retcode = rc;
1940 priv->cmdlog_pos %= priv->cmdlog_len;
1941 }
1942 return rc;
1943 }
1944
1945 static int ipw_send_host_complete(struct ipw_priv *priv)
1946 {
1947 struct host_cmd cmd = {
1948 .cmd = IPW_CMD_HOST_COMPLETE,
1949 .len = 0
1950 };
1951
1952 if (!priv) {
1953 IPW_ERROR("Invalid args\n");
1954 return -1;
1955 }
1956
1957 return ipw_send_cmd(priv, &cmd);
1958 }
1959
1960 static int ipw_send_system_config(struct ipw_priv *priv,
1961 struct ipw_sys_config *config)
1962 {
1963 struct host_cmd cmd = {
1964 .cmd = IPW_CMD_SYSTEM_CONFIG,
1965 .len = sizeof(*config)
1966 };
1967
1968 if (!priv || !config) {
1969 IPW_ERROR("Invalid args\n");
1970 return -1;
1971 }
1972
1973 memcpy(cmd.param, config, sizeof(*config));
1974 return ipw_send_cmd(priv, &cmd);
1975 }
1976
1977 static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len)
1978 {
1979 struct host_cmd cmd = {
1980 .cmd = IPW_CMD_SSID,
1981 .len = min(len, IW_ESSID_MAX_SIZE)
1982 };
1983
1984 if (!priv || !ssid) {
1985 IPW_ERROR("Invalid args\n");
1986 return -1;
1987 }
1988
1989 memcpy(cmd.param, ssid, cmd.len);
1990 return ipw_send_cmd(priv, &cmd);
1991 }
1992
1993 static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac)
1994 {
1995 struct host_cmd cmd = {
1996 .cmd = IPW_CMD_ADAPTER_ADDRESS,
1997 .len = ETH_ALEN
1998 };
1999
2000 if (!priv || !mac) {
2001 IPW_ERROR("Invalid args\n");
2002 return -1;
2003 }
2004
2005 IPW_DEBUG_INFO("%s: Setting MAC to " MAC_FMT "\n",
2006 priv->net_dev->name, MAC_ARG(mac));
2007
2008 memcpy(cmd.param, mac, ETH_ALEN);
2009 return ipw_send_cmd(priv, &cmd);
2010 }
2011
2012 /*
2013 * NOTE: This must be executed from our workqueue as it results in udelay
2014 * being called which may corrupt the keyboard if executed on the
2015 * default workqueue
2016 */
2017 static void ipw_adapter_restart(void *adapter)
2018 {
2019 struct ipw_priv *priv = adapter;
2020
2021 if (priv->status & STATUS_RF_KILL_MASK)
2022 return;
2023
2024 ipw_down(priv);
2025
2026 if (priv->assoc_network &&
2027 (priv->assoc_network->capability & WLAN_CAPABILITY_IBSS))
2028 ipw_remove_current_network(priv);
2029
2030 if (ipw_up(priv)) {
2031 IPW_ERROR("Failed to up device\n");
2032 return;
2033 }
2034 }
2035
2036 static void ipw_bg_adapter_restart(void *data)
2037 {
2038 struct ipw_priv *priv = data;
2039 down(&priv->sem);
2040 ipw_adapter_restart(data);
2041 up(&priv->sem);
2042 }
2043
2044 #define IPW_SCAN_CHECK_WATCHDOG (5 * HZ)
2045
2046 static void ipw_scan_check(void *data)
2047 {
2048 struct ipw_priv *priv = data;
2049 if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) {
2050 IPW_DEBUG_SCAN("Scan completion watchdog resetting "
2051 "adapter (%dms).\n",
2052 jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
2053 queue_work(priv->workqueue, &priv->adapter_restart);
2054 }
2055 }
2056
2057 static void ipw_bg_scan_check(void *data)
2058 {
2059 struct ipw_priv *priv = data;
2060 down(&priv->sem);
2061 ipw_scan_check(data);
2062 up(&priv->sem);
2063 }
2064
2065 static int ipw_send_scan_request_ext(struct ipw_priv *priv,
2066 struct ipw_scan_request_ext *request)
2067 {
2068 struct host_cmd cmd = {
2069 .cmd = IPW_CMD_SCAN_REQUEST_EXT,
2070 .len = sizeof(*request)
2071 };
2072
2073 memcpy(cmd.param, request, sizeof(*request));
2074 return ipw_send_cmd(priv, &cmd);
2075 }
2076
2077 static int ipw_send_scan_abort(struct ipw_priv *priv)
2078 {
2079 struct host_cmd cmd = {
2080 .cmd = IPW_CMD_SCAN_ABORT,
2081 .len = 0
2082 };
2083
2084 if (!priv) {
2085 IPW_ERROR("Invalid args\n");
2086 return -1;
2087 }
2088
2089 return ipw_send_cmd(priv, &cmd);
2090 }
2091
2092 static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens)
2093 {
2094 struct host_cmd cmd = {
2095 .cmd = IPW_CMD_SENSITIVITY_CALIB,
2096 .len = sizeof(struct ipw_sensitivity_calib)
2097 };
2098 struct ipw_sensitivity_calib *calib = (struct ipw_sensitivity_calib *)
2099 &cmd.param;
2100 calib->beacon_rssi_raw = sens;
2101 return ipw_send_cmd(priv, &cmd);
2102 }
2103
2104 static int ipw_send_associate(struct ipw_priv *priv,
2105 struct ipw_associate *associate)
2106 {
2107 struct host_cmd cmd = {
2108 .cmd = IPW_CMD_ASSOCIATE,
2109 .len = sizeof(*associate)
2110 };
2111
2112 struct ipw_associate tmp_associate;
2113 if (!priv || !associate) {
2114 IPW_ERROR("Invalid args\n");
2115 return -1;
2116 }
2117 
2118 memcpy(&tmp_associate, associate, sizeof(*associate));
2119 tmp_associate.policy_support =
2120 cpu_to_le16(tmp_associate.policy_support);
2121 tmp_associate.assoc_tsf_msw = cpu_to_le32(tmp_associate.assoc_tsf_msw);
2122 tmp_associate.assoc_tsf_lsw = cpu_to_le32(tmp_associate.assoc_tsf_lsw);
2123 tmp_associate.capability = cpu_to_le16(tmp_associate.capability);
2124 tmp_associate.listen_interval =
2125 cpu_to_le16(tmp_associate.listen_interval);
2126 tmp_associate.beacon_interval =
2127 cpu_to_le16(tmp_associate.beacon_interval);
2128 tmp_associate.atim_window = cpu_to_le16(tmp_associate.atim_window);
2129
2130 memcpy(cmd.param, &tmp_associate, sizeof(*associate));
2131 return ipw_send_cmd(priv, &cmd);
2132 }
2133
2134 static int ipw_send_supported_rates(struct ipw_priv *priv,
2135 struct ipw_supported_rates *rates)
2136 {
2137 struct host_cmd cmd = {
2138 .cmd = IPW_CMD_SUPPORTED_RATES,
2139 .len = sizeof(*rates)
2140 };
2141
2142 if (!priv || !rates) {
2143 IPW_ERROR("Invalid args\n");
2144 return -1;
2145 }
2146
2147 memcpy(cmd.param, rates, sizeof(*rates));
2148 return ipw_send_cmd(priv, &cmd);
2149 }
2150
2151 static int ipw_set_random_seed(struct ipw_priv *priv)
2152 {
2153 struct host_cmd cmd = {
2154 .cmd = IPW_CMD_SEED_NUMBER,
2155 .len = sizeof(u32)
2156 };
2157
2158 if (!priv) {
2159 IPW_ERROR("Invalid args\n");
2160 return -1;
2161 }
2162
2163 get_random_bytes(&cmd.param, sizeof(u32));
2164
2165 return ipw_send_cmd(priv, &cmd);
2166 }
2167
2168 static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off)
2169 {
2170 struct host_cmd cmd = {
2171 .cmd = IPW_CMD_CARD_DISABLE,
2172 .len = sizeof(u32)
2173 };
2174
2175 if (!priv) {
2176 IPW_ERROR("Invalid args\n");
2177 return -1;
2178 }
2179
2180 *((u32 *) & cmd.param) = phy_off;
2181
2182 return ipw_send_cmd(priv, &cmd);
2183 }
2184
2185 static int ipw_send_tx_power(struct ipw_priv *priv, struct ipw_tx_power *power)
2186 {
2187 struct host_cmd cmd = {
2188 .cmd = IPW_CMD_TX_POWER,
2189 .len = sizeof(*power)
2190 };
2191
2192 if (!priv || !power) {
2193 IPW_ERROR("Invalid args\n");
2194 return -1;
2195 }
2196
2197 memcpy(cmd.param, power, sizeof(*power));
2198 return ipw_send_cmd(priv, &cmd);
2199 }
2200
2201 static int ipw_set_tx_power(struct ipw_priv *priv)
2202 {
2203 const struct ieee80211_geo *geo = ipw_get_geo(priv->ieee);
2204 struct ipw_tx_power tx_power;
2205 s8 max_power;
2206 int i;
2207
2208 memset(&tx_power, 0, sizeof(tx_power));
2209
2210 /* configure device for 'G' band */
2211 tx_power.ieee_mode = IPW_G_MODE;
2212 tx_power.num_channels = geo->bg_channels;
2213 for (i = 0; i < geo->bg_channels; i++) {
2214 max_power = geo->bg[i].max_power;
2215 tx_power.channels_tx_power[i].channel_number =
2216 geo->bg[i].channel;
2217 tx_power.channels_tx_power[i].tx_power = max_power ?
2218 min(max_power, priv->tx_power) : priv->tx_power;
2219 }
2220 if (ipw_send_tx_power(priv, &tx_power))
2221 return -EIO;
2222
2223 /* configure device to also handle 'B' band */
2224 tx_power.ieee_mode = IPW_B_MODE;
2225 if (ipw_send_tx_power(priv, &tx_power))
2226 return -EIO;
2227
2228 /* configure device to also handle 'A' band */
2229 if (priv->ieee->abg_true) {
2230 tx_power.ieee_mode = IPW_A_MODE;
2231 tx_power.num_channels = geo->a_channels;
2232 for (i = 0; i < tx_power.num_channels; i++) {
2233 max_power = geo->a[i].max_power;
2234 tx_power.channels_tx_power[i].channel_number =
2235 geo->a[i].channel;
2236 tx_power.channels_tx_power[i].tx_power = max_power ?
2237 min(max_power, priv->tx_power) : priv->tx_power;
2238 }
2239 if (ipw_send_tx_power(priv, &tx_power))
2240 return -EIO;
2241 }
2242 return 0;
2243 }
2244
2245 static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts)
2246 {
2247 struct ipw_rts_threshold rts_threshold = {
2248 .rts_threshold = rts,
2249 };
2250 struct host_cmd cmd = {
2251 .cmd = IPW_CMD_RTS_THRESHOLD,
2252 .len = sizeof(rts_threshold)
2253 };
2254
2255 if (!priv) {
2256 IPW_ERROR("Invalid args\n");
2257 return -1;
2258 }
2259
2260 memcpy(cmd.param, &rts_threshold, sizeof(rts_threshold));
2261 return ipw_send_cmd(priv, &cmd);
2262 }
2263
2264 static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag)
2265 {
2266 struct ipw_frag_threshold frag_threshold = {
2267 .frag_threshold = frag,
2268 };
2269 struct host_cmd cmd = {
2270 .cmd = IPW_CMD_FRAG_THRESHOLD,
2271 .len = sizeof(frag_threshold)
2272 };
2273
2274 if (!priv) {
2275 IPW_ERROR("Invalid args\n");
2276 return -1;
2277 }
2278
2279 memcpy(cmd.param, &frag_threshold, sizeof(frag_threshold));
2280 return ipw_send_cmd(priv, &cmd);
2281 }
2282
2283 static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode)
2284 {
2285 struct host_cmd cmd = {
2286 .cmd = IPW_CMD_POWER_MODE,
2287 .len = sizeof(u32)
2288 };
2289 u32 *param = (u32 *) (&cmd.param);
2290
2291 if (!priv) {
2292 IPW_ERROR("Invalid args\n");
2293 return -1;
2294 }
2295
2296 /* If on battery, set to power index 3; if on AC, set to CAM;
2297 * otherwise use the requested level */
2298 switch (mode) {
2299 case IPW_POWER_BATTERY:
2300 *param = IPW_POWER_INDEX_3;
2301 break;
2302 case IPW_POWER_AC:
2303 *param = IPW_POWER_MODE_CAM;
2304 break;
2305 default:
2306 *param = mode;
2307 break;
2308 }
2309
2310 return ipw_send_cmd(priv, &cmd);
2311 }
2312
2313 static int ipw_send_retry_limit(struct ipw_priv *priv, u8 slimit, u8 llimit)
2314 {
2315 struct ipw_retry_limit retry_limit = {
2316 .short_retry_limit = slimit,
2317 .long_retry_limit = llimit
2318 };
2319 struct host_cmd cmd = {
2320 .cmd = IPW_CMD_RETRY_LIMIT,
2321 .len = sizeof(retry_limit)
2322 };
2323
2324 if (!priv) {
2325 IPW_ERROR("Invalid args\n");
2326 return -1;
2327 }
2328
2329 memcpy(cmd.param, &retry_limit, sizeof(retry_limit));
2330 return ipw_send_cmd(priv, &cmd);
2331 }
2332
2333 /*
2334 * The IPW device contains a Microwire compatible EEPROM that stores
2335 * various data like the MAC address. Usually the firmware has exclusive
2336 * access to the eeprom, but during device initialization (before the
2337 * device driver has sent the HostComplete command to the firmware) the
2338 * device driver has read access to the EEPROM by way of indirect addressing
2339 * through a couple of memory mapped registers.
2340 *
2341 * The following is a simplified implementation for pulling data out of
2342 * the eeprom, along with some helper functions to find information in
2343 * the per-device private data's copy of the eeprom.
2344 *
2345 * NOTE: To better understand how these functions work (i.e. what is a chip
2346 * select and why do we have to keep driving the eeprom clock?), read
2347 * just about any data sheet for a Microwire compatible EEPROM.
2348 */
2349
2350 /* write a 32 bit value into the indirect accessor register */
2351 static inline void eeprom_write_reg(struct ipw_priv *p, u32 data)
2352 {
2353 ipw_write_reg32(p, FW_MEM_REG_EEPROM_ACCESS, data);
2354
2355 /* the eeprom requires some time to complete the operation */
2356 udelay(p->eeprom_delay);
2357
2358 return;
2359 }
2360
2361 /* perform a chip select operation */
2362 static void eeprom_cs(struct ipw_priv *priv)
2363 {
2364 eeprom_write_reg(priv, 0);
2365 eeprom_write_reg(priv, EEPROM_BIT_CS);
2366 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2367 eeprom_write_reg(priv, EEPROM_BIT_CS);
2368 }
2369
2370 /* deassert the eeprom chip select */
2371 static void eeprom_disable_cs(struct ipw_priv *priv)
2372 {
2373 eeprom_write_reg(priv, EEPROM_BIT_CS);
2374 eeprom_write_reg(priv, 0);
2375 eeprom_write_reg(priv, EEPROM_BIT_SK);
2376 }
2377
2378 /* push a single bit down to the eeprom */
2379 static inline void eeprom_write_bit(struct ipw_priv *p, u8 bit)
2380 {
2381 int d = (bit ? EEPROM_BIT_DI : 0);
2382 eeprom_write_reg(p, EEPROM_BIT_CS | d);
2383 eeprom_write_reg(p, EEPROM_BIT_CS | d | EEPROM_BIT_SK);
2384 }
2385
2386 /* push an opcode followed by an address down to the eeprom */
2387 static void eeprom_op(struct ipw_priv *priv, u8 op, u8 addr)
2388 {
2389 int i;
2390
2391 eeprom_cs(priv);
2392 eeprom_write_bit(priv, 1);
2393 eeprom_write_bit(priv, op & 2);
2394 eeprom_write_bit(priv, op & 1);
2395 for (i = 7; i >= 0; i--) {
2396 eeprom_write_bit(priv, addr & (1 << i));
2397 }
2398 }
2399
2400 /* pull 16 bits off the eeprom, one bit at a time */
2401 static u16 eeprom_read_u16(struct ipw_priv *priv, u8 addr)
2402 {
2403 int i;
2404 u16 r = 0;
2405
2406 /* Send READ Opcode */
2407 eeprom_op(priv, EEPROM_CMD_READ, addr);
2408
2409 /* Send dummy bit */
2410 eeprom_write_reg(priv, EEPROM_BIT_CS);
2411
2412 /* Read the byte off the eeprom one bit at a time */
2413 for (i = 0; i < 16; i++) {
2414 u32 data = 0;
2415 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2416 eeprom_write_reg(priv, EEPROM_BIT_CS);
2417 data = ipw_read_reg32(priv, FW_MEM_REG_EEPROM_ACCESS);
2418 r = (r << 1) | ((data & EEPROM_BIT_DO) ? 1 : 0);
2419 }
2420
2421 /* Send another dummy bit */
2422 eeprom_write_reg(priv, 0);
2423 eeprom_disable_cs(priv);
2424
2425 return r;
2426 }
2427
2428 /* helper function for pulling the mac address out of the private */
2429 /* data's copy of the eeprom data */
2430 static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac)
2431 {
2432 memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], 6);
2433 }
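/* Illustrative sketch (hypothetical helper, never called by the driver):
 * once ipw_eeprom_init_sram() below has filled priv->eeprom, any other field
 * can be pulled out of the cached image the same way eeprom_parse_mac()
 * does; EEPROM_VERSION is the byte offset also used further below. */
static inline u8 eeprom_parse_version_example(struct ipw_priv *priv)
{
/* a single byte at the EEPROM_VERSION offset of the cached image */
return priv->eeprom[EEPROM_VERSION];
}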
2434
2435 /*
2436 * Either the device driver (i.e. the host) or the firmware can
2437 * load eeprom data into the designated region in SRAM. If neither
2438 * happens then the FW will shutdown with a fatal error.
2439 *
2440 * In order to signal the FW to load the EEPROM itself, the
2441 * EEPROM_LOAD_DISABLE region of shared SRAM needs to be non-zero.
2442 */
2443 static void ipw_eeprom_init_sram(struct ipw_priv *priv)
2444 {
2445 int i;
2446 u16 *eeprom = (u16 *) priv->eeprom;
2447
2448 IPW_DEBUG_TRACE(">>\n");
2449
2450 /* read entire contents of eeprom into private buffer */
2451 for (i = 0; i < 128; i++)
2452 eeprom[i] = le16_to_cpu(eeprom_read_u16(priv, (u8) i));
2453
2454 /*
2455 If the data looks correct, then copy it to the shared SRAM.
2456 Otherwise let the firmware know it needs to perform the
2457 operation on its own.
2458 */
2459 if (priv->eeprom[EEPROM_VERSION] != 0) {
2460 IPW_DEBUG_INFO("Writing EEPROM data into SRAM\n");
2461
2462 /* write the eeprom data to sram */
2463 for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
2464 ipw_write8(priv, IPW_EEPROM_DATA + i, priv->eeprom[i]);
2465
2466 /* Do not load eeprom data on fatal error or suspend */
2467 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
2468 } else {
2469 IPW_DEBUG_INFO("Enabling FW initialization of SRAM\n");
2470
2471 /* Load eeprom data on fatal error or suspend */
2472 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 1);
2473 }
2474
2475 IPW_DEBUG_TRACE("<<\n");
2476 }
2477
2478 static void ipw_zero_memory(struct ipw_priv *priv, u32 start, u32 count)
2479 {
2480 count >>= 2;
2481 if (!count)
2482 return;
2483 _ipw_write32(priv, IPW_AUTOINC_ADDR, start);
2484 while (count--)
2485 _ipw_write32(priv, IPW_AUTOINC_DATA, 0);
2486 }
2487
2488 static inline void ipw_fw_dma_reset_command_blocks(struct ipw_priv *priv)
2489 {
2490 ipw_zero_memory(priv, IPW_SHARED_SRAM_DMA_CONTROL,
2491 CB_NUMBER_OF_ELEMENTS_SMALL *
2492 sizeof(struct command_block));
2493 }
2494
2495 static int ipw_fw_dma_enable(struct ipw_priv *priv)
2496 { /* start dma engine but no transfers yet */
2497
2498 IPW_DEBUG_FW(">> : \n");
2499
2500 /* Start the dma */
2501 ipw_fw_dma_reset_command_blocks(priv);
2502
2503 /* Write CB base address */
2504 ipw_write_reg32(priv, IPW_DMA_I_CB_BASE, IPW_SHARED_SRAM_DMA_CONTROL);
2505
2506 IPW_DEBUG_FW("<< : \n");
2507 return 0;
2508 }
2509
2510 static void ipw_fw_dma_abort(struct ipw_priv *priv)
2511 {
2512 u32 control = 0;
2513
2514 IPW_DEBUG_FW(">> :\n");
2515
2516 //set the Stop and Abort bit
2517 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_STOP_AND_ABORT;
2518 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2519 priv->sram_desc.last_cb_index = 0;
2520
2521 IPW_DEBUG_FW("<< \n");
2522 }
2523
2524 static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index,
2525 struct command_block *cb)
2526 {
2527 u32 address =
2528 IPW_SHARED_SRAM_DMA_CONTROL +
2529 (sizeof(struct command_block) * index);
2530 IPW_DEBUG_FW(">> :\n");
2531
2532 ipw_write_indirect(priv, address, (u8 *) cb,
2533 (int)sizeof(struct command_block));
2534
2535 IPW_DEBUG_FW("<< :\n");
2536 return 0;
2537
2538 }
2539
2540 static int ipw_fw_dma_kick(struct ipw_priv *priv)
2541 {
2542 u32 control = 0;
2543 u32 index = 0;
2544
2545 IPW_DEBUG_FW(">> :\n");
2546
2547 for (index = 0; index < priv->sram_desc.last_cb_index; index++)
2548 ipw_fw_dma_write_command_block(priv, index,
2549 &priv->sram_desc.cb_list[index]);
2550
2551 /* Enable the DMA in the CSR register */
2552 ipw_clear_bit(priv, IPW_RESET_REG,
2553 IPW_RESET_REG_MASTER_DISABLED |
2554 IPW_RESET_REG_STOP_MASTER);
2555
2556 /* Set the Start bit. */
2557 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_START;
2558 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2559
2560 IPW_DEBUG_FW("<< :\n");
2561 return 0;
2562 }
2563
2564 static void ipw_fw_dma_dump_command_block(struct ipw_priv *priv)
2565 {
2566 u32 address;
2567 u32 register_value = 0;
2568 u32 cb_fields_address = 0;
2569
2570 IPW_DEBUG_FW(">> :\n");
2571 address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2572 IPW_DEBUG_FW_INFO("Current CB is 0x%x \n", address);
2573
2574 /* Read the DMA Control register */
2575 register_value = ipw_read_reg32(priv, IPW_DMA_I_DMA_CONTROL);
2576 IPW_DEBUG_FW_INFO("IPW_DMA_I_DMA_CONTROL is 0x%x \n", register_value);
2577
2578 /* Print the CB values */
2579 cb_fields_address = address;
2580 register_value = ipw_read_reg32(priv, cb_fields_address);
2581 IPW_DEBUG_FW_INFO("Current CB ControlField is 0x%x \n", register_value);
2582
2583 cb_fields_address += sizeof(u32);
2584 register_value = ipw_read_reg32(priv, cb_fields_address);
2585 IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x \n", register_value);
2586
2587 cb_fields_address += sizeof(u32);
2588 register_value = ipw_read_reg32(priv, cb_fields_address);
2589 IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x \n",
2590 register_value);
2591
2592 cb_fields_address += sizeof(u32);
2593 register_value = ipw_read_reg32(priv, cb_fields_address);
2594 IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x \n", register_value);
2595
2596 IPW_DEBUG_FW("<< :\n");
2597 }
2598
2599 static int ipw_fw_dma_command_block_index(struct ipw_priv *priv)
2600 {
2601 u32 current_cb_address = 0;
2602 u32 current_cb_index = 0;
2603
2604 IPW_DEBUG_FW(">> :\n");
2605 current_cb_address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2606
2607 current_cb_index = (current_cb_address - IPW_SHARED_SRAM_DMA_CONTROL) /
2608 sizeof(struct command_block);
2609
2610 IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X \n",
2611 current_cb_index, current_cb_address);
2612
2613 IPW_DEBUG_FW("<< :\n");
2614 return current_cb_index;
2615
2616 }
2617
2618 static int ipw_fw_dma_add_command_block(struct ipw_priv *priv,
2619 u32 src_address,
2620 u32 dest_address,
2621 u32 length,
2622 int interrupt_enabled, int is_last)
2623 {
2624
2625 u32 control = CB_VALID | CB_SRC_LE | CB_DEST_LE | CB_SRC_AUTOINC |
2626 CB_SRC_IO_GATED | CB_DEST_AUTOINC | CB_SRC_SIZE_LONG |
2627 CB_DEST_SIZE_LONG;
2628 struct command_block *cb;
2629 u32 last_cb_element = 0;
2630
2631 IPW_DEBUG_FW_INFO("src_address=0x%x dest_address=0x%x length=0x%x\n",
2632 src_address, dest_address, length);
2633
2634 if (priv->sram_desc.last_cb_index >= CB_NUMBER_OF_ELEMENTS_SMALL)
2635 return -1;
2636
2637 last_cb_element = priv->sram_desc.last_cb_index;
2638 cb = &priv->sram_desc.cb_list[last_cb_element];
2639 priv->sram_desc.last_cb_index++;
2640
2641 /* Calculate the new CB control word */
2642 if (interrupt_enabled)
2643 control |= CB_INT_ENABLED;
2644
2645 if (is_last)
2646 control |= CB_LAST_VALID;
2647
2648 control |= length;
2649
2650 /* Calculate the CB Element's checksum value */
2651 cb->status = control ^ src_address ^ dest_address;
2652
2653 /* Copy the Source and Destination addresses */
2654 cb->dest_addr = dest_address;
2655 cb->source_addr = src_address;
2656
2657 /* Copy the Control Word last */
2658 cb->control = control;
2659
2660 return 0;
2661 }
2662
2663 static int ipw_fw_dma_add_buffer(struct ipw_priv *priv,
2664 u32 src_phys, u32 dest_address, u32 length)
2665 {
2666 u32 bytes_left = length;
2667 u32 src_offset = 0;
2668 u32 dest_offset = 0;
2669 int status = 0;
2670 IPW_DEBUG_FW(">> \n");
2671 IPW_DEBUG_FW_INFO("src_phys=0x%x dest_address=0x%x length=0x%x\n",
2672 src_phys, dest_address, length);
2673 while (bytes_left > CB_MAX_LENGTH) {
2674 status = ipw_fw_dma_add_command_block(priv,
2675 src_phys + src_offset,
2676 dest_address +
2677 dest_offset,
2678 CB_MAX_LENGTH, 0, 0);
2679 if (status) {
2680 IPW_DEBUG_FW_INFO(": Failed\n");
2681 return -1;
2682 } else
2683 IPW_DEBUG_FW_INFO(": Added new cb\n");
2684
2685 src_offset += CB_MAX_LENGTH;
2686 dest_offset += CB_MAX_LENGTH;
2687 bytes_left -= CB_MAX_LENGTH;
2688 }
2689
2690 /* add the buffer tail */
2691 if (bytes_left > 0) {
2692 status =
2693 ipw_fw_dma_add_command_block(priv, src_phys + src_offset,
2694 dest_address + dest_offset,
2695 bytes_left, 0, 0);
2696 if (status) {
2697 IPW_DEBUG_FW_INFO(": Failed on the buffer tail\n");
2698 return -1;
2699 } else
2700 IPW_DEBUG_FW_INFO
2701 (": Adding new cb - the buffer tail\n");
2702 }
2703
2704 IPW_DEBUG_FW("<< \n");
2705 return 0;
2706 }
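/* Illustrative sketch (hypothetical helper, never called): the number of
 * command blocks ipw_fw_dma_add_buffer() above queues for a transfer is
 * simply the length rounded up to CB_MAX_LENGTH sized pieces. */
static inline u32 ipw_fw_dma_cb_count_example(u32 length)
{
/* full-size blocks plus one for any tail, i.e. a round-up division */
return (length + CB_MAX_LENGTH - 1) / CB_MAX_LENGTH;
}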
2707
2708 static int ipw_fw_dma_wait(struct ipw_priv *priv)
2709 {
2710 u32 current_index = 0;
2711 u32 watchdog = 0;
2712
2713 IPW_DEBUG_FW(">> : \n");
2714
2715 current_index = ipw_fw_dma_command_block_index(priv);
2716 IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%8X\n",
2717 (int)priv->sram_desc.last_cb_index);
2718
2719 while (current_index < priv->sram_desc.last_cb_index) {
2720 udelay(50);
2721 current_index = ipw_fw_dma_command_block_index(priv);
2722
2723 watchdog++;
2724
2725 if (watchdog > 400) {
2726 IPW_DEBUG_FW_INFO("Timeout\n");
2727 ipw_fw_dma_dump_command_block(priv);
2728 ipw_fw_dma_abort(priv);
2729 return -1;
2730 }
2731 }
2732
2733 ipw_fw_dma_abort(priv);
2734
2735 /*Disable the DMA in the CSR register */
2736 ipw_set_bit(priv, IPW_RESET_REG,
2737 IPW_RESET_REG_MASTER_DISABLED | IPW_RESET_REG_STOP_MASTER);
2738
2739 IPW_DEBUG_FW("<< dmaWaitSync \n");
2740 return 0;
2741 }
2742
2743 static void ipw_remove_current_network(struct ipw_priv *priv)
2744 {
2745 struct list_head *element, *safe;
2746 struct ieee80211_network *network = NULL;
2747 unsigned long flags;
2748
2749 spin_lock_irqsave(&priv->ieee->lock, flags);
2750 list_for_each_safe(element, safe, &priv->ieee->network_list) {
2751 network = list_entry(element, struct ieee80211_network, list);
2752 if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
2753 list_del(element);
2754 list_add_tail(&network->list,
2755 &priv->ieee->network_free_list);
2756 }
2757 }
2758 spin_unlock_irqrestore(&priv->ieee->lock, flags);
2759 }
2760
2761 /**
2762 * Check that card is still alive.
2763 * Reads debug register from domain0.
2764 * If card is present, pre-defined value should
2765 * be found there.
2766 *
2767 * @param priv
2768 * @return 1 if card is present, 0 otherwise
2769 */
2770 static inline int ipw_alive(struct ipw_priv *priv)
2771 {
2772 return ipw_read32(priv, 0x90) == 0xd55555d5;
2773 }
2774
2775 static int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask,
2776 int timeout)
2777 {
2778 int i = 0;
2779
2780 do {
2781 if ((ipw_read32(priv, addr) & mask) == mask)
2782 return i;
2783 mdelay(10);
2784 i += 10;
2785 } while (i < timeout);
2786
2787 return -ETIME;
2788 }
2789
2790 /* These functions load the firmware and micro code for the operation of
2791 * the ipw hardware. They assume the buffer has all the bits for the
2792 * image and that the caller is handling the memory allocation and clean up.
2793 */
2794
2795 static int ipw_stop_master(struct ipw_priv *priv)
2796 {
2797 int rc;
2798
2799 IPW_DEBUG_TRACE(">> \n");
2800 /* stop master. typical delay - 0 */
2801 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
2802
2803 rc = ipw_poll_bit(priv, IPW_RESET_REG,
2804 IPW_RESET_REG_MASTER_DISABLED, 100);
2805 if (rc < 0) {
2806 IPW_ERROR("stop master failed in 10ms\n");
2807 return -1;
2808 }
2809
2810 IPW_DEBUG_INFO("stop master %dms\n", rc);
2811
2812 return rc;
2813 }
2814
2815 static void ipw_arc_release(struct ipw_priv *priv)
2816 {
2817 IPW_DEBUG_TRACE(">> \n");
2818 mdelay(5);
2819
2820 ipw_clear_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
2821
2822 /* the exact timing is not known, so for safety add some delay */
2823 mdelay(5);
2824 }
2825
2826 struct fw_header {
2827 u32 version;
2828 u32 mode;
2829 };
2830
2831 struct fw_chunk {
2832 u32 address;
2833 u32 length;
2834 };
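/* A rough sketch of the firmware image layout consumed further below: a
 * fw_header followed by a sequence of fw_chunk descriptors, each immediately
 * followed by chunk->length bytes of payload. The helper is hypothetical and
 * never called; it only illustrates how ipw_load_firmware() advances through
 * the chunk list. */
static inline size_t ipw_fw_next_chunk_offset_example(const u8 *data,
size_t offset)
{
const struct fw_chunk *chunk = (const struct fw_chunk *)(data + offset);

/* skip the descriptor itself, then the payload it describes */
return offset + sizeof(struct fw_chunk) + le32_to_cpu(chunk->length);
}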
2835
2836 #define IPW_FW_MAJOR_VERSION 2
2837 #define IPW_FW_MINOR_VERSION 4
2838
2839 #define IPW_FW_MINOR(x) ((x & 0xff00) >> 8)
2840 #define IPW_FW_MAJOR(x) (x & 0xff)
2841
2842 #define IPW_FW_VERSION ((IPW_FW_MINOR_VERSION << 8) | IPW_FW_MAJOR_VERSION)
2843
2844 #define IPW_FW_PREFIX "ipw-" __stringify(IPW_FW_MAJOR_VERSION) \
2845 "." __stringify(IPW_FW_MINOR_VERSION) "-"
2846
2847 #if IPW_FW_MAJOR_VERSION >= 2 && IPW_FW_MINOR_VERSION > 0
2848 #define IPW_FW_NAME(x) IPW_FW_PREFIX "" x ".fw"
2849 #else
2850 #define IPW_FW_NAME(x) "ipw2200_" x ".fw"
2851 #endif
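/* Example of the naming scheme above (illustrative only): with the current
 * major/minor of 2.4, IPW_FW_NAME("boot") expands to "ipw-2.4-boot.fw"; the
 * alternate "ipw2200_boot.fw" spelling applies only to firmware versions
 * older than 2.1. */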
2852
2853 static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
2854 {
2855 int rc = 0, i, addr;
2856 u8 cr = 0;
2857 u16 *image;
2858
2859 image = (u16 *) data;
2860
2861 IPW_DEBUG_TRACE(">> \n");
2862
2863 rc = ipw_stop_master(priv);
2864
2865 if (rc < 0)
2866 return rc;
2867
2868 // spin_lock_irqsave(&priv->lock, flags);
2869
2870 for (addr = IPW_SHARED_LOWER_BOUND;
2871 addr < IPW_REGISTER_DOMAIN1_END; addr += 4) {
2872 ipw_write32(priv, addr, 0);
2873 }
2874
2875 /* no ucode (yet) */
2876 memset(&priv->dino_alive, 0, sizeof(priv->dino_alive));
2877 /* destroy DMA queues */
2878 /* reset sequence */
2879
2880 ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_ON);
2881 ipw_arc_release(priv);
2882 ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_OFF);
2883 mdelay(1);
2884
2885 /* reset PHY */
2886 ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, IPW_BASEBAND_POWER_DOWN);
2887 mdelay(1);
2888
2889 ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, 0);
2890 mdelay(1);
2891
2892 /* enable ucode store */
2893 ipw_write_reg8(priv, DINO_CONTROL_REG, 0x0);
2894 ipw_write_reg8(priv, DINO_CONTROL_REG, DINO_ENABLE_CS);
2895 mdelay(1);
2896
2897 /* write ucode */
2898 /**
2899 * @bug
2900 * Do NOT set the indirect address register once and then
2901 * store data to the indirect data register in a loop.
2902 * It seems very reasonable, but in that case DINO does not
2903 * accept the ucode. It is essential to set the address each time.
2904 */
2905 /* load new ipw uCode */
2906 for (i = 0; i < len / 2; i++)
2907 ipw_write_reg16(priv, IPW_BASEBAND_CONTROL_STORE,
2908 cpu_to_le16(image[i]));
2909
2910 /* enable DINO */
2911 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
2912 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_SYSTEM);
2913
2914 /* this is where the igx / win driver deviates from the VAP driver. */
2915
2916 /* wait for alive response */
2917 for (i = 0; i < 100; i++) {
2918 /* poll for incoming data */
2919 cr = ipw_read_reg8(priv, IPW_BASEBAND_CONTROL_STATUS);
2920 if (cr & DINO_RXFIFO_DATA)
2921 break;
2922 mdelay(1);
2923 }
2924
2925 if (cr & DINO_RXFIFO_DATA) {
2926 /* alive_command_response size is NOT a multiple of 4 */
2927 u32 response_buffer[(sizeof(priv->dino_alive) + 3) / 4];
2928
2929 for (i = 0; i < ARRAY_SIZE(response_buffer); i++)
2930 response_buffer[i] =
2931 le32_to_cpu(ipw_read_reg32(priv,
2932 IPW_BASEBAND_RX_FIFO_READ));
2933 memcpy(&priv->dino_alive, response_buffer,
2934 sizeof(priv->dino_alive));
2935 if (priv->dino_alive.alive_command == 1
2936 && priv->dino_alive.ucode_valid == 1) {
2937 rc = 0;
2938 IPW_DEBUG_INFO
2939 ("Microcode OK, rev. %d (0x%x) dev. %d (0x%x) "
2940 "of %02d/%02d/%02d %02d:%02d\n",
2941 priv->dino_alive.software_revision,
2942 priv->dino_alive.software_revision,
2943 priv->dino_alive.device_identifier,
2944 priv->dino_alive.device_identifier,
2945 priv->dino_alive.time_stamp[0],
2946 priv->dino_alive.time_stamp[1],
2947 priv->dino_alive.time_stamp[2],
2948 priv->dino_alive.time_stamp[3],
2949 priv->dino_alive.time_stamp[4]);
2950 } else {
2951 IPW_DEBUG_INFO("Microcode is not alive\n");
2952 rc = -EINVAL;
2953 }
2954 } else {
2955 IPW_DEBUG_INFO("No alive response from DINO\n");
2956 rc = -ETIME;
2957 }
2958
2959 /* disable DINO, otherwise for some reason
2960 the firmware has problems getting the alive response. */
2961 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
2962
2963 // spin_unlock_irqrestore(&priv->lock, flags);
2964
2965 return rc;
2966 }
2967
2968 static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len)
2969 {
2970 int rc = -1;
2971 int offset = 0;
2972 struct fw_chunk *chunk;
2973 dma_addr_t shared_phys;
2974 u8 *shared_virt;
2975
2976 IPW_DEBUG_TRACE(">> : \n");
2977 shared_virt = pci_alloc_consistent(priv->pci_dev, len, &shared_phys);
2978
2979 if (!shared_virt)
2980 return -ENOMEM;
2981
2982 memmove(shared_virt, data, len);
2983
2984 /* Start the Dma */
2985 rc = ipw_fw_dma_enable(priv);
2986
2987 if (priv->sram_desc.last_cb_index > 0) {
2988 /* the DMA already has command blocks queued; this would be a bug. */
2989 BUG();
2990 goto out;
2991 }
2992
2993 do {
2994 chunk = (struct fw_chunk *)(data + offset);
2995 offset += sizeof(struct fw_chunk);
2996 /* build DMA packet and queue up for sending */
2997 /* dma chunk->length bytes from data + offset to
2998 * chunk->address */
2999 /* Dma loading */
3000 rc = ipw_fw_dma_add_buffer(priv, shared_phys + offset,
3001 le32_to_cpu(chunk->address),
3002 le32_to_cpu(chunk->length));
3003 if (rc) {
3004 IPW_DEBUG_INFO("dmaAddBuffer Failed\n");
3005 goto out;
3006 }
3007
3008 offset += le32_to_cpu(chunk->length);
3009 } while (offset < len);
3010
3011 /* Run the DMA and wait for the answer */
3012 rc = ipw_fw_dma_kick(priv);
3013 if (rc) {
3014 IPW_ERROR("dmaKick Failed\n");
3015 goto out;
3016 }
3017
3018 rc = ipw_fw_dma_wait(priv);
3019 if (rc) {
3020 IPW_ERROR("dmaWaitSync Failed\n");
3021 goto out;
3022 }
3023 out:
3024 pci_free_consistent(priv->pci_dev, len, shared_virt, shared_phys);
3025 return rc;
3026 }
3027
3028 /* stop nic */
3029 static int ipw_stop_nic(struct ipw_priv *priv)
3030 {
3031 int rc = 0;
3032
3033 /* stop */
3034 ipw_write32(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
3035
3036 rc = ipw_poll_bit(priv, IPW_RESET_REG,
3037 IPW_RESET_REG_MASTER_DISABLED, 500);
3038 if (rc < 0) {
3039 IPW_ERROR("wait for reg master disabled failed\n");
3040 return rc;
3041 }
3042
3043 ipw_set_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
3044
3045 return rc;
3046 }
3047
3048 static void ipw_start_nic(struct ipw_priv *priv)
3049 {
3050 IPW_DEBUG_TRACE(">>\n");
3051
3052 /* prvHwStartNic release ARC */
3053 ipw_clear_bit(priv, IPW_RESET_REG,
3054 IPW_RESET_REG_MASTER_DISABLED |
3055 IPW_RESET_REG_STOP_MASTER |
3056 CBD_RESET_REG_PRINCETON_RESET);
3057
3058 /* enable power management */
3059 ipw_set_bit(priv, IPW_GP_CNTRL_RW,
3060 IPW_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY);
3061
3062 IPW_DEBUG_TRACE("<<\n");
3063 }
3064
3065 static int ipw_init_nic(struct ipw_priv *priv)
3066 {
3067 int rc;
3068
3069 IPW_DEBUG_TRACE(">>\n");
3070 /* reset */
3071 /*prvHwInitNic */
3072 /* set "initialization complete" bit to move adapter to D0 state */
3073 ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3074
3075 /* low-level PLL activation */
3076 ipw_write32(priv, IPW_READ_INT_REGISTER,
3077 IPW_BIT_INT_HOST_SRAM_READ_INT_REGISTER);
3078
3079 /* wait for clock stabilization */
3080 rc = ipw_poll_bit(priv, IPW_GP_CNTRL_RW,
3081 IPW_GP_CNTRL_BIT_CLOCK_READY, 250);
3082 if (rc < 0)
3083 IPW_DEBUG_INFO("FAILED wait for clock stabilization\n");
3084
3085 /* assert SW reset */
3086 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_SW_RESET);
3087
3088 udelay(10);
3089
3090 /* set "initialization complete" bit to move adapter to D0 state */
3091 ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3092
3093 IPW_DEBUG_TRACE("<<\n");
3094 return 0;
3095 }
3096
3097 /* Call this function from process context, it will sleep in request_firmware.
3098 * Probe is an ok place to call this from.
3099 */
3100 static int ipw_reset_nic(struct ipw_priv *priv)
3101 {
3102 int rc = 0;
3103 unsigned long flags;
3104
3105 IPW_DEBUG_TRACE(">>\n");
3106
3107 rc = ipw_init_nic(priv);
3108
3109 spin_lock_irqsave(&priv->lock, flags);
3110 /* Clear the 'host command active' bit... */
3111 priv->status &= ~STATUS_HCMD_ACTIVE;
3112 wake_up_interruptible(&priv->wait_command_queue);
3113 priv->status &= ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
3114 wake_up_interruptible(&priv->wait_state);
3115 spin_unlock_irqrestore(&priv->lock, flags);
3116
3117 IPW_DEBUG_TRACE("<<\n");
3118 return rc;
3119 }
3120
3121 static int ipw_get_fw(struct ipw_priv *priv,
3122 const struct firmware **fw, const char *name)
3123 {
3124 struct fw_header *header;
3125 int rc;
3126
3127 /* ask firmware_class module to get the boot firmware off disk */
3128 rc = request_firmware(fw, name, &priv->pci_dev->dev);
3129 if (rc < 0) {
3130 IPW_ERROR("%s load failed: Reason %d\n", name, rc);
3131 return rc;
3132 }
3133
3134 header = (struct fw_header *)(*fw)->data;
3135 if (IPW_FW_MAJOR(le32_to_cpu(header->version)) != IPW_FW_MAJOR_VERSION) {
3136 IPW_ERROR("'%s' firmware version not compatible (%d != %d)\n",
3137 name,
3138 IPW_FW_MAJOR(le32_to_cpu(header->version)),
3139 IPW_FW_MAJOR_VERSION);
3140 return -EINVAL;
3141 }
3142
3143 IPW_DEBUG_INFO("Loading firmware '%s' file v%d.%d (%zd bytes)\n",
3144 name,
3145 IPW_FW_MAJOR(le32_to_cpu(header->version)),
3146 IPW_FW_MINOR(le32_to_cpu(header->version)),
3147 (*fw)->size - sizeof(struct fw_header));
3148 return 0;
3149 }
3150
3151 #define IPW_RX_BUF_SIZE (3000)
3152
3153 static void ipw_rx_queue_reset(struct ipw_priv *priv,
3154 struct ipw_rx_queue *rxq)
3155 {
3156 unsigned long flags;
3157 int i;
3158
3159 spin_lock_irqsave(&rxq->lock, flags);
3160
3161 INIT_LIST_HEAD(&rxq->rx_free);
3162 INIT_LIST_HEAD(&rxq->rx_used);
3163
3164 /* Fill the rx_used queue with _all_ of the Rx buffers */
3165 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
3166 /* In the reset function, these buffers may have been allocated
3167 * to an SKB, so we need to unmap and free potential storage */
3168 if (rxq->pool[i].skb != NULL) {
3169 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
3170 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
3171 dev_kfree_skb(rxq->pool[i].skb);
3172 rxq->pool[i].skb = NULL;
3173 }
3174 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
3175 }
3176
3177 /* Set us so that we have processed and used all buffers, but have
3178 * not restocked the Rx queue with fresh buffers */
3179 rxq->read = rxq->write = 0;
3180 rxq->processed = RX_QUEUE_SIZE - 1;
3181 rxq->free_count = 0;
3182 spin_unlock_irqrestore(&rxq->lock, flags);
3183 }
3184
3185 #ifdef CONFIG_PM
3186 static int fw_loaded = 0;
3187 static const struct firmware *bootfw = NULL;
3188 static const struct firmware *firmware = NULL;
3189 static const struct firmware *ucode = NULL;
3190
3191 static void free_firmware(void)
3192 {
3193 if (fw_loaded) {
3194 release_firmware(bootfw);
3195 release_firmware(ucode);
3196 release_firmware(firmware);
3197 bootfw = ucode = firmware = NULL;
3198 fw_loaded = 0;
3199 }
3200 }
3201 #else
3202 #define free_firmware() do {} while (0)
3203 #endif
3204
3205 static int ipw_load(struct ipw_priv *priv)
3206 {
3207 #ifndef CONFIG_PM
3208 const struct firmware *bootfw = NULL;
3209 const struct firmware *firmware = NULL;
3210 const struct firmware *ucode = NULL;
3211 #endif
3212 int rc = 0, retries = 3;
3213
3214 #ifdef CONFIG_PM
3215 if (!fw_loaded) {
3216 #endif
3217 rc = ipw_get_fw(priv, &bootfw, IPW_FW_NAME("boot"));
3218 if (rc)
3219 goto error;
3220
3221 switch (priv->ieee->iw_mode) {
3222 case IW_MODE_ADHOC:
3223 rc = ipw_get_fw(priv, &ucode,
3224 IPW_FW_NAME("ibss_ucode"));
3225 if (rc)
3226 goto error;
3227
3228 rc = ipw_get_fw(priv, &firmware, IPW_FW_NAME("ibss"));
3229 break;
3230
3231 #ifdef CONFIG_IPW2200_MONITOR
3232 case IW_MODE_MONITOR:
3233 rc = ipw_get_fw(priv, &ucode,
3234 IPW_FW_NAME("sniffer_ucode"));
3235 if (rc)
3236 goto error;
3237
3238 rc = ipw_get_fw(priv, &firmware,
3239 IPW_FW_NAME("sniffer"));
3240 break;
3241 #endif
3242 case IW_MODE_INFRA:
3243 rc = ipw_get_fw(priv, &ucode, IPW_FW_NAME("bss_ucode"));
3244 if (rc)
3245 goto error;
3246
3247 rc = ipw_get_fw(priv, &firmware, IPW_FW_NAME("bss"));
3248 break;
3249
3250 default:
3251 rc = -EINVAL;
3252 }
3253
3254 if (rc)
3255 goto error;
3256
3257 #ifdef CONFIG_PM
3258 fw_loaded = 1;
3259 }
3260 #endif
3261
3262 if (!priv->rxq)
3263 priv->rxq = ipw_rx_queue_alloc(priv);
3264 else
3265 ipw_rx_queue_reset(priv, priv->rxq);
3266 if (!priv->rxq) {
3267 IPW_ERROR("Unable to initialize Rx queue\n");
3268 goto error;
3269 }
3270
3271 retry:
3272 /* Ensure interrupts are disabled */
3273 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3274 priv->status &= ~STATUS_INT_ENABLED;
3275
3276 /* ack pending interrupts */
3277 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3278
3279 ipw_stop_nic(priv);
3280
3281 rc = ipw_reset_nic(priv);
3282 if (rc) {
3283 IPW_ERROR("Unable to reset NIC\n");
3284 goto error;
3285 }
3286
3287 ipw_zero_memory(priv, IPW_NIC_SRAM_LOWER_BOUND,
3288 IPW_NIC_SRAM_UPPER_BOUND - IPW_NIC_SRAM_LOWER_BOUND);
3289
3290 /* DMA the initial boot firmware into the device */
3291 rc = ipw_load_firmware(priv, bootfw->data + sizeof(struct fw_header),
3292 bootfw->size - sizeof(struct fw_header));
3293 if (rc < 0) {
3294 IPW_ERROR("Unable to load boot firmware: %d\n", rc);
3295 goto error;
3296 }
3297
3298 /* kick start the device */
3299 ipw_start_nic(priv);
3300
3301 /* wait for the device to finish its initial startup sequence */
3302 rc = ipw_poll_bit(priv, IPW_INTA_RW,
3303 IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3304 if (rc < 0) {
3305 IPW_ERROR("device failed to boot initial fw image\n");
3306 goto error;
3307 }
3308 IPW_DEBUG_INFO("initial device response after %dms\n", rc);
3309
3310 /* ack fw init done interrupt */
3311 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3312
3313 /* DMA the ucode into the device */
3314 rc = ipw_load_ucode(priv, ucode->data + sizeof(struct fw_header),
3315 ucode->size - sizeof(struct fw_header));
3316 if (rc < 0) {
3317 IPW_ERROR("Unable to load ucode: %d\n", rc);
3318 goto error;
3319 }
3320
3321 /* stop nic */
3322 ipw_stop_nic(priv);
3323
3324 /* DMA bss firmware into the device */
3325 rc = ipw_load_firmware(priv, firmware->data +
3326 sizeof(struct fw_header),
3327 firmware->size - sizeof(struct fw_header));
3328 if (rc < 0) {
3329 IPW_ERROR("Unable to load firmware: %d\n", rc);
3330 goto error;
3331 }
3332
3333 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
3334
3335 rc = ipw_queue_reset(priv);
3336 if (rc) {
3337 IPW_ERROR("Unable to initialize queues\n");
3338 goto error;
3339 }
3340
3341 /* Ensure interrupts are disabled */
3342 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3343 /* ack pending interrupts */
3344 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3345
3346 /* kick start the device */
3347 ipw_start_nic(priv);
3348
3349 if (ipw_read32(priv, IPW_INTA_RW) & IPW_INTA_BIT_PARITY_ERROR) {
3350 if (retries > 0) {
3351 IPW_WARNING("Parity error. Retrying init.\n");
3352 retries--;
3353 goto retry;
3354 }
3355
3356 IPW_ERROR("TODO: Handle parity error -- schedule restart?\n");
3357 rc = -EIO;
3358 goto error;
3359 }
3360
3361 /* wait for the device */
3362 rc = ipw_poll_bit(priv, IPW_INTA_RW,
3363 IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3364 if (rc < 0) {
3365 IPW_ERROR("device failed to start after 500ms\n");
3366 goto error;
3367 }
3368 IPW_DEBUG_INFO("device response after %dms\n", rc);
3369
3370 /* ack fw init done interrupt */
3371 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3372
3373 /* read eeprom data and initialize the eeprom region of sram */
3374 priv->eeprom_delay = 1;
3375 ipw_eeprom_init_sram(priv);
3376
3377 /* enable interrupts */
3378 ipw_enable_interrupts(priv);
3379
3380 /* Ensure our queue has valid packets */
3381 ipw_rx_queue_replenish(priv);
3382
3383 ipw_write32(priv, IPW_RX_READ_INDEX, priv->rxq->read);
3384
3385 /* ack pending interrupts */
3386 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3387
3388 #ifndef CONFIG_PM
3389 release_firmware(bootfw);
3390 release_firmware(ucode);
3391 release_firmware(firmware);
3392 #endif
3393 return 0;
3394
3395 error:
3396 if (priv->rxq) {
3397 ipw_rx_queue_free(priv, priv->rxq);
3398 priv->rxq = NULL;
3399 }
3400 ipw_tx_queue_free(priv);
3401 if (bootfw)
3402 release_firmware(bootfw);
3403 if (ucode)
3404 release_firmware(ucode);
3405 if (firmware)
3406 release_firmware(firmware);
3407 #ifdef CONFIG_PM
3408 fw_loaded = 0;
3409 bootfw = ucode = firmware = NULL;
3410 #endif
3411
3412 return rc;
3413 }
3414
3415 /**
3416 * DMA services
3417 *
3418 * Theory of operation
3419 *
3420 * A queue is a circular buffer with 'Read' and 'Write' pointers.
3421 * Two empty entries are always kept in the buffer to protect from overflow.
3422 *
3423 * For the Tx queues there are low mark and high mark limits. If, after queuing
3424 * a packet for Tx, the free space becomes < low mark, the Tx queue is stopped.
3425 * When reclaiming packets (on the 'tx done' IRQ), if the free space becomes
3426 * > high mark, the Tx queue is resumed; see the sketch after ipw_queue_space().
3427 *
3428 * The IPW operates with six queues, one receive queue in the device's
3429 * sram, one transmit queue for sending commands to the device firmware,
3430 * and four transmit queues for data.
3431 *
3432 * The four transmit queues allow for performing quality of service (qos)
3433 * transmissions as per the 802.11 protocol. Currently Linux does not
3434 * provide a mechanism to the user for utilizing prioritized queues, so
3435 * we only utilize the first data transmit queue (queue1).
3436 */
3437
3438 /**
3439 * Driver allocates buffers of this size for Rx
3440 */
3441
3442 static inline int ipw_queue_space(const struct clx2_queue *q)
3443 {
3444 int s = q->last_used - q->first_empty;
3445 if (s <= 0)
3446 s += q->n_bd;
3447 s -= 2; /* keep some reserve to not confuse empty and full situations */
3448 if (s < 0)
3449 s = 0;
3450 return s;
3451 }
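/* A minimal sketch of the low/high mark hysteresis described in the theory
 * of operation comment above. Hypothetical and never called; the real
 * checks live in the tx and tx-reclaim paths elsewhere in this driver. */
static inline void ipw_tx_queue_hysteresis_example(struct ipw_priv *priv,
struct clx2_queue *q)
{
if (ipw_queue_space(q) < q->low_mark)
netif_stop_queue(priv->net_dev); /* getting full: stop the stack */
else if (ipw_queue_space(q) > q->high_mark)
netif_wake_queue(priv->net_dev); /* drained enough: resume */
}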
3452
3453 static inline int ipw_queue_inc_wrap(int index, int n_bd)
3454 {
3455 return (++index == n_bd) ? 0 : index;
3456 }
3457
3458 /**
3459 * Initialize common DMA queue structure
3460 *
3461 * @param q queue to init
3462 * @param count Number of BD's to allocate. Should be a power of 2
3463 * @param read Address for 'read' register
3464 * (not offset within BAR, full address)
3465 * @param write Address for 'write' register
3466 * (not offset within BAR, full address)
3467 * @param base Address for 'base' register
3468 * (not offset within BAR, full address)
3469 * @param size Address for 'size' register
3470 * (not offset within BAR, full address)
3471 */
3472 static void ipw_queue_init(struct ipw_priv *priv, struct clx2_queue *q,
3473 int count, u32 read, u32 write, u32 base, u32 size)
3474 {
3475 q->n_bd = count;
3476
3477 q->low_mark = q->n_bd / 4;
3478 if (q->low_mark < 4)
3479 q->low_mark = 4;
3480
3481 q->high_mark = q->n_bd / 8;
3482 if (q->high_mark < 2)
3483 q->high_mark = 2;
3484
3485 q->first_empty = q->last_used = 0;
3486 q->reg_r = read;
3487 q->reg_w = write;
3488
3489 ipw_write32(priv, base, q->dma_addr);
3490 ipw_write32(priv, size, count);
3491 ipw_write32(priv, read, 0);
3492 ipw_write32(priv, write, 0);
3493
3494 _ipw_read32(priv, 0x90);
3495 }
3496
3497 static int ipw_queue_tx_init(struct ipw_priv *priv,
3498 struct clx2_tx_queue *q,
3499 int count, u32 read, u32 write, u32 base, u32 size)
3500 {
3501 struct pci_dev *dev = priv->pci_dev;
3502
3503 q->txb = kmalloc(sizeof(q->txb[0]) * count, GFP_KERNEL);
3504 if (!q->txb) {
3505 IPW_ERROR("kmalloc for auxiliary BD structures failed\n");
3506 return -ENOMEM;
3507 }
3508
3509 q->bd =
3510 pci_alloc_consistent(dev, sizeof(q->bd[0]) * count, &q->q.dma_addr);
3511 if (!q->bd) {
3512 IPW_ERROR("pci_alloc_consistent(%zd) failed\n",
3513 sizeof(q->bd[0]) * count);
3514 kfree(q->txb);
3515 q->txb = NULL;
3516 return -ENOMEM;
3517 }
3518
3519 ipw_queue_init(priv, &q->q, count, read, write, base, size);
3520 return 0;
3521 }
3522
3523 /**
3524 * Free one TFD, the one at index [txq->q.last_used].
3525 * Do NOT advance any indexes
3526 *
3527 * @param dev
3528 * @param txq
3529 */
3530 static void ipw_queue_tx_free_tfd(struct ipw_priv *priv,
3531 struct clx2_tx_queue *txq)
3532 {
3533 struct tfd_frame *bd = &txq->bd[txq->q.last_used];
3534 struct pci_dev *dev = priv->pci_dev;
3535 int i;
3536
3537 /* classify bd */
3538 if (bd->control_flags.message_type == TX_HOST_COMMAND_TYPE)
3539 /* nothing to cleanup after for host commands */
3540 return;
3541
3542 /* sanity check */
3543 if (le32_to_cpu(bd->u.data.num_chunks) > NUM_TFD_CHUNKS) {
3544 IPW_ERROR("Too many chunks: %i\n",
3545 le32_to_cpu(bd->u.data.num_chunks));
3546 /** @todo issue fatal error, it is quite a serious situation */
3547 return;
3548 }
3549
3550 /* unmap chunks if any */
3551 for (i = 0; i < le32_to_cpu(bd->u.data.num_chunks); i++) {
3552 pci_unmap_single(dev, le32_to_cpu(bd->u.data.chunk_ptr[i]),
3553 le16_to_cpu(bd->u.data.chunk_len[i]),
3554 PCI_DMA_TODEVICE);
3555 if (txq->txb[txq->q.last_used]) {
3556 ieee80211_txb_free(txq->txb[txq->q.last_used]);
3557 txq->txb[txq->q.last_used] = NULL;
3558 }
3559 }
3560 }
3561
3562 /**
3563 * Deallocate DMA queue.
3564 *
3565 * Empty queue by removing and destroying all BD's.
3566 * Free all buffers.
3567 *
3568 * @param dev
3569 * @param q
3570 */
3571 static void ipw_queue_tx_free(struct ipw_priv *priv, struct clx2_tx_queue *txq)
3572 {
3573 struct clx2_queue *q = &txq->q;
3574 struct pci_dev *dev = priv->pci_dev;
3575
3576 if (q->n_bd == 0)
3577 return;
3578
3579 /* first, empty all BD's */
3580 for (; q->first_empty != q->last_used;
3581 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
3582 ipw_queue_tx_free_tfd(priv, txq);
3583 }
3584
3585 /* free buffers belonging to queue itself */
3586 pci_free_consistent(dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd,
3587 q->dma_addr);
3588 kfree(txq->txb);
3589
3590 /* 0 fill whole structure */
3591 memset(txq, 0, sizeof(*txq));
3592 }
3593
3594 /**
3595 * Destroy all DMA queues and structures
3596 *
3597 * @param priv
3598 */
3599 static void ipw_tx_queue_free(struct ipw_priv *priv)
3600 {
3601 /* Tx CMD queue */
3602 ipw_queue_tx_free(priv, &priv->txq_cmd);
3603
3604 /* Tx queues */
3605 ipw_queue_tx_free(priv, &priv->txq[0]);
3606 ipw_queue_tx_free(priv, &priv->txq[1]);
3607 ipw_queue_tx_free(priv, &priv->txq[2]);
3608 ipw_queue_tx_free(priv, &priv->txq[3]);
3609 }
3610
3611 static void ipw_create_bssid(struct ipw_priv *priv, u8 * bssid)
3612 {
3613 /* First 3 bytes are manufacturer */
3614 bssid[0] = priv->mac_addr[0];
3615 bssid[1] = priv->mac_addr[1];
3616 bssid[2] = priv->mac_addr[2];
3617
3618 /* Last bytes are random */
3619 get_random_bytes(&bssid[3], ETH_ALEN - 3);
3620
3621 bssid[0] &= 0xfe; /* clear multicast bit */
3622 bssid[0] |= 0x02; /* set local assignment bit (IEEE802) */
3623 }
3624
3625 static u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid)
3626 {
3627 struct ipw_station_entry entry;
3628 int i;
3629
3630 for (i = 0; i < priv->num_stations; i++) {
3631 if (!memcmp(priv->stations[i], bssid, ETH_ALEN)) {
3632 /* Another node is active in network */
3633 priv->missed_adhoc_beacons = 0;
3634 if (!(priv->config & CFG_STATIC_CHANNEL))
3635 /* when other nodes drop out, we drop out */
3636 priv->config &= ~CFG_ADHOC_PERSIST;
3637
3638 return i;
3639 }
3640 }
3641
3642 if (i == MAX_STATIONS)
3643 return IPW_INVALID_STATION;
3644
3645 IPW_DEBUG_SCAN("Adding AdHoc station: " MAC_FMT "\n", MAC_ARG(bssid));
3646
3647 entry.reserved = 0;
3648 entry.support_mode = 0;
3649 memcpy(entry.mac_addr, bssid, ETH_ALEN);
3650 memcpy(priv->stations[i], bssid, ETH_ALEN);
3651 ipw_write_direct(priv, IPW_STATION_TABLE_LOWER + i * sizeof(entry),
3652 &entry, sizeof(entry));
3653 priv->num_stations++;
3654
3655 return i;
3656 }
3657
3658 static u8 ipw_find_station(struct ipw_priv *priv, u8 * bssid)
3659 {
3660 int i;
3661
3662 for (i = 0; i < priv->num_stations; i++)
3663 if (!memcmp(priv->stations[i], bssid, ETH_ALEN))
3664 return i;
3665
3666 return IPW_INVALID_STATION;
3667 }
3668
3669 static void ipw_send_disassociate(struct ipw_priv *priv, int quiet)
3670 {
3671 int err;
3672
3673 if (priv->status & STATUS_ASSOCIATING) {
3674 IPW_DEBUG_ASSOC("Disassociating while associating.\n");
3675 queue_work(priv->workqueue, &priv->disassociate);
3676 return;
3677 }
3678
3679 if (!(priv->status & STATUS_ASSOCIATED)) {
3680 IPW_DEBUG_ASSOC("Disassociating while not associated.\n");
3681 return;
3682 }
3683
3684 IPW_DEBUG_ASSOC("Disassociation attempt from " MAC_FMT " "
3685 "on channel %d.\n",
3686 MAC_ARG(priv->assoc_request.bssid),
3687 priv->assoc_request.channel);
3688
3689 priv->status &= ~(STATUS_ASSOCIATING | STATUS_ASSOCIATED);
3690 priv->status |= STATUS_DISASSOCIATING;
3691
3692 if (quiet)
3693 priv->assoc_request.assoc_type = HC_DISASSOC_QUIET;
3694 else
3695 priv->assoc_request.assoc_type = HC_DISASSOCIATE;
3696
3697 err = ipw_send_associate(priv, &priv->assoc_request);
3698 if (err) {
3699 IPW_DEBUG_HC("Attempt to send [dis]associate command "
3700 "failed.\n");
3701 return;
3702 }
3703
3704 }
3705
3706 static int ipw_disassociate(void *data)
3707 {
3708 struct ipw_priv *priv = data;
3709 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)))
3710 return 0;
3711 ipw_send_disassociate(data, 0);
3712 return 1;
3713 }
3714
3715 static void ipw_bg_disassociate(void *data)
3716 {
3717 struct ipw_priv *priv = data;
3718 down(&priv->sem);
3719 ipw_disassociate(data);
3720 up(&priv->sem);
3721 }
3722
3723 static void ipw_system_config(void *data)
3724 {
3725 struct ipw_priv *priv = data;
3726 ipw_send_system_config(priv, &priv->sys_config);
3727 }
3728
3729 struct ipw_status_code {
3730 u16 status;
3731 const char *reason;
3732 };
3733
3734 static const struct ipw_status_code ipw_status_codes[] = {
3735 {0x00, "Successful"},
3736 {0x01, "Unspecified failure"},
3737 {0x0A, "Cannot support all requested capabilities in the "
3738 "Capability information field"},
3739 {0x0B, "Reassociation denied due to inability to confirm that "
3740 "association exists"},
3741 {0x0C, "Association denied due to reason outside the scope of this "
3742 "standard"},
3743 {0x0D,
3744 "Responding station does not support the specified authentication "
3745 "algorithm"},
3746 {0x0E,
3747 "Received an Authentication frame with authentication sequence "
3748 "transaction sequence number out of expected sequence"},
3749 {0x0F, "Authentication rejected because of challenge failure"},
3750 {0x10, "Authentication rejected due to timeout waiting for next "
3751 "frame in sequence"},
3752 {0x11, "Association denied because AP is unable to handle additional "
3753 "associated stations"},
3754 {0x12,
3755 "Association denied due to requesting station not supporting all "
3756 "of the datarates in the BSSBasicServiceSet Parameter"},
3757 {0x13,
3758 "Association denied due to requesting station not supporting "
3759 "short preamble operation"},
3760 {0x14,
3761 "Association denied due to requesting station not supporting "
3762 "PBCC encoding"},
3763 {0x15,
3764 "Association denied due to requesting station not supporting "
3765 "channel agility"},
3766 {0x19,
3767 "Association denied due to requesting station not supporting "
3768 "short slot operation"},
3769 {0x1A,
3770 "Association denied due to requesting station not supporting "
3771 "DSSS-OFDM operation"},
3772 {0x28, "Invalid Information Element"},
3773 {0x29, "Group Cipher is not valid"},
3774 {0x2A, "Pairwise Cipher is not valid"},
3775 {0x2B, "AKMP is not valid"},
3776 {0x2C, "Unsupported RSN IE version"},
3777 {0x2D, "Invalid RSN IE Capabilities"},
3778 {0x2E, "Cipher suite is rejected per security policy"},
3779 };
3780
3781 #ifdef CONFIG_IPW2200_DEBUG
3782 static const char *ipw_get_status_code(u16 status)
3783 {
3784 int i;
3785 for (i = 0; i < ARRAY_SIZE(ipw_status_codes); i++)
3786 if (ipw_status_codes[i].status == (status & 0xff))
3787 return ipw_status_codes[i].reason;
3788 return "Unknown status value.";
3789 }
3790 #endif
3791
3792 static inline void average_init(struct average *avg)
3793 {
3794 memset(avg, 0, sizeof(*avg));
3795 }
3796
3797 static void average_add(struct average *avg, s16 val)
3798 {
3799 avg->sum -= avg->entries[avg->pos];
3800 avg->sum += val;
3801 avg->entries[avg->pos++] = val;
3802 if (unlikely(avg->pos == AVG_ENTRIES)) {
3803 avg->init = 1;
3804 avg->pos = 0;
3805 }
3806 }
3807
3808 static s16 average_value(struct average *avg)
3809 {
3810 if (!unlikely(avg->init)) {
3811 if (avg->pos)
3812 return avg->sum / avg->pos;
3813 return 0;
3814 }
3815
3816 return avg->sum / AVG_ENTRIES;
3817 }
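/* Illustrative usage sketch of the sliding-average helpers above (never
 * called by the driver; assumes AVG_ENTRIES is larger than three). */
static inline s16 ipw_average_example(void)
{
struct average avg;
int i;

average_init(&avg);
for (i = 0; i < 3; i++)
average_add(&avg, i * 10); /* samples 0, 10, 20 */

/* the window is not yet full, so this averages only those samples: 10 */
return average_value(&avg);
}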
3818
3819 static void ipw_reset_stats(struct ipw_priv *priv)
3820 {
3821 u32 len = sizeof(u32);
3822
3823 priv->quality = 0;
3824
3825 average_init(&priv->average_missed_beacons);
3826 average_init(&priv->average_rssi);
3827 average_init(&priv->average_noise);
3828
3829 priv->last_rate = 0;
3830 priv->last_missed_beacons = 0;
3831 priv->last_rx_packets = 0;
3832 priv->last_tx_packets = 0;
3833 priv->last_tx_failures = 0;
3834
3835 /* Firmware managed, reset only when NIC is restarted, so we have to
3836 * normalize on the current value */
3837 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC,
3838 &priv->last_rx_err, &len);
3839 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE,
3840 &priv->last_tx_failures, &len);
3841
3842 /* Driver managed, reset with each association */
3843 priv->missed_adhoc_beacons = 0;
3844 priv->missed_beacons = 0;
3845 priv->tx_packets = 0;
3846 priv->rx_packets = 0;
3847
3848 }
3849
3850 static u32 ipw_get_max_rate(struct ipw_priv *priv)
3851 {
3852 u32 i = 0x80000000;
3853 u32 mask = priv->rates_mask;
3854 /* If currently associated in B mode, restrict the maximum
3855 * rate match to B rates */
3856 if (priv->assoc_request.ieee_mode == IPW_B_MODE)
3857 mask &= IEEE80211_CCK_RATES_MASK;
3858
3859 /* TODO: Verify that the rate is supported by the current rates
3860 * list. */
3861
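/* Walk down from the most significant bit to find the highest rate
 * still present in the (possibly restricted) mask */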
3862 while (i && !(mask & i))
3863 i >>= 1;
3864 switch (i) {
3865 case IEEE80211_CCK_RATE_1MB_MASK:
3866 return 1000000;
3867 case IEEE80211_CCK_RATE_2MB_MASK:
3868 return 2000000;
3869 case IEEE80211_CCK_RATE_5MB_MASK:
3870 return 5500000;
3871 case IEEE80211_OFDM_RATE_6MB_MASK:
3872 return 6000000;
3873 case IEEE80211_OFDM_RATE_9MB_MASK:
3874 return 9000000;
3875 case IEEE80211_CCK_RATE_11MB_MASK:
3876 return 11000000;
3877 case IEEE80211_OFDM_RATE_12MB_MASK:
3878 return 12000000;
3879 case IEEE80211_OFDM_RATE_18MB_MASK:
3880 return 18000000;
3881 case IEEE80211_OFDM_RATE_24MB_MASK:
3882 return 24000000;
3883 case IEEE80211_OFDM_RATE_36MB_MASK:
3884 return 36000000;
3885 case IEEE80211_OFDM_RATE_48MB_MASK:
3886 return 48000000;
3887 case IEEE80211_OFDM_RATE_54MB_MASK:
3888 return 54000000;
3889 }
3890
3891 if (priv->ieee->mode == IEEE_B)
3892 return 11000000;
3893 else
3894 return 54000000;
3895 }
3896
3897 static u32 ipw_get_current_rate(struct ipw_priv *priv)
3898 {
3899 u32 rate, len = sizeof(rate);
3900 int err;
3901
3902 if (!(priv->status & STATUS_ASSOCIATED))
3903 return 0;
3904
3905 if (priv->tx_packets > IPW_REAL_RATE_RX_PACKET_THRESHOLD) {
3906 err = ipw_get_ordinal(priv, IPW_ORD_STAT_TX_CURR_RATE, &rate,
3907 &len);
3908 if (err) {
3909 IPW_DEBUG_INFO("failed querying ordinals.\n");
3910 return 0;
3911 }
3912 } else
3913 return ipw_get_max_rate(priv);
3914
3915 switch (rate) {
3916 case IPW_TX_RATE_1MB:
3917 return 1000000;
3918 case IPW_TX_RATE_2MB:
3919 return 2000000;
3920 case IPW_TX_RATE_5MB:
3921 return 5500000;
3922 case IPW_TX_RATE_6MB:
3923 return 6000000;
3924 case IPW_TX_RATE_9MB:
3925 return 9000000;
3926 case IPW_TX_RATE_11MB:
3927 return 11000000;
3928 case IPW_TX_RATE_12MB:
3929 return 12000000;
3930 case IPW_TX_RATE_18MB:
3931 return 18000000;
3932 case IPW_TX_RATE_24MB:
3933 return 24000000;
3934 case IPW_TX_RATE_36MB:
3935 return 36000000;
3936 case IPW_TX_RATE_48MB:
3937 return 48000000;
3938 case IPW_TX_RATE_54MB:
3939 return 54000000;
3940 }
3941
3942 return 0;
3943 }
3944
3945 #define IPW_STATS_INTERVAL (2 * HZ)
3946 static void ipw_gather_stats(struct ipw_priv *priv)
3947 {
3948 u32 rx_err, rx_err_delta, rx_packets_delta;
3949 u32 tx_failures, tx_failures_delta, tx_packets_delta;
3950 u32 missed_beacons_percent, missed_beacons_delta;
3951 u32 quality = 0;
3952 u32 len = sizeof(u32);
3953 s16 rssi;
3954 u32 beacon_quality, signal_quality, tx_quality, rx_quality,
3955 rate_quality;
3956 u32 max_rate;
3957
3958 if (!(priv->status & STATUS_ASSOCIATED)) {
3959 priv->quality = 0;
3960 return;
3961 }
3962
3963 /* Update the statistics */
3964 ipw_get_ordinal(priv, IPW_ORD_STAT_MISSED_BEACONS,
3965 &priv->missed_beacons, &len);
3966 missed_beacons_delta = priv->missed_beacons - priv->last_missed_beacons;
3967 priv->last_missed_beacons = priv->missed_beacons;
3968 if (priv->assoc_request.beacon_interval) {
3969 missed_beacons_percent = missed_beacons_delta *
3970 (HZ * priv->assoc_request.beacon_interval) /
3971 (IPW_STATS_INTERVAL * 10);
3972 } else {
3973 missed_beacons_percent = 0;
3974 }
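/*
 * Sketch of the expression above, assuming beacon_interval is in time
 * units of roughly 1ms: the IPW_STATS_INTERVAL window is 2 * HZ jiffies
 * (~2000ms), so about 2000 / beacon_interval beacons were expected and
 * the missed percentage works out to delta * beacon_interval / 20,
 * which is what delta * (HZ * interval) / (IPW_STATS_INTERVAL * 10)
 * reduces to.
 */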
3975 average_add(&priv->average_missed_beacons, missed_beacons_percent);
3976
3977 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC, &rx_err, &len);
3978 rx_err_delta = rx_err - priv->last_rx_err;
3979 priv->last_rx_err = rx_err;
3980
3981 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE, &tx_failures, &len);
3982 tx_failures_delta = tx_failures - priv->last_tx_failures;
3983 priv->last_tx_failures = tx_failures;
3984
3985 rx_packets_delta = priv->rx_packets - priv->last_rx_packets;
3986 priv->last_rx_packets = priv->rx_packets;
3987
3988 tx_packets_delta = priv->tx_packets - priv->last_tx_packets;
3989 priv->last_tx_packets = priv->tx_packets;
3990
3991 /* Calculate quality based on the following:
3992 *
3993 * Missed beacon: 100% = 0, 0% = 70% missed
3994 * Rate: 60% = 1Mbs, 100% = Max
3995 * Rx and Tx errors represent a straight % of total Rx/Tx
3996 * RSSI: 100% = > -50, 0% = < -80
3997 * Rx errors: 100% = 0, 0% = 50% missed
3998 *
3999 * The lowest computed quality is used.
4000 *
4001 */
4002 #define BEACON_THRESHOLD 5
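/* Rescale so that missing (100 - BEACON_THRESHOLD)% or more of the
 * expected beacons yields 0% quality, while 0% missed stays at 100% */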
4003 beacon_quality = 100 - missed_beacons_percent;
4004 if (beacon_quality < BEACON_THRESHOLD)
4005 beacon_quality = 0;
4006 else
4007 beacon_quality = (beacon_quality - BEACON_THRESHOLD) * 100 /
4008 (100 - BEACON_THRESHOLD);
4009 IPW_DEBUG_STATS("Missed beacon: %3d%% (%d%%)\n",
4010 beacon_quality, missed_beacons_percent);
4011
4012 priv->last_rate = ipw_get_current_rate(priv);
4013 max_rate = ipw_get_max_rate(priv);
4014 rate_quality = priv->last_rate * 40 / max_rate + 60;
4015 IPW_DEBUG_STATS("Rate quality : %3d%% (%dMbs)\n",
4016 rate_quality, priv->last_rate / 1000000);
4017
4018 if (rx_packets_delta > 100 && rx_packets_delta + rx_err_delta)
4019 rx_quality = 100 - (rx_err_delta * 100) /
4020 (rx_packets_delta + rx_err_delta);
4021 else
4022 rx_quality = 100;
4023 IPW_DEBUG_STATS("Rx quality : %3d%% (%u errors, %u packets)\n",
4024 rx_quality, rx_err_delta, rx_packets_delta);
4025
4026 if (tx_packets_delta > 100 && tx_packets_delta + tx_failures_delta)
4027 tx_quality = 100 - (tx_failures_delta * 100) /
4028 (tx_packets_delta + tx_failures_delta);
4029 else
4030 tx_quality = 100;
4031 IPW_DEBUG_STATS("Tx quality : %3d%% (%u errors, %u packets)\n",
4032 tx_quality, tx_failures_delta, tx_packets_delta);
4033
4034 rssi = average_value(&priv->average_rssi);
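/*
 * Illustrative reading of the expression below: with
 * d = perfect_rssi - worst_rssi and x = perfect_rssi - rssi it
 * computes 100 - (15 * x) / d - (62 * x * x) / (d * d), i.e. 100% at
 * perfect_rssi, falling off quadratically toward worst_rssi before
 * being clamped to [0, 100].
 */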
4035 signal_quality =
4036 (100 *
4037 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4038 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) -
4039 (priv->ieee->perfect_rssi - rssi) *
4040 (15 * (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) +
4041 62 * (priv->ieee->perfect_rssi - rssi))) /
4042 ((priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4043 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi));
4044 if (signal_quality > 100)
4045 signal_quality = 100;
4046 else if (signal_quality < 1)
4047 signal_quality = 0;
4048
4049 IPW_DEBUG_STATS("Signal level : %3d%% (%d dBm)\n",
4050 signal_quality, rssi);
4051
4052 quality = min(beacon_quality,
4053 min(rate_quality,
4054 min(tx_quality, min(rx_quality, signal_quality))));
4055 if (quality == beacon_quality)
4056 IPW_DEBUG_STATS("Quality (%d%%): Clamped to missed beacons.\n",
4057 quality);
4058 if (quality == rate_quality)
4059 IPW_DEBUG_STATS("Quality (%d%%): Clamped to rate quality.\n",
4060 quality);
4061 if (quality == tx_quality)
4062 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Tx quality.\n",
4063 quality);
4064 if (quality == rx_quality)
4065 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Rx quality.\n",
4066 quality);
4067 if (quality == signal_quality)
4068 IPW_DEBUG_STATS("Quality (%d%%): Clamped to signal quality.\n",
4069 quality);
4070
4071 priv->quality = quality;
4072
4073 queue_delayed_work(priv->workqueue, &priv->gather_stats,
4074 IPW_STATS_INTERVAL);
4075 }
4076
4077 static void ipw_bg_gather_stats(void *data)
4078 {
4079 struct ipw_priv *priv = data;
4080 down(&priv->sem);
4081 ipw_gather_stats(data);
4082 up(&priv->sem);
4083 }
4084
4085 /* Missed beacon behavior:
4086 * 1st missed -> roaming_threshold, just wait, don't do any scan/roam.
4087 * roaming_threshold -> disassociate_threshold, scan and roam for better signal.
4088 * Above disassociate threshold, give up and stop scanning.
4089 * Roaming is disabled if disassociate_threshold <= roaming_threshold */
4090 static void ipw_handle_missed_beacon(struct ipw_priv *priv,
4091 int missed_count)
4092 {
4093 priv->notif_missed_beacons = missed_count;
4094
4095 if (missed_count > priv->disassociate_threshold &&
4096 priv->status & STATUS_ASSOCIATED) {
4097 /* If associated and we've hit the missed
4098 * beacon threshold, disassociate, turn
4099 * off roaming, and abort any active scans */
4100 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4101 IPW_DL_STATE | IPW_DL_ASSOC,
4102 "Missed beacon: %d - disassociate\n", missed_count);
4103 priv->status &= ~STATUS_ROAMING;
4104 if (priv->status & STATUS_SCANNING) {
4105 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4106 IPW_DL_STATE,
4107 "Aborting scan with missed beacon.\n");
4108 queue_work(priv->workqueue, &priv->abort_scan);
4109 }
4110
4111 queue_work(priv->workqueue, &priv->disassociate);
4112 return;
4113 }
4114
4115 if (priv->status & STATUS_ROAMING) {
4116 /* If we are currently roaming, then just
4117 * print a debug statement... */
4118 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4119 "Missed beacon: %d - roam in progress\n",
4120 missed_count);
4121 return;
4122 }
4123
4124 if (missed_count > priv->roaming_threshold &&
4125 missed_count <= priv->disassociate_threshold) {
4126 /* If we are not already roaming, set the ROAM
4127 * bit in the status and kick off a scan.
4128 * This can happen several times before we reach
4129 * disassociate_threshold. */
4130 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4131 "Missed beacon: %d - initiate "
4132 "roaming\n", missed_count);
4133 if (!(priv->status & STATUS_ROAMING)) {
4134 priv->status |= STATUS_ROAMING;
4135 if (!(priv->status & STATUS_SCANNING))
4136 queue_work(priv->workqueue,
4137 &priv->request_scan);
4138 }
4139 return;
4140 }
4141
4142 if (priv->status & STATUS_SCANNING) {
4143 /* Stop scan to keep fw from getting
4144 * stuck (only if we aren't roaming --
4145 * otherwise we'll never scan more than 2 or 3
4146 * channels..) */
4147 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | IPW_DL_STATE,
4148 "Aborting scan with missed beacon.\n");
4149 queue_work(priv->workqueue, &priv->abort_scan);
4150 }
4151
4152 IPW_DEBUG_NOTIF("Missed beacon: %d\n", missed_count);
4153
4154 }
4155
4156 /**
4157 * Handle host notification packet.
4158 * Called from interrupt routine
4159 */
4160 static void ipw_rx_notification(struct ipw_priv *priv,
4161 struct ipw_rx_notification *notif)
4162 {
4163 notif->size = le16_to_cpu(notif->size);
4164
4165 IPW_DEBUG_NOTIF("type = %i (%d bytes)\n", notif->subtype, notif->size);
4166
4167 switch (notif->subtype) {
4168 case HOST_NOTIFICATION_STATUS_ASSOCIATED:{
4169 struct notif_association *assoc = &notif->u.assoc;
4170
4171 switch (assoc->state) {
4172 case CMAS_ASSOCIATED:{
4173 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4174 IPW_DL_ASSOC,
4175 "associated: '%s' " MAC_FMT
4176 " \n",
4177 escape_essid(priv->essid,
4178 priv->essid_len),
4179 MAC_ARG(priv->bssid));
4180
4181 switch (priv->ieee->iw_mode) {
4182 case IW_MODE_INFRA:
4183 memcpy(priv->ieee->bssid,
4184 priv->bssid, ETH_ALEN);
4185 break;
4186
4187 case IW_MODE_ADHOC:
4188 memcpy(priv->ieee->bssid,
4189 priv->bssid, ETH_ALEN);
4190
4191 /* clear out the station table */
4192 priv->num_stations = 0;
4193
4194 IPW_DEBUG_ASSOC
4195 ("queueing adhoc check\n");
4196 queue_delayed_work(priv->
4197 workqueue,
4198 &priv->
4199 adhoc_check,
4200 priv->
4201 assoc_request.
4202 beacon_interval);
4203 break;
4204 }
4205
4206 priv->status &= ~STATUS_ASSOCIATING;
4207 priv->status |= STATUS_ASSOCIATED;
4208 queue_work(priv->workqueue,
4209 &priv->system_config);
4210
4211 #ifdef CONFIG_IPW_QOS
4212 #define IPW_GET_PACKET_STYPE(x) WLAN_FC_GET_STYPE( \
4213 le16_to_cpu(((struct ieee80211_hdr *)(x))->frame_ctl))
4214 if ((priv->status & STATUS_AUTH) &&
4215 (IPW_GET_PACKET_STYPE(&notif->u.raw)
4216 == IEEE80211_STYPE_ASSOC_RESP)) {
4217 if ((sizeof
4218 (struct
4219 ieee80211_assoc_response)
4220 <= notif->size)
4221 && (notif->size <= 2314)) {
4222 struct
4223 ieee80211_rx_stats
4224 stats = {
4225 .len =
4226 notif->
4227 size - 1,
4228 };
4229
4230 IPW_DEBUG_QOS
4231 ("QoS Associate "
4232 "size %d\n",
4233 notif->size);
4234 ieee80211_rx_mgt(priv->
4235 ieee,
4236 (struct
4237 ieee80211_hdr_4addr
4238 *)
4239 &notif->u.raw, &stats);
4240 }
4241 }
4242 #endif
4243
4244 schedule_work(&priv->link_up);
4245
4246 break;
4247 }
4248
4249 case CMAS_AUTHENTICATED:{
4250 if (priv->
4251 status & (STATUS_ASSOCIATED |
4252 STATUS_AUTH)) {
4253 #ifdef CONFIG_IPW2200_DEBUG
4254 struct notif_authenticate *auth
4255 = &notif->u.auth;
4256 IPW_DEBUG(IPW_DL_NOTIF |
4257 IPW_DL_STATE |
4258 IPW_DL_ASSOC,
4259 "deauthenticated: '%s' "
4260 MAC_FMT
4261 ": (0x%04X) - %s \n",
4262 escape_essid(priv->
4263 essid,
4264 priv->
4265 essid_len),
4266 MAC_ARG(priv->bssid),
4267 ntohs(auth->status),
4268 ipw_get_status_code
4269 (ntohs
4270 (auth->status)));
4271 #endif
4272
4273 priv->status &=
4274 ~(STATUS_ASSOCIATING |
4275 STATUS_AUTH |
4276 STATUS_ASSOCIATED);
4277
4278 schedule_work(&priv->link_down);
4279 break;
4280 }
4281
4282 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4283 IPW_DL_ASSOC,
4284 "authenticated: '%s' " MAC_FMT
4285 "\n",
4286 escape_essid(priv->essid,
4287 priv->essid_len),
4288 MAC_ARG(priv->bssid));
4289 break;
4290 }
4291
4292 case CMAS_INIT:{
4293 if (priv->status & STATUS_AUTH) {
4294 struct
4295 ieee80211_assoc_response
4296 *resp;
4297 resp =
4298 (struct
4299 ieee80211_assoc_response
4300 *)&notif->u.raw;
4301 IPW_DEBUG(IPW_DL_NOTIF |
4302 IPW_DL_STATE |
4303 IPW_DL_ASSOC,
4304 "association failed (0x%04X): %s\n",
4305 ntohs(resp->status),
4306 ipw_get_status_code
4307 (ntohs
4308 (resp->status)));
4309 }
4310
4311 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4312 IPW_DL_ASSOC,
4313 "disassociated: '%s' " MAC_FMT
4314 " \n",
4315 escape_essid(priv->essid,
4316 priv->essid_len),
4317 MAC_ARG(priv->bssid));
4318
4319 priv->status &=
4320 ~(STATUS_DISASSOCIATING |
4321 STATUS_ASSOCIATING |
4322 STATUS_ASSOCIATED | STATUS_AUTH);
4323 if (priv->assoc_network
4324 && (priv->assoc_network->
4325 capability &
4326 WLAN_CAPABILITY_IBSS))
4327 ipw_remove_current_network
4328 (priv);
4329
4330 schedule_work(&priv->link_down);
4331
4332 break;
4333 }
4334
4335 case CMAS_RX_ASSOC_RESP:
4336 break;
4337
4338 default:
4339 IPW_ERROR("assoc: unknown (%d)\n",
4340 assoc->state);
4341 break;
4342 }
4343
4344 break;
4345 }
4346
4347 case HOST_NOTIFICATION_STATUS_AUTHENTICATE:{
4348 struct notif_authenticate *auth = &notif->u.auth;
4349 switch (auth->state) {
4350 case CMAS_AUTHENTICATED:
4351 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4352 "authenticated: '%s' " MAC_FMT " \n",
4353 escape_essid(priv->essid,
4354 priv->essid_len),
4355 MAC_ARG(priv->bssid));
4356 priv->status |= STATUS_AUTH;
4357 break;
4358
4359 case CMAS_INIT:
4360 if (priv->status & STATUS_AUTH) {
4361 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4362 IPW_DL_ASSOC,
4363 "authentication failed (0x%04X): %s\n",
4364 ntohs(auth->status),
4365 ipw_get_status_code(ntohs
4366 (auth->
4367 status)));
4368 }
4369 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4370 IPW_DL_ASSOC,
4371 "deauthenticated: '%s' " MAC_FMT "\n",
4372 escape_essid(priv->essid,
4373 priv->essid_len),
4374 MAC_ARG(priv->bssid));
4375
4376 priv->status &= ~(STATUS_ASSOCIATING |
4377 STATUS_AUTH |
4378 STATUS_ASSOCIATED);
4379
4380 schedule_work(&priv->link_down);
4381 break;
4382
4383 case CMAS_TX_AUTH_SEQ_1:
4384 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4385 IPW_DL_ASSOC, "AUTH_SEQ_1\n");
4386 break;
4387 case CMAS_RX_AUTH_SEQ_2:
4388 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4389 IPW_DL_ASSOC, "AUTH_SEQ_2\n");
4390 break;
4391 case CMAS_AUTH_SEQ_1_PASS:
4392 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4393 IPW_DL_ASSOC, "AUTH_SEQ_1_PASS\n");
4394 break;
4395 case CMAS_AUTH_SEQ_1_FAIL:
4396 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4397 IPW_DL_ASSOC, "AUTH_SEQ_1_FAIL\n");
4398 break;
4399 case CMAS_TX_AUTH_SEQ_3:
4400 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4401 IPW_DL_ASSOC, "AUTH_SEQ_3\n");
4402 break;
4403 case CMAS_RX_AUTH_SEQ_4:
4404 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4405 IPW_DL_ASSOC, "RX_AUTH_SEQ_4\n");
4406 break;
4407 case CMAS_AUTH_SEQ_2_PASS:
4408 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4409 IPW_DL_ASSOC, "AUTH_SEQ_2_PASS\n");
4410 break;
4411 case CMAS_AUTH_SEQ_2_FAIL:
4412 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4413 IPW_DL_ASSOC, "AUT_SEQ_2_FAIL\n");
4414 break;
4415 case CMAS_TX_ASSOC:
4416 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4417 IPW_DL_ASSOC, "TX_ASSOC\n");
4418 break;
4419 case CMAS_RX_ASSOC_RESP:
4420 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4421 IPW_DL_ASSOC, "RX_ASSOC_RESP\n");
4422
4423 break;
4424 case CMAS_ASSOCIATED:
4425 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4426 IPW_DL_ASSOC, "ASSOCIATED\n");
4427 break;
4428 default:
4429 IPW_DEBUG_NOTIF("auth: failure - %d\n",
4430 auth->state);
4431 break;
4432 }
4433 break;
4434 }
4435
4436 case HOST_NOTIFICATION_STATUS_SCAN_CHANNEL_RESULT:{
4437 struct notif_channel_result *x =
4438 &notif->u.channel_result;
4439
4440 if (notif->size == sizeof(*x)) {
4441 IPW_DEBUG_SCAN("Scan result for channel %d\n",
4442 x->channel_num);
4443 } else {
4444 IPW_DEBUG_SCAN("Scan result of wrong size %d "
4445 "(should be %zd)\n",
4446 notif->size, sizeof(*x));
4447 }
4448 break;
4449 }
4450
4451 case HOST_NOTIFICATION_STATUS_SCAN_COMPLETED:{
4452 struct notif_scan_complete *x = &notif->u.scan_complete;
4453 if (notif->size == sizeof(*x)) {
4454 IPW_DEBUG_SCAN
4455 ("Scan completed: type %d, %d channels, "
4456 "%d status\n", x->scan_type,
4457 x->num_channels, x->status);
4458 } else {
4459 IPW_ERROR("Scan completed of wrong size %d "
4460 "(should be %zd)\n",
4461 notif->size, sizeof(*x));
4462 }
4463
4464 priv->status &=
4465 ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
4466
4467 wake_up_interruptible(&priv->wait_state);
4468 cancel_delayed_work(&priv->scan_check);
4469
4470 if (priv->status & STATUS_EXIT_PENDING)
4471 break;
4472
4473 priv->ieee->scans++;
4474
4475 #ifdef CONFIG_IPW2200_MONITOR
4476 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
4477 priv->status |= STATUS_SCAN_FORCED;
4478 queue_work(priv->workqueue,
4479 &priv->request_scan);
4480 break;
4481 }
4482 priv->status &= ~STATUS_SCAN_FORCED;
4483 #endif /* CONFIG_IPW2200_MONITOR */
4484
4485 if (!(priv->status & (STATUS_ASSOCIATED |
4486 STATUS_ASSOCIATING |
4487 STATUS_ROAMING |
4488 STATUS_DISASSOCIATING)))
4489 queue_work(priv->workqueue, &priv->associate);
4490 else if (priv->status & STATUS_ROAMING) {
4491 if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
4492 /* If a scan completed and we are in roam mode, then
4493 * the scan that completed was the one requested as a
4494 * result of entering roam... so, schedule the
4495 * roam work */
4496 queue_work(priv->workqueue,
4497 &priv->roam);
4498 else
4499 /* Don't schedule if we aborted the scan */
4500 priv->status &= ~STATUS_ROAMING;
4501 } else if (priv->status & STATUS_SCAN_PENDING)
4502 queue_work(priv->workqueue,
4503 &priv->request_scan);
4504 else if (priv->config & CFG_BACKGROUND_SCAN
4505 && priv->status & STATUS_ASSOCIATED)
4506 queue_delayed_work(priv->workqueue,
4507 &priv->request_scan, HZ);
4508 break;
4509 }
4510
4511 case HOST_NOTIFICATION_STATUS_FRAG_LENGTH:{
4512 struct notif_frag_length *x = &notif->u.frag_len;
4513
4514 if (notif->size == sizeof(*x))
4515 IPW_ERROR("Frag length: %d\n",
4516 le16_to_cpu(x->frag_length));
4517 else
4518 IPW_ERROR("Frag length of wrong size %d "
4519 "(should be %zd)\n",
4520 notif->size, sizeof(*x));
4521 break;
4522 }
4523
4524 case HOST_NOTIFICATION_STATUS_LINK_DETERIORATION:{
4525 struct notif_link_deterioration *x =
4526 &notif->u.link_deterioration;
4527
4528 if (notif->size == sizeof(*x)) {
4529 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4530 "link deterioration: '%s' " MAC_FMT
4531 " \n", escape_essid(priv->essid,
4532 priv->essid_len),
4533 MAC_ARG(priv->bssid));
4534 memcpy(&priv->last_link_deterioration, x,
4535 sizeof(*x));
4536 } else {
4537 IPW_ERROR("Link Deterioration of wrong size %d "
4538 "(should be %zd)\n",
4539 notif->size, sizeof(*x));
4540 }
4541 break;
4542 }
4543
4544 case HOST_NOTIFICATION_DINO_CONFIG_RESPONSE:{
4545 IPW_ERROR("Dino config\n");
4546 if (priv->hcmd
4547 && priv->hcmd->cmd != HOST_CMD_DINO_CONFIG)
4548 IPW_ERROR("Unexpected DINO_CONFIG_RESPONSE\n");
4549
4550 break;
4551 }
4552
4553 case HOST_NOTIFICATION_STATUS_BEACON_STATE:{
4554 struct notif_beacon_state *x = &notif->u.beacon_state;
4555 if (notif->size != sizeof(*x)) {
4556 IPW_ERROR
4557 ("Beacon state of wrong size %d (should "
4558 "be %zd)\n", notif->size, sizeof(*x));
4559 break;
4560 }
4561
4562 if (le32_to_cpu(x->state) ==
4563 HOST_NOTIFICATION_STATUS_BEACON_MISSING)
4564 ipw_handle_missed_beacon(priv,
4565 le32_to_cpu(x->
4566 number));
4567
4568 break;
4569 }
4570
4571 case HOST_NOTIFICATION_STATUS_TGI_TX_KEY:{
4572 struct notif_tgi_tx_key *x = &notif->u.tgi_tx_key;
4573 if (notif->size == sizeof(*x)) {
4574 IPW_ERROR("TGi Tx Key: state 0x%02x sec type "
4575 "0x%02x station %d\n",
4576 x->key_state, x->security_type,
4577 x->station_index);
4578 break;
4579 }
4580
4581 IPW_ERROR
4582 ("TGi Tx Key of wrong size %d (should be %zd)\n",
4583 notif->size, sizeof(*x));
4584 break;
4585 }
4586
4587 case HOST_NOTIFICATION_CALIB_KEEP_RESULTS:{
4588 struct notif_calibration *x = &notif->u.calibration;
4589
4590 if (notif->size == sizeof(*x)) {
4591 memcpy(&priv->calib, x, sizeof(*x));
4592 IPW_DEBUG_INFO("TODO: Calibration\n");
4593 break;
4594 }
4595
4596 IPW_ERROR
4597 ("Calibration of wrong size %d (should be %zd)\n",
4598 notif->size, sizeof(*x));
4599 break;
4600 }
4601
4602 case HOST_NOTIFICATION_NOISE_STATS:{
4603 if (notif->size == sizeof(u32)) {
4604 priv->last_noise =
4605 (u8) (le32_to_cpu(notif->u.noise.value) &
4606 0xff);
4607 average_add(&priv->average_noise,
4608 priv->last_noise);
4609 break;
4610 }
4611
4612 IPW_ERROR
4613 ("Noise stat is wrong size %d (should be %zd)\n",
4614 notif->size, sizeof(u32));
4615 break;
4616 }
4617
4618 default:
4619 IPW_ERROR("Unknown notification: "
4620 "subtype=%d,flags=0x%2x,size=%d\n",
4621 notif->subtype, notif->flags, notif->size);
4622 }
4623 }
4624
4625 /**
4626 * Destroys all DMA structures and initialises them again
4627 *
4628 * @param priv
4629 * @return error code
4630 */
4631 static int ipw_queue_reset(struct ipw_priv *priv)
4632 {
4633 int rc = 0;
4634 /** @todo customize queue sizes */
4635 int nTx = 64, nTxCmd = 8;
4636 ipw_tx_queue_free(priv);
4637 /* Tx CMD queue */
4638 rc = ipw_queue_tx_init(priv, &priv->txq_cmd, nTxCmd,
4639 IPW_TX_CMD_QUEUE_READ_INDEX,
4640 IPW_TX_CMD_QUEUE_WRITE_INDEX,
4641 IPW_TX_CMD_QUEUE_BD_BASE,
4642 IPW_TX_CMD_QUEUE_BD_SIZE);
4643 if (rc) {
4644 IPW_ERROR("Tx Cmd queue init failed\n");
4645 goto error;
4646 }
4647 /* Tx queue(s) */
4648 rc = ipw_queue_tx_init(priv, &priv->txq[0], nTx,
4649 IPW_TX_QUEUE_0_READ_INDEX,
4650 IPW_TX_QUEUE_0_WRITE_INDEX,
4651 IPW_TX_QUEUE_0_BD_BASE, IPW_TX_QUEUE_0_BD_SIZE);
4652 if (rc) {
4653 IPW_ERROR("Tx 0 queue init failed\n");
4654 goto error;
4655 }
4656 rc = ipw_queue_tx_init(priv, &priv->txq[1], nTx,
4657 IPW_TX_QUEUE_1_READ_INDEX,
4658 IPW_TX_QUEUE_1_WRITE_INDEX,
4659 IPW_TX_QUEUE_1_BD_BASE, IPW_TX_QUEUE_1_BD_SIZE);
4660 if (rc) {
4661 IPW_ERROR("Tx 1 queue init failed\n");
4662 goto error;
4663 }
4664 rc = ipw_queue_tx_init(priv, &priv->txq[2], nTx,
4665 IPW_TX_QUEUE_2_READ_INDEX,
4666 IPW_TX_QUEUE_2_WRITE_INDEX,
4667 IPW_TX_QUEUE_2_BD_BASE, IPW_TX_QUEUE_2_BD_SIZE);
4668 if (rc) {
4669 IPW_ERROR("Tx 2 queue init failed\n");
4670 goto error;
4671 }
4672 rc = ipw_queue_tx_init(priv, &priv->txq[3], nTx,
4673 IPW_TX_QUEUE_3_READ_INDEX,
4674 IPW_TX_QUEUE_3_WRITE_INDEX,
4675 IPW_TX_QUEUE_3_BD_BASE, IPW_TX_QUEUE_3_BD_SIZE);
4676 if (rc) {
4677 IPW_ERROR("Tx 3 queue init failed\n");
4678 goto error;
4679 }
4680 /* statistics */
4681 priv->rx_bufs_min = 0;
4682 priv->rx_pend_max = 0;
4683 return rc;
4684
4685 error:
4686 ipw_tx_queue_free(priv);
4687 return rc;
4688 }
4689
4690 /**
4691 * Reclaim Tx queue entries no longer used by the NIC.
4692 *
4693 * When FW advances the 'R' index, all entries between the old and
4694 * new 'R' index need to be reclaimed. As a result, some free space
4695 * forms. If there is enough free space (> low mark), wake Tx queue.
4696 *
4697 * @note Need to protect against garbage in 'R' index
4698 * @param priv
4699 * @param txq
4700 * @param qindex
4701 * @return Number of used entries remaining in the queue
4702 */
4703 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
4704 struct clx2_tx_queue *txq, int qindex)
4705 {
4706 u32 hw_tail;
4707 int used;
4708 struct clx2_queue *q = &txq->q;
4709
4710 hw_tail = ipw_read32(priv, q->reg_r);
4711 if (hw_tail >= q->n_bd) {
4712 IPW_ERROR
4713 ("Read index for DMA queue (%d) is out of range [0-%d)\n",
4714 hw_tail, q->n_bd);
4715 goto done;
4716 }
4717 for (; q->last_used != hw_tail;
4718 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
4719 ipw_queue_tx_free_tfd(priv, txq);
4720 priv->tx_packets++;
4721 }
4722 done:
4723 if ((ipw_queue_space(q) > q->low_mark) &&
4724 (qindex >= 0) &&
4725 (priv->status & STATUS_ASSOCIATED) && netif_running(priv->net_dev))
4726 netif_wake_queue(priv->net_dev);
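/* Entries still in flight: distance from last_used up to first_empty,
 * wrapped around the ring if necessary */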
4727 used = q->first_empty - q->last_used;
4728 if (used < 0)
4729 used += q->n_bd;
4730
4731 return used;
4732 }
4733
4734 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
4735 int len, int sync)
4736 {
4737 struct clx2_tx_queue *txq = &priv->txq_cmd;
4738 struct clx2_queue *q = &txq->q;
4739 struct tfd_frame *tfd;
4740
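/* A synchronous command needs just its own slot; asynchronous commands
 * additionally keep one slot in reserve */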
4741 if (ipw_queue_space(q) < (sync ? 1 : 2)) {
4742 IPW_ERROR("No space for Tx\n");
4743 return -EBUSY;
4744 }
4745
4746 tfd = &txq->bd[q->first_empty];
4747 txq->txb[q->first_empty] = NULL;
4748
4749 memset(tfd, 0, sizeof(*tfd));
4750 tfd->control_flags.message_type = TX_HOST_COMMAND_TYPE;
4751 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
4752 priv->hcmd_seq++;
4753 tfd->u.cmd.index = hcmd;
4754 tfd->u.cmd.length = len;
4755 memcpy(tfd->u.cmd.payload, buf, len);
4756 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
4757 ipw_write32(priv, q->reg_w, q->first_empty);
4758 _ipw_read32(priv, 0x90);
4759
4760 return 0;
4761 }
4762
4763 /*
4764 * Rx theory of operation
4765 *
4766 * The host allocates 32 DMA target addresses and passes the host address
4767 * to the firmware at register IPW_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
4768 * 0 to 31
4769 *
4770 * Rx Queue Indexes
4771 * The host/firmware share two index registers for managing the Rx buffers.
4772 *
4773 * The READ index maps to the first position that the firmware may be writing
4774 * to -- the driver can read up to (but not including) this position and get
4775 * good data.
4776 * The READ index is managed by the firmware once the card is enabled.
4777 *
4778 * The WRITE index maps to the last position the driver has read from -- the
4779 * position preceding WRITE is the last slot the firmware can place a packet.
4780 *
4781 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
4782 * WRITE = READ.
4783 *
4784 * During initialization the host sets up the READ queue position to the first
4785 * INDEX position, and WRITE to the last (READ - 1 wrapped)
4786 *
4787 * When the firmware places a packet in a buffer it will advance the READ index
4788 * and fire the RX interrupt. The driver can then query the READ index and
4789 * process as many packets as possible, moving the WRITE index forward as it
4790 * resets the Rx queue buffers with new memory.
4791 *
4792 * The management in the driver is as follows:
4793 * + A list of pre-allocated SKBs is stored in ipw->rxq->rx_free. When
4794 * ipw->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
4795 * to replenish the ipw->rxq->rx_free.
4796 * + In ipw_rx_queue_replenish (scheduled) if 'processed' != 'read' then the
4797 * ipw->rxq is replenished and the READ INDEX is updated (updating the
4798 * 'processed' and 'read' driver indexes as well)
4799 * + A received packet is processed and handed to the kernel network stack,
4800 * detached from the ipw->rxq. The driver 'processed' index is updated.
4801 * + The Host/Firmware ipw->rxq is replenished at tasklet time from the rx_free
4802 * list. If there are no allocated buffers in ipw->rxq->rx_free, the READ
4803 * INDEX is not incremented and ipw->status(RX_STALLED) is set. If there
4804 * were enough free buffers and RX_STALLED is set it is cleared.
4805 *
4806 *
4807 * Driver sequence:
4808 *
4809 * ipw_rx_queue_alloc() Allocates rx_free
4810 * ipw_rx_queue_replenish() Replenishes rx_free list from rx_used, and calls
4811 * ipw_rx_queue_restock
4812 * ipw_rx_queue_restock() Moves available buffers from rx_free into Rx
4813 * queue, updates firmware pointers, and updates
4814 * the WRITE index. If insufficient rx_free buffers
4815 * are available, schedules ipw_rx_queue_replenish
4816 *
4817 * -- enable interrupts --
4818 * ISR - ipw_rx() Detach ipw_rx_mem_buffers from pool up to the
4819 * READ INDEX, detaching the SKB from the pool.
4820 * Moves the packet buffer from queue to rx_used.
4821 * Calls ipw_rx_queue_restock to refill any empty
4822 * slots.
4823 * ...
4824 *
4825 */
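/*
 * A minimal sketch (illustrative only, never compiled into the driver)
 * of the empty/full conditions described above, assuming a ring of
 * RX_QUEUE_SIZE slots:
 */
#if 0
static inline int ipw_rxq_is_empty_example(u32 read, u32 write)
{
	/* No good data when WRITE == READ - 1 (mod ring size) */
	return write == (read + RX_QUEUE_SIZE - 1) % RX_QUEUE_SIZE;
}

static inline int ipw_rxq_is_full_example(u32 read, u32 write)
{
	/* No room left for the firmware when WRITE == READ */
	return write == read;
}
#endif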
4826
4827 /*
4828 * If there are slots in the RX queue that need to be restocked,
4829 * and we have free pre-allocated buffers, fill the ranks as much
4830 * as we can pulling from rx_free.
4831 *
4832 * This moves the 'write' index forward to catch up with 'processed', and
4833 * also updates the memory address in the firmware to reference the new
4834 * target buffer.
4835 */
4836 static void ipw_rx_queue_restock(struct ipw_priv *priv)
4837 {
4838 struct ipw_rx_queue *rxq = priv->rxq;
4839 struct list_head *element;
4840 struct ipw_rx_mem_buffer *rxb;
4841 unsigned long flags;
4842 int write;
4843
4844 spin_lock_irqsave(&rxq->lock, flags);
4845 write = rxq->write;
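/* Hand DMA addresses from rx_free to the firmware RFD table until we
 * catch up with 'processed' or run out of spare buffers */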
4846 while ((rxq->write != rxq->processed) && (rxq->free_count)) {
4847 element = rxq->rx_free.next;
4848 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
4849 list_del(element);
4850
4851 ipw_write32(priv, IPW_RFDS_TABLE_LOWER + rxq->write * RFD_SIZE,
4852 rxb->dma_addr);
4853 rxq->queue[rxq->write] = rxb;
4854 rxq->write = (rxq->write + 1) % RX_QUEUE_SIZE;
4855 rxq->free_count--;
4856 }
4857 spin_unlock_irqrestore(&rxq->lock, flags);
4858
4859 /* If the pre-allocated buffer pool is dropping low, schedule to
4860 * refill it */
4861 if (rxq->free_count <= RX_LOW_WATERMARK)
4862 queue_work(priv->workqueue, &priv->rx_replenish);
4863
4864 /* If we've added more space for the firmware to place data, tell it */
4865 if (write != rxq->write)
4866 ipw_write32(priv, IPW_RX_WRITE_INDEX, rxq->write);
4867 }
4868
4869 /*
4870 * Move all used packets from rx_used to rx_free, allocating a new SKB for each.
4871 * Also restock the Rx queue via ipw_rx_queue_restock.
4872 *
4873 * This is called as a scheduled work item (except during initialization)
4874 */
4875 static void ipw_rx_queue_replenish(void *data)
4876 {
4877 struct ipw_priv *priv = data;
4878 struct ipw_rx_queue *rxq = priv->rxq;
4879 struct list_head *element;
4880 struct ipw_rx_mem_buffer *rxb;
4881 unsigned long flags;
4882
4883 spin_lock_irqsave(&rxq->lock, flags);
4884 while (!list_empty(&rxq->rx_used)) {
4885 element = rxq->rx_used.next;
4886 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
4887 rxb->skb = alloc_skb(IPW_RX_BUF_SIZE, GFP_ATOMIC);
4888 if (!rxb->skb) {
4889 printk(KERN_CRIT "%s: Can not allocate SKB buffers.\n",
4890 priv->net_dev->name);
4891 /* We don't reschedule replenish work here -- we will
4892 * call the restock method and if it still needs
4893 * more buffers it will schedule replenish */
4894 break;
4895 }
4896 list_del(element);
4897
4898 rxb->rxb = (struct ipw_rx_buffer *)rxb->skb->data;
4899 rxb->dma_addr =
4900 pci_map_single(priv->pci_dev, rxb->skb->data,
4901 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
4902
4903 list_add_tail(&rxb->list, &rxq->rx_free);
4904 rxq->free_count++;
4905 }
4906 spin_unlock_irqrestore(&rxq->lock, flags);
4907
4908 ipw_rx_queue_restock(priv);
4909 }
4910
4911 static void ipw_bg_rx_queue_replenish(void *data)
4912 {
4913 struct ipw_priv *priv = data;
4914 down(&priv->sem);
4915 ipw_rx_queue_replenish(data);
4916 up(&priv->sem);
4917 }
4918
4919 /* Assumes that the skb field of the buffers in 'pool' is kept accurate.
4920 * If an SKB has been detached, the POOL needs to have its SKB set to NULL.
4921 * This free routine walks the list of POOL entries and, if the SKB is
4922 * non-NULL, it is unmapped and freed.
4923 */
4924 static void ipw_rx_queue_free(struct ipw_priv *priv, struct ipw_rx_queue *rxq)
4925 {
4926 int i;
4927
4928 if (!rxq)
4929 return;
4930
4931 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
4932 if (rxq->pool[i].skb != NULL) {
4933 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
4934 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
4935 dev_kfree_skb(rxq->pool[i].skb);
4936 }
4937 }
4938
4939 kfree(rxq);
4940 }
4941
4942 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *priv)
4943 {
4944 struct ipw_rx_queue *rxq;
4945 int i;
4946
4947 rxq = kzalloc(sizeof(*rxq), GFP_KERNEL);
4948 if (unlikely(!rxq)) {
4949 IPW_ERROR("memory allocation failed\n");
4950 return NULL;
4951 }
4952 spin_lock_init(&rxq->lock);
4953 INIT_LIST_HEAD(&rxq->rx_free);
4954 INIT_LIST_HEAD(&rxq->rx_used);
4955
4956 /* Fill the rx_used queue with _all_ of the Rx buffers */
4957 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
4958 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
4959
4960 /* Set us so that we have processed and used all buffers, but have
4961 * not restocked the Rx queue with fresh buffers */
4962 rxq->read = rxq->write = 0;
4963 rxq->processed = RX_QUEUE_SIZE - 1;
4964 rxq->free_count = 0;
4965
4966 return rxq;
4967 }
4968
4969 static int ipw_is_rate_in_mask(struct ipw_priv *priv, int ieee_mode, u8 rate)
4970 {
4971 rate &= ~IEEE80211_BASIC_RATE_MASK;
4972 if (ieee_mode == IEEE_A) {
4973 switch (rate) {
4974 case IEEE80211_OFDM_RATE_6MB:
4975 return priv->rates_mask & IEEE80211_OFDM_RATE_6MB_MASK ?
4976 1 : 0;
4977 case IEEE80211_OFDM_RATE_9MB:
4978 return priv->rates_mask & IEEE80211_OFDM_RATE_9MB_MASK ?
4979 1 : 0;
4980 case IEEE80211_OFDM_RATE_12MB:
4981 return priv->
4982 rates_mask & IEEE80211_OFDM_RATE_12MB_MASK ? 1 : 0;
4983 case IEEE80211_OFDM_RATE_18MB:
4984 return priv->
4985 rates_mask & IEEE80211_OFDM_RATE_18MB_MASK ? 1 : 0;
4986 case IEEE80211_OFDM_RATE_24MB:
4987 return priv->
4988 rates_mask & IEEE80211_OFDM_RATE_24MB_MASK ? 1 : 0;
4989 case IEEE80211_OFDM_RATE_36MB:
4990 return priv->
4991 rates_mask & IEEE80211_OFDM_RATE_36MB_MASK ? 1 : 0;
4992 case IEEE80211_OFDM_RATE_48MB:
4993 return priv->
4994 rates_mask & IEEE80211_OFDM_RATE_48MB_MASK ? 1 : 0;
4995 case IEEE80211_OFDM_RATE_54MB:
4996 return priv->
4997 rates_mask & IEEE80211_OFDM_RATE_54MB_MASK ? 1 : 0;
4998 default:
4999 return 0;
5000 }
5001 }
5002
5003 /* B and G mixed */
5004 switch (rate) {
5005 case IEEE80211_CCK_RATE_1MB:
5006 return priv->rates_mask & IEEE80211_CCK_RATE_1MB_MASK ? 1 : 0;
5007 case IEEE80211_CCK_RATE_2MB:
5008 return priv->rates_mask & IEEE80211_CCK_RATE_2MB_MASK ? 1 : 0;
5009 case IEEE80211_CCK_RATE_5MB:
5010 return priv->rates_mask & IEEE80211_CCK_RATE_5MB_MASK ? 1 : 0;
5011 case IEEE80211_CCK_RATE_11MB:
5012 return priv->rates_mask & IEEE80211_CCK_RATE_11MB_MASK ? 1 : 0;
5013 }
5014
5015 /* If we are limited to B modulations, bail at this point */
5016 if (ieee_mode == IEEE_B)
5017 return 0;
5018
5019 /* G */
5020 switch (rate) {
5021 case IEEE80211_OFDM_RATE_6MB:
5022 return priv->rates_mask & IEEE80211_OFDM_RATE_6MB_MASK ? 1 : 0;
5023 case IEEE80211_OFDM_RATE_9MB:
5024 return priv->rates_mask & IEEE80211_OFDM_RATE_9MB_MASK ? 1 : 0;
5025 case IEEE80211_OFDM_RATE_12MB:
5026 return priv->rates_mask & IEEE80211_OFDM_RATE_12MB_MASK ? 1 : 0;
5027 case IEEE80211_OFDM_RATE_18MB:
5028 return priv->rates_mask & IEEE80211_OFDM_RATE_18MB_MASK ? 1 : 0;
5029 case IEEE80211_OFDM_RATE_24MB:
5030 return priv->rates_mask & IEEE80211_OFDM_RATE_24MB_MASK ? 1 : 0;
5031 case IEEE80211_OFDM_RATE_36MB:
5032 return priv->rates_mask & IEEE80211_OFDM_RATE_36MB_MASK ? 1 : 0;
5033 case IEEE80211_OFDM_RATE_48MB:
5034 return priv->rates_mask & IEEE80211_OFDM_RATE_48MB_MASK ? 1 : 0;
5035 case IEEE80211_OFDM_RATE_54MB:
5036 return priv->rates_mask & IEEE80211_OFDM_RATE_54MB_MASK ? 1 : 0;
5037 }
5038
5039 return 0;
5040 }
5041
5042 static int ipw_compatible_rates(struct ipw_priv *priv,
5043 const struct ieee80211_network *network,
5044 struct ipw_supported_rates *rates)
5045 {
5046 int num_rates, i;
5047
5048 memset(rates, 0, sizeof(*rates));
5049 num_rates = min(network->rates_len, (u8) IPW_MAX_RATES);
5050 rates->num_rates = 0;
5051 for (i = 0; i < num_rates; i++) {
5052 if (!ipw_is_rate_in_mask(priv, network->mode,
5053 network->rates[i])) {
5054
5055 if (network->rates[i] & IEEE80211_BASIC_RATE_MASK) {
5056 IPW_DEBUG_SCAN("Adding masked mandatory "
5057 "rate %02X\n",
5058 network->rates[i]);
5059 rates->supported_rates[rates->num_rates++] =
5060 network->rates[i];
5061 continue;
5062 }
5063
5064 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5065 network->rates[i], priv->rates_mask);
5066 continue;
5067 }
5068
5069 rates->supported_rates[rates->num_rates++] = network->rates[i];
5070 }
5071
5072 num_rates = min(network->rates_ex_len,
5073 (u8) (IPW_MAX_RATES - num_rates));
5074 for (i = 0; i < num_rates; i++) {
5075 if (!ipw_is_rate_in_mask(priv, network->mode,
5076 network->rates_ex[i])) {
5077 if (network->rates_ex[i] & IEEE80211_BASIC_RATE_MASK) {
5078 IPW_DEBUG_SCAN("Adding masked mandatory "
5079 "rate %02X\n",
5080 network->rates_ex[i]);
5081 rates->supported_rates[rates->num_rates++] =
5082 network->rates_ex[i];
5083 continue;
5084 }
5085
5086 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5087 network->rates_ex[i], priv->rates_mask);
5088 continue;
5089 }
5090
5091 rates->supported_rates[rates->num_rates++] =
5092 network->rates_ex[i];
5093 }
5094
5095 return 1;
5096 }
5097
5098 static void ipw_copy_rates(struct ipw_supported_rates *dest,
5099 const struct ipw_supported_rates *src)
5100 {
5101 u8 i;
5102 for (i = 0; i < src->num_rates; i++)
5103 dest->supported_rates[i] = src->supported_rates[i];
5104 dest->num_rates = src->num_rates;
5105 }
5106
5107 /* TODO: Look at sniffed packets in the air to determine if the basic rate
5108 * mask should ever be used -- right now all callers that add the scan rates
5109 * pass modulation = CCK, so BASIC_RATE_MASK is never set... */
5110 static void ipw_add_cck_scan_rates(struct ipw_supported_rates *rates,
5111 u8 modulation, u32 rate_mask)
5112 {
5113 u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ?
5114 IEEE80211_BASIC_RATE_MASK : 0;
5115
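/* 1 and 2 Mbps are always advertised as basic rates; 5.5 and 11 Mbps
 * only pick up the basic-rate bit for OFDM modulation (see mask above) */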
5116 if (rate_mask & IEEE80211_CCK_RATE_1MB_MASK)
5117 rates->supported_rates[rates->num_rates++] =
5118 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB;
5119
5120 if (rate_mask & IEEE80211_CCK_RATE_2MB_MASK)
5121 rates->supported_rates[rates->num_rates++] =
5122 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB;
5123
5124 if (rate_mask & IEEE80211_CCK_RATE_5MB_MASK)
5125 rates->supported_rates[rates->num_rates++] = basic_mask |
5126 IEEE80211_CCK_RATE_5MB;
5127
5128 if (rate_mask & IEEE80211_CCK_RATE_11MB_MASK)
5129 rates->supported_rates[rates->num_rates++] = basic_mask |
5130 IEEE80211_CCK_RATE_11MB;
5131 }
5132
5133 static void ipw_add_ofdm_scan_rates(struct ipw_supported_rates *rates,
5134 u8 modulation, u32 rate_mask)
5135 {
5136 u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ?
5137 IEEE80211_BASIC_RATE_MASK : 0;
5138
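/* 6, 12 and 24 Mbps are the mandatory OFDM rates, so only those may
 * carry the basic-rate bit */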
5139 if (rate_mask & IEEE80211_OFDM_RATE_6MB_MASK)
5140 rates->supported_rates[rates->num_rates++] = basic_mask |
5141 IEEE80211_OFDM_RATE_6MB;
5142
5143 if (rate_mask & IEEE80211_OFDM_RATE_9MB_MASK)
5144 rates->supported_rates[rates->num_rates++] =
5145 IEEE80211_OFDM_RATE_9MB;
5146
5147 if (rate_mask & IEEE80211_OFDM_RATE_12MB_MASK)
5148 rates->supported_rates[rates->num_rates++] = basic_mask |
5149 IEEE80211_OFDM_RATE_12MB;
5150
5151 if (rate_mask & IEEE80211_OFDM_RATE_18MB_MASK)
5152 rates->supported_rates[rates->num_rates++] =
5153 IEEE80211_OFDM_RATE_18MB;
5154
5155 if (rate_mask & IEEE80211_OFDM_RATE_24MB_MASK)
5156 rates->supported_rates[rates->num_rates++] = basic_mask |
5157 IEEE80211_OFDM_RATE_24MB;
5158
5159 if (rate_mask & IEEE80211_OFDM_RATE_36MB_MASK)
5160 rates->supported_rates[rates->num_rates++] =
5161 IEEE80211_OFDM_RATE_36MB;
5162
5163 if (rate_mask & IEEE80211_OFDM_RATE_48MB_MASK)
5164 rates->supported_rates[rates->num_rates++] =
5165 IEEE80211_OFDM_RATE_48MB;
5166
5167 if (rate_mask & IEEE80211_OFDM_RATE_54MB_MASK)
5168 rates->supported_rates[rates->num_rates++] =
5169 IEEE80211_OFDM_RATE_54MB;
5170 }
5171
5172 struct ipw_network_match {
5173 struct ieee80211_network *network;
5174 struct ipw_supported_rates rates;
5175 };
5176
5177 static int ipw_find_adhoc_network(struct ipw_priv *priv,
5178 struct ipw_network_match *match,
5179 struct ieee80211_network *network,
5180 int roaming)
5181 {
5182 struct ipw_supported_rates rates;
5183
5184 /* Verify that this network's capability is compatible with the
5185 * current mode (AdHoc or Infrastructure) */
5186 if ((priv->ieee->iw_mode == IW_MODE_ADHOC &&
5187 !(network->capability & WLAN_CAPABILITY_IBSS))) {
5188 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded due to "
5189 "capability mismatch.\n",
5190 escape_essid(network->ssid, network->ssid_len),
5191 MAC_ARG(network->bssid));
5192 return 0;
5193 }
5194
5195 /* If we do not have an ESSID for this AP, we can not associate with
5196 * it */
5197 if (network->flags & NETWORK_EMPTY_ESSID) {
5198 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5199 "because of hidden ESSID.\n",
5200 escape_essid(network->ssid, network->ssid_len),
5201 MAC_ARG(network->bssid));
5202 return 0;
5203 }
5204
5205 if (unlikely(roaming)) {
5206 /* If we are roaming, then check whether this is a valid
5207 * network to try to roam to */
5208 if ((network->ssid_len != match->network->ssid_len) ||
5209 memcmp(network->ssid, match->network->ssid,
5210 network->ssid_len)) {
5211 IPW_DEBUG_MERGE("Netowrk '%s (" MAC_FMT ")' excluded "
5212 "because of non-network ESSID.\n",
5213 escape_essid(network->ssid,
5214 network->ssid_len),
5215 MAC_ARG(network->bssid));
5216 return 0;
5217 }
5218 } else {
5219 /* If an ESSID has been configured then compare the broadcast
5220 * ESSID to ours */
5221 if ((priv->config & CFG_STATIC_ESSID) &&
5222 ((network->ssid_len != priv->essid_len) ||
5223 memcmp(network->ssid, priv->essid,
5224 min(network->ssid_len, priv->essid_len)))) {
5225 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5226
5227 strncpy(escaped,
5228 escape_essid(network->ssid, network->ssid_len),
5229 sizeof(escaped));
5230 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5231 "because of ESSID mismatch: '%s'.\n",
5232 escaped, MAC_ARG(network->bssid),
5233 escape_essid(priv->essid,
5234 priv->essid_len));
5235 return 0;
5236 }
5237 }
5238
5239 /* If the currently matched network has a larger TSF timestamp than
5240 * this one, don't bother testing everything else. */
5241
5242 if (network->time_stamp[0] < match->network->time_stamp[0]) {
5243 IPW_DEBUG_MERGE("Network '%s excluded because newer than "
5244 "current network.\n",
5245 escape_essid(match->network->ssid,
5246 match->network->ssid_len));
5247 return 0;
5248 } else if (network->time_stamp[1] < match->network->time_stamp[1]) {
5249 IPW_DEBUG_MERGE("Network '%s excluded because newer than "
5250 "current network.\n",
5251 escape_essid(match->network->ssid,
5252 match->network->ssid_len));
5253 return 0;
5254 }
5255
5256 /* Now go through and see if the requested network is valid... */
5257 if (priv->ieee->scan_age != 0 &&
5258 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5259 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5260 "because of age: %lums.\n",
5261 escape_essid(network->ssid, network->ssid_len),
5262 MAC_ARG(network->bssid),
5263 1000 * (jiffies - network->last_scanned) / HZ);
5264 return 0;
5265 }
5266
5267 if ((priv->config & CFG_STATIC_CHANNEL) &&
5268 (network->channel != priv->channel)) {
5269 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5270 "because of channel mismatch: %d != %d.\n",
5271 escape_essid(network->ssid, network->ssid_len),
5272 MAC_ARG(network->bssid),
5273 network->channel, priv->channel);
5274 return 0;
5275 }
5276
5277 /* Verify privacy compatibility */
5278 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5279 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5280 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5281 "because of privacy mismatch: %s != %s.\n",
5282 escape_essid(network->ssid, network->ssid_len),
5283 MAC_ARG(network->bssid),
5284 priv->
5285 capability & CAP_PRIVACY_ON ? "on" : "off",
5286 network->
5287 capability & WLAN_CAPABILITY_PRIVACY ? "on" :
5288 "off");
5289 return 0;
5290 }
5291
5292 if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
5293 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5294 "because of the same BSSID match: " MAC_FMT
5295 ".\n", escape_essid(network->ssid,
5296 network->ssid_len),
5297 MAC_ARG(network->bssid), MAC_ARG(priv->bssid));
5298 return 0;
5299 }
5300
5301 /* Filter out any incompatible freq / mode combinations */
5302 if (!ieee80211_is_valid_mode(priv->ieee, network->mode)) {
5303 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5304 "because of invalid frequency/mode "
5305 "combination.\n",
5306 escape_essid(network->ssid, network->ssid_len),
5307 MAC_ARG(network->bssid));
5308 return 0;
5309 }
5310
5311 /* Ensure that the rates supported by the driver are compatible with
5312 * this AP, including verification of basic rates (mandatory) */
5313 if (!ipw_compatible_rates(priv, network, &rates)) {
5314 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5315 "because configured rate mask excludes "
5316 "AP mandatory rate.\n",
5317 escape_essid(network->ssid, network->ssid_len),
5318 MAC_ARG(network->bssid));
5319 return 0;
5320 }
5321
5322 if (rates.num_rates == 0) {
5323 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5324 "because of no compatible rates.\n",
5325 escape_essid(network->ssid, network->ssid_len),
5326 MAC_ARG(network->bssid));
5327 return 0;
5328 }
5329
5330 /* TODO: Perform any further minimal comparative tests. We do not
5331 * want to put too much policy logic here; intelligent scan selection
5332 * should occur within a generic IEEE 802.11 user space tool. */
5333
5334 /* Set up 'new' AP to this network */
5335 ipw_copy_rates(&match->rates, &rates);
5336 match->network = network;
5337 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' is a viable match.\n",
5338 escape_essid(network->ssid, network->ssid_len),
5339 MAC_ARG(network->bssid));
5340
5341 return 1;
5342 }
5343
5344 static void ipw_merge_adhoc_network(void *data)
5345 {
5346 struct ipw_priv *priv = data;
5347 struct ieee80211_network *network = NULL;
5348 struct ipw_network_match match = {
5349 .network = priv->assoc_network
5350 };
5351
5352 if ((priv->status & STATUS_ASSOCIATED) &&
5353 (priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5354 /* First pass through ROAM process -- look for a better
5355 * network */
5356 unsigned long flags;
5357
5358 spin_lock_irqsave(&priv->ieee->lock, flags);
5359 list_for_each_entry(network, &priv->ieee->network_list, list) {
5360 if (network != priv->assoc_network)
5361 ipw_find_adhoc_network(priv, &match, network,
5362 1);
5363 }
5364 spin_unlock_irqrestore(&priv->ieee->lock, flags);
5365
5366 if (match.network == priv->assoc_network) {
5367 IPW_DEBUG_MERGE("No better ADHOC in this network to "
5368 "merge to.\n");
5369 return;
5370 }
5371
5372 down(&priv->sem);
5373 if ((priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5374 IPW_DEBUG_MERGE("remove network %s\n",
5375 escape_essid(priv->essid,
5376 priv->essid_len));
5377 ipw_remove_current_network(priv);
5378 }
5379
5380 ipw_disassociate(priv);
5381 priv->assoc_network = match.network;
5382 up(&priv->sem);
5383 return;
5384 }
5385 }
5386
5387 static int ipw_best_network(struct ipw_priv *priv,
5388 struct ipw_network_match *match,
5389 struct ieee80211_network *network, int roaming)
5390 {
5391 struct ipw_supported_rates rates;
5392
5393 /* Verify that this network's capability is compatible with the
5394 * current mode (AdHoc or Infrastructure) */
5395 if ((priv->ieee->iw_mode == IW_MODE_INFRA &&
5396 !(network->capability & WLAN_CAPABILITY_ESS)) ||
5397 (priv->ieee->iw_mode == IW_MODE_ADHOC &&
5398 !(network->capability & WLAN_CAPABILITY_IBSS))) {
5399 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded due to "
5400 "capability mismatch.\n",
5401 escape_essid(network->ssid, network->ssid_len),
5402 MAC_ARG(network->bssid));
5403 return 0;
5404 }
5405
5406 /* If we do not have an ESSID for this AP, we can not associate with
5407 * it */
5408 if (network->flags & NETWORK_EMPTY_ESSID) {
5409 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5410 "because of hidden ESSID.\n",
5411 escape_essid(network->ssid, network->ssid_len),
5412 MAC_ARG(network->bssid));
5413 return 0;
5414 }
5415
5416 if (unlikely(roaming)) {
5417 /* If we are roaming, then check whether this is a valid
5418 * network to try to roam to */
5419 if ((network->ssid_len != match->network->ssid_len) ||
5420 memcmp(network->ssid, match->network->ssid,
5421 network->ssid_len)) {
5422 IPW_DEBUG_ASSOC("Netowrk '%s (" MAC_FMT ")' excluded "
5423 "because of non-network ESSID.\n",
5424 escape_essid(network->ssid,
5425 network->ssid_len),
5426 MAC_ARG(network->bssid));
5427 return 0;
5428 }
5429 } else {
5430 /* If an ESSID has been configured then compare the broadcast
5431 * ESSID to ours */
5432 if ((priv->config & CFG_STATIC_ESSID) &&
5433 ((network->ssid_len != priv->essid_len) ||
5434 memcmp(network->ssid, priv->essid,
5435 min(network->ssid_len, priv->essid_len)))) {
5436 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5437 strncpy(escaped,
5438 escape_essid(network->ssid, network->ssid_len),
5439 sizeof(escaped));
5440 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5441 "because of ESSID mismatch: '%s'.\n",
5442 escaped, MAC_ARG(network->bssid),
5443 escape_essid(priv->essid,
5444 priv->essid_len));
5445 return 0;
5446 }
5447 }
5448
5449 /* If the old network's signal is stronger than this one's, don't
5450 * bother testing everything else. */
5451 if (match->network && match->network->stats.rssi > network->stats.rssi) {
5452 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5453 strncpy(escaped,
5454 escape_essid(network->ssid, network->ssid_len),
5455 sizeof(escaped));
5456 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded because "
5457 "'%s (" MAC_FMT ")' has a stronger signal.\n",
5458 escaped, MAC_ARG(network->bssid),
5459 escape_essid(match->network->ssid,
5460 match->network->ssid_len),
5461 MAC_ARG(match->network->bssid));
5462 return 0;
5463 }
5464
5465 /* If this network has already had an association attempt within the
5466 * last 3 seconds, do not try and associate again... */
5467 if (network->last_associate &&
5468 time_after(network->last_associate + (HZ * 3UL), jiffies)) {
5469 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5470 "because of storming (%lus since last "
5471 "assoc attempt).\n",
5472 escape_essid(network->ssid, network->ssid_len),
5473 MAC_ARG(network->bssid),
5474 (jiffies - network->last_associate) / HZ);
5475 return 0;
5476 }
5477
5478 /* Now go through and see if the requested network is valid... */
5479 if (priv->ieee->scan_age != 0 &&
5480 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5481 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5482 "because of age: %lums.\n",
5483 escape_essid(network->ssid, network->ssid_len),
5484 MAC_ARG(network->bssid),
5485 1000 * (jiffies - network->last_scanned) / HZ);
5486 return 0;
5487 }
5488
5489 if ((priv->config & CFG_STATIC_CHANNEL) &&
5490 (network->channel != priv->channel)) {
5491 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5492 "because of channel mismatch: %d != %d.\n",
5493 escape_essid(network->ssid, network->ssid_len),
5494 MAC_ARG(network->bssid),
5495 network->channel, priv->channel);
5496 return 0;
5497 }
5498
5499 /* Verify privacy compatibility */
5500 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5501 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5502 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5503 "because of privacy mismatch: %s != %s.\n",
5504 escape_essid(network->ssid, network->ssid_len),
5505 MAC_ARG(network->bssid),
5506 priv->capability & CAP_PRIVACY_ON ? "on" :
5507 "off",
5508 network->capability &
5509 WLAN_CAPABILITY_PRIVACY ? "on" : "off");
5510 return 0;
5511 }
5512
5513 if (!priv->ieee->wpa_enabled && (network->wpa_ie_len > 0 ||
5514 network->rsn_ie_len > 0)) {
5515 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5516 "because of WPA capability mismatch.\n",
5517 escape_essid(network->ssid, network->ssid_len),
5518 MAC_ARG(network->bssid));
5519 return 0;
5520 }
5521
5522 if ((priv->config & CFG_STATIC_BSSID) &&
5523 memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
5524 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5525 "because of BSSID mismatch: " MAC_FMT ".\n",
5526 escape_essid(network->ssid, network->ssid_len),
5527 MAC_ARG(network->bssid), MAC_ARG(priv->bssid));
5528 return 0;
5529 }
5530
5531 /* Filter out any incompatible freq / mode combinations */
5532 if (!ieee80211_is_valid_mode(priv->ieee, network->mode)) {
5533 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5534 "because of invalid frequency/mode "
5535 "combination.\n",
5536 escape_essid(network->ssid, network->ssid_len),
5537 MAC_ARG(network->bssid));
5538 return 0;
5539 }
5540
5541 /* Filter out invalid channel in current GEO */
5542 if (!ipw_is_valid_channel(priv->ieee, network->channel)) {
5543 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5544 "because of invalid channel in current GEO\n",
5545 escape_essid(network->ssid, network->ssid_len),
5546 MAC_ARG(network->bssid));
5547 return 0;
5548 }
5549
5550 /* Ensure that the rates supported by the driver are compatible with
5551 * this AP, including verification of basic rates (mandatory) */
5552 if (!ipw_compatible_rates(priv, network, &rates)) {
5553 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5554 "because configured rate mask excludes "
5555 "AP mandatory rate.\n",
5556 escape_essid(network->ssid, network->ssid_len),
5557 MAC_ARG(network->bssid));
5558 return 0;
5559 }
5560
5561 if (rates.num_rates == 0) {
5562 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5563 "because of no compatible rates.\n",
5564 escape_essid(network->ssid, network->ssid_len),
5565 MAC_ARG(network->bssid));
5566 return 0;
5567 }
5568
5569 /* TODO: Perform any further minimal comparative tests. We do not
5570 * want to put too much policy logic here; intelligent scan selection
5571 * should occur within a generic IEEE 802.11 user space tool. */
5572
5573 /* Set up 'new' AP to this network */
5574 ipw_copy_rates(&match->rates, &rates);
5575 match->network = network;
5576
5577 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' is a viable match.\n",
5578 escape_essid(network->ssid, network->ssid_len),
5579 MAC_ARG(network->bssid));
5580
5581 return 1;
5582 }
5583
5584 static void ipw_adhoc_create(struct ipw_priv *priv,
5585 struct ieee80211_network *network)
5586 {
5587 const struct ieee80211_geo *geo = ipw_get_geo(priv->ieee);
5588 int i;
5589
5590 /*
5591 * For the purposes of scanning, we can set our wireless mode
5592 * to trigger scans across combinations of bands, but when it
5593 * comes to creating a new ad-hoc network, we have to tell the FW
5594 * exactly which band to use.
5595 *
5596 * We also have the possibility of an invalid channel for the
5597 * chosen band. Attempting to create a new ad-hoc network
5598 * with an invalid channel for wireless mode will trigger a
5599 * FW fatal error.
5600 *
5601 */
5602 switch (ipw_is_valid_channel(priv->ieee, priv->channel)) {
5603 case IEEE80211_52GHZ_BAND:
5604 network->mode = IEEE_A;
5605 i = ipw_channel_to_index(priv->ieee, priv->channel);
5606 if (i == -1)
5607 BUG();
5608 if (geo->a[i].flags & IEEE80211_CH_PASSIVE_ONLY) {
5609 IPW_WARNING("Overriding invalid channel\n");
5610 priv->channel = geo->a[0].channel;
5611 }
5612 break;
5613
5614 case IEEE80211_24GHZ_BAND:
5615 if (priv->ieee->mode & IEEE_G)
5616 network->mode = IEEE_G;
5617 else
5618 network->mode = IEEE_B;
5619 i = ipw_channel_to_index(priv->ieee, priv->channel);
5620 if (i == -1)
5621 BUG();
5622 if (geo->bg[i].flags & IEEE80211_CH_PASSIVE_ONLY) {
5623 IPW_WARNING("Overriding invalid channel\n");
5624 priv->channel = geo->bg[0].channel;
5625 }
5626 break;
5627
5628 default:
5629 IPW_WARNING("Overriding invalid channel\n");
5630 if (priv->ieee->mode & IEEE_A) {
5631 network->mode = IEEE_A;
5632 priv->channel = geo->a[0].channel;
5633 } else if (priv->ieee->mode & IEEE_G) {
5634 network->mode = IEEE_G;
5635 priv->channel = geo->bg[0].channel;
5636 } else {
5637 network->mode = IEEE_B;
5638 priv->channel = geo->bg[0].channel;
5639 }
5640 break;
5641 }
5642
5643 network->channel = priv->channel;
5644 priv->config |= CFG_ADHOC_PERSIST;
5645 ipw_create_bssid(priv, network->bssid);
5646 network->ssid_len = priv->essid_len;
5647 memcpy(network->ssid, priv->essid, priv->essid_len);
5648 memset(&network->stats, 0, sizeof(network->stats));
5649 network->capability = WLAN_CAPABILITY_IBSS;
5650 if (!(priv->config & CFG_PREAMBLE_LONG))
5651 network->capability |= WLAN_CAPABILITY_SHORT_PREAMBLE;
5652 if (priv->capability & CAP_PRIVACY_ON)
5653 network->capability |= WLAN_CAPABILITY_PRIVACY;
5654 network->rates_len = min(priv->rates.num_rates, MAX_RATES_LENGTH);
5655 memcpy(network->rates, priv->rates.supported_rates, network->rates_len);
5656 network->rates_ex_len = priv->rates.num_rates - network->rates_len;
5657 memcpy(network->rates_ex,
5658 &priv->rates.supported_rates[network->rates_len],
5659 network->rates_ex_len);
5660 network->last_scanned = 0;
5661 network->flags = 0;
5662 network->last_associate = 0;
5663 network->time_stamp[0] = 0;
5664 network->time_stamp[1] = 0;
5665 network->beacon_interval = 100; /* Default */
5666 network->listen_interval = 10; /* Default */
5667 network->atim_window = 0; /* Default */
5668 network->wpa_ie_len = 0;
5669 network->rsn_ie_len = 0;
5670 }
5671
5672 static void ipw_send_tgi_tx_key(struct ipw_priv *priv, int type, int index)
5673 {
5674 struct ipw_tgi_tx_key *key;
5675 struct host_cmd cmd = {
5676 .cmd = IPW_CMD_TGI_TX_KEY,
5677 .len = sizeof(*key)
5678 };
5679
5680 if (!(priv->ieee->sec.flags & (1 << index)))
5681 return;
5682
5683 key = (struct ipw_tgi_tx_key *)&cmd.param;
5684 key->key_id = index;
5685 memcpy(key->key, priv->ieee->sec.keys[index], SCM_TEMPORAL_KEY_LENGTH);
5686 key->security_type = type;
5687 key->station_index = 0; /* always 0 for BSS */
5688 key->flags = 0;
5689 /* 0 for new key; previous value of counter (after fatal error) */
5690 key->tx_counter[0] = 0;
5691 key->tx_counter[1] = 0;
5692
5693 ipw_send_cmd(priv, &cmd);
5694 }
5695
5696 static void ipw_send_wep_keys(struct ipw_priv *priv, int type)
5697 {
5698 struct ipw_wep_key *key;
5699 int i;
5700 struct host_cmd cmd = {
5701 .cmd = IPW_CMD_WEP_KEY,
5702 .len = sizeof(*key)
5703 };
5704
5705 key = (struct ipw_wep_key *)&cmd.param;
5706 key->cmd_id = DINO_CMD_WEP_KEY;
5707 key->seq_num = 0;
5708
5709 /* Note: AES keys cannot be set multiple times.
5710 * Only set them the first time. */
5711 for (i = 0; i < 4; i++) {
5712 key->key_index = i | type;
5713 if (!(priv->ieee->sec.flags & (1 << i))) {
5714 key->key_size = 0;
5715 continue;
5716 }
5717
5718 key->key_size = priv->ieee->sec.key_sizes[i];
5719 memcpy(key->key, priv->ieee->sec.keys[i], key->key_size);
5720
5721 ipw_send_cmd(priv, &cmd);
5722 }
5723 }
5724
5725 static void ipw_set_hw_decrypt_unicast(struct ipw_priv *priv, int level)
5726 {
5727 if (priv->ieee->host_encrypt)
5728 return;
5729
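/* Security levels map to ciphers as in wext_cipher2level(): 3 = CCMP,
 * 2 = TKIP, 1 = WEP, 0 = none.  TKIP unicast frames are left to the
 * host to decrypt since the HW cannot build the TKIP MIC. */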
5730 switch (level) {
5731 case SEC_LEVEL_3:
5732 priv->sys_config.disable_unicast_decryption = 0;
5733 priv->ieee->host_decrypt = 0;
5734 break;
5735 case SEC_LEVEL_2:
5736 priv->sys_config.disable_unicast_decryption = 1;
5737 priv->ieee->host_decrypt = 1;
5738 break;
5739 case SEC_LEVEL_1:
5740 priv->sys_config.disable_unicast_decryption = 0;
5741 priv->ieee->host_decrypt = 0;
5742 break;
5743 case SEC_LEVEL_0:
5744 priv->sys_config.disable_unicast_decryption = 1;
5745 break;
5746 default:
5747 break;
5748 }
5749 }
5750
5751 static void ipw_set_hw_decrypt_multicast(struct ipw_priv *priv, int level)
5752 {
5753 if (priv->ieee->host_encrypt)
5754 return;
5755
5756 switch (level) {
5757 case SEC_LEVEL_3:
5758 priv->sys_config.disable_multicast_decryption = 0;
5759 break;
5760 case SEC_LEVEL_2:
5761 priv->sys_config.disable_multicast_decryption = 1;
5762 break;
5763 case SEC_LEVEL_1:
5764 priv->sys_config.disable_multicast_decryption = 0;
5765 break;
5766 case SEC_LEVEL_0:
5767 priv->sys_config.disable_multicast_decryption = 1;
5768 break;
5769 default:
5770 break;
5771 }
5772 }
5773
5774 static void ipw_set_hwcrypto_keys(struct ipw_priv *priv)
5775 {
5776 switch (priv->ieee->sec.level) {
5777 case SEC_LEVEL_3:
5778 if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
5779 ipw_send_tgi_tx_key(priv,
5780 DCT_FLAG_EXT_SECURITY_CCM,
5781 priv->ieee->sec.active_key);
5782
5783 if (!priv->ieee->host_mc_decrypt)
5784 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_CCM);
5785 break;
5786 case SEC_LEVEL_2:
5787 if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
5788 ipw_send_tgi_tx_key(priv,
5789 DCT_FLAG_EXT_SECURITY_TKIP,
5790 priv->ieee->sec.active_key);
5791 break;
5792 case SEC_LEVEL_1:
5793 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
5794 ipw_set_hw_decrypt_unicast(priv, priv->ieee->sec.level);
5795 ipw_set_hw_decrypt_multicast(priv, priv->ieee->sec.level);
5796 break;
5797 case SEC_LEVEL_0:
5798 default:
5799 break;
5800 }
5801 }
5802
5803 static void ipw_adhoc_check(void *data)
5804 {
5805 struct ipw_priv *priv = data;
5806
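/* Count missed IBSS beacons; once the disassociation threshold is
 * crossed (and CFG_ADHOC_PERSIST is not set) drop the network,
 * otherwise re-arm this check one beacon interval from now. */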
5807 if (priv->missed_adhoc_beacons++ > priv->disassociate_threshold &&
5808 !(priv->config & CFG_ADHOC_PERSIST)) {
5809 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
5810 IPW_DL_STATE | IPW_DL_ASSOC,
5811 "Missed beacon: %d - disassociate\n",
5812 priv->missed_adhoc_beacons);
5813 ipw_remove_current_network(priv);
5814 ipw_disassociate(priv);
5815 return;
5816 }
5817
5818 queue_delayed_work(priv->workqueue, &priv->adhoc_check,
5819 priv->assoc_request.beacon_interval);
5820 }
5821
5822 static void ipw_bg_adhoc_check(void *data)
5823 {
5824 struct ipw_priv *priv = data;
5825 down(&priv->sem);
5826 ipw_adhoc_check(data);
5827 up(&priv->sem);
5828 }
5829
5830 #ifdef CONFIG_IPW2200_DEBUG
5831 static void ipw_debug_config(struct ipw_priv *priv)
5832 {
5833 IPW_DEBUG_INFO("Scan completed, no valid APs matched "
5834 "[CFG 0x%08X]\n", priv->config);
5835 if (priv->config & CFG_STATIC_CHANNEL)
5836 IPW_DEBUG_INFO("Channel locked to %d\n", priv->channel);
5837 else
5838 IPW_DEBUG_INFO("Channel unlocked.\n");
5839 if (priv->config & CFG_STATIC_ESSID)
5840 IPW_DEBUG_INFO("ESSID locked to '%s'\n",
5841 escape_essid(priv->essid, priv->essid_len));
5842 else
5843 IPW_DEBUG_INFO("ESSID unlocked.\n");
5844 if (priv->config & CFG_STATIC_BSSID)
5845 IPW_DEBUG_INFO("BSSID locked to " MAC_FMT "\n",
5846 MAC_ARG(priv->bssid));
5847 else
5848 IPW_DEBUG_INFO("BSSID unlocked.\n");
5849 if (priv->capability & CAP_PRIVACY_ON)
5850 IPW_DEBUG_INFO("PRIVACY on\n");
5851 else
5852 IPW_DEBUG_INFO("PRIVACY off\n");
5853 IPW_DEBUG_INFO("RATE MASK: 0x%08X\n", priv->rates_mask);
5854 }
5855 #else
5856 #define ipw_debug_config(x) do {} while (0)
5857 #endif
5858
5859 static void ipw_set_fixed_rate(struct ipw_priv *priv, int mode)
5860 {
5861 /* TODO: Verify that this works... */
5862 struct ipw_fixed_rate fr = {
5863 .tx_rates = priv->rates_mask
5864 };
5865 u32 reg;
5866 u16 mask = 0;
5867
5868 /* Identify 'current FW band' and match it with the fixed
5869 * Tx rates */
5870
5871 switch (priv->ieee->freq_band) {
5872 case IEEE80211_52GHZ_BAND: /* A only */
5873 /* IEEE_A */
5874 if (priv->rates_mask & ~IEEE80211_OFDM_RATES_MASK) {
5875 /* Invalid fixed rate mask */
5876 IPW_DEBUG_WX
5877 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
5878 fr.tx_rates = 0;
5879 break;
5880 }
5881
5882 fr.tx_rates >>= IEEE80211_OFDM_SHIFT_MASK_A;
5883 break;
5884
5885 default: /* 2.4Ghz or Mixed */
5886 /* IEEE_B */
5887 if (mode == IEEE_B) {
5888 if (fr.tx_rates & ~IEEE80211_CCK_RATES_MASK) {
5889 /* Invalid fixed rate mask */
5890 IPW_DEBUG_WX
5891 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
5892 fr.tx_rates = 0;
5893 }
5894 break;
5895 }
5896
5897 /* IEEE_G */
5898 if (fr.tx_rates & ~(IEEE80211_CCK_RATES_MASK |
5899 IEEE80211_OFDM_RATES_MASK)) {
5900 /* Invalid fixed rate mask */
5901 IPW_DEBUG_WX
5902 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
5903 fr.tx_rates = 0;
5904 break;
5905 }
5906
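/* In mixed b/g operation the 6, 9 and 12 Mb OFDM mask bits are
 * shifted down one bit position before being handed to the firmware
 * (presumably to match the firmware's G-mode rate layout). */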
5907 if (IEEE80211_OFDM_RATE_6MB_MASK & fr.tx_rates) {
5908 mask |= (IEEE80211_OFDM_RATE_6MB_MASK >> 1);
5909 fr.tx_rates &= ~IEEE80211_OFDM_RATE_6MB_MASK;
5910 }
5911
5912 if (IEEE80211_OFDM_RATE_9MB_MASK & fr.tx_rates) {
5913 mask |= (IEEE80211_OFDM_RATE_9MB_MASK >> 1);
5914 fr.tx_rates &= ~IEEE80211_OFDM_RATE_9MB_MASK;
5915 }
5916
5917 if (IEEE80211_OFDM_RATE_12MB_MASK & fr.tx_rates) {
5918 mask |= (IEEE80211_OFDM_RATE_12MB_MASK >> 1);
5919 fr.tx_rates &= ~IEEE80211_OFDM_RATE_12MB_MASK;
5920 }
5921
5922 fr.tx_rates |= mask;
5923 break;
5924 }
5925
5926 reg = ipw_read32(priv, IPW_MEM_FIXED_OVERRIDE);
5927 ipw_write_reg32(priv, reg, *(u32 *) & fr);
5928 }
5929
5930 static void ipw_abort_scan(struct ipw_priv *priv)
5931 {
5932 int err;
5933
5934 if (priv->status & STATUS_SCAN_ABORTING) {
5935 IPW_DEBUG_HC("Ignoring concurrent scan abort request.\n");
5936 return;
5937 }
5938 priv->status |= STATUS_SCAN_ABORTING;
5939
5940 err = ipw_send_scan_abort(priv);
5941 if (err)
5942 IPW_DEBUG_HC("Request to abort scan failed.\n");
5943 }
5944
5945 static void ipw_add_scan_channels(struct ipw_priv *priv,
5946 struct ipw_scan_request_ext *scan,
5947 int scan_type)
5948 {
5949 int channel_index = 0;
5950 const struct ieee80211_geo *geo;
5951 int i;
5952
5953 geo = ipw_get_geo(priv->ieee);
5954
5955 if (priv->ieee->freq_band & IEEE80211_52GHZ_BAND) {
5956 int start = channel_index;
5957 for (i = 0; i < geo->a_channels; i++) {
5958 if ((priv->status & STATUS_ASSOCIATED) &&
5959 geo->a[i].channel == priv->channel)
5960 continue;
5961 channel_index++;
5962 scan->channels_list[channel_index] = geo->a[i].channel;
5963 ipw_set_scan_type(scan, channel_index,
5964 geo->a[i].
5965 flags & IEEE80211_CH_PASSIVE_ONLY ?
5966 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN :
5967 scan_type);
5968 }
5969
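/* The first entry of each band block encodes the band in the two
 * high bits and the number of channels that follow in the low bits. */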
5970 if (start != channel_index) {
5971 scan->channels_list[start] = (u8) (IPW_A_MODE << 6) |
5972 (channel_index - start);
5973 channel_index++;
5974 }
5975 }
5976
5977 if (priv->ieee->freq_band & IEEE80211_24GHZ_BAND) {
5978 int start = channel_index;
5979 if (priv->config & CFG_SPEED_SCAN) {
5980 int index;
5981 u8 channels[IEEE80211_24GHZ_CHANNELS] = {
5982 /* nop out the list */
5983 [0] = 0
5984 };
5985
5986 u8 channel;
5987 while (channel_index < IPW_SCAN_CHANNELS) {
5988 channel =
5989 priv->speed_scan[priv->speed_scan_pos];
5990 if (channel == 0) {
5991 priv->speed_scan_pos = 0;
5992 channel = priv->speed_scan[0];
5993 }
5994 if ((priv->status & STATUS_ASSOCIATED) &&
5995 channel == priv->channel) {
5996 priv->speed_scan_pos++;
5997 continue;
5998 }
5999
6000 /* If this channel has already been
6001 * added to the scan, break out of the
6002 * loop; it will be the first channel
6003 * in the next scan.
6004 */
6005 if (channels[channel - 1] != 0)
6006 break;
6007
6008 channels[channel - 1] = 1;
6009 priv->speed_scan_pos++;
6010 channel_index++;
6011 scan->channels_list[channel_index] = channel;
6012 index =
6013 ipw_channel_to_index(priv->ieee, channel);
6014 ipw_set_scan_type(scan, channel_index,
6015 geo->bg[index].
6016 flags &
6017 IEEE80211_CH_PASSIVE_ONLY ?
6018 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
6019 : scan_type);
6020 }
6021 } else {
6022 for (i = 0; i < geo->bg_channels; i++) {
6023 if ((priv->status & STATUS_ASSOCIATED) &&
6024 geo->bg[i].channel == priv->channel)
6025 continue;
6026 channel_index++;
6027 scan->channels_list[channel_index] =
6028 geo->bg[i].channel;
6029 ipw_set_scan_type(scan, channel_index,
6030 geo->bg[i].
6031 flags &
6032 IEEE80211_CH_PASSIVE_ONLY ?
6033 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
6034 : scan_type);
6035 }
6036 }
6037
6038 if (start != channel_index) {
6039 scan->channels_list[start] = (u8) (IPW_B_MODE << 6) |
6040 (channel_index - start);
6041 }
6042 }
6043 }
6044
6045 static int ipw_request_scan(struct ipw_priv *priv)
6046 {
6047 struct ipw_scan_request_ext scan;
6048 int err = 0, scan_type;
6049
6050 if (!(priv->status & STATUS_INIT) ||
6051 (priv->status & STATUS_EXIT_PENDING))
6052 return 0;
6053
6054 down(&priv->sem);
6055
6056 if (priv->status & STATUS_SCANNING) {
6057 IPW_DEBUG_HC("Concurrent scan requested. Ignoring.\n");
6058 priv->status |= STATUS_SCAN_PENDING;
6059 goto done;
6060 }
6061
6062 if (!(priv->status & STATUS_SCAN_FORCED) &&
6063 priv->status & STATUS_SCAN_ABORTING) {
6064 IPW_DEBUG_HC("Scan request while abort pending. Queuing.\n");
6065 priv->status |= STATUS_SCAN_PENDING;
6066 goto done;
6067 }
6068
6069 if (priv->status & STATUS_RF_KILL_MASK) {
6070 IPW_DEBUG_HC("Aborting scan due to RF Kill activation\n");
6071 priv->status |= STATUS_SCAN_PENDING;
6072 goto done;
6073 }
6074
6075 memset(&scan, 0, sizeof(scan));
6076
6077 if (priv->config & CFG_SPEED_SCAN)
6078 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6079 cpu_to_le16(30);
6080 else
6081 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6082 cpu_to_le16(20);
6083
6084 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] =
6085 cpu_to_le16(20);
6086 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = cpu_to_le16(120);
6087
6088 scan.full_scan_index = cpu_to_le32(ieee80211_get_scans(priv->ieee));
6089
6090 #ifdef CONFIG_IPW2200_MONITOR
6091 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
6092 u8 channel;
6093 u8 band = 0;
6094
6095 switch (ipw_is_valid_channel(priv->ieee, priv->channel)) {
6096 case IEEE80211_52GHZ_BAND:
6097 band = (u8) (IPW_A_MODE << 6) | 1;
6098 channel = priv->channel;
6099 break;
6100
6101 case IEEE80211_24GHZ_BAND:
6102 band = (u8) (IPW_B_MODE << 6) | 1;
6103 channel = priv->channel;
6104 break;
6105
6106 default:
6107 band = (u8) (IPW_B_MODE << 6) | 1;
6108 channel = 9;
6109 break;
6110 }
6111
6112 scan.channels_list[0] = band;
6113 scan.channels_list[1] = channel;
6114 ipw_set_scan_type(&scan, 1, IPW_SCAN_PASSIVE_FULL_DWELL_SCAN);
6115
6116 /* NOTE: The card will sit on this channel for this time
6117 * period. Scan aborts are timing sensitive and frequently
6118 * result in firmware restarts. As such, it is best to
6119 * set a small dwell_time here and just keep re-issuing
6120 * scans. Otherwise fast channel hopping will not actually
6121 * hop channels.
6122 *
6123 * TODO: Move SPEED SCAN support to all modes and bands */
6124 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6125 cpu_to_le16(2000);
6126 } else {
6127 #endif /* CONFIG_IPW2200_MONITOR */
6128 /* If we are roaming, then make this a directed scan for the
6129 * current network. Otherwise, ensure that every other scan
6130 * is a fast channel hop scan */
6131 if ((priv->status & STATUS_ROAMING)
6132 || (!(priv->status & STATUS_ASSOCIATED)
6133 && (priv->config & CFG_STATIC_ESSID)
6134 && (le32_to_cpu(scan.full_scan_index) % 2))) {
6135 err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
6136 if (err) {
6137 IPW_DEBUG_HC("Attempt to send SSID command "
6138 "failed.\n");
6139 goto done;
6140 }
6141
6142 scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
6143 } else
6144 scan_type = IPW_SCAN_ACTIVE_BROADCAST_SCAN;
6145
6146 ipw_add_scan_channels(priv, &scan, scan_type);
6147 #ifdef CONFIG_IPW2200_MONITOR
6148 }
6149 #endif
6150
6151 err = ipw_send_scan_request_ext(priv, &scan);
6152 if (err) {
6153 IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
6154 goto done;
6155 }
6156
6157 priv->status |= STATUS_SCANNING;
6158 priv->status &= ~STATUS_SCAN_PENDING;
6159 queue_delayed_work(priv->workqueue, &priv->scan_check,
6160 IPW_SCAN_CHECK_WATCHDOG);
6161 done:
6162 up(&priv->sem);
6163 return err;
6164 }
6165
6166 static void ipw_bg_abort_scan(void *data)
6167 {
6168 struct ipw_priv *priv = data;
6169 down(&priv->sem);
6170 ipw_abort_scan(data);
6171 up(&priv->sem);
6172 }
6173
6174 static int ipw_wpa_enable(struct ipw_priv *priv, int value)
6175 {
6176 /* This is called when wpa_supplicant loads and closes the driver
6177 * interface. */
6178 priv->ieee->wpa_enabled = value;
6179 return 0;
6180 }
6181
6182 static int ipw_wpa_set_auth_algs(struct ipw_priv *priv, int value)
6183 {
6184 struct ieee80211_device *ieee = priv->ieee;
6185 struct ieee80211_security sec = {
6186 .flags = SEC_AUTH_MODE,
6187 };
6188 int ret = 0;
6189
6190 if (value & IW_AUTH_ALG_SHARED_KEY) {
6191 sec.auth_mode = WLAN_AUTH_SHARED_KEY;
6192 ieee->open_wep = 0;
6193 } else if (value & IW_AUTH_ALG_OPEN_SYSTEM) {
6194 sec.auth_mode = WLAN_AUTH_OPEN;
6195 ieee->open_wep = 1;
6196 } else
6197 return -EINVAL;
6198
6199 if (ieee->set_security)
6200 ieee->set_security(ieee->dev, &sec);
6201 else
6202 ret = -EOPNOTSUPP;
6203
6204 return ret;
6205 }
6206
6207 void ipw_wpa_assoc_frame(struct ipw_priv *priv, char *wpa_ie, int wpa_ie_len)
6208 {
6209 /* make sure WPA is enabled */
6210 ipw_wpa_enable(priv, 1);
6211
6212 ipw_disassociate(priv);
6213 }
6214
6215 static int ipw_set_rsn_capa(struct ipw_priv *priv,
6216 char *capabilities, int length)
6217 {
6218 struct host_cmd cmd = {
6219 .cmd = IPW_CMD_RSN_CAPABILITIES,
6220 .len = length,
6221 };
6222
6223 IPW_DEBUG_HC("HOST_CMD_RSN_CAPABILITIES\n");
6224
6225 memcpy(cmd.param, capabilities, length);
6226 return ipw_send_cmd(priv, &cmd);
6227 }
6228
6229 /*
6230 * WE-18 support
6231 */
6232
6233 /* SIOCSIWGENIE */
6234 static int ipw_wx_set_genie(struct net_device *dev,
6235 struct iw_request_info *info,
6236 union iwreq_data *wrqu, char *extra)
6237 {
6238 struct ipw_priv *priv = ieee80211_priv(dev);
6239 struct ieee80211_device *ieee = priv->ieee;
6240 u8 *buf;
6241 int err = 0;
6242
6243 if (wrqu->data.length > MAX_WPA_IE_LEN ||
6244 (wrqu->data.length && extra == NULL))
6245 return -EINVAL;
6246
6247 //down(&priv->sem);
6248
6249 //if (!ieee->wpa_enabled) {
6250 // err = -EOPNOTSUPP;
6251 // goto out;
6252 //}
6253
6254 if (wrqu->data.length) {
6255 buf = kmalloc(wrqu->data.length, GFP_KERNEL);
6256 if (buf == NULL) {
6257 err = -ENOMEM;
6258 goto out;
6259 }
6260
6261 memcpy(buf, extra, wrqu->data.length);
6262 kfree(ieee->wpa_ie);
6263 ieee->wpa_ie = buf;
6264 ieee->wpa_ie_len = wrqu->data.length;
6265 } else {
6266 kfree(ieee->wpa_ie);
6267 ieee->wpa_ie = NULL;
6268 ieee->wpa_ie_len = 0;
6269 }
6270
6271 ipw_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len);
6272 out:
6273 //up(&priv->sem);
6274 return err;
6275 }
6276
6277 /* SIOCGIWGENIE */
6278 static int ipw_wx_get_genie(struct net_device *dev,
6279 struct iw_request_info *info,
6280 union iwreq_data *wrqu, char *extra)
6281 {
6282 struct ipw_priv *priv = ieee80211_priv(dev);
6283 struct ieee80211_device *ieee = priv->ieee;
6284 int err = 0;
6285
6286 //down(&priv->sem);
6287
6288 //if (!ieee->wpa_enabled) {
6289 // err = -EOPNOTSUPP;
6290 // goto out;
6291 //}
6292
6293 if (ieee->wpa_ie_len == 0 || ieee->wpa_ie == NULL) {
6294 wrqu->data.length = 0;
6295 goto out;
6296 }
6297
6298 if (wrqu->data.length < ieee->wpa_ie_len) {
6299 err = -E2BIG;
6300 goto out;
6301 }
6302
6303 wrqu->data.length = ieee->wpa_ie_len;
6304 memcpy(extra, ieee->wpa_ie, ieee->wpa_ie_len);
6305
6306 out:
6307 //up(&priv->sem);
6308 return err;
6309 }
6310
6311 static int wext_cipher2level(int cipher)
6312 {
6313 switch (cipher) {
6314 case IW_AUTH_CIPHER_NONE:
6315 return SEC_LEVEL_0;
6316 case IW_AUTH_CIPHER_WEP40:
6317 case IW_AUTH_CIPHER_WEP104:
6318 return SEC_LEVEL_1;
6319 case IW_AUTH_CIPHER_TKIP:
6320 return SEC_LEVEL_2;
6321 case IW_AUTH_CIPHER_CCMP:
6322 return SEC_LEVEL_3;
6323 default:
6324 return -1;
6325 }
6326 }
6327
6328 /* SIOCSIWAUTH */
6329 static int ipw_wx_set_auth(struct net_device *dev,
6330 struct iw_request_info *info,
6331 union iwreq_data *wrqu, char *extra)
6332 {
6333 struct ipw_priv *priv = ieee80211_priv(dev);
6334 struct ieee80211_device *ieee = priv->ieee;
6335 struct iw_param *param = &wrqu->param;
6336 struct ieee80211_crypt_data *crypt;
6337 unsigned long flags;
6338 int ret = 0;
6339
6340 switch (param->flags & IW_AUTH_INDEX) {
6341 case IW_AUTH_WPA_VERSION:
6342 break;
6343 case IW_AUTH_CIPHER_PAIRWISE:
6344 ipw_set_hw_decrypt_unicast(priv,
6345 wext_cipher2level(param->value));
6346 break;
6347 case IW_AUTH_CIPHER_GROUP:
6348 ipw_set_hw_decrypt_multicast(priv,
6349 wext_cipher2level(param->value));
6350 break;
6351 case IW_AUTH_KEY_MGMT:
6352 /*
6353 * ipw2200 does not use these parameters
6354 */
6355 break;
6356
6357 case IW_AUTH_TKIP_COUNTERMEASURES:
6358 crypt = priv->ieee->crypt[priv->ieee->tx_keyidx];
6359 if (!crypt || !crypt->ops->set_flags || !crypt->ops->get_flags)
6360 break;
6361
6362 flags = crypt->ops->get_flags(crypt->priv);
6363
6364 if (param->value)
6365 flags |= IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6366 else
6367 flags &= ~IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6368
6369 crypt->ops->set_flags(flags, crypt->priv);
6370
6371 break;
6372
6373 case IW_AUTH_DROP_UNENCRYPTED:{
6374 /* HACK:
6375 *
6376 * wpa_supplicant calls set_wpa_enabled when the driver
6377 * is loaded and unloaded, regardless of whether WPA is being
6378 * used. No other calls are made that can be used to
6379 * determine whether encryption will be used prior to an
6380 * association being expected. If encryption is not being
6381 * used, drop_unencrypted is set to false, else true -- we
6382 * can use this to determine whether the CAP_PRIVACY_ON bit should
6383 * be set.
6384 */
6385 struct ieee80211_security sec = {
6386 .flags = SEC_ENABLED,
6387 .enabled = param->value,
6388 };
6389 priv->ieee->drop_unencrypted = param->value;
6390 /* We only change SEC_LEVEL for open mode. Others
6391 * are set by ipw_wpa_set_encryption.
6392 */
6393 if (!param->value) {
6394 sec.flags |= SEC_LEVEL;
6395 sec.level = SEC_LEVEL_0;
6396 } else {
6397 sec.flags |= SEC_LEVEL;
6398 sec.level = SEC_LEVEL_1;
6399 }
6400 if (priv->ieee->set_security)
6401 priv->ieee->set_security(priv->ieee->dev, &sec);
6402 break;
6403 }
6404
6405 case IW_AUTH_80211_AUTH_ALG:
6406 ret = ipw_wpa_set_auth_algs(priv, param->value);
6407 break;
6408
6409 case IW_AUTH_WPA_ENABLED:
6410 ret = ipw_wpa_enable(priv, param->value);
6411 break;
6412
6413 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6414 ieee->ieee802_1x = param->value;
6415 break;
6416
6417 //case IW_AUTH_ROAMING_CONTROL:
6418 case IW_AUTH_PRIVACY_INVOKED:
6419 ieee->privacy_invoked = param->value;
6420 break;
6421
6422 default:
6423 return -EOPNOTSUPP;
6424 }
6425 return ret;
6426 }
6427
6428 /* SIOCGIWAUTH */
6429 static int ipw_wx_get_auth(struct net_device *dev,
6430 struct iw_request_info *info,
6431 union iwreq_data *wrqu, char *extra)
6432 {
6433 struct ipw_priv *priv = ieee80211_priv(dev);
6434 struct ieee80211_device *ieee = priv->ieee;
6435 struct ieee80211_crypt_data *crypt;
6436 struct iw_param *param = &wrqu->param;
6437 int ret = 0;
6438
6439 switch (param->flags & IW_AUTH_INDEX) {
6440 case IW_AUTH_WPA_VERSION:
6441 case IW_AUTH_CIPHER_PAIRWISE:
6442 case IW_AUTH_CIPHER_GROUP:
6443 case IW_AUTH_KEY_MGMT:
6444 /*
6445 * wpa_supplicant will control these internally
6446 */
6447 ret = -EOPNOTSUPP;
6448 break;
6449
6450 case IW_AUTH_TKIP_COUNTERMEASURES:
6451 crypt = priv->ieee->crypt[priv->ieee->tx_keyidx];
6452 if (!crypt || !crypt->ops->get_flags)
6453 break;
6454
6455 param->value = (crypt->ops->get_flags(crypt->priv) &
6456 IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) ? 1 : 0;
6457
6458 break;
6459
6460 case IW_AUTH_DROP_UNENCRYPTED:
6461 param->value = ieee->drop_unencrypted;
6462 break;
6463
6464 case IW_AUTH_80211_AUTH_ALG:
6465 param->value = ieee->sec.auth_mode;
6466 break;
6467
6468 case IW_AUTH_WPA_ENABLED:
6469 param->value = ieee->wpa_enabled;
6470 break;
6471
6472 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6473 param->value = ieee->ieee802_1x;
6474 break;
6475
6476 case IW_AUTH_ROAMING_CONTROL:
6477 case IW_AUTH_PRIVACY_INVOKED:
6478 param->value = ieee->privacy_invoked;
6479 break;
6480
6481 default:
6482 return -EOPNOTSUPP;
6483 }
6484 return 0;
6485 }
6486
6487 /* SIOCSIWENCODEEXT */
6488 static int ipw_wx_set_encodeext(struct net_device *dev,
6489 struct iw_request_info *info,
6490 union iwreq_data *wrqu, char *extra)
6491 {
6492 struct ipw_priv *priv = ieee80211_priv(dev);
6493 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
6494
6495 if (hwcrypto) {
6496 if (ext->alg == IW_ENCODE_ALG_TKIP) {
6497 /* IPW HW can't build TKIP MIC,
6498 host decryption still needed */
6499 if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY)
6500 priv->ieee->host_mc_decrypt = 1;
6501 else {
6502 priv->ieee->host_encrypt = 0;
6503 priv->ieee->host_encrypt_msdu = 1;
6504 priv->ieee->host_decrypt = 1;
6505 }
6506 } else {
6507 priv->ieee->host_encrypt = 0;
6508 priv->ieee->host_encrypt_msdu = 0;
6509 priv->ieee->host_decrypt = 0;
6510 priv->ieee->host_mc_decrypt = 0;
6511 }
6512 }
6513
6514 return ieee80211_wx_set_encodeext(priv->ieee, info, wrqu, extra);
6515 }
6516
6517 /* SIOCGIWENCODEEXT */
6518 static int ipw_wx_get_encodeext(struct net_device *dev,
6519 struct iw_request_info *info,
6520 union iwreq_data *wrqu, char *extra)
6521 {
6522 struct ipw_priv *priv = ieee80211_priv(dev);
6523 return ieee80211_wx_get_encodeext(priv->ieee, info, wrqu, extra);
6524 }
6525
6526 /* SIOCSIWMLME */
6527 static int ipw_wx_set_mlme(struct net_device *dev,
6528 struct iw_request_info *info,
6529 union iwreq_data *wrqu, char *extra)
6530 {
6531 struct ipw_priv *priv = ieee80211_priv(dev);
6532 struct iw_mlme *mlme = (struct iw_mlme *)extra;
6533 u16 reason;
6534
6535 reason = cpu_to_le16(mlme->reason_code);
6536
6537 switch (mlme->cmd) {
6538 case IW_MLME_DEAUTH:
6539 // silently ignore
6540 break;
6541
6542 case IW_MLME_DISASSOC:
6543 ipw_disassociate(priv);
6544 break;
6545
6546 default:
6547 return -EOPNOTSUPP;
6548 }
6549 return 0;
6550 }
6551
6552 #ifdef CONFIG_IPW_QOS
6553
6554 /* QoS */
6555 /*
6556 * get the modulation type of the current network or
6557 * the card current mode
6558 */
6559 u8 ipw_qos_current_mode(struct ipw_priv * priv)
6560 {
6561 u8 mode = 0;
6562
6563 if (priv->status & STATUS_ASSOCIATED) {
6564 unsigned long flags;
6565
6566 spin_lock_irqsave(&priv->ieee->lock, flags);
6567 mode = priv->assoc_network->mode;
6568 spin_unlock_irqrestore(&priv->ieee->lock, flags);
6569 } else {
6570 mode = priv->ieee->mode;
6571 }
6572 IPW_DEBUG_QOS("QoS network/card mode %d \n", mode);
6573 return mode;
6574 }
6575
6576 /*
6577 * Handle management frame beacon and probe response
6578 */
6579 static int ipw_qos_handle_probe_response(struct ipw_priv *priv,
6580 int active_network,
6581 struct ieee80211_network *network)
6582 {
6583 u32 size = sizeof(struct ieee80211_qos_parameters);
6584
6585 if (network->capability & WLAN_CAPABILITY_IBSS)
6586 network->qos_data.active = network->qos_data.supported;
6587
6588 if (network->flags & NETWORK_HAS_QOS_MASK) {
6589 if (active_network &&
6590 (network->flags & NETWORK_HAS_QOS_PARAMETERS))
6591 network->qos_data.active = network->qos_data.supported;
6592
6593 if ((network->qos_data.active == 1) && (active_network == 1) &&
6594 (network->flags & NETWORK_HAS_QOS_PARAMETERS) &&
6595 (network->qos_data.old_param_count !=
6596 network->qos_data.param_count)) {
6597 network->qos_data.old_param_count =
6598 network->qos_data.param_count;
6599 schedule_work(&priv->qos_activate);
6600 IPW_DEBUG_QOS("QoS parameters change call "
6601 "qos_activate\n");
6602 }
6603 } else {
6604 if ((priv->ieee->mode == IEEE_B) || (network->mode == IEEE_B))
6605 memcpy(&network->qos_data.parameters,
6606 &def_parameters_CCK, size);
6607 else
6608 memcpy(&network->qos_data.parameters,
6609 &def_parameters_OFDM, size);
6610
6611 if ((network->qos_data.active == 1) && (active_network == 1)) {
6612 IPW_DEBUG_QOS("QoS was disabled call qos_activate \n");
6613 schedule_work(&priv->qos_activate);
6614 }
6615
6616 network->qos_data.active = 0;
6617 network->qos_data.supported = 0;
6618 }
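/* In ad-hoc mode, if another IBSS is advertising the same non-empty
 * ESSID as the network we are associated to, schedule a merge. */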
6619 if ((priv->status & STATUS_ASSOCIATED) &&
6620 (priv->ieee->iw_mode == IW_MODE_ADHOC) && (active_network == 0)) {
6621 if (memcmp(network->bssid, priv->bssid, ETH_ALEN))
6622 if ((network->capability & WLAN_CAPABILITY_IBSS) &&
6623 !(network->flags & NETWORK_EMPTY_ESSID))
6624 if ((network->ssid_len ==
6625 priv->assoc_network->ssid_len) &&
6626 !memcmp(network->ssid,
6627 priv->assoc_network->ssid,
6628 network->ssid_len)) {
6629 queue_work(priv->workqueue,
6630 &priv->merge_networks);
6631 }
6632 }
6633
6634 return 0;
6635 }
6636
6637 /*
6638 * This function sets up the firmware to support QoS. It sends
6639 * IPW_CMD_QOS_PARAMETERS and IPW_CMD_WME_INFO
6640 */
6641 static int ipw_qos_activate(struct ipw_priv *priv,
6642 struct ieee80211_qos_data *qos_network_data)
6643 {
6644 int err;
6645 struct ieee80211_qos_parameters qos_parameters[QOS_QOS_SETS];
6646 struct ieee80211_qos_parameters *active_one = NULL;
6647 u32 size = sizeof(struct ieee80211_qos_parameters);
6648 u32 burst_duration;
6649 int i;
6650 u8 type;
6651
6652 type = ipw_qos_current_mode(priv);
6653
6654 active_one = &(qos_parameters[QOS_PARAM_SET_DEF_CCK]);
6655 memcpy(active_one, priv->qos_data.def_qos_parm_CCK, size);
6656 active_one = &(qos_parameters[QOS_PARAM_SET_DEF_OFDM]);
6657 memcpy(active_one, priv->qos_data.def_qos_parm_OFDM, size);
6658
6659 if (qos_network_data == NULL) {
6660 if (type == IEEE_B) {
6661 IPW_DEBUG_QOS("QoS activate network mode %d\n", type);
6662 active_one = &def_parameters_CCK;
6663 } else
6664 active_one = &def_parameters_OFDM;
6665
6666 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6667 burst_duration = ipw_qos_get_burst_duration(priv);
6668 for (i = 0; i < QOS_QUEUE_NUM; i++)
6669 qos_parameters[QOS_PARAM_SET_ACTIVE].tx_op_limit[i] =
6670 (u16) burst_duration;
6671 } else if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
6672 if (type == IEEE_B) {
6673 IPW_DEBUG_QOS("QoS activate IBSS network mode %d\n",
6674 type);
6675 if (priv->qos_data.qos_enable == 0)
6676 active_one = &def_parameters_CCK;
6677 else
6678 active_one = priv->qos_data.def_qos_parm_CCK;
6679 } else {
6680 if (priv->qos_data.qos_enable == 0)
6681 active_one = &def_parameters_OFDM;
6682 else
6683 active_one = priv->qos_data.def_qos_parm_OFDM;
6684 }
6685 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6686 } else {
6687 unsigned long flags;
6688 int active;
6689
6690 spin_lock_irqsave(&priv->ieee->lock, flags);
6691 active_one = &(qos_network_data->parameters);
6692 qos_network_data->old_param_count =
6693 qos_network_data->param_count;
6694 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6695 active = qos_network_data->supported;
6696 spin_unlock_irqrestore(&priv->ieee->lock, flags);
6697
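/* If the network does not advertise QoS support, fall back to our
 * configured burst duration for all TXOP limits. */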
6698 if (active == 0) {
6699 burst_duration = ipw_qos_get_burst_duration(priv);
6700 for (i = 0; i < QOS_QUEUE_NUM; i++)
6701 qos_parameters[QOS_PARAM_SET_ACTIVE].
6702 tx_op_limit[i] = (u16) burst_duration;
6703 }
6704 }
6705
6706 IPW_DEBUG_QOS("QoS sending IPW_CMD_QOS_PARAMETERS\n");
6707 err = ipw_send_qos_params_command(priv,
6708 (struct ieee80211_qos_parameters *)
6709 &(qos_parameters[0]));
6710 if (err)
6711 IPW_DEBUG_QOS("QoS IPW_CMD_QOS_PARAMETERS failed\n");
6712
6713 return err;
6714 }
6715
6716 /*
6717 * send IPW_CMD_WME_INFO to the firmware
6718 */
6719 static int ipw_qos_set_info_element(struct ipw_priv *priv)
6720 {
6721 int ret = 0;
6722 struct ieee80211_qos_information_element qos_info;
6723
6724 if (priv == NULL)
6725 return -1;
6726
6727 qos_info.elementID = QOS_ELEMENT_ID;
6728 qos_info.length = sizeof(struct ieee80211_qos_information_element) - 2;
6729
6730 qos_info.version = QOS_VERSION_1;
6731 qos_info.ac_info = 0;
6732
6733 memcpy(qos_info.qui, qos_oui, QOS_OUI_LEN);
6734 qos_info.qui_type = QOS_OUI_TYPE;
6735 qos_info.qui_subtype = QOS_OUI_INFO_SUB_TYPE;
6736
6737 ret = ipw_send_qos_info_command(priv, &qos_info);
6738 if (ret != 0) {
6739 IPW_DEBUG_QOS("QoS error calling ipw_send_qos_info_command\n");
6740 }
6741 return ret;
6742 }
6743
6744 /*
6745 * Set the QoS parameters for the association request structure
6746 */
6747 static int ipw_qos_association(struct ipw_priv *priv,
6748 struct ieee80211_network *network)
6749 {
6750 int err = 0;
6751 struct ieee80211_qos_data *qos_data = NULL;
6752 struct ieee80211_qos_data ibss_data = {
6753 .supported = 1,
6754 .active = 1,
6755 };
6756
6757 switch (priv->ieee->iw_mode) {
6758 case IW_MODE_ADHOC:
6759 if (!(network->capability & WLAN_CAPABILITY_IBSS))
6760 BUG();
6761
6762 qos_data = &ibss_data;
6763 break;
6764
6765 case IW_MODE_INFRA:
6766 qos_data = &network->qos_data;
6767 break;
6768
6769 default:
6770 BUG();
6771 break;
6772 }
6773
6774 err = ipw_qos_activate(priv, qos_data);
6775 if (err) {
6776 priv->assoc_request.policy_support &= ~HC_QOS_SUPPORT_ASSOC;
6777 return err;
6778 }
6779
6780 if (priv->qos_data.qos_enable && qos_data->supported) {
6781 IPW_DEBUG_QOS("QoS will be enabled for this association\n");
6782 priv->assoc_request.policy_support |= HC_QOS_SUPPORT_ASSOC;
6783 return ipw_qos_set_info_element(priv);
6784 }
6785
6786 return 0;
6787 }
6788
6789 /*
6790 * Handle the beacon responses. If the network's QoS setting
6791 * differs from the associated setting, adjust the QoS
6792 * setting
6793 */
6794 static int ipw_qos_association_resp(struct ipw_priv *priv,
6795 struct ieee80211_network *network)
6796 {
6797 int ret = 0;
6798 unsigned long flags;
6799 u32 size = sizeof(struct ieee80211_qos_parameters);
6800 int set_qos_param = 0;
6801
6802 if ((priv == NULL) || (network == NULL) ||
6803 (priv->assoc_network == NULL))
6804 return ret;
6805
6806 if (!(priv->status & STATUS_ASSOCIATED))
6807 return ret;
6808
6809 if ((priv->ieee->iw_mode != IW_MODE_INFRA))
6810 return ret;
6811
6812 spin_lock_irqsave(&priv->ieee->lock, flags);
6813 if (network->flags & NETWORK_HAS_QOS_PARAMETERS) {
6814 memcpy(&priv->assoc_network->qos_data, &network->qos_data,
6815 sizeof(struct ieee80211_qos_data));
6816 priv->assoc_network->qos_data.active = 1;
6817 if ((network->qos_data.old_param_count !=
6818 network->qos_data.param_count)) {
6819 set_qos_param = 1;
6820 network->qos_data.old_param_count =
6821 network->qos_data.param_count;
6822 }
6823
6824 } else {
6825 if ((network->mode == IEEE_B) || (priv->ieee->mode == IEEE_B))
6826 memcpy(&priv->assoc_network->qos_data.parameters,
6827 &def_parameters_CCK, size);
6828 else
6829 memcpy(&priv->assoc_network->qos_data.parameters,
6830 &def_parameters_OFDM, size);
6831 priv->assoc_network->qos_data.active = 0;
6832 priv->assoc_network->qos_data.supported = 0;
6833 set_qos_param = 1;
6834 }
6835
6836 spin_unlock_irqrestore(&priv->ieee->lock, flags);
6837
6838 if (set_qos_param == 1)
6839 schedule_work(&priv->qos_activate);
6840
6841 return ret;
6842 }
6843
6844 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv)
6845 {
6846 u32 ret = 0;
6847
6848 if ((priv == NULL))
6849 return 0;
6850
6851 if (!(priv->ieee->modulation & IEEE80211_OFDM_MODULATION))
6852 ret = priv->qos_data.burst_duration_CCK;
6853 else
6854 ret = priv->qos_data.burst_duration_OFDM;
6855
6856 return ret;
6857 }
6858
6859 /*
6860 * Initialize the global QoS settings
6861 */
6862 static void ipw_qos_init(struct ipw_priv *priv, int enable,
6863 int burst_enable, u32 burst_duration_CCK,
6864 u32 burst_duration_OFDM)
6865 {
6866 priv->qos_data.qos_enable = enable;
6867
6868 if (priv->qos_data.qos_enable) {
6869 priv->qos_data.def_qos_parm_CCK = &def_qos_parameters_CCK;
6870 priv->qos_data.def_qos_parm_OFDM = &def_qos_parameters_OFDM;
6871 IPW_DEBUG_QOS("QoS is enabled\n");
6872 } else {
6873 priv->qos_data.def_qos_parm_CCK = &def_parameters_CCK;
6874 priv->qos_data.def_qos_parm_OFDM = &def_parameters_OFDM;
6875 IPW_DEBUG_QOS("QoS is not enabled\n");
6876 }
6877
6878 priv->qos_data.burst_enable = burst_enable;
6879
6880 if (burst_enable) {
6881 priv->qos_data.burst_duration_CCK = burst_duration_CCK;
6882 priv->qos_data.burst_duration_OFDM = burst_duration_OFDM;
6883 } else {
6884 priv->qos_data.burst_duration_CCK = 0;
6885 priv->qos_data.burst_duration_OFDM = 0;
6886 }
6887 }
6888
6889 /*
6890 * map the packet priority to the right TX Queue
6891 */
6892 static int ipw_get_tx_queue_number(struct ipw_priv *priv, u16 priority)
6893 {
6894 if (priority > 7 || !priv->qos_data.qos_enable)
6895 priority = 0;
6896
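/* from_priority_to_tx_queue[] is 1-based; convert it to a 0-based
 * TX queue index. */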
6897 return from_priority_to_tx_queue[priority] - 1;
6898 }
6899
6900 /*
6901 * add QoS parameter to the TX command
6902 */
6903 static int ipw_qos_set_tx_queue_command(struct ipw_priv *priv,
6904 u16 priority,
6905 struct tfd_data *tfd, u8 unicast)
6906 {
6907 int ret = 0;
6908 int tx_queue_id = 0;
6909 struct ieee80211_qos_data *qos_data = NULL;
6910 int active, supported;
6911 unsigned long flags;
6912
6913 if (!(priv->status & STATUS_ASSOCIATED))
6914 return 0;
6915
6916 qos_data = &priv->assoc_network->qos_data;
6917
6918 spin_lock_irqsave(&priv->ieee->lock, flags);
6919
6920 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
6921 if (unicast == 0)
6922 qos_data->active = 0;
6923 else
6924 qos_data->active = qos_data->supported;
6925 }
6926
6927 active = qos_data->active;
6928 supported = qos_data->supported;
6929
6930 spin_unlock_irqrestore(&priv->ieee->lock, flags);
6931
6932 IPW_DEBUG_QOS("QoS %d network is QoS active %d supported %d "
6933 "unicast %d\n",
6934 priv->qos_data.qos_enable, active, supported, unicast);
6935 if (active && priv->qos_data.qos_enable) {
6936 ret = from_priority_to_tx_queue[priority];
6937 tx_queue_id = ret - 1;
6938 IPW_DEBUG_QOS("QoS packet priority is %d \n", priority);
6939 if (priority <= 7) {
6940 tfd->tx_flags_ext |= DCT_FLAG_EXT_QOS_ENABLED;
6941 tfd->tfd.tfd_26.mchdr.qos_ctrl = priority;
6942 tfd->tfd.tfd_26.mchdr.frame_ctl |=
6943 IEEE80211_STYPE_QOS_DATA;
6944
6945 if (priv->qos_data.qos_no_ack_mask &
6946 (1UL << tx_queue_id)) {
6947 tfd->tx_flags &= ~DCT_FLAG_ACK_REQD;
6948 tfd->tfd.tfd_26.mchdr.qos_ctrl |=
6949 CTRL_QOS_NO_ACK;
6950 }
6951 }
6952 }
6953
6954 return ret;
6955 }
6956
6957 /*
6958 * background support to run QoS activate functionality
6959 */
6960 static void ipw_bg_qos_activate(void *data)
6961 {
6962 struct ipw_priv *priv = data;
6963
6964 if (priv == NULL)
6965 return;
6966
6967 down(&priv->sem);
6968
6969 if (priv->status & STATUS_ASSOCIATED)
6970 ipw_qos_activate(priv, &(priv->assoc_network->qos_data));
6971
6972 up(&priv->sem);
6973 }
6974
6975 static int ipw_handle_probe_response(struct net_device *dev,
6976 struct ieee80211_probe_response *resp,
6977 struct ieee80211_network *network)
6978 {
6979 struct ipw_priv *priv = ieee80211_priv(dev);
6980 int active_network = ((priv->status & STATUS_ASSOCIATED) &&
6981 (network == priv->assoc_network));
6982
6983 ipw_qos_handle_probe_response(priv, active_network, network);
6984
6985 return 0;
6986 }
6987
6988 static int ipw_handle_beacon(struct net_device *dev,
6989 struct ieee80211_beacon *resp,
6990 struct ieee80211_network *network)
6991 {
6992 struct ipw_priv *priv = ieee80211_priv(dev);
6993 int active_network = ((priv->status & STATUS_ASSOCIATED) &&
6994 (network == priv->assoc_network));
6995
6996 ipw_qos_handle_probe_response(priv, active_network, network);
6997
6998 return 0;
6999 }
7000
7001 static int ipw_handle_assoc_response(struct net_device *dev,
7002 struct ieee80211_assoc_response *resp,
7003 struct ieee80211_network *network)
7004 {
7005 struct ipw_priv *priv = ieee80211_priv(dev);
7006 ipw_qos_association_resp(priv, network);
7007 return 0;
7008 }
7009
7010 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct ieee80211_qos_parameters
7011 *qos_param)
7012 {
7013 struct host_cmd cmd = {
7014 .cmd = IPW_CMD_QOS_PARAMETERS,
7015 .len = (sizeof(struct ieee80211_qos_parameters) * 3)
7016 };
7017
7018 memcpy(cmd.param, qos_param, sizeof(*qos_param) * 3);
7019 return ipw_send_cmd(priv, &cmd);
7020 }
7021
7022 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct ieee80211_qos_information_element
7023 *qos_param)
7024 {
7025 struct host_cmd cmd = {
7026 .cmd = IPW_CMD_WME_INFO,
7027 .len = sizeof(*qos_param)
7028 };
7029
7030 memcpy(cmd.param, qos_param, sizeof(*qos_param));
7031 return ipw_send_cmd(priv, &cmd);
7032 }
7033
7034 #endif /* CONFIG_IPW_QOS */
7035
7036 static int ipw_associate_network(struct ipw_priv *priv,
7037 struct ieee80211_network *network,
7038 struct ipw_supported_rates *rates, int roaming)
7039 {
7040 int err;
7041
7042 if (priv->config & CFG_FIXED_RATE)
7043 ipw_set_fixed_rate(priv, network->mode);
7044
7045 if (!(priv->config & CFG_STATIC_ESSID)) {
7046 priv->essid_len = min(network->ssid_len,
7047 (u8) IW_ESSID_MAX_SIZE);
7048 memcpy(priv->essid, network->ssid, priv->essid_len);
7049 }
7050
7051 network->last_associate = jiffies;
7052
7053 memset(&priv->assoc_request, 0, sizeof(priv->assoc_request));
7054 priv->assoc_request.channel = network->channel;
7055 if ((priv->capability & CAP_PRIVACY_ON) &&
7056 (priv->capability & CAP_SHARED_KEY)) {
7057 priv->assoc_request.auth_type = AUTH_SHARED_KEY;
7058 priv->assoc_request.auth_key = priv->ieee->sec.active_key;
7059
7060 if ((priv->capability & CAP_PRIVACY_ON) &&
7061 (priv->ieee->sec.level == SEC_LEVEL_1) &&
7062 !(priv->ieee->host_encrypt || priv->ieee->host_decrypt))
7063 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
7064 } else {
7065 priv->assoc_request.auth_type = AUTH_OPEN;
7066 priv->assoc_request.auth_key = 0;
7067 }
7068
7069 if (priv->ieee->wpa_ie_len) {
7070 priv->assoc_request.policy_support = 0x02; /* RSN active */
7071 ipw_set_rsn_capa(priv, priv->ieee->wpa_ie,
7072 priv->ieee->wpa_ie_len);
7073 }
7074
7075 /*
7076 * It is valid for our ieee device to support multiple modes, but
7077 * when it comes to associating to a given network we have to choose
7078 * just one mode.
7079 */
7080 if (network->mode & priv->ieee->mode & IEEE_A)
7081 priv->assoc_request.ieee_mode = IPW_A_MODE;
7082 else if (network->mode & priv->ieee->mode & IEEE_G)
7083 priv->assoc_request.ieee_mode = IPW_G_MODE;
7084 else if (network->mode & priv->ieee->mode & IEEE_B)
7085 priv->assoc_request.ieee_mode = IPW_B_MODE;
7086
7087 priv->assoc_request.capability = network->capability;
7088 if ((network->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
7089 && !(priv->config & CFG_PREAMBLE_LONG)) {
7090 priv->assoc_request.preamble_length = DCT_FLAG_SHORT_PREAMBLE;
7091 } else {
7092 priv->assoc_request.preamble_length = DCT_FLAG_LONG_PREAMBLE;
7093
7094 /* Clear the short preamble if we won't be supporting it */
7095 priv->assoc_request.capability &=
7096 ~WLAN_CAPABILITY_SHORT_PREAMBLE;
7097 }
7098
7099 /* Clear capability bits that aren't used in Ad Hoc */
7100 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7101 priv->assoc_request.capability &=
7102 ~WLAN_CAPABILITY_SHORT_SLOT_TIME;
7103
7104 IPW_DEBUG_ASSOC("%sssociation attempt: '%s', channel %d, "
7105 "802.11%c [%d], %s[:%s], enc=%s%s%s%c%c\n",
7106 roaming ? "Rea" : "A",
7107 escape_essid(priv->essid, priv->essid_len),
7108 network->channel,
7109 ipw_modes[priv->assoc_request.ieee_mode],
7110 rates->num_rates,
7111 (priv->assoc_request.preamble_length ==
7112 DCT_FLAG_LONG_PREAMBLE) ? "long" : "short",
7113 network->capability &
7114 WLAN_CAPABILITY_SHORT_PREAMBLE ? "short" : "long",
7115 priv->capability & CAP_PRIVACY_ON ? "on " : "off",
7116 priv->capability & CAP_PRIVACY_ON ?
7117 (priv->capability & CAP_SHARED_KEY ? "(shared)" :
7118 "(open)") : "",
7119 priv->capability & CAP_PRIVACY_ON ? " key=" : "",
7120 priv->capability & CAP_PRIVACY_ON ?
7121 '1' + priv->ieee->sec.active_key : '.',
7122 priv->capability & CAP_PRIVACY_ON ? '.' : ' ');
7123
7124 priv->assoc_request.beacon_interval = network->beacon_interval;
7125 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
7126 (network->time_stamp[0] == 0) && (network->time_stamp[1] == 0)) {
7127 priv->assoc_request.assoc_type = HC_IBSS_START;
7128 priv->assoc_request.assoc_tsf_msw = 0;
7129 priv->assoc_request.assoc_tsf_lsw = 0;
7130 } else {
7131 if (unlikely(roaming))
7132 priv->assoc_request.assoc_type = HC_REASSOCIATE;
7133 else
7134 priv->assoc_request.assoc_type = HC_ASSOCIATE;
7135 priv->assoc_request.assoc_tsf_msw = network->time_stamp[1];
7136 priv->assoc_request.assoc_tsf_lsw = network->time_stamp[0];
7137 }
7138
7139 memcpy(priv->assoc_request.bssid, network->bssid, ETH_ALEN);
7140
7141 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
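/* In IBSS, direct frames to the broadcast address and use the
 * network's ATIM window; otherwise address the BSSID directly. */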
7142 memset(&priv->assoc_request.dest, 0xFF, ETH_ALEN);
7143 priv->assoc_request.atim_window = network->atim_window;
7144 } else {
7145 memcpy(priv->assoc_request.dest, network->bssid, ETH_ALEN);
7146 priv->assoc_request.atim_window = 0;
7147 }
7148
7149 priv->assoc_request.listen_interval = network->listen_interval;
7150
7151 err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
7152 if (err) {
7153 IPW_DEBUG_HC("Attempt to send SSID command failed.\n");
7154 return err;
7155 }
7156
7157 rates->ieee_mode = priv->assoc_request.ieee_mode;
7158 rates->purpose = IPW_RATE_CONNECT;
7159 ipw_send_supported_rates(priv, rates);
7160
7161 if (priv->assoc_request.ieee_mode == IPW_G_MODE)
7162 priv->sys_config.dot11g_auto_detection = 1;
7163 else
7164 priv->sys_config.dot11g_auto_detection = 0;
7165
7166 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7167 priv->sys_config.answer_broadcast_ssid_probe = 1;
7168 else
7169 priv->sys_config.answer_broadcast_ssid_probe = 0;
7170
7171 err = ipw_send_system_config(priv, &priv->sys_config);
7172 if (err) {
7173 IPW_DEBUG_HC("Attempt to send sys config command failed.\n");
7174 return err;
7175 }
7176
7177 IPW_DEBUG_ASSOC("Association sensitivity: %d\n", network->stats.rssi);
7178 err = ipw_set_sensitivity(priv, network->stats.rssi + IPW_RSSI_TO_DBM);
7179 if (err) {
7180 IPW_DEBUG_HC("Attempt to send associate command failed.\n");
7181 return err;
7182 }
7183
7184 /*
7185 * If preemption is enabled, it is possible for the association
7186 * to complete before we return from ipw_send_associate. Therefore
7187 * we have to be sure to update our private data first.
7188 */
7189 priv->channel = network->channel;
7190 memcpy(priv->bssid, network->bssid, ETH_ALEN);
7191 priv->status |= STATUS_ASSOCIATING;
7192 priv->status &= ~STATUS_SECURITY_UPDATED;
7193
7194 priv->assoc_network = network;
7195
7196 #ifdef CONFIG_IPW_QOS
7197 ipw_qos_association(priv, network);
7198 #endif
7199
7200 err = ipw_send_associate(priv, &priv->assoc_request);
7201 if (err) {
7202 IPW_DEBUG_HC("Attempt to send associate command failed.\n");
7203 return err;
7204 }
7205
7206 IPW_DEBUG(IPW_DL_STATE, "associating: '%s' " MAC_FMT " \n",
7207 escape_essid(priv->essid, priv->essid_len),
7208 MAC_ARG(priv->bssid));
7209
7210 return 0;
7211 }
7212
7213 static void ipw_roam(void *data)
7214 {
7215 struct ipw_priv *priv = data;
7216 struct ieee80211_network *network = NULL;
7217 struct ipw_network_match match = {
7218 .network = priv->assoc_network
7219 };
7220
7221 /* The roaming process is as follows:
7222 *
7223 * 1. Missed beacon threshold triggers the roaming process by
7224 * setting the status ROAM bit and requesting a scan.
7225 * 2. When the scan completes, it schedules the ROAM work
7226 * 3. The ROAM work looks at all of the known networks for one that
7227 * is a better network than the currently associated. If none
7228 * found, the ROAM process is over (ROAM bit cleared)
7229 * 4. If a better network is found, a disassociation request is
7230 * sent.
7231 * 5. When the disassociation completes, the roam work is again
7232 * scheduled. The second time through, the driver is no longer
7233 * associated, and the newly selected network is sent an
7234 * association request.
7235 * 6. At this point, the roaming process is complete and the ROAM
7236 * status bit is cleared.
7237 */
7238
7239 /* If we are no longer associated, and the roaming bit is no longer
7240 * set, then we are not actively roaming, so just return */
7241 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ROAMING)))
7242 return;
7243
7244 if (priv->status & STATUS_ASSOCIATED) {
7245 /* First pass through ROAM process -- look for a better
7246 * network */
7247 unsigned long flags;
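/* Temporarily floor the current AP's RSSI so ipw_best_network()
 * will prefer any other viable AP; the value is restored below. */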
7248 u8 rssi = priv->assoc_network->stats.rssi;
7249 priv->assoc_network->stats.rssi = -128;
7250 spin_lock_irqsave(&priv->ieee->lock, flags);
7251 list_for_each_entry(network, &priv->ieee->network_list, list) {
7252 if (network != priv->assoc_network)
7253 ipw_best_network(priv, &match, network, 1);
7254 }
7255 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7256 priv->assoc_network->stats.rssi = rssi;
7257
7258 if (match.network == priv->assoc_network) {
7259 IPW_DEBUG_ASSOC("No better APs in this network to "
7260 "roam to.\n");
7261 priv->status &= ~STATUS_ROAMING;
7262 ipw_debug_config(priv);
7263 return;
7264 }
7265
7266 ipw_send_disassociate(priv, 1);
7267 priv->assoc_network = match.network;
7268
7269 return;
7270 }
7271
7272 /* Second pass through ROAM process -- request association */
7273 ipw_compatible_rates(priv, priv->assoc_network, &match.rates);
7274 ipw_associate_network(priv, priv->assoc_network, &match.rates, 1);
7275 priv->status &= ~STATUS_ROAMING;
7276 }
7277
7278 static void ipw_bg_roam(void *data)
7279 {
7280 struct ipw_priv *priv = data;
7281 down(&priv->sem);
7282 ipw_roam(data);
7283 up(&priv->sem);
7284 }
7285
7286 static int ipw_associate(void *data)
7287 {
7288 struct ipw_priv *priv = data;
7289
7290 struct ieee80211_network *network = NULL;
7291 struct ipw_network_match match = {
7292 .network = NULL
7293 };
7294 struct ipw_supported_rates *rates;
7295 struct list_head *element;
7296 unsigned long flags;
7297
7298 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
7299 IPW_DEBUG_ASSOC("Not attempting association (monitor mode)\n");
7300 return 0;
7301 }
7302
7303 if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
7304 IPW_DEBUG_ASSOC("Not attempting association (already in "
7305 "progress)\n");
7306 return 0;
7307 }
7308
7309 if (priv->status & STATUS_DISASSOCIATING) {
7310 IPW_DEBUG_ASSOC("Not attempting association (in "
7311 "disassociating)\n ");
7312 queue_work(priv->workqueue, &priv->associate);
7313 return 0;
7314 }
7315
7316 if (!ipw_is_init(priv) || (priv->status & STATUS_SCANNING)) {
7317 IPW_DEBUG_ASSOC("Not attempting association (scanning or not "
7318 "initialized)\n");
7319 return 0;
7320 }
7321
7322 if (!(priv->config & CFG_ASSOCIATE) &&
7323 !(priv->config & (CFG_STATIC_ESSID |
7324 CFG_STATIC_CHANNEL | CFG_STATIC_BSSID))) {
7325 IPW_DEBUG_ASSOC("Not attempting association (associate=0)\n");
7326 return 0;
7327 }
7328
7329 /* Protect our use of the network_list */
7330 spin_lock_irqsave(&priv->ieee->lock, flags);
7331 list_for_each_entry(network, &priv->ieee->network_list, list)
7332 ipw_best_network(priv, &match, network, 0);
7333
7334 network = match.network;
7335 rates = &match.rates;
7336
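/* No matching network was found.  If configured for ad-hoc creation
 * with a static ESSID and channel, create our own IBSS from the
 * free list. */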
7337 if (network == NULL &&
7338 priv->ieee->iw_mode == IW_MODE_ADHOC &&
7339 priv->config & CFG_ADHOC_CREATE &&
7340 priv->config & CFG_STATIC_ESSID &&
7341 priv->config & CFG_STATIC_CHANNEL &&
7342 !list_empty(&priv->ieee->network_free_list)) {
7343 element = priv->ieee->network_free_list.next;
7344 network = list_entry(element, struct ieee80211_network, list);
7345 ipw_adhoc_create(priv, network);
7346 rates = &priv->rates;
7347 list_del(element);
7348 list_add_tail(&network->list, &priv->ieee->network_list);
7349 }
7350 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7351
7352 /* If we reached the end of the list, then we don't have any valid
7353 * matching APs */
7354 if (!network) {
7355 ipw_debug_config(priv);
7356
7357 if (!(priv->status & STATUS_SCANNING)) {
7358 if (!(priv->config & CFG_SPEED_SCAN))
7359 queue_delayed_work(priv->workqueue,
7360 &priv->request_scan,
7361 SCAN_INTERVAL);
7362 else
7363 queue_work(priv->workqueue,
7364 &priv->request_scan);
7365 }
7366
7367 return 0;
7368 }
7369
7370 ipw_associate_network(priv, network, rates, 0);
7371
7372 return 1;
7373 }
7374
7375 static void ipw_bg_associate(void *data)
7376 {
7377 struct ipw_priv *priv = data;
7378 down(&priv->sem);
7379 ipw_associate(data);
7380 up(&priv->sem);
7381 }
7382
7383 static void ipw_rebuild_decrypted_skb(struct ipw_priv *priv,
7384 struct sk_buff *skb)
7385 {
7386 struct ieee80211_hdr *hdr;
7387 u16 fc;
7388
7389 hdr = (struct ieee80211_hdr *)skb->data;
7390 fc = le16_to_cpu(hdr->frame_ctl);
7391 if (!(fc & IEEE80211_FCTL_PROTECTED))
7392 return;
7393
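/* The HW has already decrypted the frame but leaves the protected
 * bit and the security header/trailer in place; strip them so the
 * 802.11 stack sees a plain frame. */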
7394 fc &= ~IEEE80211_FCTL_PROTECTED;
7395 hdr->frame_ctl = cpu_to_le16(fc);
7396 switch (priv->ieee->sec.level) {
7397 case SEC_LEVEL_3:
7398 /* Remove CCMP HDR */
7399 memmove(skb->data + IEEE80211_3ADDR_LEN,
7400 skb->data + IEEE80211_3ADDR_LEN + 8,
7401 skb->len - IEEE80211_3ADDR_LEN - 8);
7402 skb_trim(skb, skb->len - 16); /* CCMP_HDR_LEN + CCMP_MIC_LEN */
7403 break;
7404 case SEC_LEVEL_2:
7405 break;
7406 case SEC_LEVEL_1:
7407 /* Remove IV */
7408 memmove(skb->data + IEEE80211_3ADDR_LEN,
7409 skb->data + IEEE80211_3ADDR_LEN + 4,
7410 skb->len - IEEE80211_3ADDR_LEN - 4);
7411 skb_trim(skb, skb->len - 8); /* IV + ICV */
7412 break;
7413 case SEC_LEVEL_0:
7414 break;
7415 default:
7416 printk(KERN_ERR "Unknown security level %d\n",
7417 priv->ieee->sec.level);
7418 break;
7419 }
7420 }
7421
7422 static void ipw_handle_data_packet(struct ipw_priv *priv,
7423 struct ipw_rx_mem_buffer *rxb,
7424 struct ieee80211_rx_stats *stats)
7425 {
7426 struct ieee80211_hdr_4addr *hdr;
7427 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7428
7429 /* We received data from the HW, so stop the watchdog */
7430 priv->net_dev->trans_start = jiffies;
7431
7432 /* We only process data packets if the
7433 * interface is open */
7434 if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7435 skb_tailroom(rxb->skb))) {
7436 priv->ieee->stats.rx_errors++;
7437 priv->wstats.discard.misc++;
7438 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7439 return;
7440 } else if (unlikely(!netif_running(priv->net_dev))) {
7441 priv->ieee->stats.rx_dropped++;
7442 priv->wstats.discard.misc++;
7443 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7444 return;
7445 }
7446
7447 /* Advance skb->data to the start of the actual payload */
7448 skb_reserve(rxb->skb, offsetof(struct ipw_rx_packet, u.frame.data));
7449
7450 /* Set the size of the skb to the size of the frame */
7451 skb_put(rxb->skb, le16_to_cpu(pkt->u.frame.length));
7452
7453 IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7454
7455 /* HW decrypt will not clear the WEP bit, MIC, PN, etc. */
7456 hdr = (struct ieee80211_hdr_4addr *)rxb->skb->data;
7457 if (priv->ieee->iw_mode != IW_MODE_MONITOR &&
7458 (is_multicast_ether_addr(hdr->addr1) ?
7459 !priv->ieee->host_mc_decrypt : !priv->ieee->host_decrypt))
7460 ipw_rebuild_decrypted_skb(priv, rxb->skb);
7461
7462 if (!ieee80211_rx(priv->ieee, rxb->skb, stats))
7463 priv->ieee->stats.rx_errors++;
7464 else { /* ieee80211_rx succeeded, so it now owns the SKB */
7465 rxb->skb = NULL;
7466 __ipw_led_activity_on(priv);
7467 }
7468 }
7469
7470 #ifdef CONFIG_IEEE80211_RADIOTAP
7471 static void ipw_handle_data_packet_monitor(struct ipw_priv *priv,
7472 struct ipw_rx_mem_buffer *rxb,
7473 struct ieee80211_rx_stats *stats)
7474 {
7475 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7476 struct ipw_rx_frame *frame = &pkt->u.frame;
7477
7478 /* initial pull of some data */
7479 u16 received_channel = frame->received_channel;
7480 u8 antennaAndPhy = frame->antennaAndPhy;
7481 s8 antsignal = frame->rssi_dbm - IPW_RSSI_TO_DBM; /* call it signed anyhow */
7482 u16 pktrate = frame->rate;
7483
7484 /* Magic struct that slots into the radiotap header -- no reason
7485 * to build this manually element by element, we can write it much
7486 * more efficiently than we can parse it. ORDER MATTERS HERE */
7487 struct ipw_rt_hdr {
7488 struct ieee80211_radiotap_header rt_hdr;
7489 u8 rt_flags; /* radiotap packet flags */
7490 u8 rt_rate; /* rate in 500kb/s */
7491 u16 rt_channel; /* channel in mhz */
7492 u16 rt_chbitmask; /* channel bitfield */
7493 s8 rt_dbmsignal; /* signal in dbM, kluged to signed */
7494 u8 rt_antenna; /* antenna number */
7495 } *ipw_rt;
7496
7497 short len = le16_to_cpu(pkt->u.frame.length);
7498
7499 /* We received data from the HW, so stop the watchdog */
7500 priv->net_dev->trans_start = jiffies;
7501
7502 /* We only process data packets if the
7503 * interface is open */
7504 if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7505 skb_tailroom(rxb->skb))) {
7506 priv->ieee->stats.rx_errors++;
7507 priv->wstats.discard.misc++;
7508 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7509 return;
7510 } else if (unlikely(!netif_running(priv->net_dev))) {
7511 priv->ieee->stats.rx_dropped++;
7512 priv->wstats.discard.misc++;
7513 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7514 return;
7515 }
7516
7517 /* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
7518 * that now */
7519 if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
7520 /* FIXME: Should alloc bigger skb instead */
7521 priv->ieee->stats.rx_dropped++;
7522 priv->wstats.discard.misc++;
7523 IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
7524 return;
7525 }
7526
7527 /* copy the frame itself */
7528 memmove(rxb->skb->data + sizeof(struct ipw_rt_hdr),
7529 rxb->skb->data + IPW_RX_FRAME_SIZE, len);
7530
7531 /* Zero the radiotap static buffer ... We only need to zero the bytes NOT
7532 * part of our real header, saves a little time.
7533 *
7534 * No longer necessary since we fill in all our data. Purge before merging
7535 * patch officially.
7536 * memset(rxb->skb->data + sizeof(struct ipw_rt_hdr), 0,
7537 * IEEE80211_RADIOTAP_HDRLEN - sizeof(struct ipw_rt_hdr));
7538 */
7539
7540 ipw_rt = (struct ipw_rt_hdr *)rxb->skb->data;
7541
7542 ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
7543 ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */
7544 ipw_rt->rt_hdr.it_len = sizeof(struct ipw_rt_hdr); /* total header+data */
7545
7546 /* Big bitfield of all the fields we provide in radiotap */
7547 ipw_rt->rt_hdr.it_present =
7548 ((1 << IEEE80211_RADIOTAP_FLAGS) |
7549 (1 << IEEE80211_RADIOTAP_RATE) |
7550 (1 << IEEE80211_RADIOTAP_CHANNEL) |
7551 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
7552 (1 << IEEE80211_RADIOTAP_ANTENNA));
7553
7554 /* Zero the flags, we'll add to them as we go */
7555 ipw_rt->rt_flags = 0;
7556
7557 /* Convert signal to DBM */
7558 ipw_rt->rt_dbmsignal = antsignal;
7559
7560 /* Convert the channel data and set the flags */
7561 ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(received_channel));
7562 if (received_channel > 14) { /* 802.11a */
7563 ipw_rt->rt_chbitmask =
7564 cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
7565 } else if (antennaAndPhy & 32) { /* 802.11b */
7566 ipw_rt->rt_chbitmask =
7567 cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
7568 } else { /* 802.11g */
7569 ipw_rt->rt_chbitmask =
7570 cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ));
7571 }
7572
7573 /* set the rate in multiples of 500k/s */
7574 switch (pktrate) {
7575 case IPW_TX_RATE_1MB:
7576 ipw_rt->rt_rate = 2;
7577 break;
7578 case IPW_TX_RATE_2MB:
7579 ipw_rt->rt_rate = 4;
7580 break;
7581 case IPW_TX_RATE_5MB:
7582 ipw_rt->rt_rate = 10;
7583 break;
7584 case IPW_TX_RATE_6MB:
7585 ipw_rt->rt_rate = 12;
7586 break;
7587 case IPW_TX_RATE_9MB:
7588 ipw_rt->rt_rate = 18;
7589 break;
7590 case IPW_TX_RATE_11MB:
7591 ipw_rt->rt_rate = 22;
7592 break;
7593 case IPW_TX_RATE_12MB:
7594 ipw_rt->rt_rate = 24;
7595 break;
7596 case IPW_TX_RATE_18MB:
7597 ipw_rt->rt_rate = 36;
7598 break;
7599 case IPW_TX_RATE_24MB:
7600 ipw_rt->rt_rate = 48;
7601 break;
7602 case IPW_TX_RATE_36MB:
7603 ipw_rt->rt_rate = 72;
7604 break;
7605 case IPW_TX_RATE_48MB:
7606 ipw_rt->rt_rate = 96;
7607 break;
7608 case IPW_TX_RATE_54MB:
7609 ipw_rt->rt_rate = 108;
7610 break;
7611 default:
7612 ipw_rt->rt_rate = 0;
7613 break;
7614 }
7615
7616 /* antenna number */
7617 ipw_rt->rt_antenna = (antennaAndPhy & 3); /* Is this right? */
7618
7619 /* set the preamble flag if we have it */
7620 if ((antennaAndPhy & 64))
7621 ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
7622
7623 /* Set the size of the skb to the size of the frame */
7624 skb_put(rxb->skb, len + sizeof(struct ipw_rt_hdr));
7625
7626 IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7627
7628 if (!ieee80211_rx(priv->ieee, rxb->skb, stats))
7629 priv->ieee->stats.rx_errors++;
7630 else { /* ieee80211_rx succeeded, so it now owns the SKB */
7631 rxb->skb = NULL;
7632 /* no LED during capture */
7633 }
7634 }
7635 #endif
7636
7637 static int is_network_packet(struct ipw_priv *priv,
7638 struct ieee80211_hdr_4addr *header)
7639 {
7640 /* Filter incoming packets to determine if they are targeted toward
7641 * this network, discarding packets coming from ourselves */
7642 switch (priv->ieee->iw_mode) {
7643 case IW_MODE_ADHOC: /* Header: Dest. | Source | BSSID */
7644 /* packets from our adapter are dropped (echo) */
7645 if (!memcmp(header->addr2, priv->net_dev->dev_addr, ETH_ALEN))
7646 return 0;
7647
7648 /* {broad,multi}cast packets to our BSSID go through */
7649 if (is_multicast_ether_addr(header->addr1))
7650 return !memcmp(header->addr3, priv->bssid, ETH_ALEN);
7651
7652 /* packets to our adapter go through */
7653 return !memcmp(header->addr1, priv->net_dev->dev_addr,
7654 ETH_ALEN);
7655
7656 case IW_MODE_INFRA: /* Header: Dest. | BSSID | Source */
7657 /* packets from our adapter are dropped (echo) */
7658 if (!memcmp(header->addr3, priv->net_dev->dev_addr, ETH_ALEN))
7659 return 0;
7660
7661 /* {broad,multi}cast packets to our BSS go through */
7662 if (is_multicast_ether_addr(header->addr1))
7663 return !memcmp(header->addr2, priv->bssid, ETH_ALEN);
7664
7665 /* packets to our adapter go through */
7666 return !memcmp(header->addr1, priv->net_dev->dev_addr,
7667 ETH_ALEN);
7668 }
7669
7670 return 1;
7671 }
7672
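/* One second worth of jiffies: frames repeating a sequence number inside
 * this window are treated as retransmissions by is_duplicate_packet(). */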
7673 #define IPW_PACKET_RETRY_TIME HZ
7674
7675 static int is_duplicate_packet(struct ipw_priv *priv,
7676 struct ieee80211_hdr_4addr *header)
7677 {
7678 u16 sc = le16_to_cpu(header->seq_ctl);
7679 u16 seq = WLAN_GET_SEQ_SEQ(sc);
7680 u16 frag = WLAN_GET_SEQ_FRAG(sc);
7681 u16 *last_seq, *last_frag;
7682 unsigned long *last_time;
7683
7684 switch (priv->ieee->iw_mode) {
7685 case IW_MODE_ADHOC:
7686 {
7687 struct list_head *p;
7688 struct ipw_ibss_seq *entry = NULL;
7689 u8 *mac = header->addr2;
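/* IBSS peers are hashed by the low byte of their MAC address so the
 * per-station sequence/fragment state can be looked up quickly. */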
7690 int index = mac[5] % IPW_IBSS_MAC_HASH_SIZE;
7691
7692 __list_for_each(p, &priv->ibss_mac_hash[index]) {
7693 entry =
7694 list_entry(p, struct ipw_ibss_seq, list);
7695 if (!memcmp(entry->mac, mac, ETH_ALEN))
7696 break;
7697 }
7698 if (p == &priv->ibss_mac_hash[index]) {
7699 entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
7700 if (!entry) {
7701 IPW_ERROR
7702 ("Cannot malloc new mac entry\n");
7703 return 0;
7704 }
7705 memcpy(entry->mac, mac, ETH_ALEN);
7706 entry->seq_num = seq;
7707 entry->frag_num = frag;
7708 entry->packet_time = jiffies;
7709 list_add(&entry->list,
7710 &priv->ibss_mac_hash[index]);
7711 return 0;
7712 }
7713 last_seq = &entry->seq_num;
7714 last_frag = &entry->frag_num;
7715 last_time = &entry->packet_time;
7716 break;
7717 }
7718 case IW_MODE_INFRA:
7719 last_seq = &priv->last_seq_num;
7720 last_frag = &priv->last_frag_num;
7721 last_time = &priv->last_packet_time;
7722 break;
7723 default:
7724 return 0;
7725 }
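/* A frame repeating the last sequence number within the retry window is
 * either an exact retransmission (same fragment number) or an out-of-order
 * fragment; both cases are dropped below. */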
7726 if ((*last_seq == seq) &&
7727 time_after(*last_time + IPW_PACKET_RETRY_TIME, jiffies)) {
7728 if (*last_frag == frag)
7729 goto drop;
7730 if (*last_frag + 1 != frag)
7731 /* out-of-order fragment */
7732 goto drop;
7733 } else
7734 *last_seq = seq;
7735
7736 *last_frag = frag;
7737 *last_time = jiffies;
7738 return 0;
7739
7740 drop:
7741 /* The BUG_ON below is commented out because the card has been observed
7742 * receiving duplicate packets without the FCTL_RETRY bit set while in
7743 * IBSS mode with fragmentation enabled.
7744 BUG_ON(!(le16_to_cpu(header->frame_ctl) & IEEE80211_FCTL_RETRY)); */
7745 return 1;
7746 }
7747
7748 static void ipw_handle_mgmt_packet(struct ipw_priv *priv,
7749 struct ipw_rx_mem_buffer *rxb,
7750 struct ieee80211_rx_stats *stats)
7751 {
7752 struct sk_buff *skb = rxb->skb;
7753 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)skb->data;
7754 struct ieee80211_hdr_4addr *header = (struct ieee80211_hdr_4addr *)
7755 (skb->data + IPW_RX_FRAME_SIZE);
7756
7757 ieee80211_rx_mgt(priv->ieee, header, stats);
7758
7759 if (priv->ieee->iw_mode == IW_MODE_ADHOC &&
7760 ((WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
7761 IEEE80211_STYPE_PROBE_RESP) ||
7762 (WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
7763 IEEE80211_STYPE_BEACON))) {
7764 if (!memcmp(header->addr3, priv->bssid, ETH_ALEN))
7765 ipw_add_station(priv, header->addr2);
7766 }
7767
7768 if (priv->config & CFG_NET_STATS) {
7769 IPW_DEBUG_HC("sending stat packet\n");
7770
7771 /* Set the size of the skb to the size of the full
7772 * ipw header and 802.11 frame */
7773 skb_put(skb, le16_to_cpu(pkt->u.frame.length) +
7774 IPW_RX_FRAME_SIZE);
7775
7776 /* Advance past the ipw packet header to the 802.11 frame */
7777 skb_pull(skb, IPW_RX_FRAME_SIZE);
7778
7779 /* Push the ieee80211_rx_stats before the 802.11 frame */
7780 memcpy(skb_push(skb, sizeof(*stats)), stats, sizeof(*stats));
7781
7782 skb->dev = priv->ieee->dev;
7783
7784 /* Point raw at the ieee80211_stats */
7785 skb->mac.raw = skb->data;
7786
7787 skb->pkt_type = PACKET_OTHERHOST;
7788 skb->protocol = __constant_htons(ETH_P_80211_STATS);
7789 memset(skb->cb, 0, sizeof(rxb->skb->cb));
7790 netif_rx(skb);
7791 rxb->skb = NULL;
7792 }
7793 }
7794
7795 /*
7796 * Main entry function for receiving a packet with 802.11 headers. This
7797 * should be called whenever the FW has notified us that there is a new
7798 * skb in the receive queue.
7799 */
7800 static void ipw_rx(struct ipw_priv *priv)
7801 {
7802 struct ipw_rx_mem_buffer *rxb;
7803 struct ipw_rx_packet *pkt;
7804 struct ieee80211_hdr_4addr *header;
7805 u32 r, w, i;
7806 u8 network_packet;
7807
7808 r = ipw_read32(priv, IPW_RX_READ_INDEX);
7809 w = ipw_read32(priv, IPW_RX_WRITE_INDEX);
7810 i = (priv->rxq->processed + 1) % RX_QUEUE_SIZE;
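/* Slots between the last entry we processed and the firmware's read index
 * 'r' hold completed Rx packets; walk them in order until we catch up. */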
7811
7812 while (i != r) {
7813 rxb = priv->rxq->queue[i];
7814 #ifdef CONFIG_IPW2200_DEBUG
7815 if (unlikely(rxb == NULL)) {
7816 printk(KERN_CRIT "Queue not allocated!\n");
7817 break;
7818 }
7819 #endif
7820 priv->rxq->queue[i] = NULL;
7821
7822 pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
7823 IPW_RX_BUF_SIZE,
7824 PCI_DMA_FROMDEVICE);
7825
7826 pkt = (struct ipw_rx_packet *)rxb->skb->data;
7827 IPW_DEBUG_RX("Packet: type=%02X seq=%02X bits=%02X\n",
7828 pkt->header.message_type,
7829 pkt->header.rx_seq_num, pkt->header.control_bits);
7830
7831 switch (pkt->header.message_type) {
7832 case RX_FRAME_TYPE: /* 802.11 frame */ {
7833 struct ieee80211_rx_stats stats = {
7834 .rssi =
7835 le16_to_cpu(pkt->u.frame.rssi_dbm) -
7836 IPW_RSSI_TO_DBM,
7837 .signal =
7838 le16_to_cpu(pkt->u.frame.signal),
7839 .noise =
7840 le16_to_cpu(pkt->u.frame.noise),
7841 .rate = pkt->u.frame.rate,
7842 .mac_time = jiffies,
7843 .received_channel =
7844 pkt->u.frame.received_channel,
7845 .freq =
7846 (pkt->u.frame.
7847 control & (1 << 0)) ?
7848 IEEE80211_24GHZ_BAND :
7849 IEEE80211_52GHZ_BAND,
7850 .len = le16_to_cpu(pkt->u.frame.length),
7851 };
7852
7853 if (stats.rssi != 0)
7854 stats.mask |= IEEE80211_STATMASK_RSSI;
7855 if (stats.signal != 0)
7856 stats.mask |= IEEE80211_STATMASK_SIGNAL;
7857 if (stats.noise != 0)
7858 stats.mask |= IEEE80211_STATMASK_NOISE;
7859 if (stats.rate != 0)
7860 stats.mask |= IEEE80211_STATMASK_RATE;
7861
7862 priv->rx_packets++;
7863
7864 #ifdef CONFIG_IPW2200_MONITOR
7865 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
7866 #ifdef CONFIG_IEEE80211_RADIOTAP
7867 ipw_handle_data_packet_monitor(priv,
7868 rxb,
7869 &stats);
7870 #else
7871 ipw_handle_data_packet(priv, rxb,
7872 &stats);
7873 #endif
7874 break;
7875 }
7876 #endif
7877
7878 header =
7879 (struct ieee80211_hdr_4addr *)(rxb->skb->
7880 data +
7881 IPW_RX_FRAME_SIZE);
7882 /* TODO: Check Ad-Hoc dest/source and make sure
7883 * that we are actually parsing these packets
7884 * correctly -- we should probably use the
7885 * frame control of the packet and disregard
7886 * the current iw_mode */
7887
7888 network_packet =
7889 is_network_packet(priv, header);
7890 if (network_packet && priv->assoc_network) {
7891 priv->assoc_network->stats.rssi =
7892 stats.rssi;
7893 average_add(&priv->average_rssi,
7894 stats.rssi);
7895 priv->last_rx_rssi = stats.rssi;
7896 }
7897
7898 IPW_DEBUG_RX("Frame: len=%u\n",
7899 le16_to_cpu(pkt->u.frame.length));
7900
7901 if (le16_to_cpu(pkt->u.frame.length) <
7902 frame_hdr_len(header)) {
7903 IPW_DEBUG_DROP
7904 ("Received packet is too small. "
7905 "Dropping.\n");
7906 priv->ieee->stats.rx_errors++;
7907 priv->wstats.discard.misc++;
7908 break;
7909 }
7910
7911 switch (WLAN_FC_GET_TYPE
7912 (le16_to_cpu(header->frame_ctl))) {
7913
7914 case IEEE80211_FTYPE_MGMT:
7915 ipw_handle_mgmt_packet(priv, rxb,
7916 &stats);
7917 break;
7918
7919 case IEEE80211_FTYPE_CTL:
7920 break;
7921
7922 case IEEE80211_FTYPE_DATA:
7923 if (unlikely(!network_packet ||
7924 is_duplicate_packet(priv,
7925 header)))
7926 {
7927 IPW_DEBUG_DROP("Dropping: "
7928 MAC_FMT ", "
7929 MAC_FMT ", "
7930 MAC_FMT "\n",
7931 MAC_ARG(header->
7932 addr1),
7933 MAC_ARG(header->
7934 addr2),
7935 MAC_ARG(header->
7936 addr3));
7937 break;
7938 }
7939
7940 ipw_handle_data_packet(priv, rxb,
7941 &stats);
7942
7943 break;
7944 }
7945 break;
7946 }
7947
7948 case RX_HOST_NOTIFICATION_TYPE:{
7949 IPW_DEBUG_RX
7950 ("Notification: subtype=%02X flags=%02X size=%d\n",
7951 pkt->u.notification.subtype,
7952 pkt->u.notification.flags,
7953 pkt->u.notification.size);
7954 ipw_rx_notification(priv, &pkt->u.notification);
7955 break;
7956 }
7957
7958 default:
7959 IPW_DEBUG_RX("Bad Rx packet of type %d\n",
7960 pkt->header.message_type);
7961 break;
7962 }
7963
7964 /* For now we just don't re-use anything. We can tweak this
7965 * later to try and re-use notification packets and SKBs that
7966 * fail to Rx correctly */
7967 if (rxb->skb != NULL) {
7968 dev_kfree_skb_any(rxb->skb);
7969 rxb->skb = NULL;
7970 }
7971
7972 pci_unmap_single(priv->pci_dev, rxb->dma_addr,
7973 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
7974 list_add_tail(&rxb->list, &priv->rxq->rx_used);
7975
7976 i = (i + 1) % RX_QUEUE_SIZE;
7977 }
7978
7979 /* Backtrack one entry */
7980 priv->rxq->processed = (i ? i : RX_QUEUE_SIZE) - 1;
7981
7982 ipw_rx_queue_restock(priv);
7983 }
7984
7985 #define DEFAULT_RTS_THRESHOLD 2304U
7986 #define MIN_RTS_THRESHOLD 1U
7987 #define MAX_RTS_THRESHOLD 2304U
7988 #define DEFAULT_BEACON_INTERVAL 100U
7989 #define DEFAULT_SHORT_RETRY_LIMIT 7U
7990 #define DEFAULT_LONG_RETRY_LIMIT 4U
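/* 2304 bytes is the maximum 802.11 MSDU size, so the default RTS threshold
 * effectively disables RTS/CTS; ipw_wx_get_rts() reports it as disabled. */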
7991
7992 static int ipw_sw_reset(struct ipw_priv *priv, int init)
7993 {
7994 int band, modulation;
7995 int old_mode = priv->ieee->iw_mode;
7996
7997 /* Initialize module parameter values here */
7998 priv->config = 0;
7999
8000 /* We default to disabling the LED code as right now it causes
8001 * too many systems to lock up... */
8002 if (!led)
8003 priv->config |= CFG_NO_LED;
8004
8005 if (associate)
8006 priv->config |= CFG_ASSOCIATE;
8007 else
8008 IPW_DEBUG_INFO("Auto associate disabled.\n");
8009
8010 if (auto_create)
8011 priv->config |= CFG_ADHOC_CREATE;
8012 else
8013 IPW_DEBUG_INFO("Auto adhoc creation disabled.\n");
8014
8015 if (disable) {
8016 priv->status |= STATUS_RF_KILL_SW;
8017 IPW_DEBUG_INFO("Radio disabled.\n");
8018 }
8019
8020 if (channel != 0) {
8021 priv->config |= CFG_STATIC_CHANNEL;
8022 priv->channel = channel;
8023 IPW_DEBUG_INFO("Bind to static channel %d\n", channel);
8024 /* TODO: Validate that provided channel is in range */
8025 }
8026 #ifdef CONFIG_IPW_QOS
8027 ipw_qos_init(priv, qos_enable, qos_burst_enable,
8028 burst_duration_CCK, burst_duration_OFDM);
8029 #endif /* CONFIG_IPW_QOS */
8030
8031 switch (mode) {
8032 case 1:
8033 priv->ieee->iw_mode = IW_MODE_ADHOC;
8034 priv->net_dev->type = ARPHRD_ETHER;
8035
8036 break;
8037 #ifdef CONFIG_IPW2200_MONITOR
8038 case 2:
8039 priv->ieee->iw_mode = IW_MODE_MONITOR;
8040 #ifdef CONFIG_IEEE80211_RADIOTAP
8041 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8042 #else
8043 priv->net_dev->type = ARPHRD_IEEE80211;
8044 #endif
8045 break;
8046 #endif
8047 default:
8048 case 0:
8049 priv->net_dev->type = ARPHRD_ETHER;
8050 priv->ieee->iw_mode = IW_MODE_INFRA;
8051 break;
8052 }
8053
8054 if (hwcrypto) {
8055 priv->ieee->host_encrypt = 0;
8056 priv->ieee->host_encrypt_msdu = 0;
8057 priv->ieee->host_decrypt = 0;
8058 priv->ieee->host_mc_decrypt = 0;
8059 }
8060 IPW_DEBUG_INFO("Hardware crypto [%s]\n", hwcrypto ? "on" : "off");
8061
8062 /* IPW2200/2915 is able to do hardware fragmentation. */
8063 priv->ieee->host_open_frag = 0;
8064
8065 if ((priv->pci_dev->device == 0x4223) ||
8066 (priv->pci_dev->device == 0x4224)) {
8067 if (init)
8068 printk(KERN_INFO DRV_NAME
8069 ": Detected Intel PRO/Wireless 2915ABG Network "
8070 "Connection\n");
8071 priv->ieee->abg_true = 1;
8072 band = IEEE80211_52GHZ_BAND | IEEE80211_24GHZ_BAND;
8073 modulation = IEEE80211_OFDM_MODULATION |
8074 IEEE80211_CCK_MODULATION;
8075 priv->adapter = IPW_2915ABG;
8076 priv->ieee->mode = IEEE_A | IEEE_G | IEEE_B;
8077 } else {
8078 if (init)
8079 printk(KERN_INFO DRV_NAME
8080 ": Detected Intel PRO/Wireless 2200BG Network "
8081 "Connection\n");
8082
8083 priv->ieee->abg_true = 0;
8084 band = IEEE80211_24GHZ_BAND;
8085 modulation = IEEE80211_OFDM_MODULATION |
8086 IEEE80211_CCK_MODULATION;
8087 priv->adapter = IPW_2200BG;
8088 priv->ieee->mode = IEEE_G | IEEE_B;
8089 }
8090
8091 priv->ieee->freq_band = band;
8092 priv->ieee->modulation = modulation;
8093
8094 priv->rates_mask = IEEE80211_DEFAULT_RATES_MASK;
8095
8096 priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
8097 priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
8098
8099 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
8100 priv->short_retry_limit = DEFAULT_SHORT_RETRY_LIMIT;
8101 priv->long_retry_limit = DEFAULT_LONG_RETRY_LIMIT;
8102
8103 /* If power management is turned on, default to AC mode */
8104 priv->power_mode = IPW_POWER_AC;
8105 priv->tx_power = IPW_TX_POWER_DEFAULT;
8106
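/* Returns nonzero when the interface mode was left unchanged; callers such
 * as ipw_wx_sw_reset() use this to decide whether a firmware reload and
 * adapter restart are needed. */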
8107 return old_mode == priv->ieee->iw_mode;
8108 }
8109
8110 /*
8111 * This file defines the Wireless Extension handlers. It does not
8112 * define any methods of hardware manipulation and relies on the
8113 * functions defined in ipw_main to provide the HW interaction.
8114 *
8115 * The exception to this is the use of the ipw_get_ordinal()
8116 * function used to poll the hardware vs. making unnecessary calls.
8117 *
8118 */
8119
8120 static int ipw_wx_get_name(struct net_device *dev,
8121 struct iw_request_info *info,
8122 union iwreq_data *wrqu, char *extra)
8123 {
8124 struct ipw_priv *priv = ieee80211_priv(dev);
8125 down(&priv->sem);
8126 if (priv->status & STATUS_RF_KILL_MASK)
8127 strcpy(wrqu->name, "radio off");
8128 else if (!(priv->status & STATUS_ASSOCIATED))
8129 strcpy(wrqu->name, "unassociated");
8130 else
8131 snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11%c",
8132 ipw_modes[priv->assoc_request.ieee_mode]);
8133 IPW_DEBUG_WX("Name: %s\n", wrqu->name);
8134 up(&priv->sem);
8135 return 0;
8136 }
8137
8138 static int ipw_set_channel(struct ipw_priv *priv, u8 channel)
8139 {
8140 if (channel == 0) {
8141 IPW_DEBUG_INFO("Setting channel to ANY (0)\n");
8142 priv->config &= ~CFG_STATIC_CHANNEL;
8143 IPW_DEBUG_ASSOC("Attempting to associate with new "
8144 "parameters.\n");
8145 ipw_associate(priv);
8146 return 0;
8147 }
8148
8149 priv->config |= CFG_STATIC_CHANNEL;
8150
8151 if (priv->channel == channel) {
8152 IPW_DEBUG_INFO("Request to set channel to current value (%d)\n",
8153 channel);
8154 return 0;
8155 }
8156
8157 IPW_DEBUG_INFO("Setting channel to %i\n", (int)channel);
8158 priv->channel = channel;
8159
8160 #ifdef CONFIG_IPW2200_MONITOR
8161 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
8162 int i;
8163 if (priv->status & STATUS_SCANNING) {
8164 IPW_DEBUG_SCAN("Scan abort triggered due to "
8165 "channel change.\n");
8166 ipw_abort_scan(priv);
8167 }
8168
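/* Busy-wait for the abort to take effect: up to 1000 polls of 10 us,
 * i.e. roughly 10 ms in total. */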
8169 for (i = 1000; i && (priv->status & STATUS_SCANNING); i--)
8170 udelay(10);
8171
8172 if (priv->status & STATUS_SCANNING)
8173 IPW_DEBUG_SCAN("Still scanning...\n");
8174 else
8175 IPW_DEBUG_SCAN("Took %dms to abort current scan\n",
8176 1000 - i);
8177
8178 return 0;
8179 }
8180 #endif /* CONFIG_IPW2200_MONITOR */
8181
8182 /* Network configuration changed -- force [re]association */
8183 IPW_DEBUG_ASSOC("[re]association triggered due to channel change.\n");
8184 if (!ipw_disassociate(priv))
8185 ipw_associate(priv);
8186
8187 return 0;
8188 }
8189
8190 static int ipw_wx_set_freq(struct net_device *dev,
8191 struct iw_request_info *info,
8192 union iwreq_data *wrqu, char *extra)
8193 {
8194 struct ipw_priv *priv = ieee80211_priv(dev);
8195 const struct ieee80211_geo *geo = ipw_get_geo(priv->ieee);
8196 struct iw_freq *fwrq = &wrqu->freq;
8197 int ret = 0, i;
8198 u8 channel, flags;
8199 int band;
8200
8201 if (fwrq->m == 0) {
8202 IPW_DEBUG_WX("SET Freq/Channel -> any\n");
8203 down(&priv->sem);
8204 ret = ipw_set_channel(priv, 0);
8205 up(&priv->sem);
8206 return ret;
8207 }
8208 /* if setting by freq convert to channel */
8209 if (fwrq->e == 1) {
8210 channel = ipw_freq_to_channel(priv->ieee, fwrq->m);
8211 if (channel == 0)
8212 return -EINVAL;
8213 } else
8214 channel = fwrq->m;
8215
8216 if (!(band = ipw_is_valid_channel(priv->ieee, channel)))
8217 return -EINVAL;
8218
8219 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
8220 i = ipw_channel_to_index(priv->ieee, channel);
8221 if (i == -1)
8222 return -EINVAL;
8223
8224 flags = (band == IEEE80211_24GHZ_BAND) ?
8225 geo->bg[i].flags : geo->a[i].flags;
8226 if (flags & IEEE80211_CH_PASSIVE_ONLY) {
8227 IPW_DEBUG_WX("Invalid Ad-Hoc channel for 802.11a\n");
8228 return -EINVAL;
8229 }
8230 }
8231
8232 IPW_DEBUG_WX("SET Freq/Channel -> %d \n", fwrq->m);
8233 down(&priv->sem);
8234 ret = ipw_set_channel(priv, channel);
8235 up(&priv->sem);
8236 return ret;
8237 }
8238
8239 static int ipw_wx_get_freq(struct net_device *dev,
8240 struct iw_request_info *info,
8241 union iwreq_data *wrqu, char *extra)
8242 {
8243 struct ipw_priv *priv = ieee80211_priv(dev);
8244
8245 wrqu->freq.e = 0;
8246
8247 /* If we are associated, trying to associate, or have a statically
8248 * configured CHANNEL then return that; otherwise return ANY */
8249 down(&priv->sem);
8250 if (priv->config & CFG_STATIC_CHANNEL ||
8251 priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED))
8252 wrqu->freq.m = priv->channel;
8253 else
8254 wrqu->freq.m = 0;
8255
8256 up(&priv->sem);
8257 IPW_DEBUG_WX("GET Freq/Channel -> %d \n", priv->channel);
8258 return 0;
8259 }
8260
8261 static int ipw_wx_set_mode(struct net_device *dev,
8262 struct iw_request_info *info,
8263 union iwreq_data *wrqu, char *extra)
8264 {
8265 struct ipw_priv *priv = ieee80211_priv(dev);
8266 int err = 0;
8267
8268 IPW_DEBUG_WX("Set MODE: %d\n", wrqu->mode);
8269
8270 switch (wrqu->mode) {
8271 #ifdef CONFIG_IPW2200_MONITOR
8272 case IW_MODE_MONITOR:
8273 #endif
8274 case IW_MODE_ADHOC:
8275 case IW_MODE_INFRA:
8276 break;
8277 case IW_MODE_AUTO:
8278 wrqu->mode = IW_MODE_INFRA;
8279 break;
8280 default:
8281 return -EINVAL;
8282 }
8283 if (wrqu->mode == priv->ieee->iw_mode)
8284 return 0;
8285
8286 down(&priv->sem);
8287
8288 ipw_sw_reset(priv, 0);
8289
8290 #ifdef CONFIG_IPW2200_MONITOR
8291 if (priv->ieee->iw_mode == IW_MODE_MONITOR)
8292 priv->net_dev->type = ARPHRD_ETHER;
8293
8294 if (wrqu->mode == IW_MODE_MONITOR)
8295 #ifdef CONFIG_IEEE80211_RADIOTAP
8296 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8297 #else
8298 priv->net_dev->type = ARPHRD_IEEE80211;
8299 #endif
8300 #endif /* CONFIG_IPW2200_MONITOR */
8301
8302 /* Free the existing firmware and reset the fw_loaded
8303 * flag so ipw_load() will bring in the new firmware */
8304 free_firmware();
8305
8306 priv->ieee->iw_mode = wrqu->mode;
8307
8308 queue_work(priv->workqueue, &priv->adapter_restart);
8309 up(&priv->sem);
8310 return err;
8311 }
8312
8313 static int ipw_wx_get_mode(struct net_device *dev,
8314 struct iw_request_info *info,
8315 union iwreq_data *wrqu, char *extra)
8316 {
8317 struct ipw_priv *priv = ieee80211_priv(dev);
8318 down(&priv->sem);
8319 wrqu->mode = priv->ieee->iw_mode;
8320 IPW_DEBUG_WX("Get MODE -> %d\n", wrqu->mode);
8321 up(&priv->sem);
8322 return 0;
8323 }
8324
8325 /* Values are in microseconds */
8326 static const s32 timeout_duration[] = {
8327 350000,
8328 250000,
8329 75000,
8330 37000,
8331 25000,
8332 };
8333
8334 static const s32 period_duration[] = {
8335 400000,
8336 700000,
8337 1000000,
8338 1000000,
8339 1000000
8340 };
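/* Indexed as [IPW_POWER_LEVEL(power_mode) - 1] by ipw_wx_get_powermode()
 * when reporting the timeout/period of the current power-save level. */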
8341
8342 static int ipw_wx_get_range(struct net_device *dev,
8343 struct iw_request_info *info,
8344 union iwreq_data *wrqu, char *extra)
8345 {
8346 struct ipw_priv *priv = ieee80211_priv(dev);
8347 struct iw_range *range = (struct iw_range *)extra;
8348 const struct ieee80211_geo *geo = ipw_get_geo(priv->ieee);
8349 int i = 0, j;
8350
8351 wrqu->data.length = sizeof(*range);
8352 memset(range, 0, sizeof(*range));
8353
8354 /* 54Mbs == ~27 Mb/s real (802.11g) */
8355 range->throughput = 27 * 1000 * 1000;
8356
8357 range->max_qual.qual = 100;
8358 /* TODO: Find real max RSSI and stick here */
8359 range->max_qual.level = 0;
8360 range->max_qual.noise = priv->ieee->worst_rssi + 0x100;
8361 range->max_qual.updated = 7; /* Updated all three */
8362
8363 range->avg_qual.qual = 70;
8364 /* TODO: Find real 'good' to 'bad' threshold value for RSSI */
8365 range->avg_qual.level = 0; /* FIXME to real average level */
8366 range->avg_qual.noise = 0;
8367 range->avg_qual.updated = 7; /* Updated all three */
8368 down(&priv->sem);
8369 range->num_bitrates = min(priv->rates.num_rates, (u8) IW_MAX_BITRATES);
8370
8371 for (i = 0; i < range->num_bitrates; i++)
8372 range->bitrate[i] = (priv->rates.supported_rates[i] & 0x7F) *
8373 500000;
8374
8375 range->max_rts = DEFAULT_RTS_THRESHOLD;
8376 range->min_frag = MIN_FRAG_THRESHOLD;
8377 range->max_frag = MAX_FRAG_THRESHOLD;
8378
8379 range->encoding_size[0] = 5;
8380 range->encoding_size[1] = 13;
8381 range->num_encoding_sizes = 2;
8382 range->max_encoding_tokens = WEP_KEYS;
8383
8384 /* Set the Wireless Extension versions */
8385 range->we_version_compiled = WIRELESS_EXT;
8386 range->we_version_source = 16;
8387
8388 i = 0;
8389 if (priv->ieee->mode & (IEEE_B | IEEE_G)) {
8390 for (j = 0; j < geo->bg_channels && i < IW_MAX_FREQUENCIES;
8391 i++, j++) {
8392 range->freq[i].i = geo->bg[j].channel;
8393 range->freq[i].m = geo->bg[j].freq * 100000;
8394 range->freq[i].e = 1;
8395 }
8396 }
8397
8398 if (priv->ieee->mode & IEEE_A) {
8399 for (j = 0; j < geo->a_channels && i < IW_MAX_FREQUENCIES;
8400 i++, j++) {
8401 range->freq[i].i = geo->a[j].channel;
8402 range->freq[i].m = geo->a[j].freq * 100000;
8403 range->freq[i].e = 1;
8404 }
8405 }
8406
8407 range->num_channels = i;
8408 range->num_frequency = i;
8409
8410 up(&priv->sem);
8411
8412 /* Event capability (kernel + driver) */
8413 range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
8414 IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) |
8415 IW_EVENT_CAPA_MASK(SIOCGIWAP));
8416 range->event_capa[1] = IW_EVENT_CAPA_K_1;
8417
8418 IPW_DEBUG_WX("GET Range\n");
8419 return 0;
8420 }
8421
8422 static int ipw_wx_set_wap(struct net_device *dev,
8423 struct iw_request_info *info,
8424 union iwreq_data *wrqu, char *extra)
8425 {
8426 struct ipw_priv *priv = ieee80211_priv(dev);
8427
8428 static const unsigned char any[] = {
8429 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
8430 };
8431 static const unsigned char off[] = {
8432 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
8433 };
8434
8435 if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
8436 return -EINVAL;
8437 down(&priv->sem);
8438 if (!memcmp(any, wrqu->ap_addr.sa_data, ETH_ALEN) ||
8439 !memcmp(off, wrqu->ap_addr.sa_data, ETH_ALEN)) {
8440 /* we disable mandatory BSSID association */
8441 IPW_DEBUG_WX("Setting AP BSSID to ANY\n");
8442 priv->config &= ~CFG_STATIC_BSSID;
8443 IPW_DEBUG_ASSOC("Attempting to associate with new "
8444 "parameters.\n");
8445 ipw_associate(priv);
8446 up(&priv->sem);
8447 return 0;
8448 }
8449
8450 priv->config |= CFG_STATIC_BSSID;
8451 if (!memcmp(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN)) {
8452 IPW_DEBUG_WX("BSSID set to current BSSID.\n");
8453 up(&priv->sem);
8454 return 0;
8455 }
8456
8457 IPW_DEBUG_WX("Setting mandatory BSSID to " MAC_FMT "\n",
8458 MAC_ARG(wrqu->ap_addr.sa_data));
8459
8460 memcpy(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN);
8461
8462 /* Network configuration changed -- force [re]association */
8463 IPW_DEBUG_ASSOC("[re]association triggered due to BSSID change.\n");
8464 if (!ipw_disassociate(priv))
8465 ipw_associate(priv);
8466
8467 up(&priv->sem);
8468 return 0;
8469 }
8470
8471 static int ipw_wx_get_wap(struct net_device *dev,
8472 struct iw_request_info *info,
8473 union iwreq_data *wrqu, char *extra)
8474 {
8475 struct ipw_priv *priv = ieee80211_priv(dev);
8476 /* If we are associated, trying to associate, or have a statically
8477 * configured BSSID then return that; otherwise return ANY */
8478 down(&priv->sem);
8479 if (priv->config & CFG_STATIC_BSSID ||
8480 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
8481 wrqu->ap_addr.sa_family = ARPHRD_ETHER;
8482 memcpy(wrqu->ap_addr.sa_data, priv->bssid, ETH_ALEN);
8483 } else
8484 memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
8485
8486 IPW_DEBUG_WX("Getting WAP BSSID: " MAC_FMT "\n",
8487 MAC_ARG(wrqu->ap_addr.sa_data));
8488 up(&priv->sem);
8489 return 0;
8490 }
8491
8492 static int ipw_wx_set_essid(struct net_device *dev,
8493 struct iw_request_info *info,
8494 union iwreq_data *wrqu, char *extra)
8495 {
8496 struct ipw_priv *priv = ieee80211_priv(dev);
8497 char *essid = ""; /* ANY */
8498 int length = 0;
8499 down(&priv->sem);
8500 if (wrqu->essid.flags && wrqu->essid.length) {
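/* Older Wireless Extensions versions include the trailing '\0' in
 * essid.length, which is presumably why one byte is subtracted here. */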
8501 length = wrqu->essid.length - 1;
8502 essid = extra;
8503 }
8504 if (length == 0) {
8505 IPW_DEBUG_WX("Setting ESSID to ANY\n");
8506 if ((priv->config & CFG_STATIC_ESSID) &&
8507 !(priv->status & (STATUS_ASSOCIATED |
8508 STATUS_ASSOCIATING))) {
8509 IPW_DEBUG_ASSOC("Attempting to associate with new "
8510 "parameters.\n");
8511 priv->config &= ~CFG_STATIC_ESSID;
8512 ipw_associate(priv);
8513 }
8514 up(&priv->sem);
8515 return 0;
8516 }
8517
8518 length = min(length, IW_ESSID_MAX_SIZE);
8519
8520 priv->config |= CFG_STATIC_ESSID;
8521
8522 if (priv->essid_len == length && !memcmp(priv->essid, extra, length)) {
8523 IPW_DEBUG_WX("ESSID set to current ESSID.\n");
8524 up(&priv->sem);
8525 return 0;
8526 }
8527
8528 IPW_DEBUG_WX("Setting ESSID: '%s' (%d)\n", escape_essid(essid, length),
8529 length);
8530
8531 priv->essid_len = length;
8532 memcpy(priv->essid, essid, priv->essid_len);
8533
8534 /* Network configuration changed -- force [re]association */
8535 IPW_DEBUG_ASSOC("[re]association triggered due to ESSID change.\n");
8536 if (!ipw_disassociate(priv))
8537 ipw_associate(priv);
8538
8539 up(&priv->sem);
8540 return 0;
8541 }
8542
8543 static int ipw_wx_get_essid(struct net_device *dev,
8544 struct iw_request_info *info,
8545 union iwreq_data *wrqu, char *extra)
8546 {
8547 struct ipw_priv *priv = ieee80211_priv(dev);
8548
8549 /* If we are associated, trying to associate, or have a statically
8550 * configured ESSID then return that; otherwise return ANY */
8551 down(&priv->sem);
8552 if (priv->config & CFG_STATIC_ESSID ||
8553 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
8554 IPW_DEBUG_WX("Getting essid: '%s'\n",
8555 escape_essid(priv->essid, priv->essid_len));
8556 memcpy(extra, priv->essid, priv->essid_len);
8557 wrqu->essid.length = priv->essid_len;
8558 wrqu->essid.flags = 1; /* active */
8559 } else {
8560 IPW_DEBUG_WX("Getting essid: ANY\n");
8561 wrqu->essid.length = 0;
8562 wrqu->essid.flags = 0; /* active */
8563 }
8564 up(&priv->sem);
8565 return 0;
8566 }
8567
8568 static int ipw_wx_set_nick(struct net_device *dev,
8569 struct iw_request_info *info,
8570 union iwreq_data *wrqu, char *extra)
8571 {
8572 struct ipw_priv *priv = ieee80211_priv(dev);
8573
8574 IPW_DEBUG_WX("Setting nick to '%s'\n", extra);
8575 if (wrqu->data.length > IW_ESSID_MAX_SIZE)
8576 return -E2BIG;
8577 down(&priv->sem);
8578 wrqu->data.length = min((size_t) wrqu->data.length, sizeof(priv->nick));
8579 memset(priv->nick, 0, sizeof(priv->nick));
8580 memcpy(priv->nick, extra, wrqu->data.length);
8581 IPW_DEBUG_TRACE("<<\n");
8582 up(&priv->sem);
8583 return 0;
8584
8585 }
8586
8587 static int ipw_wx_get_nick(struct net_device *dev,
8588 struct iw_request_info *info,
8589 union iwreq_data *wrqu, char *extra)
8590 {
8591 struct ipw_priv *priv = ieee80211_priv(dev);
8592 IPW_DEBUG_WX("Getting nick\n");
8593 down(&priv->sem);
8594 wrqu->data.length = strlen(priv->nick) + 1;
8595 memcpy(extra, priv->nick, wrqu->data.length);
8596 wrqu->data.flags = 1; /* active */
8597 up(&priv->sem);
8598 return 0;
8599 }
8600
8601 static int ipw_wx_set_rate(struct net_device *dev,
8602 struct iw_request_info *info,
8603 union iwreq_data *wrqu, char *extra)
8604 {
8605 /* TODO: We should use semaphores or locks for access to priv */
8606 struct ipw_priv *priv = ieee80211_priv(dev);
8607 u32 target_rate = wrqu->bitrate.value;
8608 u32 fixed, mask;
8609
8610 /* value = -1, fixed = 0 means auto only, so we should use all rates offered by AP */
8611 /* value = X, fixed = 1 means only rate X */
8612 /* value = X, fixed = 0 means all rates lower than or equal to X */
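/* e.g. with wireless-tools, 'iwconfig ethX rate 11M' typically arrives as
 * value=11000000/fixed=1, 'iwconfig ethX rate 11M auto' as fixed=0 (11M and
 * below), and 'iwconfig ethX rate auto' as value=-1. */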
8613
8614 if (target_rate == -1) {
8615 fixed = 0;
8616 mask = IEEE80211_DEFAULT_RATES_MASK;
8617 /* Now we should reassociate */
8618 goto apply;
8619 }
8620
8621 mask = 0;
8622 fixed = wrqu->bitrate.fixed;
8623
8624 if (target_rate == 1000000 || !fixed)
8625 mask |= IEEE80211_CCK_RATE_1MB_MASK;
8626 if (target_rate == 1000000)
8627 goto apply;
8628
8629 if (target_rate == 2000000 || !fixed)
8630 mask |= IEEE80211_CCK_RATE_2MB_MASK;
8631 if (target_rate == 2000000)
8632 goto apply;
8633
8634 if (target_rate == 5500000 || !fixed)
8635 mask |= IEEE80211_CCK_RATE_5MB_MASK;
8636 if (target_rate == 5500000)
8637 goto apply;
8638
8639 if (target_rate == 6000000 || !fixed)
8640 mask |= IEEE80211_OFDM_RATE_6MB_MASK;
8641 if (target_rate == 6000000)
8642 goto apply;
8643
8644 if (target_rate == 9000000 || !fixed)
8645 mask |= IEEE80211_OFDM_RATE_9MB_MASK;
8646 if (target_rate == 9000000)
8647 goto apply;
8648
8649 if (target_rate == 11000000 || !fixed)
8650 mask |= IEEE80211_CCK_RATE_11MB_MASK;
8651 if (target_rate == 11000000)
8652 goto apply;
8653
8654 if (target_rate == 12000000 || !fixed)
8655 mask |= IEEE80211_OFDM_RATE_12MB_MASK;
8656 if (target_rate == 12000000)
8657 goto apply;
8658
8659 if (target_rate == 18000000 || !fixed)
8660 mask |= IEEE80211_OFDM_RATE_18MB_MASK;
8661 if (target_rate == 18000000)
8662 goto apply;
8663
8664 if (target_rate == 24000000 || !fixed)
8665 mask |= IEEE80211_OFDM_RATE_24MB_MASK;
8666 if (target_rate == 24000000)
8667 goto apply;
8668
8669 if (target_rate == 36000000 || !fixed)
8670 mask |= IEEE80211_OFDM_RATE_36MB_MASK;
8671 if (target_rate == 36000000)
8672 goto apply;
8673
8674 if (target_rate == 48000000 || !fixed)
8675 mask |= IEEE80211_OFDM_RATE_48MB_MASK;
8676 if (target_rate == 48000000)
8677 goto apply;
8678
8679 if (target_rate == 54000000 || !fixed)
8680 mask |= IEEE80211_OFDM_RATE_54MB_MASK;
8681 if (target_rate == 54000000)
8682 goto apply;
8683
8684 IPW_DEBUG_WX("invalid rate specified, returning error\n");
8685 return -EINVAL;
8686
8687 apply:
8688 IPW_DEBUG_WX("Setting rate mask to 0x%08X [%s]\n",
8689 mask, fixed ? "fixed" : "sub-rates");
8690 down(&priv->sem);
8691 if (mask == IEEE80211_DEFAULT_RATES_MASK) {
8692 priv->config &= ~CFG_FIXED_RATE;
8693 ipw_set_fixed_rate(priv, priv->ieee->mode);
8694 } else
8695 priv->config |= CFG_FIXED_RATE;
8696
8697 if (priv->rates_mask == mask) {
8698 IPW_DEBUG_WX("Mask set to current mask.\n");
8699 up(&priv->sem);
8700 return 0;
8701 }
8702
8703 priv->rates_mask = mask;
8704
8705 /* Network configuration changed -- force [re]association */
8706 IPW_DEBUG_ASSOC("[re]association triggered due to rates change.\n");
8707 if (!ipw_disassociate(priv))
8708 ipw_associate(priv);
8709
8710 up(&priv->sem);
8711 return 0;
8712 }
8713
8714 static int ipw_wx_get_rate(struct net_device *dev,
8715 struct iw_request_info *info,
8716 union iwreq_data *wrqu, char *extra)
8717 {
8718 struct ipw_priv *priv = ieee80211_priv(dev);
8719 down(&priv->sem);
8720 wrqu->bitrate.value = priv->last_rate;
8721 up(&priv->sem);
8722 IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value);
8723 return 0;
8724 }
8725
8726 static int ipw_wx_set_rts(struct net_device *dev,
8727 struct iw_request_info *info,
8728 union iwreq_data *wrqu, char *extra)
8729 {
8730 struct ipw_priv *priv = ieee80211_priv(dev);
8731 down(&priv->sem);
8732 if (wrqu->rts.disabled)
8733 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
8734 else {
8735 if (wrqu->rts.value < MIN_RTS_THRESHOLD ||
8736 wrqu->rts.value > MAX_RTS_THRESHOLD) {
8737 up(&priv->sem);
8738 return -EINVAL;
8739 }
8740 priv->rts_threshold = wrqu->rts.value;
8741 }
8742
8743 ipw_send_rts_threshold(priv, priv->rts_threshold);
8744 up(&priv->sem);
8745 IPW_DEBUG_WX("SET RTS Threshold -> %d \n", priv->rts_threshold);
8746 return 0;
8747 }
8748
8749 static int ipw_wx_get_rts(struct net_device *dev,
8750 struct iw_request_info *info,
8751 union iwreq_data *wrqu, char *extra)
8752 {
8753 struct ipw_priv *priv = ieee80211_priv(dev);
8754 down(&priv->sem);
8755 wrqu->rts.value = priv->rts_threshold;
8756 wrqu->rts.fixed = 0; /* no auto select */
8757 wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD);
8758 up(&priv->sem);
8759 IPW_DEBUG_WX("GET RTS Threshold -> %d \n", wrqu->rts.value);
8760 return 0;
8761 }
8762
8763 static int ipw_wx_set_txpow(struct net_device *dev,
8764 struct iw_request_info *info,
8765 union iwreq_data *wrqu, char *extra)
8766 {
8767 struct ipw_priv *priv = ieee80211_priv(dev);
8768 int err = 0;
8769
8770 down(&priv->sem);
8771 if (ipw_radio_kill_sw(priv, wrqu->power.disabled)) {
8772 err = -EINPROGRESS;
8773 goto out;
8774 }
8775
8776 if (!wrqu->power.fixed)
8777 wrqu->power.value = IPW_TX_POWER_DEFAULT;
8778
8779 if (wrqu->power.flags != IW_TXPOW_DBM) {
8780 err = -EINVAL;
8781 goto out;
8782 }
8783
8784 if ((wrqu->power.value > IPW_TX_POWER_MAX) ||
8785 (wrqu->power.value < IPW_TX_POWER_MIN)) {
8786 err = -EINVAL;
8787 goto out;
8788 }
8789
8790 priv->tx_power = wrqu->power.value;
8791 err = ipw_set_tx_power(priv);
8792 out:
8793 up(&priv->sem);
8794 return err;
8795 }
8796
8797 static int ipw_wx_get_txpow(struct net_device *dev,
8798 struct iw_request_info *info,
8799 union iwreq_data *wrqu, char *extra)
8800 {
8801 struct ipw_priv *priv = ieee80211_priv(dev);
8802 down(&priv->sem);
8803 wrqu->power.value = priv->tx_power;
8804 wrqu->power.fixed = 1;
8805 wrqu->power.flags = IW_TXPOW_DBM;
8806 wrqu->power.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0;
8807 up(&priv->sem);
8808
8809 IPW_DEBUG_WX("GET TX Power -> %s %d \n",
8810 wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
8811
8812 return 0;
8813 }
8814
8815 static int ipw_wx_set_frag(struct net_device *dev,
8816 struct iw_request_info *info,
8817 union iwreq_data *wrqu, char *extra)
8818 {
8819 struct ipw_priv *priv = ieee80211_priv(dev);
8820 down(&priv->sem);
8821 if (wrqu->frag.disabled)
8822 priv->ieee->fts = DEFAULT_FTS;
8823 else {
8824 if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
8825 wrqu->frag.value > MAX_FRAG_THRESHOLD) {
8826 up(&priv->sem);
8827 return -EINVAL;
8828 }
8829
8830 priv->ieee->fts = wrqu->frag.value & ~0x1;
8831 }
8832
8833 ipw_send_frag_threshold(priv, wrqu->frag.value);
8834 up(&priv->sem);
8835 IPW_DEBUG_WX("SET Frag Threshold -> %d \n", wrqu->frag.value);
8836 return 0;
8837 }
8838
8839 static int ipw_wx_get_frag(struct net_device *dev,
8840 struct iw_request_info *info,
8841 union iwreq_data *wrqu, char *extra)
8842 {
8843 struct ipw_priv *priv = ieee80211_priv(dev);
8844 down(&priv->sem);
8845 wrqu->frag.value = priv->ieee->fts;
8846 wrqu->frag.fixed = 0; /* no auto select */
8847 wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FTS);
8848 up(&priv->sem);
8849 IPW_DEBUG_WX("GET Frag Threshold -> %d \n", wrqu->frag.value);
8850
8851 return 0;
8852 }
8853
8854 static int ipw_wx_set_retry(struct net_device *dev,
8855 struct iw_request_info *info,
8856 union iwreq_data *wrqu, char *extra)
8857 {
8858 struct ipw_priv *priv = ieee80211_priv(dev);
8859
8860 if (wrqu->retry.flags & IW_RETRY_LIFETIME || wrqu->retry.disabled)
8861 return -EINVAL;
8862
8863 if (!(wrqu->retry.flags & IW_RETRY_LIMIT))
8864 return 0;
8865
8866 if (wrqu->retry.value < 0 || wrqu->retry.value > 255)
8867 return -EINVAL;
8868
8869 down(&priv->sem);
8870 if (wrqu->retry.flags & IW_RETRY_MIN)
8871 priv->short_retry_limit = (u8) wrqu->retry.value;
8872 else if (wrqu->retry.flags & IW_RETRY_MAX)
8873 priv->long_retry_limit = (u8) wrqu->retry.value;
8874 else {
8875 priv->short_retry_limit = (u8) wrqu->retry.value;
8876 priv->long_retry_limit = (u8) wrqu->retry.value;
8877 }
8878
8879 ipw_send_retry_limit(priv, priv->short_retry_limit,
8880 priv->long_retry_limit);
8881 up(&priv->sem);
8882 IPW_DEBUG_WX("SET retry limit -> short:%d long:%d\n",
8883 priv->short_retry_limit, priv->long_retry_limit);
8884 return 0;
8885 }
8886
8887 static int ipw_wx_get_retry(struct net_device *dev,
8888 struct iw_request_info *info,
8889 union iwreq_data *wrqu, char *extra)
8890 {
8891 struct ipw_priv *priv = ieee80211_priv(dev);
8892
8893 down(&priv->sem);
8894 wrqu->retry.disabled = 0;
8895
8896 if ((wrqu->retry.flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
8897 up(&priv->sem);
8898 return -EINVAL;
8899 }
8900
8901 if (wrqu->retry.flags & IW_RETRY_MAX) {
8902 wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_MAX;
8903 wrqu->retry.value = priv->long_retry_limit;
8904 } else if (wrqu->retry.flags & IW_RETRY_MIN) {
8905 wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_MIN;
8906 wrqu->retry.value = priv->short_retry_limit;
8907 } else {
8908 wrqu->retry.flags = IW_RETRY_LIMIT;
8909 wrqu->retry.value = priv->short_retry_limit;
8910 }
8911 up(&priv->sem);
8912
8913 IPW_DEBUG_WX("GET retry -> %d \n", wrqu->retry.value);
8914
8915 return 0;
8916 }
8917
8918 static int ipw_request_direct_scan(struct ipw_priv *priv, char *essid,
8919 int essid_len)
8920 {
8921 struct ipw_scan_request_ext scan;
8922 int err = 0, scan_type;
8923
8924 if (!(priv->status & STATUS_INIT) ||
8925 (priv->status & STATUS_EXIT_PENDING))
8926 return 0;
8927
8928 down(&priv->sem);
8929
8930 if (priv->status & STATUS_RF_KILL_MASK) {
8931 IPW_DEBUG_HC("Aborting scan due to RF kill activation\n");
8932 priv->status |= STATUS_SCAN_PENDING;
8933 goto done;
8934 }
8935
8936 IPW_DEBUG_HC("starting request direct scan!\n");
8937
8938 if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) {
8939 /* We should not sleep here; otherwise we will block most
8940 * of the system (for instance, we hold rtnl_lock when we
8941 * get here).
8942 */
8943 err = -EAGAIN;
8944 goto done;
8945 }
8946 memset(&scan, 0, sizeof(scan));
8947
8948 if (priv->config & CFG_SPEED_SCAN)
8949 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
8950 cpu_to_le16(30);
8951 else
8952 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
8953 cpu_to_le16(20);
8954
8955 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] =
8956 cpu_to_le16(20);
8957 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = cpu_to_le16(120);
8958 scan.dwell_time[IPW_SCAN_ACTIVE_DIRECT_SCAN] = cpu_to_le16(20);
8959
8960 scan.full_scan_index = cpu_to_le32(ieee80211_get_scans(priv->ieee));
8961
8962 err = ipw_send_ssid(priv, essid, essid_len);
8963 if (err) {
8964 IPW_DEBUG_HC("Attempt to send SSID command failed\n");
8965 goto done;
8966 }
8967 scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
8968
8969 ipw_add_scan_channels(priv, &scan, scan_type);
8970
8971 err = ipw_send_scan_request_ext(priv, &scan);
8972 if (err) {
8973 IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
8974 goto done;
8975 }
8976
8977 priv->status |= STATUS_SCANNING;
8978
8979 done:
8980 up(&priv->sem);
8981 return err;
8982 }
8983
8984 static int ipw_wx_set_scan(struct net_device *dev,
8985 struct iw_request_info *info,
8986 union iwreq_data *wrqu, char *extra)
8987 {
8988 struct ipw_priv *priv = ieee80211_priv(dev);
8989 struct iw_scan_req *req = NULL;
8990 if (wrqu->data.length
8991 && wrqu->data.length == sizeof(struct iw_scan_req)) {
8992 req = (struct iw_scan_req *)extra;
8993 if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
8994 ipw_request_direct_scan(priv, req->essid,
8995 req->essid_len);
8996 return 0;
8997 }
8998 }
8999
9000 IPW_DEBUG_WX("Start scan\n");
9001
9002 queue_work(priv->workqueue, &priv->request_scan);
9003
9004 return 0;
9005 }
9006
9007 static int ipw_wx_get_scan(struct net_device *dev,
9008 struct iw_request_info *info,
9009 union iwreq_data *wrqu, char *extra)
9010 {
9011 struct ipw_priv *priv = ieee80211_priv(dev);
9012 return ieee80211_wx_get_scan(priv->ieee, info, wrqu, extra);
9013 }
9014
9015 static int ipw_wx_set_encode(struct net_device *dev,
9016 struct iw_request_info *info,
9017 union iwreq_data *wrqu, char *key)
9018 {
9019 struct ipw_priv *priv = ieee80211_priv(dev);
9020 int ret;
9021 u32 cap = priv->capability;
9022
9023 down(&priv->sem);
9024 ret = ieee80211_wx_set_encode(priv->ieee, info, wrqu, key);
9025
9026 /* In IBSS mode, we need to notify the firmware to update
9027 * the beacon info after we changed the capability. */
9028 if (cap != priv->capability &&
9029 priv->ieee->iw_mode == IW_MODE_ADHOC &&
9030 priv->status & STATUS_ASSOCIATED)
9031 ipw_disassociate(priv);
9032
9033 up(&priv->sem);
9034 return ret;
9035 }
9036
9037 static int ipw_wx_get_encode(struct net_device *dev,
9038 struct iw_request_info *info,
9039 union iwreq_data *wrqu, char *key)
9040 {
9041 struct ipw_priv *priv = ieee80211_priv(dev);
9042 return ieee80211_wx_get_encode(priv->ieee, info, wrqu, key);
9043 }
9044
9045 static int ipw_wx_set_power(struct net_device *dev,
9046 struct iw_request_info *info,
9047 union iwreq_data *wrqu, char *extra)
9048 {
9049 struct ipw_priv *priv = ieee80211_priv(dev);
9050 int err;
9051 down(&priv->sem);
9052 if (wrqu->power.disabled) {
9053 priv->power_mode = IPW_POWER_LEVEL(priv->power_mode);
9054 err = ipw_send_power_mode(priv, IPW_POWER_MODE_CAM);
9055 if (err) {
9056 IPW_DEBUG_WX("failed setting power mode.\n");
9057 up(&priv->sem);
9058 return err;
9059 }
9060 IPW_DEBUG_WX("SET Power Management Mode -> off\n");
9061 up(&priv->sem);
9062 return 0;
9063 }
9064
9065 switch (wrqu->power.flags & IW_POWER_MODE) {
9066 case IW_POWER_ON: /* If not specified */
9067 case IW_POWER_MODE: /* If set all mask */
9068 case IW_POWER_ALL_R: /* If explicitly stated all */
9069 break;
9070 default: /* Otherwise we don't support it */
9071 IPW_DEBUG_WX("SET PM Mode: %X not supported.\n",
9072 wrqu->power.flags);
9073 up(&priv->sem);
9074 return -EOPNOTSUPP;
9075 }
9076
9077 /* If the user hasn't specified a power management mode yet, default
9078 * to BATTERY */
9079 if (IPW_POWER_LEVEL(priv->power_mode) == IPW_POWER_AC)
9080 priv->power_mode = IPW_POWER_ENABLED | IPW_POWER_BATTERY;
9081 else
9082 priv->power_mode = IPW_POWER_ENABLED | priv->power_mode;
9083 err = ipw_send_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode));
9084 if (err) {
9085 IPW_DEBUG_WX("failed setting power mode.\n");
9086 up(&priv->sem);
9087 return err;
9088 }
9089
9090 IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", priv->power_mode);
9091 up(&priv->sem);
9092 return 0;
9093 }
9094
9095 static int ipw_wx_get_power(struct net_device *dev,
9096 struct iw_request_info *info,
9097 union iwreq_data *wrqu, char *extra)
9098 {
9099 struct ipw_priv *priv = ieee80211_priv(dev);
9100 down(&priv->sem);
9101 if (!(priv->power_mode & IPW_POWER_ENABLED))
9102 wrqu->power.disabled = 1;
9103 else
9104 wrqu->power.disabled = 0;
9105
9106 up(&priv->sem);
9107 IPW_DEBUG_WX("GET Power Management Mode -> %02X\n", priv->power_mode);
9108
9109 return 0;
9110 }
9111
9112 static int ipw_wx_set_powermode(struct net_device *dev,
9113 struct iw_request_info *info,
9114 union iwreq_data *wrqu, char *extra)
9115 {
9116 struct ipw_priv *priv = ieee80211_priv(dev);
9117 int mode = *(int *)extra;
9118 int err;
9119 down(&priv->sem);
9120 if ((mode < 1) || (mode > IPW_POWER_LIMIT)) {
9121 mode = IPW_POWER_AC;
9122 priv->power_mode = mode;
9123 } else {
9124 priv->power_mode = IPW_POWER_ENABLED | mode;
9125 }
9126
9127 if (priv->power_mode != mode) {
9128 err = ipw_send_power_mode(priv, mode);
9129
9130 if (err) {
9131 IPW_DEBUG_WX("failed setting power mode.\n");
9132 up(&priv->sem);
9133 return err;
9134 }
9135 }
9136 up(&priv->sem);
9137 return 0;
9138 }
9139
9140 #define MAX_WX_STRING 80
9141 static int ipw_wx_get_powermode(struct net_device *dev,
9142 struct iw_request_info *info,
9143 union iwreq_data *wrqu, char *extra)
9144 {
9145 struct ipw_priv *priv = ieee80211_priv(dev);
9146 int level = IPW_POWER_LEVEL(priv->power_mode);
9147 char *p = extra;
9148
9149 p += snprintf(p, MAX_WX_STRING, "Power save level: %d ", level);
9150
9151 switch (level) {
9152 case IPW_POWER_AC:
9153 p += snprintf(p, MAX_WX_STRING - (p - extra), "(AC)");
9154 break;
9155 case IPW_POWER_BATTERY:
9156 p += snprintf(p, MAX_WX_STRING - (p - extra), "(BATTERY)");
9157 break;
9158 default:
9159 p += snprintf(p, MAX_WX_STRING - (p - extra),
9160 "(Timeout %dms, Period %dms)",
9161 timeout_duration[level - 1] / 1000,
9162 period_duration[level - 1] / 1000);
9163 }
9164
9165 if (!(priv->power_mode & IPW_POWER_ENABLED))
9166 p += snprintf(p, MAX_WX_STRING - (p - extra), " OFF");
9167
9168 wrqu->data.length = p - extra + 1;
9169
9170 return 0;
9171 }
9172
9173 static int ipw_wx_set_wireless_mode(struct net_device *dev,
9174 struct iw_request_info *info,
9175 union iwreq_data *wrqu, char *extra)
9176 {
9177 struct ipw_priv *priv = ieee80211_priv(dev);
9178 int mode = *(int *)extra;
9179 u8 band = 0, modulation = 0;
9180
9181 if (mode == 0 || mode & ~IEEE_MODE_MASK) {
9182 IPW_WARNING("Attempt to set invalid wireless mode: %d\n", mode);
9183 return -EINVAL;
9184 }
9185 down(&priv->sem);
9186 if (priv->adapter == IPW_2915ABG) {
9187 priv->ieee->abg_true = 1;
9188 if (mode & IEEE_A) {
9189 band |= IEEE80211_52GHZ_BAND;
9190 modulation |= IEEE80211_OFDM_MODULATION;
9191 } else
9192 priv->ieee->abg_true = 0;
9193 } else {
9194 if (mode & IEEE_A) {
9195 IPW_WARNING("Attempt to set 2200BG into "
9196 "802.11a mode\n");
9197 up(&priv->sem);
9198 return -EINVAL;
9199 }
9200
9201 priv->ieee->abg_true = 0;
9202 }
9203
9204 if (mode & IEEE_B) {
9205 band |= IEEE80211_24GHZ_BAND;
9206 modulation |= IEEE80211_CCK_MODULATION;
9207 } else
9208 priv->ieee->abg_true = 0;
9209
9210 if (mode & IEEE_G) {
9211 band |= IEEE80211_24GHZ_BAND;
9212 modulation |= IEEE80211_OFDM_MODULATION;
9213 } else
9214 priv->ieee->abg_true = 0;
9215
9216 priv->ieee->mode = mode;
9217 priv->ieee->freq_band = band;
9218 priv->ieee->modulation = modulation;
9219 init_supported_rates(priv, &priv->rates);
9220
9221 /* Network configuration changed -- force [re]association */
9222 IPW_DEBUG_ASSOC("[re]association triggered due to mode change.\n");
9223 if (!ipw_disassociate(priv)) {
9224 ipw_send_supported_rates(priv, &priv->rates);
9225 ipw_associate(priv);
9226 }
9227
9228 /* Update the band LEDs */
9229 ipw_led_band_on(priv);
9230
9231 IPW_DEBUG_WX("PRIV SET MODE: %c%c%c\n",
9232 mode & IEEE_A ? 'a' : '.',
9233 mode & IEEE_B ? 'b' : '.', mode & IEEE_G ? 'g' : '.');
9234 up(&priv->sem);
9235 return 0;
9236 }
9237
9238 static int ipw_wx_get_wireless_mode(struct net_device *dev,
9239 struct iw_request_info *info,
9240 union iwreq_data *wrqu, char *extra)
9241 {
9242 struct ipw_priv *priv = ieee80211_priv(dev);
9243 down(&priv->sem);
9244 switch (priv->ieee->mode) {
9245 case IEEE_A:
9246 strncpy(extra, "802.11a (1)", MAX_WX_STRING);
9247 break;
9248 case IEEE_B:
9249 strncpy(extra, "802.11b (2)", MAX_WX_STRING);
9250 break;
9251 case IEEE_A | IEEE_B:
9252 strncpy(extra, "802.11ab (3)", MAX_WX_STRING);
9253 break;
9254 case IEEE_G:
9255 strncpy(extra, "802.11g (4)", MAX_WX_STRING);
9256 break;
9257 case IEEE_A | IEEE_G:
9258 strncpy(extra, "802.11ag (5)", MAX_WX_STRING);
9259 break;
9260 case IEEE_B | IEEE_G:
9261 strncpy(extra, "802.11bg (6)", MAX_WX_STRING);
9262 break;
9263 case IEEE_A | IEEE_B | IEEE_G:
9264 strncpy(extra, "802.11abg (7)", MAX_WX_STRING);
9265 break;
9266 default:
9267 strncpy(extra, "unknown", MAX_WX_STRING);
9268 break;
9269 }
9270
9271 IPW_DEBUG_WX("PRIV GET MODE: %s\n", extra);
9272
9273 wrqu->data.length = strlen(extra) + 1;
9274 up(&priv->sem);
9275
9276 return 0;
9277 }
9278
9279 static int ipw_wx_set_preamble(struct net_device *dev,
9280 struct iw_request_info *info,
9281 union iwreq_data *wrqu, char *extra)
9282 {
9283 struct ipw_priv *priv = ieee80211_priv(dev);
9284 int mode = *(int *)extra;
9285 down(&priv->sem);
9286 /* Switching from SHORT -> LONG requires a disassociation */
9287 if (mode == 1) {
9288 if (!(priv->config & CFG_PREAMBLE_LONG)) {
9289 priv->config |= CFG_PREAMBLE_LONG;
9290
9291 /* Network configuration changed -- force [re]association */
9292 IPW_DEBUG_ASSOC
9293 ("[re]association triggered due to preamble change.\n");
9294 if (!ipw_disassociate(priv))
9295 ipw_associate(priv);
9296 }
9297 goto done;
9298 }
9299
9300 if (mode == 0) {
9301 priv->config &= ~CFG_PREAMBLE_LONG;
9302 goto done;
9303 }
9304 up(&priv->sem);
9305 return -EINVAL;
9306
9307 done:
9308 up(&priv->sem);
9309 return 0;
9310 }
9311
9312 static int ipw_wx_get_preamble(struct net_device *dev,
9313 struct iw_request_info *info,
9314 union iwreq_data *wrqu, char *extra)
9315 {
9316 struct ipw_priv *priv = ieee80211_priv(dev);
9317 down(&priv->sem);
9318 if (priv->config & CFG_PREAMBLE_LONG)
9319 snprintf(wrqu->name, IFNAMSIZ, "long (1)");
9320 else
9321 snprintf(wrqu->name, IFNAMSIZ, "auto (0)");
9322 up(&priv->sem);
9323 return 0;
9324 }
9325
9326 #ifdef CONFIG_IPW2200_MONITOR
9327 static int ipw_wx_set_monitor(struct net_device *dev,
9328 struct iw_request_info *info,
9329 union iwreq_data *wrqu, char *extra)
9330 {
9331 struct ipw_priv *priv = ieee80211_priv(dev);
9332 int *parms = (int *)extra;
9333 int enable = (parms[0] > 0);
9334 down(&priv->sem);
9335 IPW_DEBUG_WX("SET MONITOR: %d %d\n", enable, parms[1]);
9336 if (enable) {
9337 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9338 #ifdef CONFIG_IEEE80211_RADIOTAP
9339 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
9340 #else
9341 priv->net_dev->type = ARPHRD_IEEE80211;
9342 #endif
9343 queue_work(priv->workqueue, &priv->adapter_restart);
9344 }
9345
9346 ipw_set_channel(priv, parms[1]);
9347 } else {
9348 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9349 up(&priv->sem);
9350 return 0;
9351 }
9352 priv->net_dev->type = ARPHRD_ETHER;
9353 queue_work(priv->workqueue, &priv->adapter_restart);
9354 }
9355 up(&priv->sem);
9356 return 0;
9357 }
9358
9359 #endif /* CONFIG_IPW2200_MONITOR */
9360
9361 static int ipw_wx_reset(struct net_device *dev,
9362 struct iw_request_info *info,
9363 union iwreq_data *wrqu, char *extra)
9364 {
9365 struct ipw_priv *priv = ieee80211_priv(dev);
9366 IPW_DEBUG_WX("RESET\n");
9367 queue_work(priv->workqueue, &priv->adapter_restart);
9368 return 0;
9369 }
9370
9371 static int ipw_wx_sw_reset(struct net_device *dev,
9372 struct iw_request_info *info,
9373 union iwreq_data *wrqu, char *extra)
9374 {
9375 struct ipw_priv *priv = ieee80211_priv(dev);
9376 union iwreq_data wrqu_sec = {
9377 .encoding = {
9378 .flags = IW_ENCODE_DISABLED,
9379 },
9380 };
9381 int ret;
9382
9383 IPW_DEBUG_WX("SW_RESET\n");
9384
9385 down(&priv->sem);
9386
9387 ret = ipw_sw_reset(priv, 0);
9388 if (!ret) {
9389 free_firmware();
9390 ipw_adapter_restart(priv);
9391 }
9392
9393 /* The SW reset bit might have been toggled on by the 'disable'
9394 * module parameter, so take appropriate action */
9395 ipw_radio_kill_sw(priv, priv->status & STATUS_RF_KILL_SW);
9396
9397 up(&priv->sem);
9398 ieee80211_wx_set_encode(priv->ieee, info, &wrqu_sec, NULL);
9399 down(&priv->sem);
9400
9401 if (!(priv->status & STATUS_RF_KILL_MASK)) {
9402 /* Configuration likely changed -- force [re]association */
9403 IPW_DEBUG_ASSOC("[re]association triggered due to sw "
9404 "reset.\n");
9405 if (!ipw_disassociate(priv))
9406 ipw_associate(priv);
9407 }
9408
9409 up(&priv->sem);
9410
9411 return 0;
9412 }
9413
9414 /* Rebase the WE IOCTLs to zero for the handler array */
9415 #define IW_IOCTL(x) [(x)-SIOCSIWCOMMIT]
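/* e.g. IW_IOCTL(SIOCGIWNAME) expands to [SIOCGIWNAME - SIOCSIWCOMMIT], so
 * each standard handler is placed at its ioctl's offset from SIOCSIWCOMMIT. */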
9416 static iw_handler ipw_wx_handlers[] = {
9417 IW_IOCTL(SIOCGIWNAME) = ipw_wx_get_name,
9418 IW_IOCTL(SIOCSIWFREQ) = ipw_wx_set_freq,
9419 IW_IOCTL(SIOCGIWFREQ) = ipw_wx_get_freq,
9420 IW_IOCTL(SIOCSIWMODE) = ipw_wx_set_mode,
9421 IW_IOCTL(SIOCGIWMODE) = ipw_wx_get_mode,
9422 IW_IOCTL(SIOCGIWRANGE) = ipw_wx_get_range,
9423 IW_IOCTL(SIOCSIWAP) = ipw_wx_set_wap,
9424 IW_IOCTL(SIOCGIWAP) = ipw_wx_get_wap,
9425 IW_IOCTL(SIOCSIWSCAN) = ipw_wx_set_scan,
9426 IW_IOCTL(SIOCGIWSCAN) = ipw_wx_get_scan,
9427 IW_IOCTL(SIOCSIWESSID) = ipw_wx_set_essid,
9428 IW_IOCTL(SIOCGIWESSID) = ipw_wx_get_essid,
9429 IW_IOCTL(SIOCSIWNICKN) = ipw_wx_set_nick,
9430 IW_IOCTL(SIOCGIWNICKN) = ipw_wx_get_nick,
9431 IW_IOCTL(SIOCSIWRATE) = ipw_wx_set_rate,
9432 IW_IOCTL(SIOCGIWRATE) = ipw_wx_get_rate,
9433 IW_IOCTL(SIOCSIWRTS) = ipw_wx_set_rts,
9434 IW_IOCTL(SIOCGIWRTS) = ipw_wx_get_rts,
9435 IW_IOCTL(SIOCSIWFRAG) = ipw_wx_set_frag,
9436 IW_IOCTL(SIOCGIWFRAG) = ipw_wx_get_frag,
9437 IW_IOCTL(SIOCSIWTXPOW) = ipw_wx_set_txpow,
9438 IW_IOCTL(SIOCGIWTXPOW) = ipw_wx_get_txpow,
9439 IW_IOCTL(SIOCSIWRETRY) = ipw_wx_set_retry,
9440 IW_IOCTL(SIOCGIWRETRY) = ipw_wx_get_retry,
9441 IW_IOCTL(SIOCSIWENCODE) = ipw_wx_set_encode,
9442 IW_IOCTL(SIOCGIWENCODE) = ipw_wx_get_encode,
9443 IW_IOCTL(SIOCSIWPOWER) = ipw_wx_set_power,
9444 IW_IOCTL(SIOCGIWPOWER) = ipw_wx_get_power,
9445 IW_IOCTL(SIOCSIWSPY) = iw_handler_set_spy,
9446 IW_IOCTL(SIOCGIWSPY) = iw_handler_get_spy,
9447 IW_IOCTL(SIOCSIWTHRSPY) = iw_handler_set_thrspy,
9448 IW_IOCTL(SIOCGIWTHRSPY) = iw_handler_get_thrspy,
9449 IW_IOCTL(SIOCSIWGENIE) = ipw_wx_set_genie,
9450 IW_IOCTL(SIOCGIWGENIE) = ipw_wx_get_genie,
9451 IW_IOCTL(SIOCSIWMLME) = ipw_wx_set_mlme,
9452 IW_IOCTL(SIOCSIWAUTH) = ipw_wx_set_auth,
9453 IW_IOCTL(SIOCGIWAUTH) = ipw_wx_get_auth,
9454 IW_IOCTL(SIOCSIWENCODEEXT) = ipw_wx_set_encodeext,
9455 IW_IOCTL(SIOCGIWENCODEEXT) = ipw_wx_get_encodeext,
9456 };
9457
9458 enum {
9459 IPW_PRIV_SET_POWER = SIOCIWFIRSTPRIV,
9460 IPW_PRIV_GET_POWER,
9461 IPW_PRIV_SET_MODE,
9462 IPW_PRIV_GET_MODE,
9463 IPW_PRIV_SET_PREAMBLE,
9464 IPW_PRIV_GET_PREAMBLE,
9465 IPW_PRIV_RESET,
9466 IPW_PRIV_SW_RESET,
9467 #ifdef CONFIG_IPW2200_MONITOR
9468 IPW_PRIV_SET_MONITOR,
9469 #endif
9470 };
9471
9472 static struct iw_priv_args ipw_priv_args[] = {
9473 {
9474 .cmd = IPW_PRIV_SET_POWER,
9475 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
9476 .name = "set_power"},
9477 {
9478 .cmd = IPW_PRIV_GET_POWER,
9479 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
9480 .name = "get_power"},
9481 {
9482 .cmd = IPW_PRIV_SET_MODE,
9483 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
9484 .name = "set_mode"},
9485 {
9486 .cmd = IPW_PRIV_GET_MODE,
9487 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
9488 .name = "get_mode"},
9489 {
9490 .cmd = IPW_PRIV_SET_PREAMBLE,
9491 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
9492 .name = "set_preamble"},
9493 {
9494 .cmd = IPW_PRIV_GET_PREAMBLE,
9495 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | IFNAMSIZ,
9496 .name = "get_preamble"},
9497 {
9498 IPW_PRIV_RESET,
9499 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "reset"},
9500 {
9501 IPW_PRIV_SW_RESET,
9502 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "sw_reset"},
9503 #ifdef CONFIG_IPW2200_MONITOR
9504 {
9505 IPW_PRIV_SET_MONITOR,
9506 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "monitor"},
9507 #endif /* CONFIG_IPW2200_MONITOR */
9508 };
9509
9510 static iw_handler ipw_priv_handler[] = {
9511 ipw_wx_set_powermode,
9512 ipw_wx_get_powermode,
9513 ipw_wx_set_wireless_mode,
9514 ipw_wx_get_wireless_mode,
9515 ipw_wx_set_preamble,
9516 ipw_wx_get_preamble,
9517 ipw_wx_reset,
9518 ipw_wx_sw_reset,
9519 #ifdef CONFIG_IPW2200_MONITOR
9520 ipw_wx_set_monitor,
9521 #endif
9522 };
9523
9524 static struct iw_handler_def ipw_wx_handler_def = {
9525 .standard = ipw_wx_handlers,
9526 .num_standard = ARRAY_SIZE(ipw_wx_handlers),
9527 .num_private = ARRAY_SIZE(ipw_priv_handler),
9528 .num_private_args = ARRAY_SIZE(ipw_priv_args),
9529 .private = ipw_priv_handler,
9530 .private_args = ipw_priv_args,
9531 .get_wireless_stats = ipw_get_wireless_stats,
9532 };
9533
9534 /*
9535 * Get wireless statistics.
9536 * Called by /proc/net/wireless
9537 * Also called by SIOCGIWSTATS
9538 */
9539 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev)
9540 {
9541 struct ipw_priv *priv = ieee80211_priv(dev);
9542 struct iw_statistics *wstats;
9543
9544 wstats = &priv->wstats;
9545
9546 /* if hw is disabled, then ipw_get_ordinal() can't be called.
9547 * netdev->get_wireless_stats seems to be called before fw is
9548 * initialized. STATUS_ASSOCIATED will only be set if the hw is up
9549 * and associated; if not associated, the values are all meaningless
9550 * anyway, so zero them out and mark them INVALID */
9551 if (!(priv->status & STATUS_ASSOCIATED)) {
9552 wstats->miss.beacon = 0;
9553 wstats->discard.retries = 0;
9554 wstats->qual.qual = 0;
9555 wstats->qual.level = 0;
9556 wstats->qual.noise = 0;
9557 wstats->qual.updated = 7;
9558 wstats->qual.updated |= IW_QUAL_NOISE_INVALID |
9559 IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID;
9560 return wstats;
9561 }
9562
9563 wstats->qual.qual = priv->quality;
9564 wstats->qual.level = average_value(&priv->average_rssi);
9565 wstats->qual.noise = average_value(&priv->average_noise);
9566 wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED |
9567 IW_QUAL_NOISE_UPDATED;
9568
9569 wstats->miss.beacon = average_value(&priv->average_missed_beacons);
9570 wstats->discard.retries = priv->last_tx_failures;
9571 wstats->discard.code = priv->ieee->ieee_stats.rx_discards_undecryptable;
9572
9573 /* if (ipw_get_ordinal(priv, IPW_ORD_STAT_TX_RETRY, &tx_retry, &len))
9574 goto fail_get_ordinal;
9575 wstats->discard.retries += tx_retry; */
9576
9577 return wstats;
9578 }
9579
9580 /* net device stuff */
9581
9582 static void init_sys_config(struct ipw_sys_config *sys_config)
9583 {
9584 memset(sys_config, 0, sizeof(struct ipw_sys_config));
9585 sys_config->bt_coexistence = 1; /* We may need to look into prvStaBtConfig */
9586 sys_config->answer_broadcast_ssid_probe = 0;
9587 sys_config->accept_all_data_frames = 0;
9588 sys_config->accept_non_directed_frames = 1;
9589 sys_config->exclude_unicast_unencrypted = 0;
9590 sys_config->disable_unicast_decryption = 1;
9591 sys_config->exclude_multicast_unencrypted = 0;
9592 sys_config->disable_multicast_decryption = 1;
9593 sys_config->antenna_diversity = CFG_SYS_ANTENNA_BOTH;
9594 sys_config->pass_crc_to_host = 0; /* TODO: See if 1 gives us FCS */
9595 sys_config->dot11g_auto_detection = 0;
9596 sys_config->enable_cts_to_self = 0;
9597 sys_config->bt_coexist_collision_thr = 0;
9598 sys_config->pass_noise_stats_to_host = 1; //1 -- fix for 256
9599 }
9600
9601 static int ipw_net_open(struct net_device *dev)
9602 {
9603 struct ipw_priv *priv = ieee80211_priv(dev);
9604 IPW_DEBUG_INFO("dev->open\n");
9605 /* we should be verifying the device is ready to be opened */
9606 down(&priv->sem);
9607 if (!(priv->status & STATUS_RF_KILL_MASK) &&
9608 (priv->status & STATUS_ASSOCIATED))
9609 netif_start_queue(dev);
9610 up(&priv->sem);
9611 return 0;
9612 }
9613
9614 static int ipw_net_stop(struct net_device *dev)
9615 {
9616 IPW_DEBUG_INFO("dev->close\n");
9617 netif_stop_queue(dev);
9618 return 0;
9619 }
9620
9621 /*
9622 todo:
9623
9624 modify to send one TFD per fragment instead of using chunking; otherwise
9625 we would need to heavily modify ieee80211_skb_to_txb().
9626 */
9627
9628 static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb,
9629 int pri)
9630 {
9631 struct ieee80211_hdr_3addr *hdr = (struct ieee80211_hdr_3addr *)
9632 txb->fragments[0]->data;
9633 int i = 0;
9634 struct tfd_frame *tfd;
9635 #ifdef CONFIG_IPW_QOS
9636 int tx_id = ipw_get_tx_queue_number(priv, pri);
9637 struct clx2_tx_queue *txq = &priv->txq[tx_id];
9638 #else
9639 struct clx2_tx_queue *txq = &priv->txq[0];
9640 #endif
9641 struct clx2_queue *q = &txq->q;
9642 u8 id, hdr_len, unicast;
9643 u16 remaining_bytes;
9644 int fc;
9645
9646 /* If there isn't room in the queue, we return busy and let the
9647 * network stack requeue the packet for us */
9648 if (ipw_queue_space(q) < q->high_mark)
9649 return NETDEV_TX_BUSY;
9650
9651 switch (priv->ieee->iw_mode) {
9652 case IW_MODE_ADHOC:
9653 hdr_len = IEEE80211_3ADDR_LEN;
9654 unicast = !is_multicast_ether_addr(hdr->addr1);
9655 id = ipw_find_station(priv, hdr->addr1);
9656 if (id == IPW_INVALID_STATION) {
9657 id = ipw_add_station(priv, hdr->addr1);
9658 if (id == IPW_INVALID_STATION) {
9659 IPW_WARNING("Attempt to send data to "
9660 "invalid cell: " MAC_FMT "\n",
9661 MAC_ARG(hdr->addr1));
9662 goto drop;
9663 }
9664 }
9665 break;
9666
9667 case IW_MODE_INFRA:
9668 default:
9669 unicast = !is_multicast_ether_addr(hdr->addr3);
9670 hdr_len = IEEE80211_3ADDR_LEN;
9671 id = 0;
9672 break;
9673 }
9674
9675 tfd = &txq->bd[q->first_empty];
9676 txq->txb[q->first_empty] = txb;
9677 memset(tfd, 0, sizeof(*tfd));
9678 tfd->u.data.station_number = id;
9679
9680 tfd->control_flags.message_type = TX_FRAME_TYPE;
9681 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
9682
9683 tfd->u.data.cmd_id = DINO_CMD_TX;
9684 tfd->u.data.len = cpu_to_le16(txb->payload_size);
9685 remaining_bytes = txb->payload_size;
9686
9687 if (priv->assoc_request.ieee_mode == IPW_B_MODE)
9688 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_CCK;
9689 else
9690 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_OFDM;
9691
9692 if (priv->assoc_request.preamble_length == DCT_FLAG_SHORT_PREAMBLE)
9693 tfd->u.data.tx_flags |= DCT_FLAG_SHORT_PREAMBLE;
9694
9695 fc = le16_to_cpu(hdr->frame_ctl);
9696 hdr->frame_ctl = cpu_to_le16(fc & ~IEEE80211_FCTL_MOREFRAGS);
9697
9698 memcpy(&tfd->u.data.tfd.tfd_24.mchdr, hdr, hdr_len);
9699
9700 if (likely(unicast))
9701 tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
9702
9703 if (txb->encrypted && !priv->ieee->host_encrypt) {
9704 switch (priv->ieee->sec.level) {
9705 case SEC_LEVEL_3:
9706 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
9707 IEEE80211_FCTL_PROTECTED;
9708 /* XXX: ACK flag must be set for CCMP even if it
9709 * is a multicast/broadcast packet, because CCMP
9710 * group communication encrypted by GTK is
9711 * actually done by the AP. */
9712 if (!unicast)
9713 tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
9714
9715 tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
9716 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_CCM;
9717 tfd->u.data.key_index = 0;
9718 tfd->u.data.key_index |= DCT_WEP_INDEX_USE_IMMEDIATE;
9719 break;
9720 case SEC_LEVEL_2:
9721 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
9722 IEEE80211_FCTL_PROTECTED;
9723 tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
9724 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_TKIP;
9725 tfd->u.data.key_index = DCT_WEP_INDEX_USE_IMMEDIATE;
9726 break;
9727 case SEC_LEVEL_1:
9728 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
9729 IEEE80211_FCTL_PROTECTED;
9730 tfd->u.data.key_index = priv->ieee->tx_keyidx;
9731 if (priv->ieee->sec.key_sizes[priv->ieee->tx_keyidx] <=
9732 40)
9733 tfd->u.data.key_index |= DCT_WEP_KEY_64Bit;
9734 else
9735 tfd->u.data.key_index |= DCT_WEP_KEY_128Bit;
9736 break;
9737 case SEC_LEVEL_0:
9738 break;
9739 default:
9740 printk(KERN_ERR "Unknown security level %d\n",
9741 priv->ieee->sec.level);
9742 break;
9743 }
9744 } else
9745 /* No hardware encryption */
9746 tfd->u.data.tx_flags |= DCT_FLAG_NO_WEP;
9747
9748 #ifdef CONFIG_IPW_QOS
9749 ipw_qos_set_tx_queue_command(priv, pri, &(tfd->u.data), unicast);
9750 #endif /* CONFIG_IPW_QOS */
9751
9752 /* payload */
9753 tfd->u.data.num_chunks = cpu_to_le32(min((u8) (NUM_TFD_CHUNKS - 2),
9754 txb->nr_frags));
9755 IPW_DEBUG_FRAG("%i fragments being sent as %i chunks.\n",
9756 txb->nr_frags, le32_to_cpu(tfd->u.data.num_chunks));
9757 for (i = 0; i < le32_to_cpu(tfd->u.data.num_chunks); i++) {
9758 IPW_DEBUG_FRAG("Adding fragment %i of %i (%d bytes).\n",
9759 i, le32_to_cpu(tfd->u.data.num_chunks),
9760 txb->fragments[i]->len - hdr_len);
9761 IPW_DEBUG_TX("Dumping TX packet frag %i of %i (%d bytes):\n",
9762 i, tfd->u.data.num_chunks,
9763 txb->fragments[i]->len - hdr_len);
9764 printk_buf(IPW_DL_TX, txb->fragments[i]->data + hdr_len,
9765 txb->fragments[i]->len - hdr_len);
9766
9767 tfd->u.data.chunk_ptr[i] =
9768 cpu_to_le32(pci_map_single
9769 (priv->pci_dev,
9770 txb->fragments[i]->data + hdr_len,
9771 txb->fragments[i]->len - hdr_len,
9772 PCI_DMA_TODEVICE));
9773 tfd->u.data.chunk_len[i] =
9774 cpu_to_le16(txb->fragments[i]->len - hdr_len);
9775 }
9776
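/* If the txb carries more fragments than the TFD can describe directly,
 * coalesce the leftover fragment payloads into one freshly allocated skb
 * and map it as a single additional chunk. */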
9777 if (i != txb->nr_frags) {
9778 struct sk_buff *skb;
9779 u16 remaining_bytes = 0;
9780 int j;
9781
9782 for (j = i; j < txb->nr_frags; j++)
9783 remaining_bytes += txb->fragments[j]->len - hdr_len;
9784
9785 printk(KERN_INFO "Trying to reallocate for %d bytes\n",
9786 remaining_bytes);
9787 skb = alloc_skb(remaining_bytes, GFP_ATOMIC);
9788 if (skb != NULL) {
9789 tfd->u.data.chunk_len[i] = cpu_to_le16(remaining_bytes);
9790 for (j = i; j < txb->nr_frags; j++) {
9791 int size = txb->fragments[j]->len - hdr_len;
9792
9793 printk(KERN_INFO "Adding frag %d %d...\n",
9794 j, size);
9795 memcpy(skb_put(skb, size),
9796 txb->fragments[j]->data + hdr_len, size);
9797 }
9798 dev_kfree_skb_any(txb->fragments[i]);
9799 txb->fragments[i] = skb;
9800 tfd->u.data.chunk_ptr[i] =
9801 cpu_to_le32(pci_map_single
9802 (priv->pci_dev, skb->data,
9803 tfd->u.data.chunk_len[i],
9804 PCI_DMA_TODEVICE));
9805
9806 tfd->u.data.num_chunks =
9807 cpu_to_le32(le32_to_cpu(tfd->u.data.num_chunks) +
9808 1);
9809 }
9810 }
9811
9812 /* kick DMA */
9813 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
9814 ipw_write32(priv, q->reg_w, q->first_empty);
9815
9816 return NETDEV_TX_OK;
9817
9818 drop:
9819 IPW_DEBUG_DROP("Silently dropping Tx packet.\n");
9820 ieee80211_txb_free(txb);
9821 return NETDEV_TX_OK;
9822 }
9823
9824 static int ipw_net_is_queue_full(struct net_device *dev, int pri)
9825 {
9826 struct ipw_priv *priv = ieee80211_priv(dev);
9827 #ifdef CONFIG_IPW_QOS
9828 int tx_id = ipw_get_tx_queue_number(priv, pri);
9829 struct clx2_tx_queue *txq = &priv->txq[tx_id];
9830 #else
9831 struct clx2_tx_queue *txq = &priv->txq[0];
9832 #endif /* CONFIG_IPW_QOS */
9833
9834 if (ipw_queue_space(&txq->q) < txq->q.high_mark)
9835 return 1;
9836
9837 return 0;
9838 }
9839
9840 static int ipw_net_hard_start_xmit(struct ieee80211_txb *txb,
9841 struct net_device *dev, int pri)
9842 {
9843 struct ipw_priv *priv = ieee80211_priv(dev);
9844 unsigned long flags;
9845 int ret;
9846
9847 IPW_DEBUG_TX("dev->xmit(%d bytes)\n", txb->payload_size);
9848 spin_lock_irqsave(&priv->lock, flags);
9849
9850 if (!(priv->status & STATUS_ASSOCIATED)) {
9851 IPW_DEBUG_INFO("Tx attempt while not associated.\n");
9852 priv->ieee->stats.tx_carrier_errors++;
9853 netif_stop_queue(dev);
9854 goto fail_unlock;
9855 }
9856
9857 ret = ipw_tx_skb(priv, txb, pri);
9858 if (ret == NETDEV_TX_OK)
9859 __ipw_led_activity_on(priv);
9860 spin_unlock_irqrestore(&priv->lock, flags);
9861
9862 return ret;
9863
9864 fail_unlock:
9865 spin_unlock_irqrestore(&priv->lock, flags);
9866 return 1;
9867 }
9868
9869 static struct net_device_stats *ipw_net_get_stats(struct net_device *dev)
9870 {
9871 struct ipw_priv *priv = ieee80211_priv(dev);
9872
9873 priv->ieee->stats.tx_packets = priv->tx_packets;
9874 priv->ieee->stats.rx_packets = priv->rx_packets;
9875 return &priv->ieee->stats;
9876 }
9877
9878 static void ipw_net_set_multicast_list(struct net_device *dev)
9879 {
9880
9881 }
9882
9883 static int ipw_net_set_mac_address(struct net_device *dev, void *p)
9884 {
9885 struct ipw_priv *priv = ieee80211_priv(dev);
9886 struct sockaddr *addr = p;
9887 if (!is_valid_ether_addr(addr->sa_data))
9888 return -EADDRNOTAVAIL;
9889 down(&priv->sem);
9890 priv->config |= CFG_CUSTOM_MAC;
9891 memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
9892 printk(KERN_INFO "%s: Setting MAC to " MAC_FMT "\n",
9893 priv->net_dev->name, MAC_ARG(priv->mac_addr));
9894 queue_work(priv->workqueue, &priv->adapter_restart);
9895 up(&priv->sem);
9896 return 0;
9897 }
9898
9899 static void ipw_ethtool_get_drvinfo(struct net_device *dev,
9900 struct ethtool_drvinfo *info)
9901 {
9902 struct ipw_priv *p = ieee80211_priv(dev);
9903 char vers[64];
9904 char date[32];
9905 u32 len;
9906
9907 strcpy(info->driver, DRV_NAME);
9908 strcpy(info->version, DRV_VERSION);
9909
9910 len = sizeof(vers);
9911 ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len);
9912 len = sizeof(date);
9913 ipw_get_ordinal(p, IPW_ORD_STAT_FW_DATE, date, &len);
9914
9915 snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)",
9916 vers, date);
9917 strcpy(info->bus_info, pci_name(p->pci_dev));
9918 info->eedump_len = IPW_EEPROM_IMAGE_SIZE;
9919 }
9920
9921 static u32 ipw_ethtool_get_link(struct net_device *dev)
9922 {
9923 struct ipw_priv *priv = ieee80211_priv(dev);
9924 return (priv->status & STATUS_ASSOCIATED) != 0;
9925 }
9926
9927 static int ipw_ethtool_get_eeprom_len(struct net_device *dev)
9928 {
9929 return IPW_EEPROM_IMAGE_SIZE;
9930 }
9931
9932 static int ipw_ethtool_get_eeprom(struct net_device *dev,
9933 struct ethtool_eeprom *eeprom, u8 * bytes)
9934 {
9935 struct ipw_priv *p = ieee80211_priv(dev);
9936
9937 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
9938 return -EINVAL;
9939 down(&p->sem);
9940 memcpy(bytes, &p->eeprom[eeprom->offset], eeprom->len);
9941 up(&p->sem);
9942 return 0;
9943 }
9944
9945 static int ipw_ethtool_set_eeprom(struct net_device *dev,
9946 struct ethtool_eeprom *eeprom, u8 * bytes)
9947 {
9948 struct ipw_priv *p = ieee80211_priv(dev);
9949 int i;
9950
9951 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
9952 return -EINVAL;
9953 down(&p->sem);
9954 memcpy(&p->eeprom[eeprom->offset], bytes, eeprom->len);
9955 for (i = IPW_EEPROM_DATA;
9956 i < IPW_EEPROM_DATA + IPW_EEPROM_IMAGE_SIZE; i++)
9957 ipw_write8(p, i, p->eeprom[i]);
9958 up(&p->sem);
9959 return 0;
9960 }
9961
9962 static struct ethtool_ops ipw_ethtool_ops = {
9963 .get_link = ipw_ethtool_get_link,
9964 .get_drvinfo = ipw_ethtool_get_drvinfo,
9965 .get_eeprom_len = ipw_ethtool_get_eeprom_len,
9966 .get_eeprom = ipw_ethtool_get_eeprom,
9967 .set_eeprom = ipw_ethtool_set_eeprom,
9968 };
9969
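/* Top-half interrupt handler: disable further device interrupts, ack and
 * cache the pending INTA bits, and defer the real work to irq_tasklet. */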
9970 static irqreturn_t ipw_isr(int irq, void *data, struct pt_regs *regs)
9971 {
9972 struct ipw_priv *priv = data;
9973 u32 inta, inta_mask;
9974
9975 if (!priv)
9976 return IRQ_NONE;
9977
9978 spin_lock(&priv->lock);
9979
9980 if (!(priv->status & STATUS_INT_ENABLED)) {
9981 /* Shared IRQ */
9982 goto none;
9983 }
9984
9985 inta = ipw_read32(priv, IPW_INTA_RW);
9986 inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
9987
9988 if (inta == 0xFFFFFFFF) {
9989 /* Hardware disappeared */
9990 IPW_WARNING("IRQ INTA == 0xFFFFFFFF\n");
9991 goto none;
9992 }
9993
9994 if (!(inta & (IPW_INTA_MASK_ALL & inta_mask))) {
9995 /* Shared interrupt */
9996 goto none;
9997 }
9998
9999 /* tell the device to stop sending interrupts */
10000 ipw_disable_interrupts(priv);
10001
10002 /* ack current interrupts */
10003 inta &= (IPW_INTA_MASK_ALL & inta_mask);
10004 ipw_write32(priv, IPW_INTA_RW, inta);
10005
10006 /* Cache INTA value for our tasklet */
10007 priv->isr_inta = inta;
10008
10009 tasklet_schedule(&priv->irq_tasklet);
10010
10011 spin_unlock(&priv->lock);
10012
10013 return IRQ_HANDLED;
10014 none:
10015 spin_unlock(&priv->lock);
10016 return IRQ_NONE;
10017 }
10018
10019 static void ipw_rf_kill(void *adapter)
10020 {
10021 struct ipw_priv *priv = adapter;
10022 unsigned long flags;
10023
10024 spin_lock_irqsave(&priv->lock, flags);
10025
10026 if (rf_kill_active(priv)) {
10027 IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
10028 if (priv->workqueue)
10029 queue_delayed_work(priv->workqueue,
10030 &priv->rf_kill, 2 * HZ);
10031 goto exit_unlock;
10032 }
10033
10034 /* RF Kill is now disabled, so bring the device back up */
10035
10036 if (!(priv->status & STATUS_RF_KILL_MASK)) {
10037 IPW_DEBUG_RF_KILL("HW RF Kill no longer active, restarting "
10038 "device\n");
10039
10040 /* we cannot do an adapter restart while inside an irq lock */
10041 queue_work(priv->workqueue, &priv->adapter_restart);
10042 } else
10043 IPW_DEBUG_RF_KILL("HW RF Kill deactivated. SW RF Kill still "
10044 "enabled\n");
10045
10046 exit_unlock:
10047 spin_unlock_irqrestore(&priv->lock, flags);
10048 }
10049
10050 static void ipw_bg_rf_kill(void *data)
10051 {
10052 struct ipw_priv *priv = data;
10053 down(&priv->sem);
10054 ipw_rf_kill(data);
10055 up(&priv->sem);
10056 }
10057
10058 void ipw_link_up(struct ipw_priv *priv)
10059 {
10060 priv->last_seq_num = -1;
10061 priv->last_frag_num = -1;
10062 priv->last_packet_time = 0;
10063
10064 netif_carrier_on(priv->net_dev);
10065 if (netif_queue_stopped(priv->net_dev)) {
10066 IPW_DEBUG_NOTIF("waking queue\n");
10067 netif_wake_queue(priv->net_dev);
10068 } else {
10069 IPW_DEBUG_NOTIF("starting queue\n");
10070 netif_start_queue(priv->net_dev);
10071 }
10072
10073 cancel_delayed_work(&priv->request_scan);
10074 ipw_reset_stats(priv);
10075 /* Ensure the rate is updated immediately */
10076 priv->last_rate = ipw_get_current_rate(priv);
10077 ipw_gather_stats(priv);
10078 ipw_led_link_up(priv);
10079 notify_wx_assoc_event(priv);
10080
10081 if (priv->config & CFG_BACKGROUND_SCAN)
10082 queue_delayed_work(priv->workqueue, &priv->request_scan, HZ);
10083 }
10084
10085 static void ipw_bg_link_up(void *data)
10086 {
10087 struct ipw_priv *priv = data;
10088 down(&priv->sem);
10089 ipw_link_up(data);
10090 up(&priv->sem);
10091 }
10092
10093 void ipw_link_down(struct ipw_priv *priv)
10094 {
10095 ipw_led_link_down(priv);
10096 netif_carrier_off(priv->net_dev);
10097 netif_stop_queue(priv->net_dev);
10098 notify_wx_assoc_event(priv);
10099
10100 /* Cancel any queued work ... */
10101 cancel_delayed_work(&priv->request_scan);
10102 cancel_delayed_work(&priv->adhoc_check);
10103 cancel_delayed_work(&priv->gather_stats);
10104
10105 ipw_reset_stats(priv);
10106
10107 if (!(priv->status & STATUS_EXIT_PENDING)) {
10108 /* Queue up another scan... */
10109 queue_work(priv->workqueue, &priv->request_scan);
10110 }
10111 }
10112
10113 static void ipw_bg_link_down(void *data)
10114 {
10115 struct ipw_priv *priv = data;
10116 down(&priv->sem);
10117 ipw_link_down(data);
10118 up(&priv->sem);
10119 }
10120
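/* Create the driver's private workqueue and register all deferred-work
 * handlers; the ipw_bg_* wrappers take priv->sem around the real work
 * functions. */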
10121 static int ipw_setup_deferred_work(struct ipw_priv *priv)
10122 {
10123 int ret = 0;
10124
10125 priv->workqueue = create_workqueue(DRV_NAME);
10126 init_waitqueue_head(&priv->wait_command_queue);
10127 init_waitqueue_head(&priv->wait_state);
10128
10129 INIT_WORK(&priv->adhoc_check, ipw_bg_adhoc_check, priv);
10130 INIT_WORK(&priv->associate, ipw_bg_associate, priv);
10131 INIT_WORK(&priv->disassociate, ipw_bg_disassociate, priv);
10132 INIT_WORK(&priv->system_config, ipw_system_config, priv);
10133 INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish, priv);
10134 INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart, priv);
10135 INIT_WORK(&priv->rf_kill, ipw_bg_rf_kill, priv);
10136 INIT_WORK(&priv->up, (void (*)(void *))ipw_bg_up, priv);
10137 INIT_WORK(&priv->down, (void (*)(void *))ipw_bg_down, priv);
10138 INIT_WORK(&priv->request_scan,
10139 (void (*)(void *))ipw_request_scan, priv);
10140 INIT_WORK(&priv->gather_stats,
10141 (void (*)(void *))ipw_bg_gather_stats, priv);
10142 INIT_WORK(&priv->abort_scan, (void (*)(void *))ipw_bg_abort_scan, priv);
10143 INIT_WORK(&priv->roam, ipw_bg_roam, priv);
10144 INIT_WORK(&priv->scan_check, ipw_bg_scan_check, priv);
10145 INIT_WORK(&priv->link_up, (void (*)(void *))ipw_bg_link_up, priv);
10146 INIT_WORK(&priv->link_down, (void (*)(void *))ipw_bg_link_down, priv);
10147 INIT_WORK(&priv->led_link_on, (void (*)(void *))ipw_bg_led_link_on,
10148 priv);
10149 INIT_WORK(&priv->led_link_off, (void (*)(void *))ipw_bg_led_link_off,
10150 priv);
10151 INIT_WORK(&priv->led_act_off, (void (*)(void *))ipw_bg_led_activity_off,
10152 priv);
10153 INIT_WORK(&priv->merge_networks,
10154 (void (*)(void *))ipw_merge_adhoc_network, priv);
10155
10156 #ifdef CONFIG_IPW_QOS
10157 INIT_WORK(&priv->qos_activate, (void (*)(void *))ipw_bg_qos_activate,
10158 priv);
10159 #endif /* CONFIG_IPW_QOS */
10160
10161 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
10162 ipw_irq_tasklet, (unsigned long)priv);
10163
10164 return ret;
10165 }
10166
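/* ieee80211 ->set_security callback: mirror the requested keys, auth mode
 * and privacy settings into priv->ieee->sec, update the capability bits and
 * flag STATUS_SECURITY_UPDATED for later processing. */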
10167 static void shim__set_security(struct net_device *dev,
10168 struct ieee80211_security *sec)
10169 {
10170 struct ipw_priv *priv = ieee80211_priv(dev);
10171 int i;
10172 for (i = 0; i < 4; i++) {
10173 if (sec->flags & (1 << i)) {
10174 priv->ieee->sec.encode_alg[i] = sec->encode_alg[i];
10175 priv->ieee->sec.key_sizes[i] = sec->key_sizes[i];
10176 if (sec->key_sizes[i] == 0)
10177 priv->ieee->sec.flags &= ~(1 << i);
10178 else {
10179 memcpy(priv->ieee->sec.keys[i], sec->keys[i],
10180 sec->key_sizes[i]);
10181 priv->ieee->sec.flags |= (1 << i);
10182 }
10183 priv->status |= STATUS_SECURITY_UPDATED;
10184 } else if (sec->level != SEC_LEVEL_1)
10185 priv->ieee->sec.flags &= ~(1 << i);
10186 }
10187
10188 if (sec->flags & SEC_ACTIVE_KEY) {
10189 if (sec->active_key <= 3) {
10190 priv->ieee->sec.active_key = sec->active_key;
10191 priv->ieee->sec.flags |= SEC_ACTIVE_KEY;
10192 } else
10193 priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
10194 priv->status |= STATUS_SECURITY_UPDATED;
10195 } else
10196 priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
10197
10198 if ((sec->flags & SEC_AUTH_MODE) &&
10199 (priv->ieee->sec.auth_mode != sec->auth_mode)) {
10200 priv->ieee->sec.auth_mode = sec->auth_mode;
10201 priv->ieee->sec.flags |= SEC_AUTH_MODE;
10202 if (sec->auth_mode == WLAN_AUTH_SHARED_KEY)
10203 priv->capability |= CAP_SHARED_KEY;
10204 else
10205 priv->capability &= ~CAP_SHARED_KEY;
10206 priv->status |= STATUS_SECURITY_UPDATED;
10207 }
10208
10209 if (sec->flags & SEC_ENABLED && priv->ieee->sec.enabled != sec->enabled) {
10210 priv->ieee->sec.flags |= SEC_ENABLED;
10211 priv->ieee->sec.enabled = sec->enabled;
10212 priv->status |= STATUS_SECURITY_UPDATED;
10213 if (sec->enabled)
10214 priv->capability |= CAP_PRIVACY_ON;
10215 else
10216 priv->capability &= ~CAP_PRIVACY_ON;
10217 }
10218
10219 if (sec->flags & SEC_ENCRYPT)
10220 priv->ieee->sec.encrypt = sec->encrypt;
10221
10222 if (sec->flags & SEC_LEVEL && priv->ieee->sec.level != sec->level) {
10223 priv->ieee->sec.level = sec->level;
10224 priv->ieee->sec.flags |= SEC_LEVEL;
10225 priv->status |= STATUS_SECURITY_UPDATED;
10226 }
10227
10228 if (!priv->ieee->host_encrypt && (sec->flags & SEC_ENCRYPT))
10229 ipw_set_hwcrypto_keys(priv);
10230
10231 /* To match the current functionality of ipw2100 (which works well with
10232 * various supplicants), we don't force a disassociate if the
10233 * privacy capability changes ... */
10234 #if 0
10235 if ((priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) &&
10236 (((priv->assoc_request.capability &
10237 WLAN_CAPABILITY_PRIVACY) && !sec->enabled) ||
10238 (!(priv->assoc_request.capability &
10239 WLAN_CAPABILITY_PRIVACY) && sec->enabled))) {
10240 IPW_DEBUG_ASSOC("Disassociating due to capability "
10241 "change.\n");
10242 ipw_disassociate(priv);
10243 }
10244 #endif
10245 }
10246
10247 static int init_supported_rates(struct ipw_priv *priv,
10248 struct ipw_supported_rates *rates)
10249 {
10250 /* TODO: Mask out rates based on priv->rates_mask */
10251
10252 memset(rates, 0, sizeof(*rates));
10253 /* configure supported rates */
10254 switch (priv->ieee->freq_band) {
10255 case IEEE80211_52GHZ_BAND:
10256 rates->ieee_mode = IPW_A_MODE;
10257 rates->purpose = IPW_RATE_CAPABILITIES;
10258 ipw_add_ofdm_scan_rates(rates, IEEE80211_CCK_MODULATION,
10259 IEEE80211_OFDM_DEFAULT_RATES_MASK);
10260 break;
10261
10262 default: /* Mixed or 2.4Ghz */
10263 rates->ieee_mode = IPW_G_MODE;
10264 rates->purpose = IPW_RATE_CAPABILITIES;
10265 ipw_add_cck_scan_rates(rates, IEEE80211_CCK_MODULATION,
10266 IEEE80211_CCK_DEFAULT_RATES_MASK);
10267 if (priv->ieee->modulation & IEEE80211_OFDM_MODULATION) {
10268 ipw_add_ofdm_scan_rates(rates, IEEE80211_CCK_MODULATION,
10269 IEEE80211_OFDM_DEFAULT_RATES_MASK);
10270 }
10271 break;
10272 }
10273
10274 return 0;
10275 }
10276
10277 static int ipw_config(struct ipw_priv *priv)
10278 {
10279 /* This is only called from ipw_up, which resets/reloads the firmware
10280 so we don't need to first disable the card before we configure
10281 it */
10282 if (ipw_set_tx_power(priv))
10283 goto error;
10284
10285 /* initialize adapter address */
10286 if (ipw_send_adapter_address(priv, priv->net_dev->dev_addr))
10287 goto error;
10288
10289 /* set basic system config settings */
10290 init_sys_config(&priv->sys_config);
10291 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
10292 priv->sys_config.answer_broadcast_ssid_probe = 1;
10293 else
10294 priv->sys_config.answer_broadcast_ssid_probe = 0;
10295
10296 if (ipw_send_system_config(priv, &priv->sys_config))
10297 goto error;
10298
10299 init_supported_rates(priv, &priv->rates);
10300 if (ipw_send_supported_rates(priv, &priv->rates))
10301 goto error;
10302
10303 /* Set request-to-send threshold */
10304 if (priv->rts_threshold) {
10305 if (ipw_send_rts_threshold(priv, priv->rts_threshold))
10306 goto error;
10307 }
10308 #ifdef CONFIG_IPW_QOS
10309 IPW_DEBUG_QOS("QoS: call ipw_qos_activate\n");
10310 ipw_qos_activate(priv, NULL);
10311 #endif /* CONFIG_IPW_QOS */
10312
10313 if (ipw_set_random_seed(priv))
10314 goto error;
10315
10316 /* final state transition to the RUN state */
10317 if (ipw_send_host_complete(priv))
10318 goto error;
10319
10320 priv->status |= STATUS_INIT;
10321
10322 ipw_led_init(priv);
10323 ipw_led_radio_on(priv);
10324 priv->notif_missed_beacons = 0;
10325
10326 /* Set hardware WEP key if it is configured. */
10327 if ((priv->capability & CAP_PRIVACY_ON) &&
10328 (priv->ieee->sec.level == SEC_LEVEL_1) &&
10329 !(priv->ieee->host_encrypt || priv->ieee->host_decrypt))
10330 ipw_set_hwcrypto_keys(priv);
10331
10332 return 0;
10333
10334 error:
10335 return -EIO;
10336 }
10337
10338 /*
10339 * NOTE:
10340 *
10341 * These tables have been tested in conjunction with the
10342 * Intel PRO/Wireless 2200BG and 2915ABG Network Connection Adapters.
10343 *
10344 * Altering these values, using them on other hardware, or using them in
10345 * geographies not intended for resale of the above-mentioned Intel adapters,
10346 * has not been tested.
10347 *
10348 */
10349 static const struct ieee80211_geo ipw_geos[] = {
10350 { /* Restricted */
10351 "---",
10352 .bg_channels = 11,
10353 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10354 {2427, 4}, {2432, 5}, {2437, 6},
10355 {2442, 7}, {2447, 8}, {2452, 9},
10356 {2457, 10}, {2462, 11}},
10357 },
10358
10359 { /* Custom US/Canada */
10360 "ZZF",
10361 .bg_channels = 11,
10362 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10363 {2427, 4}, {2432, 5}, {2437, 6},
10364 {2442, 7}, {2447, 8}, {2452, 9},
10365 {2457, 10}, {2462, 11}},
10366 .a_channels = 8,
10367 .a = {{5180, 36},
10368 {5200, 40},
10369 {5220, 44},
10370 {5240, 48},
10371 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
10372 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
10373 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
10374 {5320, 64, IEEE80211_CH_PASSIVE_ONLY}},
10375 },
10376
10377 { /* Rest of World */
10378 "ZZD",
10379 .bg_channels = 13,
10380 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10381 {2427, 4}, {2432, 5}, {2437, 6},
10382 {2442, 7}, {2447, 8}, {2452, 9},
10383 {2457, 10}, {2462, 11}, {2467, 12},
10384 {2472, 13}},
10385 },
10386
10387 { /* Custom USA & Europe & High */
10388 "ZZA",
10389 .bg_channels = 11,
10390 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10391 {2427, 4}, {2432, 5}, {2437, 6},
10392 {2442, 7}, {2447, 8}, {2452, 9},
10393 {2457, 10}, {2462, 11}},
10394 .a_channels = 13,
10395 .a = {{5180, 36},
10396 {5200, 40},
10397 {5220, 44},
10398 {5240, 48},
10399 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
10400 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
10401 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
10402 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
10403 {5745, 149},
10404 {5765, 153},
10405 {5785, 157},
10406 {5805, 161},
10407 {5825, 165}},
10408 },
10409
10410 { /* Custom NA & Europe */
10411 "ZZB",
10412 .bg_channels = 11,
10413 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10414 {2427, 4}, {2432, 5}, {2437, 6},
10415 {2442, 7}, {2447, 8}, {2452, 9},
10416 {2457, 10}, {2462, 11}},
10417 .a_channels = 13,
10418 .a = {{5180, 36},
10419 {5200, 40},
10420 {5220, 44},
10421 {5240, 48},
10422 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
10423 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
10424 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
10425 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
10426 {5745, 149, IEEE80211_CH_PASSIVE_ONLY},
10427 {5765, 153, IEEE80211_CH_PASSIVE_ONLY},
10428 {5785, 157, IEEE80211_CH_PASSIVE_ONLY},
10429 {5805, 161, IEEE80211_CH_PASSIVE_ONLY},
10430 {5825, 165, IEEE80211_CH_PASSIVE_ONLY}},
10431 },
10432
10433 { /* Custom Japan */
10434 "ZZC",
10435 .bg_channels = 11,
10436 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10437 {2427, 4}, {2432, 5}, {2437, 6},
10438 {2442, 7}, {2447, 8}, {2452, 9},
10439 {2457, 10}, {2462, 11}},
10440 .a_channels = 4,
10441 .a = {{5170, 34}, {5190, 38},
10442 {5210, 42}, {5230, 46}},
10443 },
10444
10445 { /* Custom */
10446 "ZZM",
10447 .bg_channels = 11,
10448 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10449 {2427, 4}, {2432, 5}, {2437, 6},
10450 {2442, 7}, {2447, 8}, {2452, 9},
10451 {2457, 10}, {2462, 11}},
10452 },
10453
10454 { /* Europe */
10455 "ZZE",
10456 .bg_channels = 13,
10457 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10458 {2427, 4}, {2432, 5}, {2437, 6},
10459 {2442, 7}, {2447, 8}, {2452, 9},
10460 {2457, 10}, {2462, 11}, {2467, 12},
10461 {2472, 13}},
10462 .a_channels = 19,
10463 .a = {{5180, 36},
10464 {5200, 40},
10465 {5220, 44},
10466 {5240, 48},
10467 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
10468 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
10469 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
10470 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
10471 {5500, 100, IEEE80211_CH_PASSIVE_ONLY},
10472 {5520, 104, IEEE80211_CH_PASSIVE_ONLY},
10473 {5540, 108, IEEE80211_CH_PASSIVE_ONLY},
10474 {5560, 112, IEEE80211_CH_PASSIVE_ONLY},
10475 {5580, 116, IEEE80211_CH_PASSIVE_ONLY},
10476 {5600, 120, IEEE80211_CH_PASSIVE_ONLY},
10477 {5620, 124, IEEE80211_CH_PASSIVE_ONLY},
10478 {5640, 128, IEEE80211_CH_PASSIVE_ONLY},
10479 {5660, 132, IEEE80211_CH_PASSIVE_ONLY},
10480 {5680, 136, IEEE80211_CH_PASSIVE_ONLY},
10481 {5700, 140, IEEE80211_CH_PASSIVE_ONLY}},
10482 },
10483
10484 { /* Custom Japan */
10485 "ZZJ",
10486 .bg_channels = 14,
10487 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10488 {2427, 4}, {2432, 5}, {2437, 6},
10489 {2442, 7}, {2447, 8}, {2452, 9},
10490 {2457, 10}, {2462, 11}, {2467, 12},
10491 {2472, 13}, {2484, 14, IEEE80211_CH_B_ONLY}},
10492 .a_channels = 4,
10493 .a = {{5170, 34}, {5190, 38},
10494 {5210, 42}, {5230, 46}},
10495 },
10496
10497 { /* Rest of World */
10498 "ZZR",
10499 .bg_channels = 14,
10500 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10501 {2427, 4}, {2432, 5}, {2437, 6},
10502 {2442, 7}, {2447, 8}, {2452, 9},
10503 {2457, 10}, {2462, 11}, {2467, 12},
10504 {2472, 13}, {2484, 14, IEEE80211_CH_B_ONLY |
10505 IEEE80211_CH_PASSIVE_ONLY}},
10506 },
10507
10508 { /* High Band */
10509 "ZZH",
10510 .bg_channels = 13,
10511 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10512 {2427, 4}, {2432, 5}, {2437, 6},
10513 {2442, 7}, {2447, 8}, {2452, 9},
10514 {2457, 10}, {2462, 11},
10515 {2467, 12, IEEE80211_CH_PASSIVE_ONLY},
10516 {2472, 13, IEEE80211_CH_PASSIVE_ONLY}},
10517 .a_channels = 4,
10518 .a = {{5745, 149}, {5765, 153},
10519 {5785, 157}, {5805, 161}},
10520 },
10521
10522 { /* Custom Europe */
10523 "ZZG",
10524 .bg_channels = 13,
10525 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10526 {2427, 4}, {2432, 5}, {2437, 6},
10527 {2442, 7}, {2447, 8}, {2452, 9},
10528 {2457, 10}, {2462, 11},
10529 {2467, 12}, {2472, 13}},
10530 .a_channels = 4,
10531 .a = {{5180, 36}, {5200, 40},
10532 {5220, 44}, {5240, 48}},
10533 },
10534
10535 { /* Europe */
10536 "ZZK",
10537 .bg_channels = 13,
10538 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10539 {2427, 4}, {2432, 5}, {2437, 6},
10540 {2442, 7}, {2447, 8}, {2452, 9},
10541 {2457, 10}, {2462, 11},
10542 {2467, 12, IEEE80211_CH_PASSIVE_ONLY},
10543 {2472, 13, IEEE80211_CH_PASSIVE_ONLY}},
10544 .a_channels = 24,
10545 .a = {{5180, 36, IEEE80211_CH_PASSIVE_ONLY},
10546 {5200, 40, IEEE80211_CH_PASSIVE_ONLY},
10547 {5220, 44, IEEE80211_CH_PASSIVE_ONLY},
10548 {5240, 48, IEEE80211_CH_PASSIVE_ONLY},
10549 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
10550 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
10551 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
10552 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
10553 {5500, 100, IEEE80211_CH_PASSIVE_ONLY},
10554 {5520, 104, IEEE80211_CH_PASSIVE_ONLY},
10555 {5540, 108, IEEE80211_CH_PASSIVE_ONLY},
10556 {5560, 112, IEEE80211_CH_PASSIVE_ONLY},
10557 {5580, 116, IEEE80211_CH_PASSIVE_ONLY},
10558 {5600, 120, IEEE80211_CH_PASSIVE_ONLY},
10559 {5620, 124, IEEE80211_CH_PASSIVE_ONLY},
10560 {5640, 128, IEEE80211_CH_PASSIVE_ONLY},
10561 {5660, 132, IEEE80211_CH_PASSIVE_ONLY},
10562 {5680, 136, IEEE80211_CH_PASSIVE_ONLY},
10563 {5700, 140, IEEE80211_CH_PASSIVE_ONLY},
10564 {5745, 149, IEEE80211_CH_PASSIVE_ONLY},
10565 {5765, 153, IEEE80211_CH_PASSIVE_ONLY},
10566 {5785, 157, IEEE80211_CH_PASSIVE_ONLY},
10567 {5805, 161, IEEE80211_CH_PASSIVE_ONLY},
10568 {5825, 165, IEEE80211_CH_PASSIVE_ONLY}},
10569 },
10570
10571 { /* Europe */
10572 "ZZL",
10573 .bg_channels = 11,
10574 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10575 {2427, 4}, {2432, 5}, {2437, 6},
10576 {2442, 7}, {2447, 8}, {2452, 9},
10577 {2457, 10}, {2462, 11}},
10578 .a_channels = 13,
10579 .a = {{5180, 36, IEEE80211_CH_PASSIVE_ONLY},
10580 {5200, 40, IEEE80211_CH_PASSIVE_ONLY},
10581 {5220, 44, IEEE80211_CH_PASSIVE_ONLY},
10582 {5240, 48, IEEE80211_CH_PASSIVE_ONLY},
10583 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
10584 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
10585 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
10586 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
10587 {5745, 149, IEEE80211_CH_PASSIVE_ONLY},
10588 {5765, 153, IEEE80211_CH_PASSIVE_ONLY},
10589 {5785, 157, IEEE80211_CH_PASSIVE_ONLY},
10590 {5805, 161, IEEE80211_CH_PASSIVE_ONLY},
10591 {5825, 165, IEEE80211_CH_PASSIVE_ONLY}},
10592 }
10593 };
10594
10595 /* GEO code borrowed from ieee80211_geo.c */
10596 static int ipw_is_valid_channel(struct ieee80211_device *ieee, u8 channel)
10597 {
10598 int i;
10599
10600 /* Driver needs to initialize the geography map before using
10601 * these helper functions */
10602 BUG_ON(ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0);
10603
10604 if (ieee->freq_band & IEEE80211_24GHZ_BAND)
10605 for (i = 0; i < ieee->geo.bg_channels; i++)
10606 /* NOTE: If G mode is currently supported but
10607 * this is a B only channel, we don't see it
10608 * as valid. */
10609 if ((ieee->geo.bg[i].channel == channel) &&
10610 (!(ieee->mode & IEEE_G) ||
10611 !(ieee->geo.bg[i].flags & IEEE80211_CH_B_ONLY)))
10612 return IEEE80211_24GHZ_BAND;
10613
10614 if (ieee->freq_band & IEEE80211_52GHZ_BAND)
10615 for (i = 0; i < ieee->geo.a_channels; i++)
10616 if (ieee->geo.a[i].channel == channel)
10617 return IEEE80211_52GHZ_BAND;
10618
10619 return 0;
10620 }
10621
10622 static int ipw_channel_to_index(struct ieee80211_device *ieee, u8 channel)
10623 {
10624 int i;
10625
10626 /* Driver needs to initialize the geography map before using
10627 * these helper functions */
10628 BUG_ON(ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0);
10629
10630 if (ieee->freq_band & IEEE80211_24GHZ_BAND)
10631 for (i = 0; i < ieee->geo.bg_channels; i++)
10632 if (ieee->geo.bg[i].channel == channel)
10633 return i;
10634
10635 if (ieee->freq_band & IEEE80211_52GHZ_BAND)
10636 for (i = 0; i < ieee->geo.a_channels; i++)
10637 if (ieee->geo.a[i].channel == channel)
10638 return i;
10639
10640 return -1;
10641 }
10642
10643 static u8 ipw_freq_to_channel(struct ieee80211_device *ieee, u32 freq)
10644 {
10645 int i;
10646
10647 /* Driver needs to initialize the geography map before using
10648 * these helper functions */
10649 BUG_ON(ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0);
10650
10651 freq /= 100000;
10652
10653 if (ieee->freq_band & IEEE80211_24GHZ_BAND)
10654 for (i = 0; i < ieee->geo.bg_channels; i++)
10655 if (ieee->geo.bg[i].freq == freq)
10656 return ieee->geo.bg[i].channel;
10657
10658 if (ieee->freq_band & IEEE80211_52GHZ_BAND)
10659 for (i = 0; i < ieee->geo.a_channels; i++)
10660 if (ieee->geo.a[i].freq == freq)
10661 return ieee->geo.a[i].channel;
10662
10663 return 0;
10664 }
10665
10666 static int ipw_set_geo(struct ieee80211_device *ieee,
10667 const struct ieee80211_geo *geo)
10668 {
10669 memcpy(ieee->geo.name, geo->name, 3);
10670 ieee->geo.name[3] = '\0';
10671 ieee->geo.bg_channels = geo->bg_channels;
10672 ieee->geo.a_channels = geo->a_channels;
10673 memcpy(ieee->geo.bg, geo->bg, geo->bg_channels *
10674 sizeof(struct ieee80211_channel));
10675 memcpy(ieee->geo.a, geo->a, ieee->geo.a_channels *
10676 sizeof(struct ieee80211_channel));
10677 return 0;
10678 }
10679
10680 static const struct ieee80211_geo *ipw_get_geo(struct ieee80211_device *ieee)
10681 {
10682 return &ieee->geo;
10683 }
10684
10685 #define MAX_HW_RESTARTS 5
10686 static int ipw_up(struct ipw_priv *priv)
10687 {
10688 int rc, i, j;
10689
10690 if (priv->status & STATUS_EXIT_PENDING)
10691 return -EIO;
10692
10693 if (cmdlog && !priv->cmdlog) {
10694 priv->cmdlog = kmalloc(sizeof(*priv->cmdlog) * cmdlog,
10695 GFP_KERNEL);
10696 if (priv->cmdlog == NULL) {
10697 IPW_ERROR("Error allocating %d command log entries.\n",
10698 cmdlog);
10699 } else {
10700 memset(priv->cmdlog, 0, sizeof(*priv->cmdlog) * cmdlog);
10701 priv->cmdlog_len = cmdlog;
10702 }
10703 }
10704
10705 for (i = 0; i < MAX_HW_RESTARTS; i++) {
10706 /* Load the microcode, firmware, and eeprom.
10707 * Also start the clocks. */
10708 rc = ipw_load(priv);
10709 if (rc) {
10710 IPW_ERROR("Unable to load firmware: %d\n", rc);
10711 return rc;
10712 }
10713
10714 ipw_init_ordinals(priv);
10715 if (!(priv->config & CFG_CUSTOM_MAC))
10716 eeprom_parse_mac(priv, priv->mac_addr);
10717 memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
10718
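/* Pick the geography table whose SKU string matches the EEPROM country
 * code; fall back to the restricted "---" entry if nothing matches. */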
10719 for (j = 0; j < ARRAY_SIZE(ipw_geos); j++) {
10720 if (!memcmp(&priv->eeprom[EEPROM_COUNTRY_CODE],
10721 ipw_geos[j].name, 3))
10722 break;
10723 }
10724 if (j == ARRAY_SIZE(ipw_geos)) {
10725 IPW_WARNING("SKU [%c%c%c] not recognized.\n",
10726 priv->eeprom[EEPROM_COUNTRY_CODE + 0],
10727 priv->eeprom[EEPROM_COUNTRY_CODE + 1],
10728 priv->eeprom[EEPROM_COUNTRY_CODE + 2]);
10729 j = 0;
10730 }
10731 if (ipw_set_geo(priv->ieee, &ipw_geos[j])) {
10732 IPW_WARNING("Could not set geography.");
10733 return 0;
10734 }
10735
10736 IPW_DEBUG_INFO("Geography %03d [%s] detected.\n",
10737 j, priv->ieee->geo.name);
10738
10739 if (priv->status & STATUS_RF_KILL_SW) {
10740 IPW_WARNING("Radio disabled by module parameter.\n");
10741 return 0;
10742 } else if (rf_kill_active(priv)) {
10743 IPW_WARNING("Radio Frequency Kill Switch is On:\n"
10744 "Kill switch must be turned off for "
10745 "wireless networking to work.\n");
10746 queue_delayed_work(priv->workqueue, &priv->rf_kill,
10747 2 * HZ);
10748 return 0;
10749 }
10750
10751 rc = ipw_config(priv);
10752 if (!rc) {
10753 IPW_DEBUG_INFO("Configured device on count %i\n", i);
10754
10755 /* If configured to try to auto-associate, kick
10756 * off a scan. */
10757 queue_work(priv->workqueue, &priv->request_scan);
10758
10759 return 0;
10760 }
10761
10762 IPW_DEBUG_INFO("Device configuration failed: 0x%08X\n", rc);
10763 IPW_DEBUG_INFO("Failed to config device on retry %d of %d\n",
10764 i, MAX_HW_RESTARTS);
10765
10766 /* We had an error bringing up the hardware, so take it
10767 * all the way back down so we can try again */
10768 ipw_down(priv);
10769 }
10770
10771 /* tried to restart and configure the device for as long as our
10772 * patience could withstand */
10773 IPW_ERROR("Unable to initialize device after %d attempts.\n", i);
10774
10775 return -EIO;
10776 }
10777
10778 static void ipw_bg_up(void *data)
10779 {
10780 struct ipw_priv *priv = data;
10781 down(&priv->sem);
10782 ipw_up(data);
10783 up(&priv->sem);
10784 }
10785
10786 static void ipw_deinit(struct ipw_priv *priv)
10787 {
10788 int i;
10789
10790 if (priv->status & STATUS_SCANNING) {
10791 IPW_DEBUG_INFO("Aborting scan during shutdown.\n");
10792 ipw_abort_scan(priv);
10793 }
10794
10795 if (priv->status & STATUS_ASSOCIATED) {
10796 IPW_DEBUG_INFO("Disassociating during shutdown.\n");
10797 ipw_disassociate(priv);
10798 }
10799
10800 ipw_led_shutdown(priv);
10801
10802 /* Wait up to 1s for status to change to not scanning and not
10803 * associated (disassociation can take a while for a full 802.11
10804 * exchange) */
10805 for (i = 1000; i && (priv->status &
10806 (STATUS_DISASSOCIATING |
10807 STATUS_ASSOCIATED | STATUS_SCANNING)); i--)
10808 udelay(10);
10809
10810 if (priv->status & (STATUS_DISASSOCIATING |
10811 STATUS_ASSOCIATED | STATUS_SCANNING))
10812 IPW_DEBUG_INFO("Still associated or scanning...\n");
10813 else
10814 IPW_DEBUG_INFO("Took %dms to de-init\n", 1000 - i);
10815
10816 /* Attempt to disable the card */
10817 ipw_send_card_disable(priv, 0);
10818
10819 priv->status &= ~STATUS_INIT;
10820 }
10821
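/* Bring the interface fully down: de-init the firmware state, silence
 * device interrupts, stop the net queue and turn the radio LED off.  Only
 * the RF-kill and EXIT_PENDING status bits survive. */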
10822 static void ipw_down(struct ipw_priv *priv)
10823 {
10824 int exit_pending = priv->status & STATUS_EXIT_PENDING;
10825
10826 priv->status |= STATUS_EXIT_PENDING;
10827
10828 if (ipw_is_init(priv))
10829 ipw_deinit(priv);
10830
10831 /* Wipe out the EXIT_PENDING status bit if we are not actually
10832 * exiting the module */
10833 if (!exit_pending)
10834 priv->status &= ~STATUS_EXIT_PENDING;
10835
10836 /* tell the device to stop sending interrupts */
10837 ipw_disable_interrupts(priv);
10838
10839 /* Clear all bits but the RF Kill */
10840 priv->status &= STATUS_RF_KILL_MASK | STATUS_EXIT_PENDING;
10841 netif_carrier_off(priv->net_dev);
10842 netif_stop_queue(priv->net_dev);
10843
10844 ipw_stop_nic(priv);
10845
10846 ipw_led_radio_off(priv);
10847 }
10848
10849 static void ipw_bg_down(void *data)
10850 {
10851 struct ipw_priv *priv = data;
10852 down(&priv->sem);
10853 ipw_down(data);
10854 up(&priv->sem);
10855 }
10856
10857 /* Called by register_netdev() */
10858 static int ipw_net_init(struct net_device *dev)
10859 {
10860 struct ipw_priv *priv = ieee80211_priv(dev);
10861 down(&priv->sem);
10862
10863 if (ipw_up(priv)) {
10864 up(&priv->sem);
10865 return -EIO;
10866 }
10867
10868 up(&priv->sem);
10869 return 0;
10870 }
10871
10872 /* PCI driver stuff */
10873 static struct pci_device_id card_ids[] = {
10874 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2701, 0, 0, 0},
10875 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2702, 0, 0, 0},
10876 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2711, 0, 0, 0},
10877 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2712, 0, 0, 0},
10878 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2721, 0, 0, 0},
10879 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2722, 0, 0, 0},
10880 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2731, 0, 0, 0},
10881 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2732, 0, 0, 0},
10882 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2741, 0, 0, 0},
10883 {PCI_VENDOR_ID_INTEL, 0x1043, 0x103c, 0x2741, 0, 0, 0},
10884 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2742, 0, 0, 0},
10885 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2751, 0, 0, 0},
10886 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2752, 0, 0, 0},
10887 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2753, 0, 0, 0},
10888 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2754, 0, 0, 0},
10889 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2761, 0, 0, 0},
10890 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2762, 0, 0, 0},
10891 {PCI_VENDOR_ID_INTEL, 0x104f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
10892 {PCI_VENDOR_ID_INTEL, 0x4220, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* BG */
10893 {PCI_VENDOR_ID_INTEL, 0x4221, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* BG */
10894 {PCI_VENDOR_ID_INTEL, 0x4223, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* ABG */
10895 {PCI_VENDOR_ID_INTEL, 0x4224, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* ABG */
10896
10897 /* required last entry */
10898 {0,}
10899 };
10900
10901 MODULE_DEVICE_TABLE(pci, card_ids);
10902
10903 static struct attribute *ipw_sysfs_entries[] = {
10904 &dev_attr_rf_kill.attr,
10905 &dev_attr_direct_dword.attr,
10906 &dev_attr_indirect_byte.attr,
10907 &dev_attr_indirect_dword.attr,
10908 &dev_attr_mem_gpio_reg.attr,
10909 &dev_attr_command_event_reg.attr,
10910 &dev_attr_nic_type.attr,
10911 &dev_attr_status.attr,
10912 &dev_attr_cfg.attr,
10913 &dev_attr_error.attr,
10914 &dev_attr_event_log.attr,
10915 &dev_attr_cmd_log.attr,
10916 &dev_attr_eeprom_delay.attr,
10917 &dev_attr_ucode_version.attr,
10918 &dev_attr_rtc.attr,
10919 &dev_attr_scan_age.attr,
10920 &dev_attr_led.attr,
10921 &dev_attr_speed_scan.attr,
10922 &dev_attr_net_stats.attr,
10923 NULL
10924 };
10925
10926 static struct attribute_group ipw_attribute_group = {
10927 .name = NULL, /* put in device directory */
10928 .attrs = ipw_sysfs_entries,
10929 };
10930
10931 static int ipw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
10932 {
10933 int err = 0;
10934 struct net_device *net_dev;
10935 void __iomem *base;
10936 u32 length, val;
10937 struct ipw_priv *priv;
10938 int i;
10939
10940 net_dev = alloc_ieee80211(sizeof(struct ipw_priv));
10941 if (net_dev == NULL) {
10942 err = -ENOMEM;
10943 goto out;
10944 }
10945
10946 priv = ieee80211_priv(net_dev);
10947 priv->ieee = netdev_priv(net_dev);
10948
10949 priv->net_dev = net_dev;
10950 priv->pci_dev = pdev;
10951 #ifdef CONFIG_IPW2200_DEBUG
10952 ipw_debug_level = debug;
10953 #endif
10954 spin_lock_init(&priv->lock);
10955 for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++)
10956 INIT_LIST_HEAD(&priv->ibss_mac_hash[i]);
10957
10958 init_MUTEX(&priv->sem);
10959 if (pci_enable_device(pdev)) {
10960 err = -ENODEV;
10961 goto out_free_ieee80211;
10962 }
10963
10964 pci_set_master(pdev);
10965
10966 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
10967 if (!err)
10968 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
10969 if (err) {
10970 printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n");
10971 goto out_pci_disable_device;
10972 }
10973
10974 pci_set_drvdata(pdev, priv);
10975
10976 err = pci_request_regions(pdev, DRV_NAME);
10977 if (err)
10978 goto out_pci_disable_device;
10979
10980 /* We disable the RETRY_TIMEOUT register (0x41) to keep
10981 * PCI Tx retries from interfering with C3 CPU state */
10982 pci_read_config_dword(pdev, 0x40, &val);
10983 if ((val & 0x0000ff00) != 0)
10984 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
10985
10986 length = pci_resource_len(pdev, 0);
10987 priv->hw_len = length;
10988
10989 base = ioremap_nocache(pci_resource_start(pdev, 0), length);
10990 if (!base) {
10991 err = -ENODEV;
10992 goto out_pci_release_regions;
10993 }
10994
10995 priv->hw_base = base;
10996 IPW_DEBUG_INFO("pci_resource_len = 0x%08x\n", length);
10997 IPW_DEBUG_INFO("pci_resource_base = %p\n", base);
10998
10999 err = ipw_setup_deferred_work(priv);
11000 if (err) {
11001 IPW_ERROR("Unable to setup deferred work\n");
11002 goto out_iounmap;
11003 }
11004
11005 ipw_sw_reset(priv, 1);
11006
11007 err = request_irq(pdev->irq, ipw_isr, SA_SHIRQ, DRV_NAME, priv);
11008 if (err) {
11009 IPW_ERROR("Error allocating IRQ %d\n", pdev->irq);
11010 goto out_destroy_workqueue;
11011 }
11012
11013 SET_MODULE_OWNER(net_dev);
11014 SET_NETDEV_DEV(net_dev, &pdev->dev);
11015
11016 down(&priv->sem);
11017
11018 priv->ieee->hard_start_xmit = ipw_net_hard_start_xmit;
11019 priv->ieee->set_security = shim__set_security;
11020 priv->ieee->is_queue_full = ipw_net_is_queue_full;
11021
11022 #ifdef CONFIG_IPW_QOS
11023 priv->ieee->handle_probe_response = ipw_handle_beacon;
11024 priv->ieee->handle_beacon = ipw_handle_probe_response;
11025 priv->ieee->handle_assoc_response = ipw_handle_assoc_response;
11026 #endif /* CONFIG_IPW_QOS */
11027
11028 priv->ieee->perfect_rssi = -20;
11029 priv->ieee->worst_rssi = -85;
11030
11031 net_dev->open = ipw_net_open;
11032 net_dev->stop = ipw_net_stop;
11033 net_dev->init = ipw_net_init;
11034 net_dev->get_stats = ipw_net_get_stats;
11035 net_dev->set_multicast_list = ipw_net_set_multicast_list;
11036 net_dev->set_mac_address = ipw_net_set_mac_address;
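/* Point the generic WE spy code at the ieee80211 spy_data so that the
 * iw_handler_{set,get}_spy entries in ipw_wx_handlers operate on valid
 * state. */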
11037 priv->wireless_data.spy_data = &priv->ieee->spy_data;
11038 net_dev->wireless_data = &priv->wireless_data;
11039 net_dev->wireless_handlers = &ipw_wx_handler_def;
11040 net_dev->ethtool_ops = &ipw_ethtool_ops;
11041 net_dev->irq = pdev->irq;
11042 net_dev->base_addr = (unsigned long)priv->hw_base;
11043 net_dev->mem_start = pci_resource_start(pdev, 0);
11044 net_dev->mem_end = net_dev->mem_start + pci_resource_len(pdev, 0) - 1;
11045
11046 err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group);
11047 if (err) {
11048 IPW_ERROR("failed to create sysfs device attributes\n");
11049 up(&priv->sem);
11050 goto out_release_irq;
11051 }
11052
11053 up(&priv->sem);
11054 err = register_netdev(net_dev);
11055 if (err) {
11056 IPW_ERROR("failed to register network device\n");
11057 goto out_remove_sysfs;
11058 }
11059 return 0;
11060
11061 out_remove_sysfs:
11062 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11063 out_release_irq:
11064 free_irq(pdev->irq, priv);
11065 out_destroy_workqueue:
11066 destroy_workqueue(priv->workqueue);
11067 priv->workqueue = NULL;
11068 out_iounmap:
11069 iounmap(priv->hw_base);
11070 out_pci_release_regions:
11071 pci_release_regions(pdev);
11072 out_pci_disable_device:
11073 pci_disable_device(pdev);
11074 pci_set_drvdata(pdev, NULL);
11075 out_free_ieee80211:
11076 free_ieee80211(priv->net_dev);
11077 out:
11078 return err;
11079 }
11080
11081 static void ipw_pci_remove(struct pci_dev *pdev)
11082 {
11083 struct ipw_priv *priv = pci_get_drvdata(pdev);
11084 struct list_head *p, *q;
11085 int i;
11086
11087 if (!priv)
11088 return;
11089
11090 down(&priv->sem);
11091
11092 priv->status |= STATUS_EXIT_PENDING;
11093 ipw_down(priv);
11094 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11095
11096 up(&priv->sem);
11097
11098 unregister_netdev(priv->net_dev);
11099
11100 if (priv->rxq) {
11101 ipw_rx_queue_free(priv, priv->rxq);
11102 priv->rxq = NULL;
11103 }
11104 ipw_tx_queue_free(priv);
11105
11106 if (priv->cmdlog) {
11107 kfree(priv->cmdlog);
11108 priv->cmdlog = NULL;
11109 }
11110 /* ipw_down will ensure that there is no more pending work
11111 * in the workqueue, so we can safely remove the work items now. */
11112 cancel_delayed_work(&priv->adhoc_check);
11113 cancel_delayed_work(&priv->gather_stats);
11114 cancel_delayed_work(&priv->request_scan);
11115 cancel_delayed_work(&priv->rf_kill);
11116 cancel_delayed_work(&priv->scan_check);
11117 destroy_workqueue(priv->workqueue);
11118 priv->workqueue = NULL;
11119
11120 /* Free MAC hash list for ADHOC */
11121 for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) {
11122 list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) {
11123 kfree(list_entry(p, struct ipw_ibss_seq, list));
11124 list_del(p);
11125 }
11126 }
11127
11128 if (priv->error) {
11129 ipw_free_error_log(priv->error);
11130 priv->error = NULL;
11131 }
11132
11133 free_irq(pdev->irq, priv);
11134 iounmap(priv->hw_base);
11135 pci_release_regions(pdev);
11136 pci_disable_device(pdev);
11137 pci_set_drvdata(pdev, NULL);
11138 free_ieee80211(priv->net_dev);
11139 free_firmware();
11140 }
11141
11142 #ifdef CONFIG_PM
11143 static int ipw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
11144 {
11145 struct ipw_priv *priv = pci_get_drvdata(pdev);
11146 struct net_device *dev = priv->net_dev;
11147
11148 printk(KERN_INFO "%s: Going into suspend...\n", dev->name);
11149
11150 /* Take down the device; this powers it off, etc. */
11151 ipw_down(priv);
11152
11153 /* Remove the PRESENT state of the device */
11154 netif_device_detach(dev);
11155
11156 pci_save_state(pdev);
11157 pci_disable_device(pdev);
11158 pci_set_power_state(pdev, pci_choose_state(pdev, state));
11159
11160 return 0;
11161 }
11162
11163 static int ipw_pci_resume(struct pci_dev *pdev)
11164 {
11165 struct ipw_priv *priv = pci_get_drvdata(pdev);
11166 struct net_device *dev = priv->net_dev;
11167 u32 val;
11168
11169 printk(KERN_INFO "%s: Coming out of suspend...\n", dev->name);
11170
11171 pci_set_power_state(pdev, PCI_D0);
11172 pci_enable_device(pdev);
11173 pci_restore_state(pdev);
11174
11175 /*
11176 * Suspend/Resume resets the PCI configuration space, so we have to
11177 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
11178 * from interfering with C3 CPU state. pci_restore_state won't help
11179 * here since it only restores the first 64 bytes of the PCI config header.
11180 */
11181 pci_read_config_dword(pdev, 0x40, &val);
11182 if ((val & 0x0000ff00) != 0)
11183 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
11184
11185 /* Set the device back into the PRESENT state; this will also wake
11186 * the queue if needed */
11187 netif_device_attach(dev);
11188
11189 /* Bring the device back up */
11190 queue_work(priv->workqueue, &priv->up);
11191
11192 return 0;
11193 }
11194 #endif
11195
11196 /* driver initialization stuff */
11197 static struct pci_driver ipw_driver = {
11198 .name = DRV_NAME,
11199 .id_table = card_ids,
11200 .probe = ipw_pci_probe,
11201 .remove = __devexit_p(ipw_pci_remove),
11202 #ifdef CONFIG_PM
11203 .suspend = ipw_pci_suspend,
11204 .resume = ipw_pci_resume,
11205 #endif
11206 };
11207
11208 static int __init ipw_init(void)
11209 {
11210 int ret;
11211
11212 printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
11213 printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");
11214
11215 ret = pci_module_init(&ipw_driver);
11216 if (ret) {
11217 IPW_ERROR("Unable to initialize PCI module\n");
11218 return ret;
11219 }
11220
11221 ret = driver_create_file(&ipw_driver.driver, &driver_attr_debug_level);
11222 if (ret) {
11223 IPW_ERROR("Unable to create driver sysfs file\n");
11224 pci_unregister_driver(&ipw_driver);
11225 return ret;
11226 }
11227
11228 return ret;
11229 }
11230
11231 static void __exit ipw_exit(void)
11232 {
11233 driver_remove_file(&ipw_driver.driver, &driver_attr_debug_level);
11234 pci_unregister_driver(&ipw_driver);
11235 }
11236
11237 module_param(disable, int, 0444);
11238 MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
11239
11240 module_param(associate, int, 0444);
11241 MODULE_PARM_DESC(associate, "auto associate when scanning (default on)");
11242
11243 module_param(auto_create, int, 0444);
11244 MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)");
11245
11246 module_param(led, int, 0444);
11247 MODULE_PARM_DESC(led, "enable led control on some systems (default 0 off)\n");
11248
11249 module_param(debug, int, 0444);
11250 MODULE_PARM_DESC(debug, "debug output mask");
11251
11252 module_param(channel, int, 0444);
11253 MODULE_PARM_DESC(channel, "channel to limit associate to (default 0 [ANY])");
11254
11255 #ifdef CONFIG_IPW_QOS
11256 module_param(qos_enable, int, 0444);
11257 MODULE_PARM_DESC(qos_enable, "enable all QoS functionalities");
11258
11259 module_param(qos_burst_enable, int, 0444);
11260 MODULE_PARM_DESC(qos_burst_enable, "enable QoS burst mode");
11261
11262 module_param(qos_no_ack_mask, int, 0444);
11263 MODULE_PARM_DESC(qos_no_ack_mask, "mask Tx_Queue to no ack");
11264
11265 module_param(burst_duration_CCK, int, 0444);
11266 MODULE_PARM_DESC(burst_duration_CCK, "set CCK burst value");
11267
11268 module_param(burst_duration_OFDM, int, 0444);
11269 MODULE_PARM_DESC(burst_duration_OFDM, "set OFDM burst value");
11270 #endif /* CONFIG_IPW_QOS */
11271
11272 #ifdef CONFIG_IPW2200_MONITOR
11273 module_param(mode, int, 0444);
11274 MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS,2=Monitor)");
11275 #else
11276 module_param(mode, int, 0444);
11277 MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS)");
11278 #endif
11279
11280 module_param(hwcrypto, int, 0444);
11281 MODULE_PARM_DESC(hwcrypto, "enable hardware crypto (default on)");
11282
11283 module_param(cmdlog, int, 0444);
11284 MODULE_PARM_DESC(cmdlog,
11285 "allocate a ring buffer for logging firmware commands");
11286
11287 module_exit(ipw_exit);
11288 module_init(ipw_init);