iwlwifi: document 4965 rate scaling
[deliverable/linux.git] drivers/net/wireless/iwlwifi/iwl4965-base.c
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2007 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/version.h>
33#include <linux/init.h>
34#include <linux/pci.h>
35#include <linux/dma-mapping.h>
36#include <linux/delay.h>
37#include <linux/skbuff.h>
38#include <linux/netdevice.h>
39#include <linux/wireless.h>
40#include <linux/firmware.h>
41#include <linux/etherdevice.h>
42#include <linux/if_arp.h>
43
44#include <net/ieee80211_radiotap.h>
45#include <net/mac80211.h>
46
47#include <asm/div64.h>
48
49#include "iwl-4965.h"
50#include "iwl-helpers.h"
51
52#ifdef CONFIG_IWL4965_DEBUG
53u32 iwl4965_debug_level;
54#endif
55
56static int iwl4965_tx_queue_update_write_ptr(struct iwl4965_priv *priv,
57 struct iwl4965_tx_queue *txq);
58
59/******************************************************************************
60 *
61 * module boiler plate
62 *
63 ******************************************************************************/
64
65/* module parameters */
66static int iwl4965_param_disable_hw_scan; /* def: 0 = use 4965's h/w scan */
67static int iwl4965_param_debug; /* def: 0 = minimal debug log messages */
68static int iwl4965_param_disable; /* def: enable radio */
69static int iwl4965_param_antenna; /* def: 0 = both antennas (use diversity) */
70int iwl4965_param_hwcrypto; /* def: using software encryption */
71static int iwl4965_param_qos_enable = 1; /* def: 1 = use quality of service */
72int iwl4965_param_queues_num = IWL_MAX_NUM_QUEUES; /* def: 16 Tx queues */
73
74/*
75 * module name, copyright, version, etc.
76 * NOTE: DRV_NAME is defined in iwlwifi.h for use by iwl-debug.h and printk
77 */
78
79#define DRV_DESCRIPTION "Intel(R) Wireless WiFi Link 4965AGN driver for Linux"
80
81#ifdef CONFIG_IWL4965_DEBUG
82#define VD "d"
83#else
84#define VD
85#endif
86
87#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT
88#define VS "s"
89#else
90#define VS
91#endif
92
93#define IWLWIFI_VERSION "1.1.19k" VD VS
94#define DRV_COPYRIGHT "Copyright(c) 2003-2007 Intel Corporation"
95#define DRV_VERSION IWLWIFI_VERSION
96
97/* Change firmware file name, using "-" and incrementing number,
98 * *only* when uCode interface or architecture changes so that it
99 * is not compatible with earlier drivers.
100 * This number will also appear in << 8 position of 1st dword of uCode file */
101#define IWL4965_UCODE_API "-1"
102
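/*
 * Editor's note -- illustrative sketch, not driver code: per the comment
 * above, the same API number sits at the "<< 8" position of the first
 * dword of the uCode file, so a loader could cross-check it roughly as
 * follows (the helper name is hypothetical):
 *
 *	static inline u8 iwl4965_ucode_api(__le32 first_dword)
 *	{
 *		return (le32_to_cpu(first_dword) >> 8) & 0xff;
 *	}
 */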
103MODULE_DESCRIPTION(DRV_DESCRIPTION);
104MODULE_VERSION(DRV_VERSION);
105MODULE_AUTHOR(DRV_COPYRIGHT);
106MODULE_LICENSE("GPL");
107
108__le16 *ieee80211_get_qos_ctrl(struct ieee80211_hdr *hdr)
109{
110 u16 fc = le16_to_cpu(hdr->frame_control);
111 int hdr_len = ieee80211_get_hdrlen(fc);
112
113 if ((fc & 0x00cc) == (IEEE80211_STYPE_QOS_DATA | IEEE80211_FTYPE_DATA))
114 return (__le16 *) ((u8 *) hdr + hdr_len - QOS_CONTROL_LEN);
115 return NULL;
116}
117
118static const struct ieee80211_hw_mode *iwl4965_get_hw_mode(
119 struct iwl4965_priv *priv, int mode)
120{
121 int i;
122
123 for (i = 0; i < 3; i++)
124 if (priv->modes[i].mode == mode)
125 return &priv->modes[i];
126
127 return NULL;
128}
129
130static int iwl4965_is_empty_essid(const char *essid, int essid_len)
131{
132 /* Single white space is for Linksys APs */
133 if (essid_len == 1 && essid[0] == ' ')
134 return 1;
135
136 /* Otherwise, if the entire essid is 0, we assume it is hidden */
137 while (essid_len) {
138 essid_len--;
139 if (essid[essid_len] != '\0')
140 return 0;
141 }
142
143 return 1;
144}
145
146static const char *iwl4965_escape_essid(const char *essid, u8 essid_len)
147{
148 static char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
149 const char *s = essid;
150 char *d = escaped;
151
152 if (iwl4965_is_empty_essid(essid, essid_len)) {
153 memcpy(escaped, "<hidden>", sizeof("<hidden>"));
154 return escaped;
155 }
156
157 essid_len = min(essid_len, (u8) IW_ESSID_MAX_SIZE);
158 while (essid_len--) {
159 if (*s == '\0') {
160 *d++ = '\\';
161 *d++ = '0';
162 s++;
163 } else
164 *d++ = *s++;
165 }
166 *d = '\0';
167 return escaped;
168}
169
170static void iwl4965_print_hex_dump(int level, void *p, u32 len)
171{
172#ifdef CONFIG_IWL4965_DEBUG
173 if (!(iwl4965_debug_level & level))
174 return;
175
176 print_hex_dump(KERN_DEBUG, "iwl data: ", DUMP_PREFIX_OFFSET, 16, 1,
177 p, len, 1);
178#endif
179}
180
181/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
182 * DMA services
183 *
184 * Theory of operation
185 *
186 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
187 * of buffer descriptors, each of which points to one or more data buffers for
188 * the device to read from or fill. Driver and device exchange status of each
189 * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
190 * entries in each circular buffer, to protect against confusing empty and full
191 * queue states.
192 *
193 * The device reads or writes the data in the queues via the device's several
194 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
195 *
196 * For Tx queues, there are low mark and high mark limits. If, after queuing
197 * a packet for Tx, free space becomes < low mark, the Tx queue is stopped. When
198 * reclaiming packets (on the 'tx done' IRQ), if free space becomes > high mark,
199 * the Tx queue is resumed.
200 *
201 * The 4965 operates with up to 17 queues: One receive queue, one transmit
202 * queue (#4) for sending commands to the device firmware, and 15 other
203 * Tx queues that may be mapped to prioritized Tx DMA/FIFO channels.
204 *
205 * See more detailed info in iwl-4965-hw.h.
206 ***************************************************/
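/*
 * Editor's note -- a minimal sketch (not driver code) of the low/high mark
 * flow control described above, assuming a mac80211-style stop/wake API:
 *
 *	after queueing a frame for Tx:
 *		if (iwl4965_queue_space(&txq->q) < txq->q.low_mark)
 *			ieee80211_stop_queue(hw, queue_id);
 *
 *	after reclaiming frames in the 'tx done' interrupt path:
 *		if (iwl4965_queue_space(&txq->q) > txq->q.high_mark)
 *			ieee80211_wake_queue(hw, queue_id);
 */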
207
208static int iwl4965_queue_space(const struct iwl4965_queue *q)
209{
210 int s = q->read_ptr - q->write_ptr;
211
212 if (q->read_ptr > q->write_ptr)
213 s -= q->n_bd;
214
215 if (s <= 0)
216 s += q->n_window;
217 /* keep some reserve to not confuse empty and full situations */
218 s -= 2;
219 if (s < 0)
220 s = 0;
221 return s;
222}
223
224/**
225 * iwl4965_queue_inc_wrap - increment queue index, wrap back to beginning
226 * @index -- current index
227 * @n_bd -- total number of entries in queue (must be power of 2)
228 */
229static inline int iwl4965_queue_inc_wrap(int index, int n_bd)
230{
231 return ++index & (n_bd - 1);
232}
233
234/**
235 * iwl4965_queue_dec_wrap - decrement queue index, wrap back to end
236 * @index -- current index
237 * @n_bd -- total number of entries in queue (must be power of 2)
238 */
239static inline int iwl4965_queue_dec_wrap(int index, int n_bd)
240{
241 return --index & (n_bd - 1);
242}
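/*
 * Editor's worked example (not driver code): with n_bd = 256 (a power of 2),
 * the mask arithmetic above wraps with no branches:
 *
 *	iwl4965_queue_inc_wrap(255, 256) == (256 & 255) == 0
 *	iwl4965_queue_dec_wrap(0, 256)   == (-1 & 255)  == 255
 *
 * This only holds for power-of-2 sizes, which is why iwl4965_queue_init()
 * BUG()s on anything else.
 */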
243
244static inline int x2_queue_used(const struct iwl4965_queue *q, int i)
245{
246 return q->write_ptr > q->read_ptr ?
247 (i >= q->read_ptr && i < q->write_ptr) :
248 !(i < q->read_ptr && i >= q->write_ptr);
249}
250
251static inline u8 get_cmd_index(struct iwl4965_queue *q, u32 index, int is_huge)
252{
253 /* This is for scan command, the big buffer at end of command array */
254 if (is_huge)
255 return q->n_window; /* must be power of 2 */
256
257 /* Otherwise, use normal size buffers */
258 return index & (q->n_window - 1);
259}
260
261/**
262 * iwl4965_queue_init - Initialize queue's high/low-water and read/write indexes
263 */
264static int iwl4965_queue_init(struct iwl4965_priv *priv, struct iwl4965_queue *q,
265 int count, int slots_num, u32 id)
266{
267 q->n_bd = count;
268 q->n_window = slots_num;
269 q->id = id;
270
271 /* count must be power-of-two size, otherwise iwl4965_queue_inc_wrap
272 * and iwl4965_queue_dec_wrap are broken. */
273 BUG_ON(!is_power_of_2(count));
274
275 /* slots_num must be power-of-two size, otherwise
276 * get_cmd_index is broken. */
277 BUG_ON(!is_power_of_2(slots_num));
278
279 q->low_mark = q->n_window / 4;
280 if (q->low_mark < 4)
281 q->low_mark = 4;
282
283 q->high_mark = q->n_window / 8;
284 if (q->high_mark < 2)
285 q->high_mark = 2;
286
287 q->write_ptr = q->read_ptr = 0;
288
289 return 0;
290}
291
292/**
293 * iwl4965_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
294 */
295static int iwl4965_tx_queue_alloc(struct iwl4965_priv *priv,
296 struct iwl4965_tx_queue *txq, u32 id)
297{
298 struct pci_dev *dev = priv->pci_dev;
299
300 /* Driver private data, only for Tx (not command) queues,
301 * not shared with device. */
302 if (id != IWL_CMD_QUEUE_NUM) {
303 txq->txb = kmalloc(sizeof(txq->txb[0]) *
304 TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
305 if (!txq->txb) {
306 IWL_ERROR("kmalloc for auxiliary BD "
307 "structures failed\n");
308 goto error;
309 }
310 } else
311 txq->txb = NULL;
312
313 /* Circular buffer of transmit frame descriptors (TFDs),
314 * shared with device */
315 txq->bd = pci_alloc_consistent(dev,
316 sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX,
317 &txq->q.dma_addr);
318
319 if (!txq->bd) {
320 IWL_ERROR("pci_alloc_consistent(%zd) failed\n",
321 sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX);
322 goto error;
323 }
324 txq->q.id = id;
325
326 return 0;
327
328 error:
329 if (txq->txb) {
330 kfree(txq->txb);
331 txq->txb = NULL;
332 }
333
334 return -ENOMEM;
335}
336
337/**
338 * iwl4965_tx_queue_init - Allocate and initialize one tx/cmd queue
339 */
340int iwl4965_tx_queue_init(struct iwl4965_priv *priv,
341 struct iwl4965_tx_queue *txq, int slots_num, u32 txq_id)
342{
343 struct pci_dev *dev = priv->pci_dev;
344 int len;
345 int rc = 0;
346
347 /*
348 * Alloc buffer array for commands (Tx or other types of commands).
349 * For the command queue (#4), allocate command space + one big
350 * command for scan, since scan command is very huge; the system will
351 * not have two scans at the same time, so only one is needed.
352 * For data Tx queues (all other queues), no super-size command
353 * space is needed.
354 */
355 len = sizeof(struct iwl4965_cmd) * slots_num;
356 if (txq_id == IWL_CMD_QUEUE_NUM)
357 len += IWL_MAX_SCAN_SIZE;
358 txq->cmd = pci_alloc_consistent(dev, len, &txq->dma_addr_cmd);
359 if (!txq->cmd)
360 return -ENOMEM;
361
362 /* Alloc driver data array and TFD circular buffer */
363 rc = iwl4965_tx_queue_alloc(priv, txq, txq_id);
364 if (rc) {
365 pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd);
366
367 return -ENOMEM;
368 }
369 txq->need_update = 0;
370
371 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
372 * iwl4965_queue_inc_wrap and iwl4965_queue_dec_wrap are broken. */
373 BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
374
375 /* Initialize queue's high/low-water marks, and head/tail indexes */
376 iwl4965_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
377
378 /* Tell device where to find queue */
379 iwl4965_hw_tx_queue_init(priv, txq);
380
381 return 0;
382}
383
384/**
385 * iwl4965_tx_queue_free - Deallocate DMA queue.
386 * @txq: Transmit queue to deallocate.
387 *
388 * Empty queue by removing and destroying all BD's.
389 * Free all buffers.
390 * 0-fill, but do not free "txq" descriptor structure.
b481de9c 391 */
392void iwl4965_tx_queue_free(struct iwl4965_priv *priv, struct iwl4965_tx_queue *txq)
393{
394 struct iwl4965_queue *q = &txq->q;
395 struct pci_dev *dev = priv->pci_dev;
396 int len;
397
398 if (q->n_bd == 0)
399 return;
400
401 /* first, empty all BD's */
402 for (; q->write_ptr != q->read_ptr;
403 q->read_ptr = iwl4965_queue_inc_wrap(q->read_ptr, q->n_bd))
404 iwl4965_hw_txq_free_tfd(priv, txq);
405
406 len = sizeof(struct iwl4965_cmd) * q->n_window;
407 if (q->id == IWL_CMD_QUEUE_NUM)
408 len += IWL_MAX_SCAN_SIZE;
409
410 /* De-alloc array of command/tx buffers */
411 pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd);
412
413 /* De-alloc circular buffer of TFDs */
414 if (txq->q.n_bd)
415 pci_free_consistent(dev, sizeof(struct iwl4965_tfd_frame) *
416 txq->q.n_bd, txq->bd, txq->q.dma_addr);
417
418 /* De-alloc array of per-TFD driver data */
419 if (txq->txb) {
420 kfree(txq->txb);
421 txq->txb = NULL;
422 }
423
424 /* 0-fill queue descriptor structure */
425 memset(txq, 0, sizeof(*txq));
426}
427
428const u8 iwl4965_broadcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
429
430/*************** STATION TABLE MANAGEMENT ****
431 * mac80211 should be examined to determine if sta_info is duplicating
432 * the functionality provided here
433 */
434
435/**************************************************************/
436
437#if 0 /* temporary disable till we add real remove station */
438/**
439 * iwl4965_remove_station - Remove driver's knowledge of station.
440 *
441 * NOTE: This does not remove station from device's station table.
442 */
443static u8 iwl4965_remove_station(struct iwl4965_priv *priv, const u8 *addr, int is_ap)
444{
445 int index = IWL_INVALID_STATION;
446 int i;
447 unsigned long flags;
448
449 spin_lock_irqsave(&priv->sta_lock, flags);
450
451 if (is_ap)
452 index = IWL_AP_ID;
453 else if (is_broadcast_ether_addr(addr))
454 index = priv->hw_setting.bcast_sta_id;
455 else
456 for (i = IWL_STA_ID; i < priv->hw_setting.max_stations; i++)
457 if (priv->stations[i].used &&
458 !compare_ether_addr(priv->stations[i].sta.sta.addr,
459 addr)) {
460 index = i;
461 break;
462 }
463
464 if (unlikely(index == IWL_INVALID_STATION))
465 goto out;
466
467 if (priv->stations[index].used) {
468 priv->stations[index].used = 0;
469 priv->num_stations--;
470 }
471
472 BUG_ON(priv->num_stations < 0);
473
474out:
475 spin_unlock_irqrestore(&priv->sta_lock, flags);
476 return 0;
477}
478#endif
479
480/**
481 * iwl4965_clear_stations_table - Clear the driver's station table
482 *
483 * NOTE: This does not clear or otherwise alter the device's station table.
484 */
485static void iwl4965_clear_stations_table(struct iwl4965_priv *priv)
486{
487 unsigned long flags;
488
489 spin_lock_irqsave(&priv->sta_lock, flags);
490
491 priv->num_stations = 0;
492 memset(priv->stations, 0, sizeof(priv->stations));
493
494 spin_unlock_irqrestore(&priv->sta_lock, flags);
495}
496
497/**
498 * iwl4965_add_station_flags - Add station to tables in driver and device
499 */
500u8 iwl4965_add_station_flags(struct iwl4965_priv *priv, const u8 *addr, int is_ap, u8 flags)
501{
502 int i;
503 int index = IWL_INVALID_STATION;
504 struct iwl4965_station_entry *station;
505 unsigned long flags_spin;
506 DECLARE_MAC_BUF(mac);
507
508 spin_lock_irqsave(&priv->sta_lock, flags_spin);
509 if (is_ap)
510 index = IWL_AP_ID;
511 else if (is_broadcast_ether_addr(addr))
512 index = priv->hw_setting.bcast_sta_id;
513 else
514 for (i = IWL_STA_ID; i < priv->hw_setting.max_stations; i++) {
515 if (!compare_ether_addr(priv->stations[i].sta.sta.addr,
516 addr)) {
517 index = i;
518 break;
519 }
520
521 if (!priv->stations[i].used &&
522 index == IWL_INVALID_STATION)
523 index = i;
524 }
525
526
527 /* These two conditions have the same outcome, but keep them separate
528 since they have different meanings */
529 if (unlikely(index == IWL_INVALID_STATION)) {
530 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
531 return index;
532 }
533
534 if (priv->stations[index].used &&
535 !compare_ether_addr(priv->stations[index].sta.sta.addr, addr)) {
536 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
537 return index;
538 }
539
540
541 IWL_DEBUG_ASSOC("Add STA ID %d: %s\n", index, print_mac(mac, addr));
542 station = &priv->stations[index];
543 station->used = 1;
544 priv->num_stations++;
545
546 /* Set up the REPLY_ADD_STA command to send to device */
547 memset(&station->sta, 0, sizeof(struct iwl4965_addsta_cmd));
548 memcpy(station->sta.sta.addr, addr, ETH_ALEN);
549 station->sta.mode = 0;
550 station->sta.sta.sta_id = index;
551 station->sta.station_flags = 0;
552
553#ifdef CONFIG_IWL4965_HT
554 /* BCAST station and IBSS stations do not work in HT mode */
555 if (index != priv->hw_setting.bcast_sta_id &&
556 priv->iw_mode != IEEE80211_IF_TYPE_IBSS)
557 iwl4965_set_ht_add_station(priv, index);
558#endif /*CONFIG_IWL4965_HT*/
559
560 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
561
562 /* Add station to device's station table */
563 iwl4965_send_add_station(priv, &station->sta, flags);
564 return index;
565
566}
567
568/*************** DRIVER STATUS FUNCTIONS *****/
569
570static inline int iwl4965_is_ready(struct iwl4965_priv *priv)
571{
572 /* The adapter is 'ready' if READY and GEO_CONFIGURED bits are
573 * set but EXIT_PENDING is not */
574 return test_bit(STATUS_READY, &priv->status) &&
575 test_bit(STATUS_GEO_CONFIGURED, &priv->status) &&
576 !test_bit(STATUS_EXIT_PENDING, &priv->status);
577}
578
579static inline int iwl4965_is_alive(struct iwl4965_priv *priv)
580{
581 return test_bit(STATUS_ALIVE, &priv->status);
582}
583
584static inline int iwl4965_is_init(struct iwl4965_priv *priv)
585{
586 return test_bit(STATUS_INIT, &priv->status);
587}
588
589static inline int iwl4965_is_rfkill(struct iwl4965_priv *priv)
590{
591 return test_bit(STATUS_RF_KILL_HW, &priv->status) ||
592 test_bit(STATUS_RF_KILL_SW, &priv->status);
593}
594
595static inline int iwl4965_is_ready_rf(struct iwl4965_priv *priv)
596{
597
598 if (iwl4965_is_rfkill(priv))
599 return 0;
600
601 return iwl4965_is_ready(priv);
602}
603
604/*************** HOST COMMAND QUEUE FUNCTIONS *****/
605
606#define IWL_CMD(x) case x : return #x
607
608static const char *get_cmd_string(u8 cmd)
609{
610 switch (cmd) {
611 IWL_CMD(REPLY_ALIVE);
612 IWL_CMD(REPLY_ERROR);
613 IWL_CMD(REPLY_RXON);
614 IWL_CMD(REPLY_RXON_ASSOC);
615 IWL_CMD(REPLY_QOS_PARAM);
616 IWL_CMD(REPLY_RXON_TIMING);
617 IWL_CMD(REPLY_ADD_STA);
618 IWL_CMD(REPLY_REMOVE_STA);
619 IWL_CMD(REPLY_REMOVE_ALL_STA);
620 IWL_CMD(REPLY_TX);
621 IWL_CMD(REPLY_RATE_SCALE);
622 IWL_CMD(REPLY_LEDS_CMD);
623 IWL_CMD(REPLY_TX_LINK_QUALITY_CMD);
624 IWL_CMD(RADAR_NOTIFICATION);
625 IWL_CMD(REPLY_QUIET_CMD);
626 IWL_CMD(REPLY_CHANNEL_SWITCH);
627 IWL_CMD(CHANNEL_SWITCH_NOTIFICATION);
628 IWL_CMD(REPLY_SPECTRUM_MEASUREMENT_CMD);
629 IWL_CMD(SPECTRUM_MEASURE_NOTIFICATION);
630 IWL_CMD(POWER_TABLE_CMD);
631 IWL_CMD(PM_SLEEP_NOTIFICATION);
632 IWL_CMD(PM_DEBUG_STATISTIC_NOTIFIC);
633 IWL_CMD(REPLY_SCAN_CMD);
634 IWL_CMD(REPLY_SCAN_ABORT_CMD);
635 IWL_CMD(SCAN_START_NOTIFICATION);
636 IWL_CMD(SCAN_RESULTS_NOTIFICATION);
637 IWL_CMD(SCAN_COMPLETE_NOTIFICATION);
638 IWL_CMD(BEACON_NOTIFICATION);
639 IWL_CMD(REPLY_TX_BEACON);
640 IWL_CMD(WHO_IS_AWAKE_NOTIFICATION);
641 IWL_CMD(QUIET_NOTIFICATION);
642 IWL_CMD(REPLY_TX_PWR_TABLE_CMD);
643 IWL_CMD(MEASURE_ABORT_NOTIFICATION);
644 IWL_CMD(REPLY_BT_CONFIG);
645 IWL_CMD(REPLY_STATISTICS_CMD);
646 IWL_CMD(STATISTICS_NOTIFICATION);
647 IWL_CMD(REPLY_CARD_STATE_CMD);
648 IWL_CMD(CARD_STATE_NOTIFICATION);
649 IWL_CMD(MISSED_BEACONS_NOTIFICATION);
650 IWL_CMD(REPLY_CT_KILL_CONFIG_CMD);
651 IWL_CMD(SENSITIVITY_CMD);
652 IWL_CMD(REPLY_PHY_CALIBRATION_CMD);
653 IWL_CMD(REPLY_RX_PHY_CMD);
654 IWL_CMD(REPLY_RX_MPDU_CMD);
655 IWL_CMD(REPLY_4965_RX);
656 IWL_CMD(REPLY_COMPRESSED_BA);
657 default:
658 return "UNKNOWN";
659
660 }
661}
662
663#define HOST_COMPLETE_TIMEOUT (HZ / 2)
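/* Editor's note: HZ / 2 jiffies is 500 ms, independent of CONFIG_HZ. */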
664
665/**
666 * iwl4965_enqueue_hcmd - enqueue a uCode command
667 * @priv: device private data pointer
668 * @cmd: a pointer to the ucode command structure
669 *
670 * The function returns < 0 values to indicate that the operation
671 * failed. On success, it returns the index (> 0) of the command in the
672 * command queue.
673 */
674static int iwl4965_enqueue_hcmd(struct iwl4965_priv *priv, struct iwl4965_host_cmd *cmd)
675{
676 struct iwl4965_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
677 struct iwl4965_queue *q = &txq->q;
678 struct iwl4965_tfd_frame *tfd;
679 u32 *control_flags;
680 struct iwl4965_cmd *out_cmd;
681 u32 idx;
682 u16 fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));
683 dma_addr_t phys_addr;
684 int ret;
685 unsigned long flags;
686
687 /* If any of the command structures end up being larger than
688 * the TFD_MAX_PAYLOAD_SIZE, and it sent as a 'small' command then
689 * we will need to increase the size of the TFD entries */
690 BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
691 !(cmd->meta.flags & CMD_SIZE_HUGE));
692
693 if (iwl4965_queue_space(q) < ((cmd->meta.flags & CMD_ASYNC) ? 2 : 1)) {
694 IWL_ERROR("No space for Tx\n");
695 return -ENOSPC;
696 }
697
698 spin_lock_irqsave(&priv->hcmd_lock, flags);
699
700 tfd = &txq->bd[q->write_ptr];
701 memset(tfd, 0, sizeof(*tfd));
702
703 control_flags = (u32 *) tfd;
704
705 idx = get_cmd_index(q, q->write_ptr, cmd->meta.flags & CMD_SIZE_HUGE);
706 out_cmd = &txq->cmd[idx];
707
708 out_cmd->hdr.cmd = cmd->id;
709 memcpy(&out_cmd->meta, &cmd->meta, sizeof(cmd->meta));
710 memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);
711
712 /* At this point, the out_cmd now has all of the incoming cmd
713 * information */
714
715 out_cmd->hdr.flags = 0;
716 out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) |
717 INDEX_TO_SEQ(q->write_ptr));
718 if (out_cmd->meta.flags & CMD_SIZE_HUGE)
719 out_cmd->hdr.sequence |= cpu_to_le16(SEQ_HUGE_FRAME);
720
721 phys_addr = txq->dma_addr_cmd + sizeof(txq->cmd[0]) * idx +
722 offsetof(struct iwl4965_cmd, hdr);
723 iwl4965_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, fix_size);
724
725 IWL_DEBUG_HC("Sending command %s (#%x), seq: 0x%04X, "
726 "%d bytes at %d[%d]:%d\n",
727 get_cmd_string(out_cmd->hdr.cmd),
728 out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
729 fix_size, q->write_ptr, idx, IWL_CMD_QUEUE_NUM);
730
731 txq->need_update = 1;
732
733 /* Set up entry in queue's byte count circular buffer */
b481de9c 734 ret = iwl4965_tx_queue_update_wr_ptr(priv, txq, 0);
735
736 /* Increment and update queue's write index */
737 q->write_ptr = iwl4965_queue_inc_wrap(q->write_ptr, q->n_bd);
738 iwl4965_tx_queue_update_write_ptr(priv, txq);
739
740 spin_unlock_irqrestore(&priv->hcmd_lock, flags);
741 return ret ? ret : idx;
742}
743
744static int iwl4965_send_cmd_async(struct iwl4965_priv *priv, struct iwl4965_host_cmd *cmd)
745{
746 int ret;
747
748 BUG_ON(!(cmd->meta.flags & CMD_ASYNC));
749
750 /* An asynchronous command can not expect an SKB to be set. */
751 BUG_ON(cmd->meta.flags & CMD_WANT_SKB);
752
753 /* An asynchronous command MUST have a callback. */
754 BUG_ON(!cmd->meta.u.callback);
755
756 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
757 return -EBUSY;
758
759 ret = iwl4965_enqueue_hcmd(priv, cmd);
760 if (ret < 0) {
761 IWL_ERROR("Error sending %s: iwl4965_enqueue_hcmd failed: %d\n",
762 get_cmd_string(cmd->id), ret);
763 return ret;
764 }
765 return 0;
766}
767
768static int iwl4965_send_cmd_sync(struct iwl4965_priv *priv, struct iwl4965_host_cmd *cmd)
769{
770 int cmd_idx;
771 int ret;
772 static atomic_t entry = ATOMIC_INIT(0); /* reentrance protection */
773
774 BUG_ON(cmd->meta.flags & CMD_ASYNC);
775
776 /* A synchronous command can not have a callback set. */
777 BUG_ON(cmd->meta.u.callback != NULL);
778
779 if (atomic_xchg(&entry, 1)) {
780 IWL_ERROR("Error sending %s: Already sending a host command\n",
781 get_cmd_string(cmd->id));
782 return -EBUSY;
783 }
784
785 set_bit(STATUS_HCMD_ACTIVE, &priv->status);
786
787 if (cmd->meta.flags & CMD_WANT_SKB)
788 cmd->meta.source = &cmd->meta;
789
790 cmd_idx = iwl4965_enqueue_hcmd(priv, cmd);
791 if (cmd_idx < 0) {
792 ret = cmd_idx;
793 IWL_ERROR("Error sending %s: iwl4965_enqueue_hcmd failed: %d\n",
794 get_cmd_string(cmd->id), ret);
795 goto out;
796 }
797
798 ret = wait_event_interruptible_timeout(priv->wait_command_queue,
799 !test_bit(STATUS_HCMD_ACTIVE, &priv->status),
800 HOST_COMPLETE_TIMEOUT);
801 if (!ret) {
802 if (test_bit(STATUS_HCMD_ACTIVE, &priv->status)) {
803 IWL_ERROR("Error sending %s: time out after %dms.\n",
804 get_cmd_string(cmd->id),
805 jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
806
807 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
808 ret = -ETIMEDOUT;
809 goto cancel;
810 }
811 }
812
813 if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
814 IWL_DEBUG_INFO("Command %s aborted: RF KILL Switch\n",
815 get_cmd_string(cmd->id));
816 ret = -ECANCELED;
817 goto fail;
818 }
819 if (test_bit(STATUS_FW_ERROR, &priv->status)) {
820 IWL_DEBUG_INFO("Command %s failed: FW Error\n",
821 get_cmd_string(cmd->id));
822 ret = -EIO;
823 goto fail;
824 }
825 if ((cmd->meta.flags & CMD_WANT_SKB) && !cmd->meta.u.skb) {
826 IWL_ERROR("Error: Response NULL in '%s'\n",
827 get_cmd_string(cmd->id));
828 ret = -EIO;
829 goto out;
830 }
831
832 ret = 0;
833 goto out;
834
835cancel:
836 if (cmd->meta.flags & CMD_WANT_SKB) {
837 struct iwl4965_cmd *qcmd;
838
839 /* Cancel the CMD_WANT_SKB flag for the cmd in the
840 * TX cmd queue. Otherwise in case the cmd comes
841 * in later, it will possibly set an invalid
842 * address (cmd->meta.source). */
843 qcmd = &priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_idx];
844 qcmd->meta.flags &= ~CMD_WANT_SKB;
845 }
846fail:
847 if (cmd->meta.u.skb) {
848 dev_kfree_skb_any(cmd->meta.u.skb);
849 cmd->meta.u.skb = NULL;
850 }
851out:
852 atomic_set(&entry, 0);
853 return ret;
854}
855
856int iwl4965_send_cmd(struct iwl4965_priv *priv, struct iwl4965_host_cmd *cmd)
857{
858 if (cmd->meta.flags & CMD_ASYNC)
859 return iwl4965_send_cmd_async(priv, cmd);
860
861 return iwl4965_send_cmd_sync(priv, cmd);
862}
863
864int iwl4965_send_cmd_pdu(struct iwl4965_priv *priv, u8 id, u16 len, const void *data)
865{
866 struct iwl4965_host_cmd cmd = {
867 .id = id,
868 .len = len,
869 .data = data,
870 };
871
872 return iwl4965_send_cmd_sync(priv, &cmd);
873}
874
875static int __must_check iwl4965_send_cmd_u32(struct iwl4965_priv *priv, u8 id, u32 val)
876{
877 struct iwl4965_host_cmd cmd = {
878 .id = id,
879 .len = sizeof(val),
880 .data = &val,
881 };
882
883 return iwl4965_send_cmd_sync(priv, &cmd);
884}
885
886int iwl4965_send_statistics_request(struct iwl4965_priv *priv)
887{
888 return iwl4965_send_cmd_u32(priv, REPLY_STATISTICS_CMD, 0);
889}
890
891/**
892 * iwl4965_rxon_add_station - add station into station table.
893 *
894 * there is only one AP station with id= IWL_AP_ID
895 * NOTE: mutex must be held before calling this function
896 */
897static int iwl4965_rxon_add_station(struct iwl4965_priv *priv,
898 const u8 *addr, int is_ap)
899{
900 u8 sta_id;
901
902 /* Add station to device's station table */
903 sta_id = iwl4965_add_station_flags(priv, addr, is_ap, 0);
904
905 /* Set up default rate scaling table in device's station table */
906 iwl4965_add_station(priv, addr, is_ap);
907
908 return sta_id;
909}
910
911/**
912 * iwl4965_set_rxon_channel - Set the phymode and channel values in staging RXON
913 * @phymode: MODE_IEEE80211A sets to 5.2GHz; all else set to 2.4GHz
914 * @channel: Any channel valid for the requested phymode
915
916 * In addition to setting the staging RXON, priv->phymode is also set.
917 *
918 * NOTE: Does not commit to the hardware; it sets appropriate bit fields
919 * in the staging RXON flag structure based on the phymode
920 */
921static int iwl4965_set_rxon_channel(struct iwl4965_priv *priv, u8 phymode,
922 u16 channel)
923{
924 if (!iwl4965_get_channel_info(priv, phymode, channel)) {
925 IWL_DEBUG_INFO("Could not set channel to %d [%d]\n",
926 channel, phymode);
927 return -EINVAL;
928 }
929
930 if ((le16_to_cpu(priv->staging_rxon.channel) == channel) &&
931 (priv->phymode == phymode))
932 return 0;
933
934 priv->staging_rxon.channel = cpu_to_le16(channel);
935 if (phymode == MODE_IEEE80211A)
936 priv->staging_rxon.flags &= ~RXON_FLG_BAND_24G_MSK;
937 else
938 priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK;
939
940 priv->phymode = phymode;
941
942 IWL_DEBUG_INFO("Staging channel set to %d [%d]\n", channel, phymode);
943
944 return 0;
945}
946
947/**
948 * iwl4965_check_rxon_cmd - verify that the RXON structure is valid
949 *
950 * NOTE: This is really only useful during development and can eventually
951 * be #ifdef'd out once the driver is stable and folks aren't actively
952 * making changes
953 */
954static int iwl4965_check_rxon_cmd(struct iwl4965_rxon_cmd *rxon)
955{
956 int error = 0;
957 int counter = 1;
958
959 if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
960 error |= le32_to_cpu(rxon->flags &
961 (RXON_FLG_TGJ_NARROW_BAND_MSK |
962 RXON_FLG_RADAR_DETECT_MSK));
963 if (error)
964 IWL_WARNING("check 24G fields %d | %d\n",
965 counter++, error);
966 } else {
967 error |= (rxon->flags & RXON_FLG_SHORT_SLOT_MSK) ?
968 0 : le32_to_cpu(RXON_FLG_SHORT_SLOT_MSK);
969 if (error)
970 IWL_WARNING("check 52 fields %d | %d\n",
971 counter++, error);
972 error |= le32_to_cpu(rxon->flags & RXON_FLG_CCK_MSK);
973 if (error)
974 IWL_WARNING("check 52 CCK %d | %d\n",
975 counter++, error);
976 }
977 error |= (rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1;
978 if (error)
979 IWL_WARNING("check mac addr %d | %d\n", counter++, error);
980
981 /* make sure basic rates 6Mbps and 1Mbps are supported */
982 error |= (((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0) &&
983 ((rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0));
984 if (error)
985 IWL_WARNING("check basic rate %d | %d\n", counter++, error);
986
987 error |= (le16_to_cpu(rxon->assoc_id) > 2007);
988 if (error)
989 IWL_WARNING("check assoc id %d | %d\n", counter++, error);
990
991 error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK))
992 == (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK));
993 if (error)
994 IWL_WARNING("check CCK and short slot %d | %d\n",
995 counter++, error);
996
997 error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
998 == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK));
999 if (error)
1000 IWL_WARNING("check CCK & auto detect %d | %d\n",
1001 counter++, error);
1002
1003 error |= ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK |
1004 RXON_FLG_TGG_PROTECT_MSK)) == RXON_FLG_TGG_PROTECT_MSK);
1005 if (error)
1006 IWL_WARNING("check TGG and auto detect %d | %d\n",
1007 counter++, error);
1008
1009 if (error)
1010 IWL_WARNING("Tuning to channel %d\n",
1011 le16_to_cpu(rxon->channel));
1012
1013 if (error) {
1014 IWL_ERROR("Not a valid iwl4965_rxon_assoc_cmd field values\n");
1015 return -1;
1016 }
1017 return 0;
1018}
1019
1020/**
1021 * iwl4965_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
1022 * @priv: staging_rxon is compared to active_rxon
1023 *
1024 * If the RXON structure is changing enough to require a new tune,
1025 * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
1026 * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
1027 */
1028static int iwl4965_full_rxon_required(struct iwl4965_priv *priv)
1029{
1030
1031 /* These items are only settable from the full RXON command */
1032 if (!(priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) ||
1033 compare_ether_addr(priv->staging_rxon.bssid_addr,
1034 priv->active_rxon.bssid_addr) ||
1035 compare_ether_addr(priv->staging_rxon.node_addr,
1036 priv->active_rxon.node_addr) ||
1037 compare_ether_addr(priv->staging_rxon.wlap_bssid_addr,
1038 priv->active_rxon.wlap_bssid_addr) ||
1039 (priv->staging_rxon.dev_type != priv->active_rxon.dev_type) ||
1040 (priv->staging_rxon.channel != priv->active_rxon.channel) ||
1041 (priv->staging_rxon.air_propagation !=
1042 priv->active_rxon.air_propagation) ||
1043 (priv->staging_rxon.ofdm_ht_single_stream_basic_rates !=
1044 priv->active_rxon.ofdm_ht_single_stream_basic_rates) ||
1045 (priv->staging_rxon.ofdm_ht_dual_stream_basic_rates !=
1046 priv->active_rxon.ofdm_ht_dual_stream_basic_rates) ||
1047 (priv->staging_rxon.rx_chain != priv->active_rxon.rx_chain) ||
1048 (priv->staging_rxon.assoc_id != priv->active_rxon.assoc_id))
1049 return 1;
1050
1051 /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
1052 * be updated with the RXON_ASSOC command -- however only some
1053 * flag transitions are allowed using RXON_ASSOC */
1054
1055 /* Check if we are not switching bands */
1056 if ((priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) !=
1057 (priv->active_rxon.flags & RXON_FLG_BAND_24G_MSK))
1058 return 1;
1059
1060 /* Check if we are switching association toggle */
1061 if ((priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) !=
1062 (priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK))
1063 return 1;
1064
1065 return 0;
1066}
1067
1068static int iwl4965_send_rxon_assoc(struct iwl4965_priv *priv)
1069{
1070 int rc = 0;
1071 struct iwl4965_rx_packet *res = NULL;
1072 struct iwl4965_rxon_assoc_cmd rxon_assoc;
1073 struct iwl4965_host_cmd cmd = {
1074 .id = REPLY_RXON_ASSOC,
1075 .len = sizeof(rxon_assoc),
1076 .meta.flags = CMD_WANT_SKB,
1077 .data = &rxon_assoc,
1078 };
1079 const struct iwl4965_rxon_cmd *rxon1 = &priv->staging_rxon;
1080 const struct iwl4965_rxon_cmd *rxon2 = &priv->active_rxon;
1081
1082 if ((rxon1->flags == rxon2->flags) &&
1083 (rxon1->filter_flags == rxon2->filter_flags) &&
1084 (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
1085 (rxon1->ofdm_ht_single_stream_basic_rates ==
1086 rxon2->ofdm_ht_single_stream_basic_rates) &&
1087 (rxon1->ofdm_ht_dual_stream_basic_rates ==
1088 rxon2->ofdm_ht_dual_stream_basic_rates) &&
1089 (rxon1->rx_chain == rxon2->rx_chain) &&
1090 (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
1091 IWL_DEBUG_INFO("Using current RXON_ASSOC. Not resending.\n");
1092 return 0;
1093 }
1094
1095 rxon_assoc.flags = priv->staging_rxon.flags;
1096 rxon_assoc.filter_flags = priv->staging_rxon.filter_flags;
1097 rxon_assoc.ofdm_basic_rates = priv->staging_rxon.ofdm_basic_rates;
1098 rxon_assoc.cck_basic_rates = priv->staging_rxon.cck_basic_rates;
1099 rxon_assoc.reserved = 0;
1100 rxon_assoc.ofdm_ht_single_stream_basic_rates =
1101 priv->staging_rxon.ofdm_ht_single_stream_basic_rates;
1102 rxon_assoc.ofdm_ht_dual_stream_basic_rates =
1103 priv->staging_rxon.ofdm_ht_dual_stream_basic_rates;
1104 rxon_assoc.rx_chain_select_flags = priv->staging_rxon.rx_chain;
1105
1106 rc = iwl4965_send_cmd_sync(priv, &cmd);
1107 if (rc)
1108 return rc;
1109
1110 res = (struct iwl4965_rx_packet *)cmd.meta.u.skb->data;
1111 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
1112 IWL_ERROR("Bad return from REPLY_RXON_ASSOC command\n");
1113 rc = -EIO;
1114 }
1115
1116 priv->alloc_rxb_skb--;
1117 dev_kfree_skb_any(cmd.meta.u.skb);
1118
1119 return rc;
1120}
1121
1122/**
1123 * iwl4965_commit_rxon - commit staging_rxon to hardware
1124 *
1125 * The RXON command in staging_rxon is committed to the hardware and
1126 * the active_rxon structure is updated with the new data. This
1127 * function correctly transitions out of the RXON_ASSOC_MSK state if
1128 * a HW tune is required based on the RXON structure changes.
1129 */
1130static int iwl4965_commit_rxon(struct iwl4965_priv *priv)
1131{
1132 /* cast away the const for active_rxon in this function */
1133 struct iwl4965_rxon_cmd *active_rxon = (void *)&priv->active_rxon;
1134 DECLARE_MAC_BUF(mac);
1135 int rc = 0;
1136
1137 if (!iwl4965_is_alive(priv))
1138 return -1;
1139
1140 /* always get timestamp with Rx frame */
1141 priv->staging_rxon.flags |= RXON_FLG_TSF2HOST_MSK;
1142
1143 rc = iwl4965_check_rxon_cmd(&priv->staging_rxon);
1144 if (rc) {
1145 IWL_ERROR("Invalid RXON configuration. Not committing.\n");
1146 return -EINVAL;
1147 }
1148
1149 /* If we don't need to send a full RXON, we can use
1150 * iwl4965_rxon_assoc_cmd which is used to reconfigure filter
1151 * and other flags for the current radio configuration. */
1152 if (!iwl4965_full_rxon_required(priv)) {
1153 rc = iwl4965_send_rxon_assoc(priv);
1154 if (rc) {
1155 IWL_ERROR("Error setting RXON_ASSOC "
1156 "configuration (%d).\n", rc);
1157 return rc;
1158 }
1159
1160 memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon));
1161
1162 return 0;
1163 }
1164
1165 /* station table will be cleared */
1166 priv->assoc_station_added = 0;
1167
1168#ifdef CONFIG_IWL4965_SENSITIVITY
1169 priv->sensitivity_data.state = IWL_SENS_CALIB_NEED_REINIT;
1170 if (!priv->error_recovering)
1171 priv->start_calib = 0;
1172
1173 iwl4965_init_sensitivity(priv, CMD_ASYNC, 1);
1174#endif /* CONFIG_IWL4965_SENSITIVITY */
1175
1176 /* If we are currently associated and the new config requires
1177 * an RXON_ASSOC and the new config wants the associated mask enabled,
1178 * we must clear the associated from the active configuration
1179 * before we apply the new config */
1180 if (iwl4965_is_associated(priv) &&
1181 (priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK)) {
1182 IWL_DEBUG_INFO("Toggling associated bit on current RXON\n");
1183 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
1184
1185 rc = iwl4965_send_cmd_pdu(priv, REPLY_RXON,
1186 sizeof(struct iwl4965_rxon_cmd),
1187 &priv->active_rxon);
1188
1189 /* If the mask clearing failed then we set
1190 * active_rxon back to what it was previously */
1191 if (rc) {
1192 active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
1193 IWL_ERROR("Error clearing ASSOC_MSK on current "
1194 "configuration (%d).\n", rc);
1195 return rc;
1196 }
1197 }
1198
1199 IWL_DEBUG_INFO("Sending RXON\n"
1200 "* with%s RXON_FILTER_ASSOC_MSK\n"
1201 "* channel = %d\n"
1202 "* bssid = %s\n",
1203 ((priv->staging_rxon.filter_flags &
1204 RXON_FILTER_ASSOC_MSK) ? "" : "out"),
1205 le16_to_cpu(priv->staging_rxon.channel),
1206 print_mac(mac, priv->staging_rxon.bssid_addr));
1207
1208 /* Apply the new configuration */
1209 rc = iwl4965_send_cmd_pdu(priv, REPLY_RXON,
1210 sizeof(struct iwl4965_rxon_cmd), &priv->staging_rxon);
1211 if (rc) {
1212 IWL_ERROR("Error setting new configuration (%d).\n", rc);
1213 return rc;
1214 }
1215
1216 iwl4965_clear_stations_table(priv);
1217
1218#ifdef CONFIG_IWL4965_SENSITIVITY
1219 if (!priv->error_recovering)
1220 priv->start_calib = 0;
1221
1222 priv->sensitivity_data.state = IWL_SENS_CALIB_NEED_REINIT;
1223 iwl4965_init_sensitivity(priv, CMD_ASYNC, 1);
1224#endif /* CONFIG_IWL4965_SENSITIVITY */
1225
1226 memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon));
1227
1228 /* If we issue a new RXON command which required a tune then we must
1229 * send a new TXPOWER command or we won't be able to Tx any frames */
1230 rc = iwl4965_hw_reg_send_txpower(priv);
1231 if (rc) {
1232 IWL_ERROR("Error setting Tx power (%d).\n", rc);
1233 return rc;
1234 }
1235
1236 /* Add the broadcast address so we can send broadcast frames */
1237 if (iwl4965_rxon_add_station(priv, iwl4965_broadcast_addr, 0) ==
1238 IWL_INVALID_STATION) {
1239 IWL_ERROR("Error adding BROADCAST address for transmit.\n");
1240 return -EIO;
1241 }
1242
1243 /* If we have set the ASSOC_MSK and we are in BSS mode then
1244 * add the IWL_AP_ID to the station rate table */
1245 if (iwl4965_is_associated(priv) &&
1246 (priv->iw_mode == IEEE80211_IF_TYPE_STA)) {
1247 if (iwl4965_rxon_add_station(priv, priv->active_rxon.bssid_addr, 1)
1248 == IWL_INVALID_STATION) {
1249 IWL_ERROR("Error adding AP address for transmit.\n");
1250 return -EIO;
1251 }
1252 priv->assoc_station_added = 1;
1253 }
1254
1255 return 0;
1256}
1257
1258static int iwl4965_send_bt_config(struct iwl4965_priv *priv)
1259{
1260 struct iwl4965_bt_cmd bt_cmd = {
1261 .flags = 3,
1262 .lead_time = 0xAA,
1263 .max_kill = 1,
1264 .kill_ack_mask = 0,
1265 .kill_cts_mask = 0,
1266 };
1267
1268 return iwl4965_send_cmd_pdu(priv, REPLY_BT_CONFIG,
1269 sizeof(struct iwl4965_bt_cmd), &bt_cmd);
1270}
1271
1272static int iwl4965_send_scan_abort(struct iwl4965_priv *priv)
1273{
1274 int rc = 0;
1275 struct iwl4965_rx_packet *res;
1276 struct iwl4965_host_cmd cmd = {
1277 .id = REPLY_SCAN_ABORT_CMD,
1278 .meta.flags = CMD_WANT_SKB,
1279 };
1280
1281 /* If there isn't a scan actively going on in the hardware
1282 * then we are in between scan bands and not actually
1283 * actively scanning, so don't send the abort command */
1284 if (!test_bit(STATUS_SCAN_HW, &priv->status)) {
1285 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
1286 return 0;
1287 }
1288
1289 rc = iwl4965_send_cmd_sync(priv, &cmd);
1290 if (rc) {
1291 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
1292 return rc;
1293 }
1294
1295 res = (struct iwl4965_rx_packet *)cmd.meta.u.skb->data;
1296 if (res->u.status != CAN_ABORT_STATUS) {
1297 /* The scan abort will return 1 for success or
1298 * 2 for "failure". A failure condition can be
1299 * due to simply not being in an active scan which
1300 * can occur if we send the scan abort before
1301 * the microcode has notified us that a scan is
1302 * completed. */
1303 IWL_DEBUG_INFO("SCAN_ABORT returned %d.\n", res->u.status);
1304 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
1305 clear_bit(STATUS_SCAN_HW, &priv->status);
1306 }
1307
1308 dev_kfree_skb_any(cmd.meta.u.skb);
1309
1310 return rc;
1311}
1312
1313static int iwl4965_card_state_sync_callback(struct iwl4965_priv *priv,
1314 struct iwl4965_cmd *cmd,
1315 struct sk_buff *skb)
1316{
1317 return 1;
1318}
1319
1320/*
1321 * CARD_STATE_CMD
1322 *
1323 * Use: Sets the device's internal card state to enable, disable, or halt
1324 *
1325 * When in the 'enable' state the card operates as normal.
1326 * When in the 'disable' state, the card enters into a low power mode.
1327 * When in the 'halt' state, the card is shut down and must be fully
1328 * restarted to come back on.
1329 */
1330static int iwl4965_send_card_state(struct iwl4965_priv *priv, u32 flags, u8 meta_flag)
1331{
1332 struct iwl4965_host_cmd cmd = {
1333 .id = REPLY_CARD_STATE_CMD,
1334 .len = sizeof(u32),
1335 .data = &flags,
1336 .meta.flags = meta_flag,
1337 };
1338
1339 if (meta_flag & CMD_ASYNC)
1340 cmd.meta.u.callback = iwl4965_card_state_sync_callback;
1341
1342 return iwl4965_send_cmd(priv, &cmd);
1343}
1344
1345static int iwl4965_add_sta_sync_callback(struct iwl4965_priv *priv,
1346 struct iwl4965_cmd *cmd, struct sk_buff *skb)
1347{
1348 struct iwl4965_rx_packet *res = NULL;
1349
1350 if (!skb) {
1351 IWL_ERROR("Error: Response NULL in REPLY_ADD_STA.\n");
1352 return 1;
1353 }
1354
1355 res = (struct iwl4965_rx_packet *)skb->data;
1356 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
1357 IWL_ERROR("Bad return from REPLY_ADD_STA (0x%08X)\n",
1358 res->hdr.flags);
1359 return 1;
1360 }
1361
1362 switch (res->u.add_sta.status) {
1363 case ADD_STA_SUCCESS_MSK:
1364 break;
1365 default:
1366 break;
1367 }
1368
1369 /* We didn't cache the SKB; let the caller free it */
1370 return 1;
1371}
1372
1373int iwl4965_send_add_station(struct iwl4965_priv *priv,
1374 struct iwl4965_addsta_cmd *sta, u8 flags)
1375{
1376 struct iwl4965_rx_packet *res = NULL;
1377 int rc = 0;
1378 struct iwl4965_host_cmd cmd = {
1379 .id = REPLY_ADD_STA,
1380 .len = sizeof(struct iwl4965_addsta_cmd),
1381 .meta.flags = flags,
1382 .data = sta,
1383 };
1384
1385 if (flags & CMD_ASYNC)
1386 cmd.meta.u.callback = iwl4965_add_sta_sync_callback;
1387 else
1388 cmd.meta.flags |= CMD_WANT_SKB;
1389
1390 rc = iwl4965_send_cmd(priv, &cmd);
1391
1392 if (rc || (flags & CMD_ASYNC))
1393 return rc;
1394
1395 res = (struct iwl4965_rx_packet *)cmd.meta.u.skb->data;
1396 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
1397 IWL_ERROR("Bad return from REPLY_ADD_STA (0x%08X)\n",
1398 res->hdr.flags);
1399 rc = -EIO;
1400 }
1401
1402 if (rc == 0) {
1403 switch (res->u.add_sta.status) {
1404 case ADD_STA_SUCCESS_MSK:
1405 IWL_DEBUG_INFO("REPLY_ADD_STA PASSED\n");
1406 break;
1407 default:
1408 rc = -EIO;
1409 IWL_WARNING("REPLY_ADD_STA failed\n");
1410 break;
1411 }
1412 }
1413
1414 priv->alloc_rxb_skb--;
1415 dev_kfree_skb_any(cmd.meta.u.skb);
1416
1417 return rc;
1418}
1419
1420static int iwl4965_update_sta_key_info(struct iwl4965_priv *priv,
1421 struct ieee80211_key_conf *keyconf,
1422 u8 sta_id)
1423{
1424 unsigned long flags;
1425 __le16 key_flags = 0;
1426
1427 switch (keyconf->alg) {
1428 case ALG_CCMP:
1429 key_flags |= STA_KEY_FLG_CCMP;
1430 key_flags |= cpu_to_le16(
1431 keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
1432 key_flags &= ~STA_KEY_FLG_INVALID;
1433 break;
1434 case ALG_TKIP:
1435 case ALG_WEP:
1436 default:
1437 return -EINVAL;
1438 }
1439 spin_lock_irqsave(&priv->sta_lock, flags);
1440 priv->stations[sta_id].keyinfo.alg = keyconf->alg;
1441 priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
1442 memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key,
1443 keyconf->keylen);
1444
1445 memcpy(priv->stations[sta_id].sta.key.key, keyconf->key,
1446 keyconf->keylen);
1447 priv->stations[sta_id].sta.key.key_flags = key_flags;
1448 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
1449 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
1450
1451 spin_unlock_irqrestore(&priv->sta_lock, flags);
1452
1453 IWL_DEBUG_INFO("hwcrypto: modify ucode station key info\n");
1454 iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, 0);
1455 return 0;
1456}
1457
1458static int iwl4965_clear_sta_key_info(struct iwl4965_priv *priv, u8 sta_id)
1459{
1460 unsigned long flags;
1461
1462 spin_lock_irqsave(&priv->sta_lock, flags);
1463 memset(&priv->stations[sta_id].keyinfo, 0, sizeof(struct iwl4965_hw_key));
1464 memset(&priv->stations[sta_id].sta.key, 0, sizeof(struct iwl4965_keyinfo));
1465 priv->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC;
1466 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
1467 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
1468 spin_unlock_irqrestore(&priv->sta_lock, flags);
1469
1470 IWL_DEBUG_INFO("hwcrypto: clear ucode station key info\n");
1471 iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, 0);
1472 return 0;
1473}
1474
1475static void iwl4965_clear_free_frames(struct iwl4965_priv *priv)
1476{
1477 struct list_head *element;
1478
1479 IWL_DEBUG_INFO("%d frames on pre-allocated heap on clear.\n",
1480 priv->frames_count);
1481
1482 while (!list_empty(&priv->free_frames)) {
1483 element = priv->free_frames.next;
1484 list_del(element);
1485 kfree(list_entry(element, struct iwl4965_frame, list));
1486 priv->frames_count--;
1487 }
1488
1489 if (priv->frames_count) {
1490 IWL_WARNING("%d frames still in use. Did we lose one?\n",
1491 priv->frames_count);
1492 priv->frames_count = 0;
1493 }
1494}
1495
1496static struct iwl4965_frame *iwl4965_get_free_frame(struct iwl4965_priv *priv)
1497{
1498 struct iwl4965_frame *frame;
1499 struct list_head *element;
1500 if (list_empty(&priv->free_frames)) {
1501 frame = kzalloc(sizeof(*frame), GFP_KERNEL);
1502 if (!frame) {
1503 IWL_ERROR("Could not allocate frame!\n");
1504 return NULL;
1505 }
1506
1507 priv->frames_count++;
1508 return frame;
1509 }
1510
1511 element = priv->free_frames.next;
1512 list_del(element);
1513 return list_entry(element, struct iwl4965_frame, list);
1514}
1515
1516static void iwl4965_free_frame(struct iwl4965_priv *priv, struct iwl4965_frame *frame)
1517{
1518 memset(frame, 0, sizeof(*frame));
1519 list_add(&frame->list, &priv->free_frames);
1520}
1521
1522unsigned int iwl4965_fill_beacon_frame(struct iwl4965_priv *priv,
1523 struct ieee80211_hdr *hdr,
1524 const u8 *dest, int left)
1525{
1526
1527 if (!iwl4965_is_associated(priv) || !priv->ibss_beacon ||
1528 ((priv->iw_mode != IEEE80211_IF_TYPE_IBSS) &&
1529 (priv->iw_mode != IEEE80211_IF_TYPE_AP)))
1530 return 0;
1531
1532 if (priv->ibss_beacon->len > left)
1533 return 0;
1534
1535 memcpy(hdr, priv->ibss_beacon->data, priv->ibss_beacon->len);
1536
1537 return priv->ibss_beacon->len;
1538}
1539
1540int iwl4965_rate_index_from_plcp(int plcp)
1541{
1542 int i = 0;
1543
1544 /* 4965 HT rate format */
1545 if (plcp & RATE_MCS_HT_MSK) {
1546 i = (plcp & 0xff);
1547
1548 if (i >= IWL_RATE_MIMO_6M_PLCP)
1549 i = i - IWL_RATE_MIMO_6M_PLCP;
1550
1551 i += IWL_FIRST_OFDM_RATE;
1552 /* skip 9M not supported in ht*/
1553 if (i >= IWL_RATE_9M_INDEX)
1554 i += 1;
1555 if ((i >= IWL_FIRST_OFDM_RATE) &&
1556 (i <= IWL_LAST_OFDM_RATE))
1557 return i;
1558
1559 /* 4965 legacy rate format, search for match in table */
1560 } else {
1561 for (i = 0; i < ARRAY_SIZE(iwl4965_rates); i++)
1562 if (iwl4965_rates[i].plcp == (plcp &0xFF))
1563 return i;
1564 }
1565 return -1;
1566}
1567
1568static u8 iwl4965_rate_get_lowest_plcp(int rate_mask)
1569{
1570 u8 i;
1571
1572 for (i = IWL_RATE_1M_INDEX; i != IWL_RATE_INVALID;
1573 i = iwl4965_rates[i].next_ieee) {
1574 if (rate_mask & (1 << i))
1575 return iwl4965_rates[i].plcp;
1576 }
1577
1578 return IWL_RATE_INVALID;
1579}
1580
1581static int iwl4965_send_beacon_cmd(struct iwl4965_priv *priv)
1582{
1583 struct iwl4965_frame *frame;
1584 unsigned int frame_size;
1585 int rc;
1586 u8 rate;
1587
1588 frame = iwl4965_get_free_frame(priv);
1589
1590 if (!frame) {
1591 IWL_ERROR("Could not obtain free frame buffer for beacon "
1592 "command.\n");
1593 return -ENOMEM;
1594 }
1595
1596 if (!(priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK)) {
1597 rate = iwl4965_rate_get_lowest_plcp(priv->active_rate_basic &
1598 0xFF0);
1599 if (rate == IWL_INVALID_RATE)
1600 rate = IWL_RATE_6M_PLCP;
1601 } else {
1602 rate = iwl4965_rate_get_lowest_plcp(priv->active_rate_basic & 0xF);
1603 if (rate == IWL_INVALID_RATE)
1604 rate = IWL_RATE_1M_PLCP;
1605 }
1606
1607 frame_size = iwl4965_hw_get_beacon_cmd(priv, frame, rate);
1608
1609 rc = iwl4965_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
1610 &frame->u.cmd[0]);
1611
1612 iwl4965_free_frame(priv, frame);
1613
1614 return rc;
1615}
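/*
 * Editor's note -- on the rate masks above, assuming the usual iwl4965_rates[]
 * ordering (CCK 1/2/5.5/11M in bits 0-3, OFDM 6M and up from bit 4):
 *
 *	active_rate_basic & 0xF   -> lowest basic CCK rate (2.4 GHz beacon)
 *	active_rate_basic & 0xFF0 -> lowest basic OFDM rate (5.2 GHz beacon)
 *
 * with a fallback to the 1M / 6M PLCP when no basic rate is set.
 */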
1616
1617/******************************************************************************
1618 *
1619 * EEPROM related functions
1620 *
1621 ******************************************************************************/
1622
1623static void get_eeprom_mac(struct iwl4965_priv *priv, u8 *mac)
1624{
1625 memcpy(mac, priv->eeprom.mac_address, 6);
1626}
1627
1628/**
1629 * iwl4965_eeprom_init - read EEPROM contents
1630 *
1631 * Load the EEPROM contents from adapter into priv->eeprom
1632 *
1633 * NOTE: This routine uses the non-debug IO access functions.
1634 */
1635int iwl4965_eeprom_init(struct iwl4965_priv *priv)
1636{
1637 u16 *e = (u16 *)&priv->eeprom;
1638 u32 gp = iwl4965_read32(priv, CSR_EEPROM_GP);
1639 u32 r;
1640 int sz = sizeof(priv->eeprom);
1641 int rc;
1642 int i;
1643 u16 addr;
1644
1645 /* The EEPROM structure has several padding buffers within it
1646 * and when adding new EEPROM maps is subject to programmer errors
1647 * which may be very difficult to identify without explicitly
1648 * checking the resulting size of the eeprom map. */
1649 BUILD_BUG_ON(sizeof(priv->eeprom) != IWL_EEPROM_IMAGE_SIZE);
1650
1651 if ((gp & CSR_EEPROM_GP_VALID_MSK) == CSR_EEPROM_GP_BAD_SIGNATURE) {
1652 IWL_ERROR("EEPROM not found, EEPROM_GP=0x%08x", gp);
1653 return -ENOENT;
1654 }
1655
1656 /* Make sure driver (instead of uCode) is allowed to read EEPROM */
1657 rc = iwl4965_eeprom_acquire_semaphore(priv);
1658 if (rc < 0) {
1659 IWL_ERROR("Failed to acquire EEPROM semaphore.\n");
1660 return -ENOENT;
1661 }
1662
1663 /* eeprom is an array of 16bit values */
1664 for (addr = 0; addr < sz; addr += sizeof(u16)) {
1665 _iwl4965_write32(priv, CSR_EEPROM_REG, addr << 1);
1666 _iwl4965_clear_bit(priv, CSR_EEPROM_REG, CSR_EEPROM_REG_BIT_CMD);
1667
1668 for (i = 0; i < IWL_EEPROM_ACCESS_TIMEOUT;
1669 i += IWL_EEPROM_ACCESS_DELAY) {
1670 r = _iwl4965_read_direct32(priv, CSR_EEPROM_REG);
1671 if (r & CSR_EEPROM_REG_READ_VALID_MSK)
1672 break;
1673 udelay(IWL_EEPROM_ACCESS_DELAY);
1674 }
1675
1676 if (!(r & CSR_EEPROM_REG_READ_VALID_MSK)) {
1677 IWL_ERROR("Time out reading EEPROM[%d]", addr);
1678 rc = -ETIMEDOUT;
1679 goto done;
1680 }
1681 e[addr / 2] = le16_to_cpu(r >> 16);
1682 }
1683 rc = 0;
1684
1685done:
bb8c093b 1686 iwl4965_eeprom_release_semaphore(priv);
b481de9c
ZY
1687 return rc;
1688}
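/*
 * Worked example of the read loop above: the EEPROM is addressed one
 * 16-bit word at a time.  The byte address is written shifted left by one
 * into CSR_EEPROM_REG, and the device returns the word in the upper half
 * of the same register.  Byte address 0x10, for instance, is requested as
 * (0x10 << 1), and the returned (r >> 16) is stored at e[0x10 / 2], i.e.
 * word index 8 of priv->eeprom.
 */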
1689
1690/******************************************************************************
1691 *
1692 * Misc. internal state and helper functions
1693 *
1694 ******************************************************************************/
c8b0e6e1 1695#ifdef CONFIG_IWL4965_DEBUG
b481de9c
ZY
1696
1697/**
bb8c093b 1698 * iwl4965_report_frame - dump frame to syslog during debug sessions
b481de9c 1699 *
9fbab516 1700 * You may hack this function to show different aspects of received frames,
b481de9c
ZY
1701 * including selective frame dumps.
1702 * group100 parameter selects whether to show 1 out of 100 good frames.
1703 *
9fbab516
BC
1704 * TODO: This was originally written for 3945, need to audit for
1705 * proper operation with 4965.
b481de9c 1706 */
bb8c093b
CH
1707void iwl4965_report_frame(struct iwl4965_priv *priv,
1708 struct iwl4965_rx_packet *pkt,
b481de9c
ZY
1709 struct ieee80211_hdr *header, int group100)
1710{
1711 u32 to_us;
1712 u32 print_summary = 0;
1713 u32 print_dump = 0; /* set to 1 to dump all frames' contents */
1714 u32 hundred = 0;
1715 u32 dataframe = 0;
1716 u16 fc;
1717 u16 seq_ctl;
1718 u16 channel;
1719 u16 phy_flags;
1720 int rate_sym;
1721 u16 length;
1722 u16 status;
1723 u16 bcn_tmr;
1724 u32 tsf_low;
1725 u64 tsf;
1726 u8 rssi;
1727 u8 agc;
1728 u16 sig_avg;
1729 u16 noise_diff;
bb8c093b
CH
1730 struct iwl4965_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt);
1731 struct iwl4965_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
1732 struct iwl4965_rx_frame_end *rx_end = IWL_RX_END(pkt);
b481de9c
ZY
1733 u8 *data = IWL_RX_DATA(pkt);
1734
1735 /* MAC header */
1736 fc = le16_to_cpu(header->frame_control);
1737 seq_ctl = le16_to_cpu(header->seq_ctrl);
1738
1739 /* metadata */
1740 channel = le16_to_cpu(rx_hdr->channel);
1741 phy_flags = le16_to_cpu(rx_hdr->phy_flags);
1742 rate_sym = rx_hdr->rate;
1743 length = le16_to_cpu(rx_hdr->len);
1744
1745 /* end-of-frame status and timestamp */
1746 status = le32_to_cpu(rx_end->status);
1747 bcn_tmr = le32_to_cpu(rx_end->beacon_timestamp);
1748 tsf_low = le64_to_cpu(rx_end->timestamp) & 0x0ffffffff;
1749 tsf = le64_to_cpu(rx_end->timestamp);
1750
1751 /* signal statistics */
1752 rssi = rx_stats->rssi;
1753 agc = rx_stats->agc;
1754 sig_avg = le16_to_cpu(rx_stats->sig_avg);
1755 noise_diff = le16_to_cpu(rx_stats->noise_diff);
1756
1757 to_us = !compare_ether_addr(header->addr1, priv->mac_addr);
1758
1759 /* if data frame is to us and all is good,
1760 * (optionally) print summary for only 1 out of every 100 */
1761 if (to_us && (fc & ~IEEE80211_FCTL_PROTECTED) ==
1762 (IEEE80211_FCTL_FROMDS | IEEE80211_FTYPE_DATA)) {
1763 dataframe = 1;
1764 if (!group100)
1765 print_summary = 1; /* print each frame */
1766 else if (priv->framecnt_to_us < 100) {
1767 priv->framecnt_to_us++;
1768 print_summary = 0;
1769 } else {
1770 priv->framecnt_to_us = 0;
1771 print_summary = 1;
1772 hundred = 1;
1773 }
1774 } else {
1775 /* print summary for all other frames */
1776 print_summary = 1;
1777 }
1778
1779 if (print_summary) {
1780 char *title;
1781 u32 rate;
1782
1783 if (hundred)
1784 title = "100Frames";
1785 else if (fc & IEEE80211_FCTL_RETRY)
1786 title = "Retry";
1787 else if (ieee80211_is_assoc_response(fc))
1788 title = "AscRsp";
1789 else if (ieee80211_is_reassoc_response(fc))
1790 title = "RasRsp";
1791 else if (ieee80211_is_probe_response(fc)) {
1792 title = "PrbRsp";
1793 print_dump = 1; /* dump frame contents */
1794 } else if (ieee80211_is_beacon(fc)) {
1795 title = "Beacon";
1796 print_dump = 1; /* dump frame contents */
1797 } else if (ieee80211_is_atim(fc))
1798 title = "ATIM";
1799 else if (ieee80211_is_auth(fc))
1800 title = "Auth";
1801 else if (ieee80211_is_deauth(fc))
1802 title = "DeAuth";
1803 else if (ieee80211_is_disassoc(fc))
1804 title = "DisAssoc";
1805 else
1806 title = "Frame";
1807
bb8c093b 1808 rate = iwl4965_rate_index_from_plcp(rate_sym);
b481de9c
ZY
1809 if (rate == -1)
1810 rate = 0;
1811 else
bb8c093b 1812 rate = iwl4965_rates[rate].ieee / 2;
b481de9c
ZY
1813
1814 /* print frame summary.
1815 * MAC addresses show just the last byte (for brevity),
1816 * but you can hack it to show more, if you'd like to. */
1817 if (dataframe)
1818 IWL_DEBUG_RX("%s: mhd=0x%04x, dst=0x%02x, "
1819 "len=%u, rssi=%d, chnl=%d, rate=%u, \n",
1820 title, fc, header->addr1[5],
1821 length, rssi, channel, rate);
1822 else {
1823 /* src/dst addresses assume managed mode */
1824 IWL_DEBUG_RX("%s: 0x%04x, dst=0x%02x, "
1825 "src=0x%02x, rssi=%u, tim=%lu usec, "
1826 "phy=0x%02x, chnl=%d\n",
1827 title, fc, header->addr1[5],
1828 header->addr3[5], rssi,
1829 tsf_low - priv->scan_start_tsf,
1830 phy_flags, channel);
1831 }
1832 }
1833 if (print_dump)
bb8c093b 1834 iwl4965_print_hex_dump(IWL_DL_RX, data, length);
b481de9c
ZY
1835}
1836#endif
1837
bb8c093b 1838static void iwl4965_unset_hw_setting(struct iwl4965_priv *priv)
b481de9c
ZY
1839{
1840 if (priv->hw_setting.shared_virt)
1841 pci_free_consistent(priv->pci_dev,
bb8c093b 1842 sizeof(struct iwl4965_shared),
b481de9c
ZY
1843 priv->hw_setting.shared_virt,
1844 priv->hw_setting.shared_phys);
1845}
1846
1847/**
bb8c093b 1848 * iwl4965_supported_rate_to_ie - fill in the supported rate in IE field
b481de9c
ZY
1849 *
 1850 * Return: bitmap with a bit set for each rate actually inserted into the IE
1851 */
bb8c093b 1852static u16 iwl4965_supported_rate_to_ie(u8 *ie, u16 supported_rate,
c7c46676 1853 u16 basic_rate, int *left)
b481de9c
ZY
1854{
1855 u16 ret_rates = 0, bit;
1856 int i;
c7c46676
TW
1857 u8 *cnt = ie;
1858 u8 *rates = ie + 1;
b481de9c
ZY
1859
1860 for (bit = 1, i = 0; i < IWL_RATE_COUNT; i++, bit <<= 1) {
1861 if (bit & supported_rate) {
1862 ret_rates |= bit;
bb8c093b 1863 rates[*cnt] = iwl4965_rates[i].ieee |
c7c46676
TW
1864 ((bit & basic_rate) ? 0x80 : 0x00);
1865 (*cnt)++;
1866 (*left)--;
1867 if ((*left <= 0) ||
1868 (*cnt >= IWL_SUPPORTED_RATES_IE_LEN))
b481de9c
ZY
1869 break;
1870 }
1871 }
1872
1873 return ret_rates;
1874}
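/*
 * Example: if the 11 Mbps bit is set in both supported_rate and basic_rate,
 * the byte written into the IE is iwl4965_rates[i].ieee | 0x80, i.e.
 * 22 (11 Mbps in 500 kbps units) with the top bit marking it as a basic
 * rate, giving 0x96 -- the standard 802.11 Supported Rates encoding.
 */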
1875
c8b0e6e1 1876#ifdef CONFIG_IWL4965_HT
bb8c093b 1877static void iwl4965_set_ht_capab(struct ieee80211_hw *hw,
b481de9c
ZY
1878 struct ieee80211_ht_capability *ht_cap,
1879 u8 use_wide_chan);
1880#endif
1881
1882/**
bb8c093b 1883 * iwl4965_fill_probe_req - fill in all required fields and IE for probe request
b481de9c 1884 */
bb8c093b 1885static u16 iwl4965_fill_probe_req(struct iwl4965_priv *priv,
b481de9c
ZY
1886 struct ieee80211_mgmt *frame,
1887 int left, int is_direct)
1888{
1889 int len = 0;
1890 u8 *pos = NULL;
bee488db 1891 u16 active_rates, ret_rates, cck_rates, active_rate_basic;
b481de9c
ZY
1892
1893 /* Make sure there is enough space for the probe request,
1894 * two mandatory IEs and the data */
1895 left -= 24;
1896 if (left < 0)
1897 return 0;
1898 len += 24;
1899
1900 frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
bb8c093b 1901 memcpy(frame->da, iwl4965_broadcast_addr, ETH_ALEN);
b481de9c 1902 memcpy(frame->sa, priv->mac_addr, ETH_ALEN);
bb8c093b 1903 memcpy(frame->bssid, iwl4965_broadcast_addr, ETH_ALEN);
b481de9c
ZY
1904 frame->seq_ctrl = 0;
1905
1906 /* fill in our indirect SSID IE */
1907 /* ...next IE... */
1908
1909 left -= 2;
1910 if (left < 0)
1911 return 0;
1912 len += 2;
1913 pos = &(frame->u.probe_req.variable[0]);
1914 *pos++ = WLAN_EID_SSID;
1915 *pos++ = 0;
1916
1917 /* fill in our direct SSID IE... */
1918 if (is_direct) {
1919 /* ...next IE... */
1920 left -= 2 + priv->essid_len;
1921 if (left < 0)
1922 return 0;
1923 /* ... fill it in... */
1924 *pos++ = WLAN_EID_SSID;
1925 *pos++ = priv->essid_len;
1926 memcpy(pos, priv->essid, priv->essid_len);
1927 pos += priv->essid_len;
1928 len += 2 + priv->essid_len;
1929 }
1930
1931 /* fill in supported rate */
1932 /* ...next IE... */
1933 left -= 2;
1934 if (left < 0)
1935 return 0;
c7c46676 1936
b481de9c
ZY
1937 /* ... fill it in... */
1938 *pos++ = WLAN_EID_SUPP_RATES;
1939 *pos = 0;
c7c46676 1940
bee488db 1941 /* exclude 60M rate */
1942 active_rates = priv->rates_mask;
1943 active_rates &= ~IWL_RATE_60M_MASK;
1944
1945 active_rate_basic = active_rates & IWL_BASIC_RATES_MASK;
b481de9c 1946
c7c46676 1947 cck_rates = IWL_CCK_RATES_MASK & active_rates;
bb8c093b 1948 ret_rates = iwl4965_supported_rate_to_ie(pos, cck_rates,
bee488db 1949 active_rate_basic, &left);
c7c46676
TW
1950 active_rates &= ~ret_rates;
1951
bb8c093b 1952 ret_rates = iwl4965_supported_rate_to_ie(pos, active_rates,
bee488db 1953 active_rate_basic, &left);
c7c46676
TW
1954 active_rates &= ~ret_rates;
1955
b481de9c
ZY
1956 len += 2 + *pos;
1957 pos += (*pos) + 1;
c7c46676 1958 if (active_rates == 0)
b481de9c
ZY
1959 goto fill_end;
1960
1961 /* fill in supported extended rate */
1962 /* ...next IE... */
1963 left -= 2;
1964 if (left < 0)
1965 return 0;
1966 /* ... fill it in... */
1967 *pos++ = WLAN_EID_EXT_SUPP_RATES;
1968 *pos = 0;
bb8c093b 1969 iwl4965_supported_rate_to_ie(pos, active_rates,
bee488db 1970 active_rate_basic, &left);
b481de9c
ZY
1971 if (*pos > 0)
1972 len += 2 + *pos;
1973
c8b0e6e1 1974#ifdef CONFIG_IWL4965_HT
b481de9c
ZY
1975 if (is_direct && priv->is_ht_enabled) {
1976 u8 use_wide_chan = 1;
1977
1978 if (priv->channel_width != IWL_CHANNEL_WIDTH_40MHZ)
1979 use_wide_chan = 0;
1980 pos += (*pos) + 1;
1981 *pos++ = WLAN_EID_HT_CAPABILITY;
1982 *pos++ = sizeof(struct ieee80211_ht_capability);
bb8c093b 1983 iwl4965_set_ht_capab(NULL, (struct ieee80211_ht_capability *)pos,
b481de9c
ZY
1984 use_wide_chan);
1985 len += 2 + sizeof(struct ieee80211_ht_capability);
1986 }
c8b0e6e1 1987#endif /*CONFIG_IWL4965_HT */
b481de9c
ZY
1988
1989 fill_end:
1990 return (u16)len;
1991}
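/*
 * The probe request assembled above is laid out, in order, as: a 24-byte
 * management header, a zero-length (wildcard) SSID IE, an optional directed
 * SSID IE, a Supported Rates IE (CCK rates first, then whatever OFDM rates
 * fit), an Extended Supported Rates IE for any remaining rates, and -- for
 * a directed scan with HT enabled -- an HT Capability IE.
 */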
1992
1993/*
1994 * QoS support
 1995 */
c8b0e6e1 1996#ifdef CONFIG_IWL4965_QOS
bb8c093b
CH
1997static int iwl4965_send_qos_params_command(struct iwl4965_priv *priv,
1998 struct iwl4965_qosparam_cmd *qos)
b481de9c
ZY
1999{
2000
bb8c093b
CH
2001 return iwl4965_send_cmd_pdu(priv, REPLY_QOS_PARAM,
2002 sizeof(struct iwl4965_qosparam_cmd), qos);
b481de9c
ZY
2003}
2004
bb8c093b 2005static void iwl4965_reset_qos(struct iwl4965_priv *priv)
b481de9c
ZY
2006{
2007 u16 cw_min = 15;
2008 u16 cw_max = 1023;
2009 u8 aifs = 2;
2010 u8 is_legacy = 0;
2011 unsigned long flags;
2012 int i;
2013
2014 spin_lock_irqsave(&priv->lock, flags);
2015 priv->qos_data.qos_active = 0;
2016
2017 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS) {
2018 if (priv->qos_data.qos_enable)
2019 priv->qos_data.qos_active = 1;
2020 if (!(priv->active_rate & 0xfff0)) {
2021 cw_min = 31;
2022 is_legacy = 1;
2023 }
2024 } else if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
2025 if (priv->qos_data.qos_enable)
2026 priv->qos_data.qos_active = 1;
2027 } else if (!(priv->staging_rxon.flags & RXON_FLG_SHORT_SLOT_MSK)) {
2028 cw_min = 31;
2029 is_legacy = 1;
2030 }
2031
2032 if (priv->qos_data.qos_active)
2033 aifs = 3;
2034
2035 priv->qos_data.def_qos_parm.ac[0].cw_min = cpu_to_le16(cw_min);
2036 priv->qos_data.def_qos_parm.ac[0].cw_max = cpu_to_le16(cw_max);
2037 priv->qos_data.def_qos_parm.ac[0].aifsn = aifs;
2038 priv->qos_data.def_qos_parm.ac[0].edca_txop = 0;
2039 priv->qos_data.def_qos_parm.ac[0].reserved1 = 0;
2040
2041 if (priv->qos_data.qos_active) {
2042 i = 1;
2043 priv->qos_data.def_qos_parm.ac[i].cw_min = cpu_to_le16(cw_min);
2044 priv->qos_data.def_qos_parm.ac[i].cw_max = cpu_to_le16(cw_max);
2045 priv->qos_data.def_qos_parm.ac[i].aifsn = 7;
2046 priv->qos_data.def_qos_parm.ac[i].edca_txop = 0;
2047 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
2048
2049 i = 2;
2050 priv->qos_data.def_qos_parm.ac[i].cw_min =
2051 cpu_to_le16((cw_min + 1) / 2 - 1);
2052 priv->qos_data.def_qos_parm.ac[i].cw_max =
2053 cpu_to_le16(cw_max);
2054 priv->qos_data.def_qos_parm.ac[i].aifsn = 2;
2055 if (is_legacy)
2056 priv->qos_data.def_qos_parm.ac[i].edca_txop =
2057 cpu_to_le16(6016);
2058 else
2059 priv->qos_data.def_qos_parm.ac[i].edca_txop =
2060 cpu_to_le16(3008);
2061 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
2062
2063 i = 3;
2064 priv->qos_data.def_qos_parm.ac[i].cw_min =
2065 cpu_to_le16((cw_min + 1) / 4 - 1);
2066 priv->qos_data.def_qos_parm.ac[i].cw_max =
2067 cpu_to_le16((cw_max + 1) / 2 - 1);
2068 priv->qos_data.def_qos_parm.ac[i].aifsn = 2;
2069 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
2070 if (is_legacy)
2071 priv->qos_data.def_qos_parm.ac[i].edca_txop =
2072 cpu_to_le16(3264);
2073 else
2074 priv->qos_data.def_qos_parm.ac[i].edca_txop =
2075 cpu_to_le16(1504);
2076 } else {
2077 for (i = 1; i < 4; i++) {
2078 priv->qos_data.def_qos_parm.ac[i].cw_min =
2079 cpu_to_le16(cw_min);
2080 priv->qos_data.def_qos_parm.ac[i].cw_max =
2081 cpu_to_le16(cw_max);
2082 priv->qos_data.def_qos_parm.ac[i].aifsn = aifs;
2083 priv->qos_data.def_qos_parm.ac[i].edca_txop = 0;
2084 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
2085 }
2086 }
 2087 IWL_DEBUG_QOS("set QoS to default\n");
2088
2089 spin_unlock_irqrestore(&priv->lock, flags);
2090}
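/*
 * Worked example of the values set up above when QoS is active and the
 * defaults cw_min = 15, cw_max = 1023 apply:
 *   ac[0]: cw 15/1023, aifsn 3, txop 0
 *   ac[1]: cw 15/1023, aifsn 7, txop 0
 *   ac[2]: cw_min (15+1)/2-1 = 7, cw_max 1023, aifsn 2, txop 3008 usec
 *   ac[3]: cw_min (15+1)/4-1 = 3, cw_max (1023+1)/2-1 = 511, aifsn 2,
 *          txop 1504 usec
 * In the legacy (CCK-only / long-slot) case cw_min starts at 31 and the
 * larger 6016/3264 usec txop values are used instead.
 */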
2091
bb8c093b 2092static void iwl4965_activate_qos(struct iwl4965_priv *priv, u8 force)
b481de9c
ZY
2093{
2094 unsigned long flags;
2095
b481de9c
ZY
2096 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2097 return;
2098
2099 if (!priv->qos_data.qos_enable)
2100 return;
2101
2102 spin_lock_irqsave(&priv->lock, flags);
2103 priv->qos_data.def_qos_parm.qos_flags = 0;
2104
2105 if (priv->qos_data.qos_cap.q_AP.queue_request &&
2106 !priv->qos_data.qos_cap.q_AP.txop_request)
2107 priv->qos_data.def_qos_parm.qos_flags |=
2108 QOS_PARAM_FLG_TXOP_TYPE_MSK;
b481de9c
ZY
2109 if (priv->qos_data.qos_active)
2110 priv->qos_data.def_qos_parm.qos_flags |=
2111 QOS_PARAM_FLG_UPDATE_EDCA_MSK;
2112
c8b0e6e1 2113#ifdef CONFIG_IWL4965_HT
f1f1f5c7
TW
2114 if (priv->is_ht_enabled && priv->current_assoc_ht.is_ht)
2115 priv->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
c8b0e6e1 2116#endif /* CONFIG_IWL4965_HT */
f1f1f5c7 2117
b481de9c
ZY
2118 spin_unlock_irqrestore(&priv->lock, flags);
2119
bb8c093b 2120 if (force || iwl4965_is_associated(priv)) {
f1f1f5c7
TW
2121 IWL_DEBUG_QOS("send QoS cmd with Qos active=%d FLAGS=0x%X\n",
2122 priv->qos_data.qos_active,
2123 priv->qos_data.def_qos_parm.qos_flags);
b481de9c 2124
bb8c093b 2125 iwl4965_send_qos_params_command(priv,
b481de9c
ZY
2126 &(priv->qos_data.def_qos_parm));
2127 }
2128}
2129
c8b0e6e1 2130#endif /* CONFIG_IWL4965_QOS */
b481de9c
ZY
2131/*
2132 * Power management (not Tx power!) functions
2133 */
2134#define MSEC_TO_USEC 1024
2135
2136#define NOSLP __constant_cpu_to_le16(0), 0, 0
2137#define SLP IWL_POWER_DRIVER_ALLOW_SLEEP_MSK, 0, 0
2138#define SLP_TIMEOUT(T) __constant_cpu_to_le32((T) * MSEC_TO_USEC)
2139#define SLP_VEC(X0, X1, X2, X3, X4) {__constant_cpu_to_le32(X0), \
2140 __constant_cpu_to_le32(X1), \
2141 __constant_cpu_to_le32(X2), \
2142 __constant_cpu_to_le32(X3), \
2143 __constant_cpu_to_le32(X4)}
2144
2145
2146/* default power management (not Tx power) table values */
 2147/* for DTIM periods 0 through 10 */
bb8c093b 2148static struct iwl4965_power_vec_entry range_0[IWL_POWER_AC] = {
b481de9c
ZY
2149 {{NOSLP, SLP_TIMEOUT(0), SLP_TIMEOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
2150 {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(500), SLP_VEC(1, 2, 3, 4, 4)}, 0},
2151 {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(300), SLP_VEC(2, 4, 6, 7, 7)}, 0},
2152 {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(100), SLP_VEC(2, 6, 9, 9, 10)}, 0},
2153 {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(25), SLP_VEC(2, 7, 9, 9, 10)}, 1},
2154 {{SLP, SLP_TIMEOUT(25), SLP_TIMEOUT(25), SLP_VEC(4, 7, 10, 10, 10)}, 1}
2155};
2156
 2157/* for DTIM periods greater than 10 */
bb8c093b 2158static struct iwl4965_power_vec_entry range_1[IWL_POWER_AC] = {
b481de9c
ZY
2159 {{NOSLP, SLP_TIMEOUT(0), SLP_TIMEOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
2160 {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(500),
2161 SLP_VEC(1, 2, 3, 4, 0xFF)}, 0},
2162 {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(300),
2163 SLP_VEC(2, 4, 6, 7, 0xFF)}, 0},
2164 {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(100),
2165 SLP_VEC(2, 6, 9, 9, 0xFF)}, 0},
2166 {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(25), SLP_VEC(2, 7, 9, 9, 0xFF)}, 0},
2167 {{SLP, SLP_TIMEOUT(25), SLP_TIMEOUT(25),
2168 SLP_VEC(4, 7, 10, 10, 0xFF)}, 0}
2169};
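/*
 * Each entry in the two tables above pairs a power table command with a
 * no_dtim flag.  SLP_VEC() fills the command's five sleep_interval slots;
 * range_0 covers DTIM periods of 10 or less and range_1 (whose last,
 * largest interval is 0xFF) covers longer DTIM periods.
 * iwl4965_update_power_cmd() below trims each interval to a whole number
 * of DTIM periods before the command is sent.
 */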
2170
bb8c093b 2171int iwl4965_power_init_handle(struct iwl4965_priv *priv)
b481de9c
ZY
2172{
2173 int rc = 0, i;
bb8c093b
CH
2174 struct iwl4965_power_mgr *pow_data;
2175 int size = sizeof(struct iwl4965_power_vec_entry) * IWL_POWER_AC;
b481de9c
ZY
2176 u16 pci_pm;
2177
2178 IWL_DEBUG_POWER("Initialize power \n");
2179
2180 pow_data = &(priv->power_data);
2181
2182 memset(pow_data, 0, sizeof(*pow_data));
2183
2184 pow_data->active_index = IWL_POWER_RANGE_0;
2185 pow_data->dtim_val = 0xffff;
2186
2187 memcpy(&pow_data->pwr_range_0[0], &range_0[0], size);
2188 memcpy(&pow_data->pwr_range_1[0], &range_1[0], size);
2189
2190 rc = pci_read_config_word(priv->pci_dev, PCI_LINK_CTRL, &pci_pm);
2191 if (rc != 0)
2192 return 0;
2193 else {
bb8c093b 2194 struct iwl4965_powertable_cmd *cmd;
b481de9c
ZY
2195
2196 IWL_DEBUG_POWER("adjust power command flags\n");
2197
2198 for (i = 0; i < IWL_POWER_AC; i++) {
2199 cmd = &pow_data->pwr_range_0[i].cmd;
2200
2201 if (pci_pm & 0x1)
2202 cmd->flags &= ~IWL_POWER_PCI_PM_MSK;
2203 else
2204 cmd->flags |= IWL_POWER_PCI_PM_MSK;
2205 }
2206 }
2207 return rc;
2208}
2209
bb8c093b
CH
2210static int iwl4965_update_power_cmd(struct iwl4965_priv *priv,
2211 struct iwl4965_powertable_cmd *cmd, u32 mode)
b481de9c
ZY
2212{
2213 int rc = 0, i;
2214 u8 skip;
2215 u32 max_sleep = 0;
bb8c093b 2216 struct iwl4965_power_vec_entry *range;
b481de9c 2217 u8 period = 0;
bb8c093b 2218 struct iwl4965_power_mgr *pow_data;
b481de9c
ZY
2219
2220 if (mode > IWL_POWER_INDEX_5) {
 2221 IWL_DEBUG_POWER("Error: invalid power mode\n");
2222 return -1;
2223 }
2224 pow_data = &(priv->power_data);
2225
2226 if (pow_data->active_index == IWL_POWER_RANGE_0)
2227 range = &pow_data->pwr_range_0[0];
2228 else
2229 range = &pow_data->pwr_range_1[1];
2230
bb8c093b 2231 memcpy(cmd, &range[mode].cmd, sizeof(struct iwl4965_powertable_cmd));
b481de9c
ZY
2232
2233#ifdef IWL_MAC80211_DISABLE
2234 if (priv->assoc_network != NULL) {
2235 unsigned long flags;
2236
2237 period = priv->assoc_network->tim.tim_period;
2238 }
2239#endif /*IWL_MAC80211_DISABLE */
2240 skip = range[mode].no_dtim;
2241
2242 if (period == 0) {
2243 period = 1;
2244 skip = 0;
2245 }
2246
2247 if (skip == 0) {
2248 max_sleep = period;
2249 cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK;
2250 } else {
2251 __le32 slp_itrvl = cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1];
2252 max_sleep = (le32_to_cpu(slp_itrvl) / period) * period;
2253 cmd->flags |= IWL_POWER_SLEEP_OVER_DTIM_MSK;
2254 }
2255
2256 for (i = 0; i < IWL_POWER_VEC_SIZE; i++) {
2257 if (le32_to_cpu(cmd->sleep_interval[i]) > max_sleep)
2258 cmd->sleep_interval[i] = cpu_to_le32(max_sleep);
2259 }
2260
2261 IWL_DEBUG_POWER("Flags value = 0x%08X\n", cmd->flags);
2262 IWL_DEBUG_POWER("Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout));
2263 IWL_DEBUG_POWER("Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout));
2264 IWL_DEBUG_POWER("Sleep interval vector = { %d , %d , %d , %d , %d }\n",
2265 le32_to_cpu(cmd->sleep_interval[0]),
2266 le32_to_cpu(cmd->sleep_interval[1]),
2267 le32_to_cpu(cmd->sleep_interval[2]),
2268 le32_to_cpu(cmd->sleep_interval[3]),
2269 le32_to_cpu(cmd->sleep_interval[4]));
2270
2271 return rc;
2272}
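/*
 * Example: with a DTIM period of 3 and a mode whose no_dtim flag is set,
 * the last sleep_interval entry (say 10) yields
 * max_sleep = (10 / 3) * 3 = 9, so every interval in the command is
 * clamped to at most 9 beacon intervals and SLEEP_OVER_DTIM is set;
 * when no_dtim is clear, max_sleep is simply the DTIM period itself.
 */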
2273
bb8c093b 2274static int iwl4965_send_power_mode(struct iwl4965_priv *priv, u32 mode)
b481de9c 2275{
9a62f73b 2276 u32 uninitialized_var(final_mode);
b481de9c 2277 int rc;
bb8c093b 2278 struct iwl4965_powertable_cmd cmd;
b481de9c
ZY
2279
2280 /* If on battery, set to 3,
01ebd063 2281 * if plugged into AC power, set to CAM ("continuously aware mode"),
b481de9c
ZY
2282 * else user level */
2283 switch (mode) {
2284 case IWL_POWER_BATTERY:
2285 final_mode = IWL_POWER_INDEX_3;
2286 break;
2287 case IWL_POWER_AC:
2288 final_mode = IWL_POWER_MODE_CAM;
2289 break;
2290 default:
2291 final_mode = mode;
2292 break;
2293 }
2294
2295 cmd.keep_alive_beacons = 0;
2296
bb8c093b 2297 iwl4965_update_power_cmd(priv, &cmd, final_mode);
b481de9c 2298
bb8c093b 2299 rc = iwl4965_send_cmd_pdu(priv, POWER_TABLE_CMD, sizeof(cmd), &cmd);
b481de9c
ZY
2300
2301 if (final_mode == IWL_POWER_MODE_CAM)
2302 clear_bit(STATUS_POWER_PMI, &priv->status);
2303 else
2304 set_bit(STATUS_POWER_PMI, &priv->status);
2305
2306 return rc;
2307}
2308
bb8c093b 2309int iwl4965_is_network_packet(struct iwl4965_priv *priv, struct ieee80211_hdr *header)
b481de9c
ZY
2310{
2311 /* Filter incoming packets to determine if they are targeted toward
2312 * this network, discarding packets coming from ourselves */
2313 switch (priv->iw_mode) {
2314 case IEEE80211_IF_TYPE_IBSS: /* Header: Dest. | Source | BSSID */
2315 /* packets from our adapter are dropped (echo) */
2316 if (!compare_ether_addr(header->addr2, priv->mac_addr))
2317 return 0;
2318 /* {broad,multi}cast packets to our IBSS go through */
2319 if (is_multicast_ether_addr(header->addr1))
2320 return !compare_ether_addr(header->addr3, priv->bssid);
2321 /* packets to our adapter go through */
2322 return !compare_ether_addr(header->addr1, priv->mac_addr);
2323 case IEEE80211_IF_TYPE_STA: /* Header: Dest. | AP{BSSID} | Source */
2324 /* packets from our adapter are dropped (echo) */
2325 if (!compare_ether_addr(header->addr3, priv->mac_addr))
2326 return 0;
2327 /* {broad,multi}cast packets to our BSS go through */
2328 if (is_multicast_ether_addr(header->addr1))
2329 return !compare_ether_addr(header->addr2, priv->bssid);
2330 /* packets to our adapter go through */
2331 return !compare_ether_addr(header->addr1, priv->mac_addr);
2332 }
2333
2334 return 1;
2335}
2336
2337#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x
2338
bb8c093b 2339static const char *iwl4965_get_tx_fail_reason(u32 status)
b481de9c
ZY
2340{
2341 switch (status & TX_STATUS_MSK) {
2342 case TX_STATUS_SUCCESS:
2343 return "SUCCESS";
2344 TX_STATUS_ENTRY(SHORT_LIMIT);
2345 TX_STATUS_ENTRY(LONG_LIMIT);
2346 TX_STATUS_ENTRY(FIFO_UNDERRUN);
2347 TX_STATUS_ENTRY(MGMNT_ABORT);
2348 TX_STATUS_ENTRY(NEXT_FRAG);
2349 TX_STATUS_ENTRY(LIFE_EXPIRE);
2350 TX_STATUS_ENTRY(DEST_PS);
2351 TX_STATUS_ENTRY(ABORTED);
2352 TX_STATUS_ENTRY(BT_RETRY);
2353 TX_STATUS_ENTRY(STA_INVALID);
2354 TX_STATUS_ENTRY(FRAG_DROPPED);
2355 TX_STATUS_ENTRY(TID_DISABLE);
2356 TX_STATUS_ENTRY(FRAME_FLUSHED);
2357 TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL);
2358 TX_STATUS_ENTRY(TX_LOCKED);
2359 TX_STATUS_ENTRY(NO_BEACON_ON_RADAR);
2360 }
2361
2362 return "UNKNOWN";
2363}
2364
2365/**
bb8c093b 2366 * iwl4965_scan_cancel - Cancel any currently executing HW scan
b481de9c
ZY
2367 *
2368 * NOTE: priv->mutex is not required before calling this function
2369 */
bb8c093b 2370static int iwl4965_scan_cancel(struct iwl4965_priv *priv)
b481de9c
ZY
2371{
2372 if (!test_bit(STATUS_SCAN_HW, &priv->status)) {
2373 clear_bit(STATUS_SCANNING, &priv->status);
2374 return 0;
2375 }
2376
2377 if (test_bit(STATUS_SCANNING, &priv->status)) {
2378 if (!test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
2379 IWL_DEBUG_SCAN("Queuing scan abort.\n");
2380 set_bit(STATUS_SCAN_ABORTING, &priv->status);
2381 queue_work(priv->workqueue, &priv->abort_scan);
2382
2383 } else
2384 IWL_DEBUG_SCAN("Scan abort already in progress.\n");
2385
2386 return test_bit(STATUS_SCANNING, &priv->status);
2387 }
2388
2389 return 0;
2390}
2391
2392/**
bb8c093b 2393 * iwl4965_scan_cancel_timeout - Cancel any currently executing HW scan
b481de9c
ZY
2394 * @ms: amount of time to wait (in milliseconds) for scan to abort
2395 *
2396 * NOTE: priv->mutex must be held before calling this function
2397 */
bb8c093b 2398static int iwl4965_scan_cancel_timeout(struct iwl4965_priv *priv, unsigned long ms)
b481de9c
ZY
2399{
2400 unsigned long now = jiffies;
2401 int ret;
2402
bb8c093b 2403 ret = iwl4965_scan_cancel(priv);
b481de9c
ZY
2404 if (ret && ms) {
2405 mutex_unlock(&priv->mutex);
2406 while (!time_after(jiffies, now + msecs_to_jiffies(ms)) &&
2407 test_bit(STATUS_SCANNING, &priv->status))
2408 msleep(1);
2409 mutex_lock(&priv->mutex);
2410
2411 return test_bit(STATUS_SCANNING, &priv->status);
2412 }
2413
2414 return ret;
2415}
2416
bb8c093b 2417static void iwl4965_sequence_reset(struct iwl4965_priv *priv)
b481de9c
ZY
2418{
2419 /* Reset ieee stats */
2420
2421 /* We don't reset the net_device_stats (ieee->stats) on
2422 * re-association */
2423
2424 priv->last_seq_num = -1;
2425 priv->last_frag_num = -1;
2426 priv->last_packet_time = 0;
2427
bb8c093b 2428 iwl4965_scan_cancel(priv);
b481de9c
ZY
2429}
2430
2431#define MAX_UCODE_BEACON_INTERVAL 4096
2432#define INTEL_CONN_LISTEN_INTERVAL __constant_cpu_to_le16(0xA)
2433
bb8c093b 2434static __le16 iwl4965_adjust_beacon_interval(u16 beacon_val)
b481de9c
ZY
2435{
2436 u16 new_val = 0;
2437 u16 beacon_factor = 0;
2438
2439 beacon_factor =
2440 (beacon_val + MAX_UCODE_BEACON_INTERVAL)
2441 / MAX_UCODE_BEACON_INTERVAL;
2442 new_val = beacon_val / beacon_factor;
2443
2444 return cpu_to_le16(new_val);
2445}
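/*
 * Example: a beacon interval of 5000 TU exceeds the uCode maximum of 4096,
 * so beacon_factor = (5000 + 4096) / 4096 = 2 and the value programmed
 * becomes 5000 / 2 = 2500 TU, i.e. the driver tracks every other beacon
 * instead.
 */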
2446
bb8c093b 2447static void iwl4965_setup_rxon_timing(struct iwl4965_priv *priv)
b481de9c
ZY
2448{
2449 u64 interval_tm_unit;
2450 u64 tsf, result;
2451 unsigned long flags;
2452 struct ieee80211_conf *conf = NULL;
2453 u16 beacon_int = 0;
2454
2455 conf = ieee80211_get_hw_conf(priv->hw);
2456
2457 spin_lock_irqsave(&priv->lock, flags);
2458 priv->rxon_timing.timestamp.dw[1] = cpu_to_le32(priv->timestamp1);
2459 priv->rxon_timing.timestamp.dw[0] = cpu_to_le32(priv->timestamp0);
2460
2461 priv->rxon_timing.listen_interval = INTEL_CONN_LISTEN_INTERVAL;
2462
2463 tsf = priv->timestamp1;
2464 tsf = ((tsf << 32) | priv->timestamp0);
2465
2466 beacon_int = priv->beacon_int;
2467 spin_unlock_irqrestore(&priv->lock, flags);
2468
2469 if (priv->iw_mode == IEEE80211_IF_TYPE_STA) {
2470 if (beacon_int == 0) {
2471 priv->rxon_timing.beacon_interval = cpu_to_le16(100);
2472 priv->rxon_timing.beacon_init_val = cpu_to_le32(102400);
2473 } else {
2474 priv->rxon_timing.beacon_interval =
2475 cpu_to_le16(beacon_int);
2476 priv->rxon_timing.beacon_interval =
bb8c093b 2477 iwl4965_adjust_beacon_interval(
b481de9c
ZY
2478 le16_to_cpu(priv->rxon_timing.beacon_interval));
2479 }
2480
2481 priv->rxon_timing.atim_window = 0;
2482 } else {
2483 priv->rxon_timing.beacon_interval =
bb8c093b 2484 iwl4965_adjust_beacon_interval(conf->beacon_int);
b481de9c
ZY
2485 /* TODO: we need to get atim_window from upper stack
 2486 * for now we set it to 0 */
2487 priv->rxon_timing.atim_window = 0;
2488 }
2489
2490 interval_tm_unit =
2491 (le16_to_cpu(priv->rxon_timing.beacon_interval) * 1024);
2492 result = do_div(tsf, interval_tm_unit);
2493 priv->rxon_timing.beacon_init_val =
2494 cpu_to_le32((u32) ((u64) interval_tm_unit - result));
2495
2496 IWL_DEBUG_ASSOC
2497 ("beacon interval %d beacon timer %d beacon tim %d\n",
2498 le16_to_cpu(priv->rxon_timing.beacon_interval),
2499 le32_to_cpu(priv->rxon_timing.beacon_init_val),
2500 le16_to_cpu(priv->rxon_timing.atim_window));
2501}
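/*
 * beacon_init_val computed above is the time remaining until the next
 * beacon: interval_tm_unit is the beacon interval in usec (TU * 1024),
 * 'result' is the current TSF modulo that interval, and their difference
 * tells the uCode how far in the future the next beacon is expected.
 */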
2502
bb8c093b 2503static int iwl4965_scan_initiate(struct iwl4965_priv *priv)
b481de9c
ZY
2504{
2505 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
2506 IWL_ERROR("APs don't scan.\n");
2507 return 0;
2508 }
2509
bb8c093b 2510 if (!iwl4965_is_ready_rf(priv)) {
b481de9c
ZY
2511 IWL_DEBUG_SCAN("Aborting scan due to not ready.\n");
2512 return -EIO;
2513 }
2514
2515 if (test_bit(STATUS_SCANNING, &priv->status)) {
2516 IWL_DEBUG_SCAN("Scan already in progress.\n");
2517 return -EAGAIN;
2518 }
2519
2520 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
2521 IWL_DEBUG_SCAN("Scan request while abort pending. "
2522 "Queuing.\n");
2523 return -EAGAIN;
2524 }
2525
2526 IWL_DEBUG_INFO("Starting scan...\n");
2527 priv->scan_bands = 2;
2528 set_bit(STATUS_SCANNING, &priv->status);
2529 priv->scan_start = jiffies;
2530 priv->scan_pass_start = priv->scan_start;
2531
2532 queue_work(priv->workqueue, &priv->request_scan);
2533
2534 return 0;
2535}
2536
bb8c093b 2537static int iwl4965_set_rxon_hwcrypto(struct iwl4965_priv *priv, int hw_decrypt)
b481de9c 2538{
bb8c093b 2539 struct iwl4965_rxon_cmd *rxon = &priv->staging_rxon;
b481de9c
ZY
2540
2541 if (hw_decrypt)
2542 rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
2543 else
2544 rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
2545
2546 return 0;
2547}
2548
bb8c093b 2549static void iwl4965_set_flags_for_phymode(struct iwl4965_priv *priv, u8 phymode)
b481de9c
ZY
2550{
2551 if (phymode == MODE_IEEE80211A) {
2552 priv->staging_rxon.flags &=
2553 ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
2554 | RXON_FLG_CCK_MSK);
2555 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
2556 } else {
bb8c093b 2557 /* Copied from iwl4965_bg_post_associate() */
b481de9c
ZY
2558 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
2559 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
2560 else
2561 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
2562
2563 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)
2564 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
2565
2566 priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK;
2567 priv->staging_rxon.flags |= RXON_FLG_AUTO_DETECT_MSK;
2568 priv->staging_rxon.flags &= ~RXON_FLG_CCK_MSK;
2569 }
2570}
2571
2572/*
01ebd063 2573 * initialize rxon structure with default values from eeprom
b481de9c 2574 */
bb8c093b 2575static void iwl4965_connection_init_rx_config(struct iwl4965_priv *priv)
b481de9c 2576{
bb8c093b 2577 const struct iwl4965_channel_info *ch_info;
b481de9c
ZY
2578
2579 memset(&priv->staging_rxon, 0, sizeof(priv->staging_rxon));
2580
2581 switch (priv->iw_mode) {
2582 case IEEE80211_IF_TYPE_AP:
2583 priv->staging_rxon.dev_type = RXON_DEV_TYPE_AP;
2584 break;
2585
2586 case IEEE80211_IF_TYPE_STA:
2587 priv->staging_rxon.dev_type = RXON_DEV_TYPE_ESS;
2588 priv->staging_rxon.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
2589 break;
2590
2591 case IEEE80211_IF_TYPE_IBSS:
2592 priv->staging_rxon.dev_type = RXON_DEV_TYPE_IBSS;
2593 priv->staging_rxon.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
2594 priv->staging_rxon.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
2595 RXON_FILTER_ACCEPT_GRP_MSK;
2596 break;
2597
2598 case IEEE80211_IF_TYPE_MNTR:
2599 priv->staging_rxon.dev_type = RXON_DEV_TYPE_SNIFFER;
2600 priv->staging_rxon.filter_flags = RXON_FILTER_PROMISC_MSK |
2601 RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_ACCEPT_GRP_MSK;
2602 break;
2603 }
2604
2605#if 0
2606 /* TODO: Figure out when short_preamble would be set and cache from
2607 * that */
2608 if (!hw_to_local(priv->hw)->short_preamble)
2609 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
2610 else
2611 priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
2612#endif
2613
bb8c093b 2614 ch_info = iwl4965_get_channel_info(priv, priv->phymode,
b481de9c
ZY
2615 le16_to_cpu(priv->staging_rxon.channel));
2616
2617 if (!ch_info)
2618 ch_info = &priv->channel_info[0];
2619
2620 /*
 2621 * In some cases all of the A-band channels are non-IBSS;
 2622 * in that case force a B/G channel instead.
2623 */
2624 if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) &&
2625 !(is_channel_ibss(ch_info)))
2626 ch_info = &priv->channel_info[0];
2627
2628 priv->staging_rxon.channel = cpu_to_le16(ch_info->channel);
2629 if (is_channel_a_band(ch_info))
2630 priv->phymode = MODE_IEEE80211A;
2631 else
2632 priv->phymode = MODE_IEEE80211G;
2633
bb8c093b 2634 iwl4965_set_flags_for_phymode(priv, priv->phymode);
b481de9c
ZY
2635
2636 priv->staging_rxon.ofdm_basic_rates =
2637 (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
2638 priv->staging_rxon.cck_basic_rates =
2639 (IWL_CCK_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
2640
2641 priv->staging_rxon.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED_MSK |
2642 RXON_FLG_CHANNEL_MODE_PURE_40_MSK);
2643 memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);
2644 memcpy(priv->staging_rxon.wlap_bssid_addr, priv->mac_addr, ETH_ALEN);
2645 priv->staging_rxon.ofdm_ht_single_stream_basic_rates = 0xff;
2646 priv->staging_rxon.ofdm_ht_dual_stream_basic_rates = 0xff;
2647 iwl4965_set_rxon_chain(priv);
2648}
2649
bb8c093b 2650static int iwl4965_set_mode(struct iwl4965_priv *priv, int mode)
b481de9c 2651{
bb8c093b 2652 if (!iwl4965_is_ready_rf(priv))
b481de9c
ZY
2653 return -EAGAIN;
2654
2655 if (mode == IEEE80211_IF_TYPE_IBSS) {
bb8c093b 2656 const struct iwl4965_channel_info *ch_info;
b481de9c 2657
bb8c093b 2658 ch_info = iwl4965_get_channel_info(priv,
b481de9c
ZY
2659 priv->phymode,
2660 le16_to_cpu(priv->staging_rxon.channel));
2661
2662 if (!ch_info || !is_channel_ibss(ch_info)) {
2663 IWL_ERROR("channel %d not IBSS channel\n",
2664 le16_to_cpu(priv->staging_rxon.channel));
2665 return -EINVAL;
2666 }
2667 }
2668
2669 cancel_delayed_work(&priv->scan_check);
bb8c093b 2670 if (iwl4965_scan_cancel_timeout(priv, 100)) {
b481de9c
ZY
2671 IWL_WARNING("Aborted scan still in progress after 100ms\n");
2672 IWL_DEBUG_MAC80211("leaving - scan abort failed.\n");
2673 return -EAGAIN;
2674 }
2675
2676 priv->iw_mode = mode;
2677
bb8c093b 2678 iwl4965_connection_init_rx_config(priv);
b481de9c
ZY
2679 memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);
2680
bb8c093b 2681 iwl4965_clear_stations_table(priv);
b481de9c 2682
bb8c093b 2683 iwl4965_commit_rxon(priv);
b481de9c
ZY
2684
2685 return 0;
2686}
2687
bb8c093b 2688static void iwl4965_build_tx_cmd_hwcrypto(struct iwl4965_priv *priv,
b481de9c 2689 struct ieee80211_tx_control *ctl,
bb8c093b 2690 struct iwl4965_cmd *cmd,
b481de9c
ZY
2691 struct sk_buff *skb_frag,
2692 int last_frag)
2693{
bb8c093b 2694 struct iwl4965_hw_key *keyinfo = &priv->stations[ctl->key_idx].keyinfo;
b481de9c
ZY
2695
2696 switch (keyinfo->alg) {
2697 case ALG_CCMP:
2698 cmd->cmd.tx.sec_ctl = TX_CMD_SEC_CCM;
2699 memcpy(cmd->cmd.tx.key, keyinfo->key, keyinfo->keylen);
2700 IWL_DEBUG_TX("tx_cmd with aes hwcrypto\n");
2701 break;
2702
2703 case ALG_TKIP:
2704#if 0
2705 cmd->cmd.tx.sec_ctl = TX_CMD_SEC_TKIP;
2706
2707 if (last_frag)
2708 memcpy(cmd->cmd.tx.tkip_mic.byte, skb_frag->tail - 8,
2709 8);
2710 else
2711 memset(cmd->cmd.tx.tkip_mic.byte, 0, 8);
2712#endif
2713 break;
2714
2715 case ALG_WEP:
2716 cmd->cmd.tx.sec_ctl = TX_CMD_SEC_WEP |
2717 (ctl->key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT;
2718
2719 if (keyinfo->keylen == 13)
2720 cmd->cmd.tx.sec_ctl |= TX_CMD_SEC_KEY128;
2721
2722 memcpy(&cmd->cmd.tx.key[3], keyinfo->key, keyinfo->keylen);
2723
2724 IWL_DEBUG_TX("Configuring packet for WEP encryption "
2725 "with key %d\n", ctl->key_idx);
2726 break;
2727
b481de9c
ZY
2728 default:
2729 printk(KERN_ERR "Unknown encode alg %d\n", keyinfo->alg);
2730 break;
2731 }
2732}
2733
2734/*
2735 * handle build REPLY_TX command notification.
2736 */
bb8c093b
CH
2737static void iwl4965_build_tx_cmd_basic(struct iwl4965_priv *priv,
2738 struct iwl4965_cmd *cmd,
b481de9c
ZY
2739 struct ieee80211_tx_control *ctrl,
2740 struct ieee80211_hdr *hdr,
2741 int is_unicast, u8 std_id)
2742{
2743 __le16 *qc;
2744 u16 fc = le16_to_cpu(hdr->frame_control);
2745 __le32 tx_flags = cmd->cmd.tx.tx_flags;
2746
2747 cmd->cmd.tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
2748 if (!(ctrl->flags & IEEE80211_TXCTL_NO_ACK)) {
2749 tx_flags |= TX_CMD_FLG_ACK_MSK;
2750 if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)
2751 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
2752 if (ieee80211_is_probe_response(fc) &&
2753 !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
2754 tx_flags |= TX_CMD_FLG_TSF_MSK;
2755 } else {
2756 tx_flags &= (~TX_CMD_FLG_ACK_MSK);
2757 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
2758 }
2759
2760 cmd->cmd.tx.sta_id = std_id;
2761 if (ieee80211_get_morefrag(hdr))
2762 tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
2763
2764 qc = ieee80211_get_qos_ctrl(hdr);
2765 if (qc) {
2766 cmd->cmd.tx.tid_tspec = (u8) (le16_to_cpu(*qc) & 0xf);
2767 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
2768 } else
2769 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
2770
2771 if (ctrl->flags & IEEE80211_TXCTL_USE_RTS_CTS) {
2772 tx_flags |= TX_CMD_FLG_RTS_MSK;
2773 tx_flags &= ~TX_CMD_FLG_CTS_MSK;
2774 } else if (ctrl->flags & IEEE80211_TXCTL_USE_CTS_PROTECT) {
2775 tx_flags &= ~TX_CMD_FLG_RTS_MSK;
2776 tx_flags |= TX_CMD_FLG_CTS_MSK;
2777 }
2778
2779 if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
2780 tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
2781
2782 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
2783 if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) {
2784 if ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ ||
2785 (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ)
bc434dd2 2786 cmd->cmd.tx.timeout.pm_frame_timeout = cpu_to_le16(3);
b481de9c 2787 else
bc434dd2 2788 cmd->cmd.tx.timeout.pm_frame_timeout = cpu_to_le16(2);
b481de9c
ZY
2789 } else
2790 cmd->cmd.tx.timeout.pm_frame_timeout = 0;
2791
2792 cmd->cmd.tx.driver_txop = 0;
2793 cmd->cmd.tx.tx_flags = tx_flags;
2794 cmd->cmd.tx.next_frame_len = 0;
2795}
2796
6440adb5
CB
2797/**
2798 * iwl4965_get_sta_id - Find station's index within station table
2799 *
2800 * If new IBSS station, create new entry in station table
2801 */
9fbab516
BC
2802static int iwl4965_get_sta_id(struct iwl4965_priv *priv,
2803 struct ieee80211_hdr *hdr)
b481de9c
ZY
2804{
2805 int sta_id;
2806 u16 fc = le16_to_cpu(hdr->frame_control);
0795af57 2807 DECLARE_MAC_BUF(mac);
b481de9c 2808
6440adb5 2809 /* If this frame is broadcast or management, use broadcast station id */
b481de9c
ZY
2810 if (((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA) ||
2811 is_multicast_ether_addr(hdr->addr1))
2812 return priv->hw_setting.bcast_sta_id;
2813
2814 switch (priv->iw_mode) {
2815
6440adb5
CB
2816 /* If we are a client station in a BSS network, use the special
2817 * AP station entry (that's the only station we communicate with) */
b481de9c
ZY
2818 case IEEE80211_IF_TYPE_STA:
2819 return IWL_AP_ID;
2820
2821 /* If we are an AP, then find the station, or use BCAST */
2822 case IEEE80211_IF_TYPE_AP:
bb8c093b 2823 sta_id = iwl4965_hw_find_station(priv, hdr->addr1);
b481de9c
ZY
2824 if (sta_id != IWL_INVALID_STATION)
2825 return sta_id;
2826 return priv->hw_setting.bcast_sta_id;
2827
6440adb5
CB
2828 /* If this frame is going out to an IBSS network, find the station,
2829 * or create a new station table entry */
b481de9c 2830 case IEEE80211_IF_TYPE_IBSS:
bb8c093b 2831 sta_id = iwl4965_hw_find_station(priv, hdr->addr1);
b481de9c
ZY
2832 if (sta_id != IWL_INVALID_STATION)
2833 return sta_id;
2834
6440adb5 2835 /* Create new station table entry */
bb8c093b 2836 sta_id = iwl4965_add_station_flags(priv, hdr->addr1, 0, CMD_ASYNC);
b481de9c
ZY
2837
2838 if (sta_id != IWL_INVALID_STATION)
2839 return sta_id;
2840
0795af57 2841 IWL_DEBUG_DROP("Station %s not in station map. "
b481de9c 2842 "Defaulting to broadcast...\n",
0795af57 2843 print_mac(mac, hdr->addr1));
bb8c093b 2844 iwl4965_print_hex_dump(IWL_DL_DROP, (u8 *) hdr, sizeof(*hdr));
b481de9c
ZY
2845 return priv->hw_setting.bcast_sta_id;
2846
2847 default:
01ebd063 2848 IWL_WARNING("Unknown mode of operation: %d\n", priv->iw_mode);
b481de9c
ZY
2849 return priv->hw_setting.bcast_sta_id;
2850 }
2851}
2852
2853/*
2854 * start REPLY_TX command process
2855 */
bb8c093b 2856static int iwl4965_tx_skb(struct iwl4965_priv *priv,
b481de9c
ZY
2857 struct sk_buff *skb, struct ieee80211_tx_control *ctl)
2858{
2859 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
bb8c093b 2860 struct iwl4965_tfd_frame *tfd;
b481de9c
ZY
2861 u32 *control_flags;
2862 int txq_id = ctl->queue;
bb8c093b
CH
2863 struct iwl4965_tx_queue *txq = NULL;
2864 struct iwl4965_queue *q = NULL;
b481de9c
ZY
2865 dma_addr_t phys_addr;
2866 dma_addr_t txcmd_phys;
bb8c093b 2867 struct iwl4965_cmd *out_cmd = NULL;
b481de9c
ZY
2868 u16 len, idx, len_org;
2869 u8 id, hdr_len, unicast;
2870 u8 sta_id;
2871 u16 seq_number = 0;
2872 u16 fc;
2873 __le16 *qc;
2874 u8 wait_write_ptr = 0;
2875 unsigned long flags;
2876 int rc;
2877
2878 spin_lock_irqsave(&priv->lock, flags);
bb8c093b 2879 if (iwl4965_is_rfkill(priv)) {
b481de9c
ZY
2880 IWL_DEBUG_DROP("Dropping - RF KILL\n");
2881 goto drop_unlock;
2882 }
2883
2884 if (!priv->interface_id) {
2885 IWL_DEBUG_DROP("Dropping - !priv->interface_id\n");
2886 goto drop_unlock;
2887 }
2888
2889 if ((ctl->tx_rate & 0xFF) == IWL_INVALID_RATE) {
2890 IWL_ERROR("ERROR: No TX rate available.\n");
2891 goto drop_unlock;
2892 }
2893
2894 unicast = !is_multicast_ether_addr(hdr->addr1);
2895 id = 0;
2896
2897 fc = le16_to_cpu(hdr->frame_control);
2898
c8b0e6e1 2899#ifdef CONFIG_IWL4965_DEBUG
b481de9c
ZY
2900 if (ieee80211_is_auth(fc))
2901 IWL_DEBUG_TX("Sending AUTH frame\n");
2902 else if (ieee80211_is_assoc_request(fc))
2903 IWL_DEBUG_TX("Sending ASSOC frame\n");
2904 else if (ieee80211_is_reassoc_request(fc))
2905 IWL_DEBUG_TX("Sending REASSOC frame\n");
2906#endif
2907
bb8c093b 2908 if (!iwl4965_is_associated(priv) &&
b481de9c 2909 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)) {
bb8c093b 2910 IWL_DEBUG_DROP("Dropping - !iwl4965_is_associated\n");
b481de9c
ZY
2911 goto drop_unlock;
2912 }
2913
2914 spin_unlock_irqrestore(&priv->lock, flags);
2915
2916 hdr_len = ieee80211_get_hdrlen(fc);
6440adb5
CB
2917
2918 /* Find (or create) index into station table for destination station */
bb8c093b 2919 sta_id = iwl4965_get_sta_id(priv, hdr);
b481de9c 2920 if (sta_id == IWL_INVALID_STATION) {
0795af57
JP
2921 DECLARE_MAC_BUF(mac);
2922
2923 IWL_DEBUG_DROP("Dropping - INVALID STATION: %s\n",
2924 print_mac(mac, hdr->addr1));
b481de9c
ZY
2925 goto drop;
2926 }
2927
2928 IWL_DEBUG_RATE("station Id %d\n", sta_id);
2929
2930 qc = ieee80211_get_qos_ctrl(hdr);
2931 if (qc) {
2932 u8 tid = (u8)(le16_to_cpu(*qc) & 0xf);
2933 seq_number = priv->stations[sta_id].tid[tid].seq_number &
2934 IEEE80211_SCTL_SEQ;
2935 hdr->seq_ctrl = cpu_to_le16(seq_number) |
2936 (hdr->seq_ctrl &
2937 __constant_cpu_to_le16(IEEE80211_SCTL_FRAG));
2938 seq_number += 0x10;
c8b0e6e1
CH
2939#ifdef CONFIG_IWL4965_HT
2940#ifdef CONFIG_IWL4965_HT_AGG
b481de9c
ZY
2941 /* aggregation is on for this <sta,tid> */
2942 if (ctl->flags & IEEE80211_TXCTL_HT_MPDU_AGG)
2943 txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
c8b0e6e1
CH
2944#endif /* CONFIG_IWL4965_HT_AGG */
2945#endif /* CONFIG_IWL4965_HT */
b481de9c 2946 }
6440adb5
CB
2947
2948 /* Descriptor for chosen Tx queue */
b481de9c
ZY
2949 txq = &priv->txq[txq_id];
2950 q = &txq->q;
2951
2952 spin_lock_irqsave(&priv->lock, flags);
2953
6440adb5 2954 /* Set up first empty TFD within this queue's circular TFD buffer */
fc4b6853 2955 tfd = &txq->bd[q->write_ptr];
b481de9c
ZY
2956 memset(tfd, 0, sizeof(*tfd));
2957 control_flags = (u32 *) tfd;
fc4b6853 2958 idx = get_cmd_index(q, q->write_ptr, 0);
b481de9c 2959
6440adb5 2960 /* Set up driver data for this TFD */
bb8c093b 2961 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl4965_tx_info));
fc4b6853
TW
2962 txq->txb[q->write_ptr].skb[0] = skb;
2963 memcpy(&(txq->txb[q->write_ptr].status.control),
b481de9c 2964 ctl, sizeof(struct ieee80211_tx_control));
6440adb5
CB
2965
2966 /* Set up first empty entry in queue's array of Tx/cmd buffers */
b481de9c
ZY
2967 out_cmd = &txq->cmd[idx];
2968 memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
2969 memset(&out_cmd->cmd.tx, 0, sizeof(out_cmd->cmd.tx));
6440adb5
CB
2970
2971 /*
2972 * Set up the Tx-command (not MAC!) header.
2973 * Store the chosen Tx queue and TFD index within the sequence field;
2974 * after Tx, uCode's Tx response will return this value so driver can
2975 * locate the frame within the tx queue and do post-tx processing.
2976 */
b481de9c
ZY
2977 out_cmd->hdr.cmd = REPLY_TX;
2978 out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
fc4b6853 2979 INDEX_TO_SEQ(q->write_ptr)));
6440adb5
CB
2980
2981 /* Copy MAC header from skb into command buffer */
b481de9c
ZY
2982 memcpy(out_cmd->cmd.tx.hdr, hdr, hdr_len);
2983
6440adb5
CB
2984 /*
2985 * Use the first empty entry in this queue's command buffer array
2986 * to contain the Tx command and MAC header concatenated together
2987 * (payload data will be in another buffer).
2988 * Size of this varies, due to varying MAC header length.
2989 * If end is not dword aligned, we'll have 2 extra bytes at the end
2990 * of the MAC header (device reads on dword boundaries).
2991 * We'll tell device about this padding later.
2992 */
b481de9c 2993 len = priv->hw_setting.tx_cmd_len +
bb8c093b 2994 sizeof(struct iwl4965_cmd_header) + hdr_len;
b481de9c
ZY
2995
2996 len_org = len;
2997 len = (len + 3) & ~3;
2998
2999 if (len_org != len)
3000 len_org = 1;
3001 else
3002 len_org = 0;
3003
6440adb5
CB
3004 /* Physical address of this Tx command's header (not MAC header!),
3005 * within command buffer array. */
bb8c093b
CH
3006 txcmd_phys = txq->dma_addr_cmd + sizeof(struct iwl4965_cmd) * idx +
3007 offsetof(struct iwl4965_cmd, hdr);
b481de9c 3008
6440adb5
CB
3009 /* Add buffer containing Tx command and MAC(!) header to TFD's
3010 * first entry */
bb8c093b 3011 iwl4965_hw_txq_attach_buf_to_tfd(priv, tfd, txcmd_phys, len);
b481de9c
ZY
3012
3013 if (!(ctl->flags & IEEE80211_TXCTL_DO_NOT_ENCRYPT))
bb8c093b 3014 iwl4965_build_tx_cmd_hwcrypto(priv, ctl, out_cmd, skb, 0);
b481de9c 3015
6440adb5
CB
3016 /* Set up TFD's 2nd entry to point directly to remainder of skb,
3017 * if any (802.11 null frames have no payload). */
b481de9c
ZY
3018 len = skb->len - hdr_len;
3019 if (len) {
3020 phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
3021 len, PCI_DMA_TODEVICE);
bb8c093b 3022 iwl4965_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, len);
b481de9c
ZY
3023 }
3024
6440adb5 3025 /* Tell 4965 about any 2-byte padding after MAC header */
b481de9c
ZY
3026 if (len_org)
3027 out_cmd->cmd.tx.tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
3028
6440adb5 3029 /* Total # bytes to be transmitted */
b481de9c
ZY
3030 len = (u16)skb->len;
3031 out_cmd->cmd.tx.len = cpu_to_le16(len);
3032
3033 /* TODO need this for burst mode later on */
bb8c093b 3034 iwl4965_build_tx_cmd_basic(priv, out_cmd, ctl, hdr, unicast, sta_id);
b481de9c
ZY
3035
3036 /* set is_hcca to 0; it probably will never be implemented */
bb8c093b 3037 iwl4965_hw_build_tx_cmd_rate(priv, out_cmd, ctl, hdr, sta_id, 0);
b481de9c
ZY
3038
3039 iwl4965_tx_cmd(priv, out_cmd, sta_id, txcmd_phys,
3040 hdr, hdr_len, ctl, NULL);
3041
3042 if (!ieee80211_get_morefrag(hdr)) {
3043 txq->need_update = 1;
3044 if (qc) {
3045 u8 tid = (u8)(le16_to_cpu(*qc) & 0xf);
3046 priv->stations[sta_id].tid[tid].seq_number = seq_number;
3047 }
3048 } else {
3049 wait_write_ptr = 1;
3050 txq->need_update = 0;
3051 }
3052
bb8c093b 3053 iwl4965_print_hex_dump(IWL_DL_TX, out_cmd->cmd.payload,
b481de9c
ZY
3054 sizeof(out_cmd->cmd.tx));
3055
bb8c093b 3056 iwl4965_print_hex_dump(IWL_DL_TX, (u8 *)out_cmd->cmd.tx.hdr,
b481de9c
ZY
3057 ieee80211_get_hdrlen(fc));
3058
6440adb5 3059 /* Set up entry for this TFD in Tx byte-count array */
b481de9c
ZY
3060 iwl4965_tx_queue_update_wr_ptr(priv, txq, len);
3061
6440adb5 3062 /* Tell device the write index *just past* this latest filled TFD */
bb8c093b
CH
3063 q->write_ptr = iwl4965_queue_inc_wrap(q->write_ptr, q->n_bd);
3064 rc = iwl4965_tx_queue_update_write_ptr(priv, txq);
b481de9c
ZY
3065 spin_unlock_irqrestore(&priv->lock, flags);
3066
3067 if (rc)
3068 return rc;
3069
bb8c093b 3070 if ((iwl4965_queue_space(q) < q->high_mark)
b481de9c
ZY
3071 && priv->mac80211_registered) {
3072 if (wait_write_ptr) {
3073 spin_lock_irqsave(&priv->lock, flags);
3074 txq->need_update = 1;
bb8c093b 3075 iwl4965_tx_queue_update_write_ptr(priv, txq);
b481de9c
ZY
3076 spin_unlock_irqrestore(&priv->lock, flags);
3077 }
3078
3079 ieee80211_stop_queue(priv->hw, ctl->queue);
3080 }
3081
3082 return 0;
3083
3084drop_unlock:
3085 spin_unlock_irqrestore(&priv->lock, flags);
3086drop:
3087 return -1;
3088}
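/*
 * A note on the len_org padding logic above: the Tx command plus the copied
 * MAC header must leave the payload starting on a dword boundary.  For
 * example, assuming the combined command-header + MAC-header length came to
 * 62 bytes, it is rounded up to 64 and TX_CMD_FLG_MH_PAD_MSK is set so the
 * device knows to skip the 2 pad bytes; if the length is already a multiple
 * of 4, no flag is set.
 */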
3089
bb8c093b 3090static void iwl4965_set_rate(struct iwl4965_priv *priv)
b481de9c
ZY
3091{
3092 const struct ieee80211_hw_mode *hw = NULL;
3093 struct ieee80211_rate *rate;
3094 int i;
3095
bb8c093b 3096 hw = iwl4965_get_hw_mode(priv, priv->phymode);
c4ba9621
SA
3097 if (!hw) {
3098 IWL_ERROR("Failed to set rate: unable to get hw mode\n");
3099 return;
3100 }
b481de9c
ZY
3101
3102 priv->active_rate = 0;
3103 priv->active_rate_basic = 0;
3104
3105 IWL_DEBUG_RATE("Setting rates for 802.11%c\n",
3106 hw->mode == MODE_IEEE80211A ?
3107 'a' : ((hw->mode == MODE_IEEE80211B) ? 'b' : 'g'));
3108
3109 for (i = 0; i < hw->num_rates; i++) {
3110 rate = &(hw->rates[i]);
3111 if ((rate->val < IWL_RATE_COUNT) &&
3112 (rate->flags & IEEE80211_RATE_SUPPORTED)) {
3113 IWL_DEBUG_RATE("Adding rate index %d (plcp %d)%s\n",
bb8c093b 3114 rate->val, iwl4965_rates[rate->val].plcp,
b481de9c
ZY
3115 (rate->flags & IEEE80211_RATE_BASIC) ?
3116 "*" : "");
3117 priv->active_rate |= (1 << rate->val);
3118 if (rate->flags & IEEE80211_RATE_BASIC)
3119 priv->active_rate_basic |= (1 << rate->val);
3120 } else
3121 IWL_DEBUG_RATE("Not adding rate %d (plcp %d)\n",
bb8c093b 3122 rate->val, iwl4965_rates[rate->val].plcp);
b481de9c
ZY
3123 }
3124
3125 IWL_DEBUG_RATE("Set active_rate = %0x, active_rate_basic = %0x\n",
3126 priv->active_rate, priv->active_rate_basic);
3127
3128 /*
3129 * If a basic rate is configured, then use it (adding IWL_RATE_1M_MASK)
3130 * otherwise set it to the default of all CCK rates and 6, 12, 24 for
3131 * OFDM
3132 */
3133 if (priv->active_rate_basic & IWL_CCK_BASIC_RATES_MASK)
3134 priv->staging_rxon.cck_basic_rates =
3135 ((priv->active_rate_basic &
3136 IWL_CCK_RATES_MASK) >> IWL_FIRST_CCK_RATE) & 0xF;
3137 else
3138 priv->staging_rxon.cck_basic_rates =
3139 (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
3140
3141 if (priv->active_rate_basic & IWL_OFDM_BASIC_RATES_MASK)
3142 priv->staging_rxon.ofdm_basic_rates =
3143 ((priv->active_rate_basic &
3144 (IWL_OFDM_BASIC_RATES_MASK | IWL_RATE_6M_MASK)) >>
3145 IWL_FIRST_OFDM_RATE) & 0xFF;
3146 else
3147 priv->staging_rxon.ofdm_basic_rates =
3148 (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
3149}
3150
bb8c093b 3151static void iwl4965_radio_kill_sw(struct iwl4965_priv *priv, int disable_radio)
b481de9c
ZY
3152{
3153 unsigned long flags;
3154
3155 if (!!disable_radio == test_bit(STATUS_RF_KILL_SW, &priv->status))
3156 return;
3157
3158 IWL_DEBUG_RF_KILL("Manual SW RF KILL set to: RADIO %s\n",
3159 disable_radio ? "OFF" : "ON");
3160
3161 if (disable_radio) {
bb8c093b 3162 iwl4965_scan_cancel(priv);
b481de9c
ZY
3163 /* FIXME: This is a workaround for AP */
3164 if (priv->iw_mode != IEEE80211_IF_TYPE_AP) {
3165 spin_lock_irqsave(&priv->lock, flags);
bb8c093b 3166 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_SET,
b481de9c
ZY
3167 CSR_UCODE_SW_BIT_RFKILL);
3168 spin_unlock_irqrestore(&priv->lock, flags);
bb8c093b 3169 iwl4965_send_card_state(priv, CARD_STATE_CMD_DISABLE, 0);
b481de9c
ZY
3170 set_bit(STATUS_RF_KILL_SW, &priv->status);
3171 }
3172 return;
3173 }
3174
3175 spin_lock_irqsave(&priv->lock, flags);
bb8c093b 3176 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
b481de9c
ZY
3177
3178 clear_bit(STATUS_RF_KILL_SW, &priv->status);
3179 spin_unlock_irqrestore(&priv->lock, flags);
3180
3181 /* wake up ucode */
3182 msleep(10);
3183
3184 spin_lock_irqsave(&priv->lock, flags);
bb8c093b
CH
3185 iwl4965_read32(priv, CSR_UCODE_DRV_GP1);
3186 if (!iwl4965_grab_nic_access(priv))
3187 iwl4965_release_nic_access(priv);
b481de9c
ZY
3188 spin_unlock_irqrestore(&priv->lock, flags);
3189
3190 if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
3191 IWL_DEBUG_RF_KILL("Can not turn radio back on - "
3192 "disabled by HW switch\n");
3193 return;
3194 }
3195
3196 queue_work(priv->workqueue, &priv->restart);
3197 return;
3198}
3199
bb8c093b 3200void iwl4965_set_decrypted_flag(struct iwl4965_priv *priv, struct sk_buff *skb,
b481de9c
ZY
3201 u32 decrypt_res, struct ieee80211_rx_status *stats)
3202{
3203 u16 fc =
3204 le16_to_cpu(((struct ieee80211_hdr *)skb->data)->frame_control);
3205
3206 if (priv->active_rxon.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK)
3207 return;
3208
3209 if (!(fc & IEEE80211_FCTL_PROTECTED))
3210 return;
3211
3212 IWL_DEBUG_RX("decrypt_res:0x%x\n", decrypt_res);
3213 switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
3214 case RX_RES_STATUS_SEC_TYPE_TKIP:
3215 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
3216 RX_RES_STATUS_BAD_ICV_MIC)
 3217 stats->flag |= RX_FLAG_MMIC_ERROR;
 /* fall through -- TKIP shares the decrypt-OK check with WEP/CCMP */
3218 case RX_RES_STATUS_SEC_TYPE_WEP:
3219 case RX_RES_STATUS_SEC_TYPE_CCMP:
3220 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
3221 RX_RES_STATUS_DECRYPT_OK) {
3222 IWL_DEBUG_RX("hw decrypt successfully!!!\n");
3223 stats->flag |= RX_FLAG_DECRYPTED;
3224 }
3225 break;
3226
3227 default:
3228 break;
3229 }
3230}
3231
bb8c093b
CH
3232void iwl4965_handle_data_packet_monitor(struct iwl4965_priv *priv,
3233 struct iwl4965_rx_mem_buffer *rxb,
b481de9c
ZY
3234 void *data, short len,
3235 struct ieee80211_rx_status *stats,
3236 u16 phy_flags)
3237{
bb8c093b 3238 struct iwl4965_rt_rx_hdr *iwl4965_rt;
b481de9c
ZY
3239
3240 /* First cache any information we need before we overwrite
3241 * the information provided in the skb from the hardware */
3242 s8 signal = stats->ssi;
3243 s8 noise = 0;
3244 int rate = stats->rate;
3245 u64 tsf = stats->mactime;
3246 __le16 phy_flags_hw = cpu_to_le16(phy_flags);
3247
3248 /* We received data from the HW, so stop the watchdog */
bb8c093b 3249 if (len > IWL_RX_BUF_SIZE - sizeof(*iwl4965_rt)) {
b481de9c
ZY
3250 IWL_DEBUG_DROP("Dropping too large packet in monitor\n");
3251 return;
3252 }
3253
3254 /* copy the frame data to write after where the radiotap header goes */
bb8c093b
CH
3255 iwl4965_rt = (void *)rxb->skb->data;
3256 memmove(iwl4965_rt->payload, data, len);
b481de9c 3257
bb8c093b
CH
3258 iwl4965_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
3259 iwl4965_rt->rt_hdr.it_pad = 0; /* always good to zero */
b481de9c
ZY
3260
3261 /* total header + data */
bb8c093b 3262 iwl4965_rt->rt_hdr.it_len = cpu_to_le16(sizeof(*iwl4965_rt));
b481de9c
ZY
3263
3264 /* Set the size of the skb to the size of the frame */
bb8c093b 3265 skb_put(rxb->skb, sizeof(*iwl4965_rt) + len);
b481de9c
ZY
3266
3267 /* Big bitfield of all the fields we provide in radiotap */
bb8c093b 3268 iwl4965_rt->rt_hdr.it_present =
b481de9c
ZY
3269 cpu_to_le32((1 << IEEE80211_RADIOTAP_TSFT) |
3270 (1 << IEEE80211_RADIOTAP_FLAGS) |
3271 (1 << IEEE80211_RADIOTAP_RATE) |
3272 (1 << IEEE80211_RADIOTAP_CHANNEL) |
3273 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
3274 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
3275 (1 << IEEE80211_RADIOTAP_ANTENNA));
3276
3277 /* Zero the flags, we'll add to them as we go */
bb8c093b 3278 iwl4965_rt->rt_flags = 0;
b481de9c 3279
bb8c093b 3280 iwl4965_rt->rt_tsf = cpu_to_le64(tsf);
b481de9c
ZY
3281
3282 /* Convert to dBm */
bb8c093b
CH
3283 iwl4965_rt->rt_dbmsignal = signal;
3284 iwl4965_rt->rt_dbmnoise = noise;
b481de9c
ZY
3285
3286 /* Convert the channel frequency and set the flags */
bb8c093b 3287 iwl4965_rt->rt_channelMHz = cpu_to_le16(stats->freq);
b481de9c 3288 if (!(phy_flags_hw & RX_RES_PHY_FLAGS_BAND_24_MSK))
bb8c093b 3289 iwl4965_rt->rt_chbitmask =
b481de9c
ZY
3290 cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
3291 else if (phy_flags_hw & RX_RES_PHY_FLAGS_MOD_CCK_MSK)
bb8c093b 3292 iwl4965_rt->rt_chbitmask =
b481de9c
ZY
3293 cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
3294 else /* 802.11g */
bb8c093b 3295 iwl4965_rt->rt_chbitmask =
b481de9c
ZY
3296 cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ));
3297
bb8c093b 3298 rate = iwl4965_rate_index_from_plcp(rate);
b481de9c 3299 if (rate == -1)
bb8c093b 3300 iwl4965_rt->rt_rate = 0;
b481de9c 3301 else
bb8c093b 3302 iwl4965_rt->rt_rate = iwl4965_rates[rate].ieee;
b481de9c
ZY
3303
3304 /* antenna number */
bb8c093b 3305 iwl4965_rt->rt_antenna =
b481de9c
ZY
3306 le16_to_cpu(phy_flags_hw & RX_RES_PHY_FLAGS_ANTENNA_MSK) >> 4;
3307
3308 /* set the preamble flag if we have it */
3309 if (phy_flags_hw & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
bb8c093b 3310 iwl4965_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
b481de9c
ZY
3311
3312 IWL_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
3313
3314 stats->flag |= RX_FLAG_RADIOTAP;
3315 ieee80211_rx_irqsafe(priv->hw, rxb->skb, stats);
3316 rxb->skb = NULL;
3317}
3318
3319
3320#define IWL_PACKET_RETRY_TIME HZ
3321
bb8c093b 3322int iwl4965_is_duplicate_packet(struct iwl4965_priv *priv, struct ieee80211_hdr *header)
b481de9c
ZY
3323{
3324 u16 sc = le16_to_cpu(header->seq_ctrl);
3325 u16 seq = (sc & IEEE80211_SCTL_SEQ) >> 4;
3326 u16 frag = sc & IEEE80211_SCTL_FRAG;
3327 u16 *last_seq, *last_frag;
3328 unsigned long *last_time;
3329
3330 switch (priv->iw_mode) {
3331 case IEEE80211_IF_TYPE_IBSS:{
3332 struct list_head *p;
bb8c093b 3333 struct iwl4965_ibss_seq *entry = NULL;
b481de9c
ZY
3334 u8 *mac = header->addr2;
3335 int index = mac[5] & (IWL_IBSS_MAC_HASH_SIZE - 1);
3336
3337 __list_for_each(p, &priv->ibss_mac_hash[index]) {
bb8c093b 3338 entry = list_entry(p, struct iwl4965_ibss_seq, list);
b481de9c
ZY
3339 if (!compare_ether_addr(entry->mac, mac))
3340 break;
3341 }
3342 if (p == &priv->ibss_mac_hash[index]) {
3343 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
3344 if (!entry) {
bc434dd2 3345 IWL_ERROR("Cannot malloc new mac entry\n");
b481de9c
ZY
3346 return 0;
3347 }
3348 memcpy(entry->mac, mac, ETH_ALEN);
3349 entry->seq_num = seq;
3350 entry->frag_num = frag;
3351 entry->packet_time = jiffies;
bc434dd2 3352 list_add(&entry->list, &priv->ibss_mac_hash[index]);
b481de9c
ZY
3353 return 0;
3354 }
3355 last_seq = &entry->seq_num;
3356 last_frag = &entry->frag_num;
3357 last_time = &entry->packet_time;
3358 break;
3359 }
3360 case IEEE80211_IF_TYPE_STA:
3361 last_seq = &priv->last_seq_num;
3362 last_frag = &priv->last_frag_num;
3363 last_time = &priv->last_packet_time;
3364 break;
3365 default:
3366 return 0;
3367 }
3368 if ((*last_seq == seq) &&
3369 time_after(*last_time + IWL_PACKET_RETRY_TIME, jiffies)) {
3370 if (*last_frag == frag)
3371 goto drop;
3372 if (*last_frag + 1 != frag)
3373 /* out-of-order fragment */
3374 goto drop;
3375 } else
3376 *last_seq = seq;
3377
3378 *last_frag = frag;
3379 *last_time = jiffies;
3380 return 0;
3381
3382 drop:
3383 return 1;
3384}
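/* Editor's note -- illustrative example, not part of the driver source:
 * the seq/frag split above follows the 802.11 Sequence Control layout,
 * where bits 0-3 hold the fragment number and bits 4-15 the sequence
 * number (hence the IEEE80211_SCTL_FRAG/IEEE80211_SCTL_SEQ masks and the
 * ">> 4").  For a hypothetical sc = 0x01a3 this yields seq = 0x1a (26)
 * and frag = 0x3, so a repeat of the same fragment arriving within
 * IWL_PACKET_RETRY_TIME is dropped as a duplicate. */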
3385
c8b0e6e1 3386#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT
b481de9c
ZY
3387
3388#include "iwl-spectrum.h"
3389
3390#define BEACON_TIME_MASK_LOW 0x00FFFFFF
3391#define BEACON_TIME_MASK_HIGH 0xFF000000
3392#define TIME_UNIT 1024
3393
3394/*
3395 * extended beacon time format
3396 * time in usec will be changed into a 32-bit value in 8:24 format
 3397 * the high byte is the beacon count
 3398 * the lower 3 bytes are the time in usec within one beacon interval
3399 */
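/* Editor's note -- worked example with hypothetical numbers, not part of
 * the driver source: for a 100-TU beacon interval the interval is
 * 100 * 1024 = 102400 usec.  usec = 250000 then gives
 * quot = 250000 / 102400 = 2 beacons and rem = 45200 usec, so the packed
 * 8:24 value is (2 << 24) + 45200. */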
3400
bb8c093b 3401static u32 iwl4965_usecs_to_beacons(u32 usec, u32 beacon_interval)
b481de9c
ZY
3402{
3403 u32 quot;
3404 u32 rem;
3405 u32 interval = beacon_interval * 1024;
3406
3407 if (!interval || !usec)
3408 return 0;
3409
3410 quot = (usec / interval) & (BEACON_TIME_MASK_HIGH >> 24);
3411 rem = (usec % interval) & BEACON_TIME_MASK_LOW;
3412
3413 return (quot << 24) + rem;
3414}
3415
 3416/* base is usually the value we get from the uCode with each received frame;
 3417 * it reflects the HW timer counter, which counts down
3418 */
3419
bb8c093b 3420static __le32 iwl4965_add_beacon_time(u32 base, u32 addon, u32 beacon_interval)
b481de9c
ZY
3421{
3422 u32 base_low = base & BEACON_TIME_MASK_LOW;
3423 u32 addon_low = addon & BEACON_TIME_MASK_LOW;
3424 u32 interval = beacon_interval * TIME_UNIT;
3425 u32 res = (base & BEACON_TIME_MASK_HIGH) +
3426 (addon & BEACON_TIME_MASK_HIGH);
3427
3428 if (base_low > addon_low)
3429 res += base_low - addon_low;
3430 else if (base_low < addon_low) {
3431 res += interval + base_low - addon_low;
3432 res += (1 << 24);
3433 } else
3434 res += (1 << 24);
3435
3436 return cpu_to_le32(res);
3437}
3438
bb8c093b 3439static int iwl4965_get_measurement(struct iwl4965_priv *priv,
b481de9c
ZY
3440 struct ieee80211_measurement_params *params,
3441 u8 type)
3442{
bb8c093b
CH
3443 struct iwl4965_spectrum_cmd spectrum;
3444 struct iwl4965_rx_packet *res;
3445 struct iwl4965_host_cmd cmd = {
b481de9c
ZY
3446 .id = REPLY_SPECTRUM_MEASUREMENT_CMD,
3447 .data = (void *)&spectrum,
3448 .meta.flags = CMD_WANT_SKB,
3449 };
3450 u32 add_time = le64_to_cpu(params->start_time);
3451 int rc;
3452 int spectrum_resp_status;
3453 int duration = le16_to_cpu(params->duration);
3454
bb8c093b 3455 if (iwl4965_is_associated(priv))
b481de9c 3456 add_time =
bb8c093b 3457 iwl4965_usecs_to_beacons(
b481de9c
ZY
3458 le64_to_cpu(params->start_time) - priv->last_tsf,
3459 le16_to_cpu(priv->rxon_timing.beacon_interval));
3460
3461 memset(&spectrum, 0, sizeof(spectrum));
3462
3463 spectrum.channel_count = cpu_to_le16(1);
3464 spectrum.flags =
3465 RXON_FLG_TSF2HOST_MSK | RXON_FLG_ANT_A_MSK | RXON_FLG_DIS_DIV_MSK;
3466 spectrum.filter_flags = MEASUREMENT_FILTER_FLAG;
3467 cmd.len = sizeof(spectrum);
3468 spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len));
3469
bb8c093b 3470 if (iwl4965_is_associated(priv))
b481de9c 3471 spectrum.start_time =
bb8c093b 3472 iwl4965_add_beacon_time(priv->last_beacon_time,
b481de9c
ZY
3473 add_time,
3474 le16_to_cpu(priv->rxon_timing.beacon_interval));
3475 else
3476 spectrum.start_time = 0;
3477
3478 spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT);
3479 spectrum.channels[0].channel = params->channel;
3480 spectrum.channels[0].type = type;
3481 if (priv->active_rxon.flags & RXON_FLG_BAND_24G_MSK)
3482 spectrum.flags |= RXON_FLG_BAND_24G_MSK |
3483 RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK;
3484
bb8c093b 3485 rc = iwl4965_send_cmd_sync(priv, &cmd);
b481de9c
ZY
3486 if (rc)
3487 return rc;
3488
bb8c093b 3489 res = (struct iwl4965_rx_packet *)cmd.meta.u.skb->data;
b481de9c
ZY
3490 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
 3491 		IWL_ERROR("Bad return from REPLY_SPECTRUM_MEASUREMENT_CMD\n");
3492 rc = -EIO;
3493 }
3494
3495 spectrum_resp_status = le16_to_cpu(res->u.spectrum.status);
3496 switch (spectrum_resp_status) {
3497 case 0: /* Command will be handled */
3498 if (res->u.spectrum.id != 0xff) {
3499 IWL_DEBUG_INFO
3500 ("Replaced existing measurement: %d\n",
3501 res->u.spectrum.id);
3502 priv->measurement_status &= ~MEASUREMENT_READY;
3503 }
3504 priv->measurement_status |= MEASUREMENT_ACTIVE;
3505 rc = 0;
3506 break;
3507
3508 case 1: /* Command will not be handled */
3509 rc = -EAGAIN;
3510 break;
3511 }
3512
3513 dev_kfree_skb_any(cmd.meta.u.skb);
3514
3515 return rc;
3516}
3517#endif
3518
bb8c093b
CH
3519static void iwl4965_txstatus_to_ieee(struct iwl4965_priv *priv,
3520 struct iwl4965_tx_info *tx_sta)
b481de9c
ZY
3521{
3522
3523 tx_sta->status.ack_signal = 0;
3524 tx_sta->status.excessive_retries = 0;
3525 tx_sta->status.queue_length = 0;
3526 tx_sta->status.queue_number = 0;
3527
3528 if (in_interrupt())
3529 ieee80211_tx_status_irqsafe(priv->hw,
3530 tx_sta->skb[0], &(tx_sta->status));
3531 else
3532 ieee80211_tx_status(priv->hw,
3533 tx_sta->skb[0], &(tx_sta->status));
3534
3535 tx_sta->skb[0] = NULL;
3536}
3537
3538/**
6440adb5 3539 * iwl4965_tx_queue_reclaim - Reclaim Tx queue entries already Tx'd
b481de9c 3540 *
6440adb5
CB
3541 * When FW advances 'R' index, all entries between old and new 'R' index
 3542 * need to be reclaimed. As a result, some free space forms. If there is
3543 * enough free space (> low mark), wake the stack that feeds us.
b481de9c 3544 */
bb8c093b 3545int iwl4965_tx_queue_reclaim(struct iwl4965_priv *priv, int txq_id, int index)
b481de9c 3546{
bb8c093b
CH
3547 struct iwl4965_tx_queue *txq = &priv->txq[txq_id];
3548 struct iwl4965_queue *q = &txq->q;
b481de9c
ZY
3549 int nfreed = 0;
3550
3551 if ((index >= q->n_bd) || (x2_queue_used(q, index) == 0)) {
3552 IWL_ERROR("Read index for DMA queue txq id (%d), index %d, "
3553 "is out of range [0-%d] %d %d.\n", txq_id,
fc4b6853 3554 index, q->n_bd, q->write_ptr, q->read_ptr);
b481de9c
ZY
3555 return 0;
3556 }
3557
bb8c093b 3558 for (index = iwl4965_queue_inc_wrap(index, q->n_bd);
fc4b6853 3559 q->read_ptr != index;
bb8c093b 3560 q->read_ptr = iwl4965_queue_inc_wrap(q->read_ptr, q->n_bd)) {
b481de9c 3561 if (txq_id != IWL_CMD_QUEUE_NUM) {
bb8c093b 3562 iwl4965_txstatus_to_ieee(priv,
fc4b6853 3563 &(txq->txb[txq->q.read_ptr]));
bb8c093b 3564 iwl4965_hw_txq_free_tfd(priv, txq);
b481de9c
ZY
3565 } else if (nfreed > 1) {
3566 IWL_ERROR("HCMD skipped: index (%d) %d %d\n", index,
fc4b6853 3567 q->write_ptr, q->read_ptr);
b481de9c
ZY
3568 queue_work(priv->workqueue, &priv->restart);
3569 }
3570 nfreed++;
3571 }
3572
bb8c093b 3573 if (iwl4965_queue_space(q) > q->low_mark && (txq_id >= 0) &&
b481de9c
ZY
3574 (txq_id != IWL_CMD_QUEUE_NUM) &&
3575 priv->mac80211_registered)
3576 ieee80211_wake_queue(priv->hw, txq_id);
3577
3578
3579 return nfreed;
3580}
3581
bb8c093b 3582static int iwl4965_is_tx_success(u32 status)
b481de9c
ZY
3583{
3584 status &= TX_STATUS_MSK;
3585 return (status == TX_STATUS_SUCCESS)
3586 || (status == TX_STATUS_DIRECT_DONE);
3587}
3588
3589/******************************************************************************
3590 *
3591 * Generic RX handler implementations
3592 *
3593 ******************************************************************************/
c8b0e6e1
CH
3594#ifdef CONFIG_IWL4965_HT
3595#ifdef CONFIG_IWL4965_HT_AGG
b481de9c 3596
bb8c093b 3597static inline int iwl4965_get_ra_sta_id(struct iwl4965_priv *priv,
b481de9c
ZY
3598 struct ieee80211_hdr *hdr)
3599{
3600 if (priv->iw_mode == IEEE80211_IF_TYPE_STA)
3601 return IWL_AP_ID;
3602 else {
3603 u8 *da = ieee80211_get_DA(hdr);
bb8c093b 3604 return iwl4965_hw_find_station(priv, da);
b481de9c
ZY
3605 }
3606}
3607
bb8c093b
CH
3608static struct ieee80211_hdr *iwl4965_tx_queue_get_hdr(
3609 struct iwl4965_priv *priv, int txq_id, int idx)
b481de9c
ZY
3610{
3611 if (priv->txq[txq_id].txb[idx].skb[0])
3612 return (struct ieee80211_hdr *)priv->txq[txq_id].
3613 txb[idx].skb[0]->data;
3614 return NULL;
3615}
3616
bb8c093b 3617static inline u32 iwl4965_get_scd_ssn(struct iwl4965_tx_resp *tx_resp)
b481de9c
ZY
3618{
3619 __le32 *scd_ssn = (__le32 *)((u32 *)&tx_resp->status +
3620 tx_resp->frame_count);
3621 return le32_to_cpu(*scd_ssn) & MAX_SN;
3622
3623}
6440adb5
CB
3624
3625/**
 3626 * iwl4965_tx_status_reply_tx - Handle Tx response for frames in aggregation queue
3627 */
bb8c093b
CH
3628static int iwl4965_tx_status_reply_tx(struct iwl4965_priv *priv,
3629 struct iwl4965_ht_agg *agg,
3630 struct iwl4965_tx_resp *tx_resp,
b481de9c
ZY
3631 u16 start_idx)
3632{
3633 u32 status;
3634 __le32 *frame_status = &tx_resp->status;
3635 struct ieee80211_tx_status *tx_status = NULL;
3636 struct ieee80211_hdr *hdr = NULL;
3637 int i, sh;
3638 int txq_id, idx;
3639 u16 seq;
3640
3641 if (agg->wait_for_ba)
6440adb5 3642 IWL_DEBUG_TX_REPLY("got tx response w/o block-ack\n");
b481de9c
ZY
3643
3644 agg->frame_count = tx_resp->frame_count;
3645 agg->start_idx = start_idx;
3646 agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
3647 agg->bitmap0 = agg->bitmap1 = 0;
3648
6440adb5 3649 /* # frames attempted by Tx command */
b481de9c 3650 if (agg->frame_count == 1) {
6440adb5 3651 /* Only one frame was attempted; no block-ack will arrive */
bb8c093b 3652 struct iwl4965_tx_queue *txq ;
b481de9c
ZY
3653 status = le32_to_cpu(frame_status[0]);
3654
3655 txq_id = agg->txq_id;
3656 txq = &priv->txq[txq_id];
3657 /* FIXME: code repetition */
3658 IWL_DEBUG_TX_REPLY("FrameCnt = %d, StartIdx=%d \n",
3659 agg->frame_count, agg->start_idx);
3660
fc4b6853 3661 tx_status = &(priv->txq[txq_id].txb[txq->q.read_ptr].status);
b481de9c
ZY
3662 tx_status->retry_count = tx_resp->failure_frame;
3663 tx_status->queue_number = status & 0xff;
3664 tx_status->queue_length = tx_resp->bt_kill_count;
3665 tx_status->queue_length |= tx_resp->failure_rts;
3666
bb8c093b 3667 tx_status->flags = iwl4965_is_tx_success(status)?
b481de9c
ZY
3668 IEEE80211_TX_STATUS_ACK : 0;
3669 tx_status->control.tx_rate =
bb8c093b 3670 iwl4965_hw_get_rate_n_flags(tx_resp->rate_n_flags);
b481de9c
ZY
3671 /* FIXME: code repetition end */
3672
3673 IWL_DEBUG_TX_REPLY("1 Frame 0x%x failure :%d\n",
3674 status & 0xff, tx_resp->failure_frame);
3675 IWL_DEBUG_TX_REPLY("Rate Info rate_n_flags=%x\n",
bb8c093b 3676 iwl4965_hw_get_rate_n_flags(tx_resp->rate_n_flags));
b481de9c
ZY
3677
3678 agg->wait_for_ba = 0;
3679 } else {
6440adb5 3680 /* Two or more frames were attempted; expect block-ack */
b481de9c
ZY
3681 u64 bitmap = 0;
3682 int start = agg->start_idx;
3683
6440adb5 3684 /* Construct bit-map of pending frames within Tx window */
b481de9c
ZY
3685 for (i = 0; i < agg->frame_count; i++) {
3686 u16 sc;
3687 status = le32_to_cpu(frame_status[i]);
3688 seq = status >> 16;
3689 idx = SEQ_TO_INDEX(seq);
3690 txq_id = SEQ_TO_QUEUE(seq);
3691
3692 if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
3693 AGG_TX_STATE_ABORT_MSK))
3694 continue;
3695
3696 IWL_DEBUG_TX_REPLY("FrameCnt = %d, txq_id=%d idx=%d\n",
3697 agg->frame_count, txq_id, idx);
3698
bb8c093b 3699 hdr = iwl4965_tx_queue_get_hdr(priv, txq_id, idx);
b481de9c
ZY
3700
3701 sc = le16_to_cpu(hdr->seq_ctrl);
3702 if (idx != (SEQ_TO_SN(sc) & 0xff)) {
3703 IWL_ERROR("BUG_ON idx doesn't match seq control"
3704 " idx=%d, seq_idx=%d, seq=%d\n",
3705 idx, SEQ_TO_SN(sc),
3706 hdr->seq_ctrl);
3707 return -1;
3708 }
3709
3710 IWL_DEBUG_TX_REPLY("AGG Frame i=%d idx %d seq=%d\n",
3711 i, idx, SEQ_TO_SN(sc));
3712
3713 sh = idx - start;
3714 if (sh > 64) {
3715 sh = (start - idx) + 0xff;
3716 bitmap = bitmap << sh;
3717 sh = 0;
3718 start = idx;
3719 } else if (sh < -64)
3720 sh = 0xff - (start - idx);
3721 else if (sh < 0) {
3722 sh = start - idx;
3723 start = idx;
3724 bitmap = bitmap << sh;
3725 sh = 0;
3726 }
3727 bitmap |= (1 << sh);
3728 IWL_DEBUG_TX_REPLY("start=%d bitmap=0x%x\n",
3729 start, (u32)(bitmap & 0xFFFFFFFF));
3730 }
3731
3732 agg->bitmap0 = bitmap & 0xFFFFFFFF;
3733 agg->bitmap1 = bitmap >> 32;
3734 agg->start_idx = start;
3735 agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
3736 IWL_DEBUG_TX_REPLY("Frames %d start_idx=%d bitmap=0x%x\n",
3737 agg->frame_count, agg->start_idx,
3738 agg->bitmap0);
3739
3740 if (bitmap)
3741 agg->wait_for_ba = 1;
3742 }
3743 return 0;
3744}
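/* Editor's note -- illustrative trace with hypothetical indexes, not part
 * of the driver source: with start = 10 and frames reported at idx 10, 11
 * and 13, the shifts sh come out as 0, 1 and 3, so the pending-frame
 * bitmap built above becomes 0b1011 with start_idx left at 10. */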
3745#endif
3746#endif
3747
6440adb5
CB
3748/**
3749 * iwl4965_rx_reply_tx - Handle standard (non-aggregation) Tx response
3750 */
bb8c093b
CH
3751static void iwl4965_rx_reply_tx(struct iwl4965_priv *priv,
3752 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3753{
bb8c093b 3754 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
b481de9c
ZY
3755 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
3756 int txq_id = SEQ_TO_QUEUE(sequence);
3757 int index = SEQ_TO_INDEX(sequence);
bb8c093b 3758 struct iwl4965_tx_queue *txq = &priv->txq[txq_id];
b481de9c 3759 struct ieee80211_tx_status *tx_status;
bb8c093b 3760 struct iwl4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
b481de9c 3761 u32 status = le32_to_cpu(tx_resp->status);
c8b0e6e1
CH
3762#ifdef CONFIG_IWL4965_HT
3763#ifdef CONFIG_IWL4965_HT_AGG
b481de9c
ZY
3764 int tid, sta_id;
3765#endif
3766#endif
3767
3768 if ((index >= txq->q.n_bd) || (x2_queue_used(&txq->q, index) == 0)) {
3769 IWL_ERROR("Read index for DMA queue txq_id (%d) index %d "
3770 "is out of range [0-%d] %d %d\n", txq_id,
fc4b6853
TW
3771 index, txq->q.n_bd, txq->q.write_ptr,
3772 txq->q.read_ptr);
b481de9c
ZY
3773 return;
3774 }
3775
c8b0e6e1
CH
3776#ifdef CONFIG_IWL4965_HT
3777#ifdef CONFIG_IWL4965_HT_AGG
b481de9c 3778 if (txq->sched_retry) {
bb8c093b 3779 const u32 scd_ssn = iwl4965_get_scd_ssn(tx_resp);
b481de9c 3780 struct ieee80211_hdr *hdr =
bb8c093b
CH
3781 iwl4965_tx_queue_get_hdr(priv, txq_id, index);
3782 struct iwl4965_ht_agg *agg = NULL;
b481de9c
ZY
3783 __le16 *qc = ieee80211_get_qos_ctrl(hdr);
3784
3785 if (qc == NULL) {
3786 IWL_ERROR("BUG_ON qc is null!!!!\n");
3787 return;
3788 }
3789
3790 tid = le16_to_cpu(*qc) & 0xf;
3791
bb8c093b 3792 sta_id = iwl4965_get_ra_sta_id(priv, hdr);
b481de9c
ZY
3793 if (unlikely(sta_id == IWL_INVALID_STATION)) {
 3794 			IWL_ERROR("Station not known\n");
3795 return;
3796 }
3797
3798 agg = &priv->stations[sta_id].tid[tid].agg;
3799
3800 iwl4965_tx_status_reply_tx(priv, agg, tx_resp, index);
3801
3802 if ((tx_resp->frame_count == 1) &&
bb8c093b 3803 !iwl4965_is_tx_success(status)) {
b481de9c
ZY
3804 /* TODO: send BAR */
3805 }
3806
fc4b6853 3807 if ((txq->q.read_ptr != (scd_ssn & 0xff))) {
bb8c093b 3808 index = iwl4965_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
b481de9c
ZY
3809 IWL_DEBUG_TX_REPLY("Retry scheduler reclaim scd_ssn "
3810 "%d index %d\n", scd_ssn , index);
bb8c093b 3811 iwl4965_tx_queue_reclaim(priv, txq_id, index);
b481de9c
ZY
3812 }
3813 } else {
c8b0e6e1
CH
3814#endif /* CONFIG_IWL4965_HT_AGG */
3815#endif /* CONFIG_IWL4965_HT */
fc4b6853 3816 tx_status = &(txq->txb[txq->q.read_ptr].status);
b481de9c
ZY
3817
3818 tx_status->retry_count = tx_resp->failure_frame;
3819 tx_status->queue_number = status;
3820 tx_status->queue_length = tx_resp->bt_kill_count;
3821 tx_status->queue_length |= tx_resp->failure_rts;
3822
3823 tx_status->flags =
bb8c093b 3824 iwl4965_is_tx_success(status) ? IEEE80211_TX_STATUS_ACK : 0;
b481de9c
ZY
3825
3826 tx_status->control.tx_rate =
bb8c093b 3827 iwl4965_hw_get_rate_n_flags(tx_resp->rate_n_flags);
b481de9c
ZY
3828
3829 IWL_DEBUG_TX("Tx queue %d Status %s (0x%08x) rate_n_flags 0x%x "
bb8c093b 3830 "retries %d\n", txq_id, iwl4965_get_tx_fail_reason(status),
b481de9c
ZY
3831 status, le32_to_cpu(tx_resp->rate_n_flags),
3832 tx_resp->failure_frame);
3833
3834 IWL_DEBUG_TX_REPLY("Tx queue reclaim %d\n", index);
3835 if (index != -1)
bb8c093b 3836 iwl4965_tx_queue_reclaim(priv, txq_id, index);
c8b0e6e1
CH
3837#ifdef CONFIG_IWL4965_HT
3838#ifdef CONFIG_IWL4965_HT_AGG
b481de9c 3839 }
c8b0e6e1
CH
3840#endif /* CONFIG_IWL4965_HT_AGG */
3841#endif /* CONFIG_IWL4965_HT */
b481de9c
ZY
3842
3843 if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK))
3844 IWL_ERROR("TODO: Implement Tx ABORT REQUIRED!!!\n");
3845}
3846
3847
bb8c093b
CH
3848static void iwl4965_rx_reply_alive(struct iwl4965_priv *priv,
3849 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3850{
bb8c093b
CH
3851 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3852 struct iwl4965_alive_resp *palive;
b481de9c
ZY
3853 struct delayed_work *pwork;
3854
3855 palive = &pkt->u.alive_frame;
3856
3857 IWL_DEBUG_INFO("Alive ucode status 0x%08X revision "
3858 "0x%01X 0x%01X\n",
3859 palive->is_valid, palive->ver_type,
3860 palive->ver_subtype);
3861
3862 if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
3863 IWL_DEBUG_INFO("Initialization Alive received.\n");
3864 memcpy(&priv->card_alive_init,
3865 &pkt->u.alive_frame,
bb8c093b 3866 sizeof(struct iwl4965_init_alive_resp));
b481de9c
ZY
3867 pwork = &priv->init_alive_start;
3868 } else {
3869 IWL_DEBUG_INFO("Runtime Alive received.\n");
3870 memcpy(&priv->card_alive, &pkt->u.alive_frame,
bb8c093b 3871 sizeof(struct iwl4965_alive_resp));
b481de9c
ZY
3872 pwork = &priv->alive_start;
3873 }
3874
3875 /* We delay the ALIVE response by 5ms to
3876 * give the HW RF Kill time to activate... */
3877 if (palive->is_valid == UCODE_VALID_OK)
3878 queue_delayed_work(priv->workqueue, pwork,
3879 msecs_to_jiffies(5));
3880 else
3881 IWL_WARNING("uCode did not respond OK.\n");
3882}
3883
bb8c093b
CH
3884static void iwl4965_rx_reply_add_sta(struct iwl4965_priv *priv,
3885 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3886{
bb8c093b 3887 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
b481de9c
ZY
3888
3889 IWL_DEBUG_RX("Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status);
3890 return;
3891}
3892
bb8c093b
CH
3893static void iwl4965_rx_reply_error(struct iwl4965_priv *priv,
3894 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3895{
bb8c093b 3896 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
b481de9c
ZY
3897
3898 IWL_ERROR("Error Reply type 0x%08X cmd %s (0x%02X) "
3899 "seq 0x%04X ser 0x%08X\n",
3900 le32_to_cpu(pkt->u.err_resp.error_type),
3901 get_cmd_string(pkt->u.err_resp.cmd_id),
3902 pkt->u.err_resp.cmd_id,
3903 le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
3904 le32_to_cpu(pkt->u.err_resp.error_info));
3905}
3906
3907#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x
3908
bb8c093b 3909static void iwl4965_rx_csa(struct iwl4965_priv *priv, struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3910{
bb8c093b
CH
3911 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3912 struct iwl4965_rxon_cmd *rxon = (void *)&priv->active_rxon;
3913 struct iwl4965_csa_notification *csa = &(pkt->u.csa_notif);
b481de9c
ZY
3914 IWL_DEBUG_11H("CSA notif: channel %d, status %d\n",
3915 le16_to_cpu(csa->channel), le32_to_cpu(csa->status));
3916 rxon->channel = csa->channel;
3917 priv->staging_rxon.channel = csa->channel;
3918}
3919
bb8c093b
CH
3920static void iwl4965_rx_spectrum_measure_notif(struct iwl4965_priv *priv,
3921 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3922{
c8b0e6e1 3923#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT
bb8c093b
CH
3924 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3925 struct iwl4965_spectrum_notification *report = &(pkt->u.spectrum_notif);
b481de9c
ZY
3926
3927 if (!report->state) {
3928 IWL_DEBUG(IWL_DL_11H | IWL_DL_INFO,
3929 "Spectrum Measure Notification: Start\n");
3930 return;
3931 }
3932
3933 memcpy(&priv->measure_report, report, sizeof(*report));
3934 priv->measurement_status |= MEASUREMENT_READY;
3935#endif
3936}
3937
bb8c093b
CH
3938static void iwl4965_rx_pm_sleep_notif(struct iwl4965_priv *priv,
3939 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3940{
c8b0e6e1 3941#ifdef CONFIG_IWL4965_DEBUG
bb8c093b
CH
3942 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3943 struct iwl4965_sleep_notification *sleep = &(pkt->u.sleep_notif);
b481de9c
ZY
3944 IWL_DEBUG_RX("sleep mode: %d, src: %d\n",
3945 sleep->pm_sleep_mode, sleep->pm_wakeup_src);
3946#endif
3947}
3948
bb8c093b
CH
3949static void iwl4965_rx_pm_debug_statistics_notif(struct iwl4965_priv *priv,
3950 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3951{
bb8c093b 3952 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
b481de9c
ZY
3953 IWL_DEBUG_RADIO("Dumping %d bytes of unhandled "
3954 "notification for %s:\n",
3955 le32_to_cpu(pkt->len), get_cmd_string(pkt->hdr.cmd));
bb8c093b 3956 iwl4965_print_hex_dump(IWL_DL_RADIO, pkt->u.raw, le32_to_cpu(pkt->len));
b481de9c
ZY
3957}
3958
bb8c093b 3959static void iwl4965_bg_beacon_update(struct work_struct *work)
b481de9c 3960{
bb8c093b
CH
3961 struct iwl4965_priv *priv =
3962 container_of(work, struct iwl4965_priv, beacon_update);
b481de9c
ZY
3963 struct sk_buff *beacon;
3964
 3965 	/* Pull the updated AP beacon from mac80211; this will fail if not in AP mode */
3966 beacon = ieee80211_beacon_get(priv->hw, priv->interface_id, NULL);
3967
3968 if (!beacon) {
3969 IWL_ERROR("update beacon failed\n");
3970 return;
3971 }
3972
3973 mutex_lock(&priv->mutex);
 3974 	/* A new beacon skb is allocated every time; dispose of the previous one. */
3975 if (priv->ibss_beacon)
3976 dev_kfree_skb(priv->ibss_beacon);
3977
3978 priv->ibss_beacon = beacon;
3979 mutex_unlock(&priv->mutex);
3980
bb8c093b 3981 iwl4965_send_beacon_cmd(priv);
b481de9c
ZY
3982}
3983
bb8c093b
CH
3984static void iwl4965_rx_beacon_notif(struct iwl4965_priv *priv,
3985 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 3986{
c8b0e6e1 3987#ifdef CONFIG_IWL4965_DEBUG
bb8c093b
CH
3988 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3989 struct iwl4965_beacon_notif *beacon = &(pkt->u.beacon_status);
3990 u8 rate = iwl4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
b481de9c
ZY
3991
3992 IWL_DEBUG_RX("beacon status %x retries %d iss %d "
3993 "tsf %d %d rate %d\n",
3994 le32_to_cpu(beacon->beacon_notify_hdr.status) & TX_STATUS_MSK,
3995 beacon->beacon_notify_hdr.failure_frame,
3996 le32_to_cpu(beacon->ibss_mgr_status),
3997 le32_to_cpu(beacon->high_tsf),
3998 le32_to_cpu(beacon->low_tsf), rate);
3999#endif
4000
4001 if ((priv->iw_mode == IEEE80211_IF_TYPE_AP) &&
4002 (!test_bit(STATUS_EXIT_PENDING, &priv->status)))
4003 queue_work(priv->workqueue, &priv->beacon_update);
4004}
4005
4006/* Service response to REPLY_SCAN_CMD (0x80) */
bb8c093b
CH
4007static void iwl4965_rx_reply_scan(struct iwl4965_priv *priv,
4008 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 4009{
c8b0e6e1 4010#ifdef CONFIG_IWL4965_DEBUG
bb8c093b
CH
4011 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
4012 struct iwl4965_scanreq_notification *notif =
4013 (struct iwl4965_scanreq_notification *)pkt->u.raw;
b481de9c
ZY
4014
4015 IWL_DEBUG_RX("Scan request status = 0x%x\n", notif->status);
4016#endif
4017}
4018
4019/* Service SCAN_START_NOTIFICATION (0x82) */
bb8c093b
CH
4020static void iwl4965_rx_scan_start_notif(struct iwl4965_priv *priv,
4021 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 4022{
bb8c093b
CH
4023 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
4024 struct iwl4965_scanstart_notification *notif =
4025 (struct iwl4965_scanstart_notification *)pkt->u.raw;
b481de9c
ZY
4026 priv->scan_start_tsf = le32_to_cpu(notif->tsf_low);
4027 IWL_DEBUG_SCAN("Scan start: "
4028 "%d [802.11%s] "
4029 "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n",
4030 notif->channel,
4031 notif->band ? "bg" : "a",
4032 notif->tsf_high,
4033 notif->tsf_low, notif->status, notif->beacon_timer);
4034}
4035
4036/* Service SCAN_RESULTS_NOTIFICATION (0x83) */
bb8c093b
CH
4037static void iwl4965_rx_scan_results_notif(struct iwl4965_priv *priv,
4038 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 4039{
bb8c093b
CH
4040 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
4041 struct iwl4965_scanresults_notification *notif =
4042 (struct iwl4965_scanresults_notification *)pkt->u.raw;
b481de9c
ZY
4043
4044 IWL_DEBUG_SCAN("Scan ch.res: "
4045 "%d [802.11%s] "
4046 "(TSF: 0x%08X:%08X) - %d "
4047 "elapsed=%lu usec (%dms since last)\n",
4048 notif->channel,
4049 notif->band ? "bg" : "a",
4050 le32_to_cpu(notif->tsf_high),
4051 le32_to_cpu(notif->tsf_low),
4052 le32_to_cpu(notif->statistics[0]),
4053 le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf,
4054 jiffies_to_msecs(elapsed_jiffies
4055 (priv->last_scan_jiffies, jiffies)));
4056
4057 priv->last_scan_jiffies = jiffies;
4058}
4059
4060/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
bb8c093b
CH
4061static void iwl4965_rx_scan_complete_notif(struct iwl4965_priv *priv,
4062 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 4063{
bb8c093b
CH
4064 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
4065 struct iwl4965_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
b481de9c
ZY
4066
4067 IWL_DEBUG_SCAN("Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
4068 scan_notif->scanned_channels,
4069 scan_notif->tsf_low,
4070 scan_notif->tsf_high, scan_notif->status);
4071
4072 /* The HW is no longer scanning */
4073 clear_bit(STATUS_SCAN_HW, &priv->status);
4074
4075 /* The scan completion notification came in, so kill that timer... */
4076 cancel_delayed_work(&priv->scan_check);
4077
4078 IWL_DEBUG_INFO("Scan pass on %sGHz took %dms\n",
4079 (priv->scan_bands == 2) ? "2.4" : "5.2",
4080 jiffies_to_msecs(elapsed_jiffies
4081 (priv->scan_pass_start, jiffies)));
4082
4083 /* Remove this scanned band from the list
4084 * of pending bands to scan */
4085 priv->scan_bands--;
4086
4087 /* If a request to abort was given, or the scan did not succeed
4088 * then we reset the scan state machine and terminate,
4089 * re-queuing another scan if one has been requested */
4090 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
4091 IWL_DEBUG_INFO("Aborted scan completed.\n");
4092 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
4093 } else {
4094 /* If there are more bands on this scan pass reschedule */
4095 if (priv->scan_bands > 0)
4096 goto reschedule;
4097 }
4098
4099 priv->last_scan_jiffies = jiffies;
4100 IWL_DEBUG_INFO("Setting scan to off\n");
4101
4102 clear_bit(STATUS_SCANNING, &priv->status);
4103
4104 IWL_DEBUG_INFO("Scan took %dms\n",
4105 jiffies_to_msecs(elapsed_jiffies(priv->scan_start, jiffies)));
4106
4107 queue_work(priv->workqueue, &priv->scan_completed);
4108
4109 return;
4110
4111reschedule:
4112 priv->scan_pass_start = jiffies;
4113 queue_work(priv->workqueue, &priv->request_scan);
4114}
4115
4116/* Handle notification from uCode that card's power state is changing
4117 * due to software, hardware, or critical temperature RFKILL */
bb8c093b
CH
4118static void iwl4965_rx_card_state_notif(struct iwl4965_priv *priv,
4119 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 4120{
bb8c093b 4121 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
b481de9c
ZY
4122 u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
4123 unsigned long status = priv->status;
4124
4125 IWL_DEBUG_RF_KILL("Card state received: HW:%s SW:%s\n",
4126 (flags & HW_CARD_DISABLED) ? "Kill" : "On",
4127 (flags & SW_CARD_DISABLED) ? "Kill" : "On");
4128
4129 if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
4130 RF_CARD_DISABLED)) {
4131
bb8c093b 4132 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_SET,
b481de9c
ZY
4133 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
4134
bb8c093b
CH
4135 if (!iwl4965_grab_nic_access(priv)) {
4136 iwl4965_write_direct32(
b481de9c
ZY
4137 priv, HBUS_TARG_MBX_C,
4138 HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
4139
bb8c093b 4140 iwl4965_release_nic_access(priv);
b481de9c
ZY
4141 }
4142
4143 if (!(flags & RXON_CARD_DISABLED)) {
bb8c093b 4144 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_CLR,
b481de9c 4145 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
bb8c093b
CH
4146 if (!iwl4965_grab_nic_access(priv)) {
4147 iwl4965_write_direct32(
b481de9c
ZY
4148 priv, HBUS_TARG_MBX_C,
4149 HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
4150
bb8c093b 4151 iwl4965_release_nic_access(priv);
b481de9c
ZY
4152 }
4153 }
4154
4155 if (flags & RF_CARD_DISABLED) {
bb8c093b 4156 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_SET,
b481de9c 4157 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
bb8c093b
CH
4158 iwl4965_read32(priv, CSR_UCODE_DRV_GP1);
4159 if (!iwl4965_grab_nic_access(priv))
4160 iwl4965_release_nic_access(priv);
b481de9c
ZY
4161 }
4162 }
4163
4164 if (flags & HW_CARD_DISABLED)
4165 set_bit(STATUS_RF_KILL_HW, &priv->status);
4166 else
4167 clear_bit(STATUS_RF_KILL_HW, &priv->status);
4168
4169
4170 if (flags & SW_CARD_DISABLED)
4171 set_bit(STATUS_RF_KILL_SW, &priv->status);
4172 else
4173 clear_bit(STATUS_RF_KILL_SW, &priv->status);
4174
4175 if (!(flags & RXON_CARD_DISABLED))
bb8c093b 4176 iwl4965_scan_cancel(priv);
b481de9c
ZY
4177
4178 if ((test_bit(STATUS_RF_KILL_HW, &status) !=
4179 test_bit(STATUS_RF_KILL_HW, &priv->status)) ||
4180 (test_bit(STATUS_RF_KILL_SW, &status) !=
4181 test_bit(STATUS_RF_KILL_SW, &priv->status)))
4182 queue_work(priv->workqueue, &priv->rf_kill);
4183 else
4184 wake_up_interruptible(&priv->wait_command_queue);
4185}
4186
4187/**
bb8c093b 4188 * iwl4965_setup_rx_handlers - Initialize Rx handler callbacks
b481de9c
ZY
4189 *
4190 * Setup the RX handlers for each of the reply types sent from the uCode
4191 * to the host.
4192 *
4193 * This function chains into the hardware specific files for them to setup
4194 * any hardware specific handlers as well.
4195 */
bb8c093b 4196static void iwl4965_setup_rx_handlers(struct iwl4965_priv *priv)
b481de9c 4197{
bb8c093b
CH
4198 priv->rx_handlers[REPLY_ALIVE] = iwl4965_rx_reply_alive;
4199 priv->rx_handlers[REPLY_ADD_STA] = iwl4965_rx_reply_add_sta;
4200 priv->rx_handlers[REPLY_ERROR] = iwl4965_rx_reply_error;
4201 priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl4965_rx_csa;
b481de9c 4202 priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
bb8c093b
CH
4203 iwl4965_rx_spectrum_measure_notif;
4204 priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl4965_rx_pm_sleep_notif;
b481de9c 4205 priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
bb8c093b
CH
4206 iwl4965_rx_pm_debug_statistics_notif;
4207 priv->rx_handlers[BEACON_NOTIFICATION] = iwl4965_rx_beacon_notif;
b481de9c 4208
9fbab516
BC
4209 /*
4210 * The same handler is used for both the REPLY to a discrete
4211 * statistics request from the host as well as for the periodic
4212 * statistics notifications (after received beacons) from the uCode.
b481de9c 4213 */
bb8c093b
CH
4214 priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl4965_hw_rx_statistics;
4215 priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl4965_hw_rx_statistics;
b481de9c 4216
bb8c093b
CH
4217 priv->rx_handlers[REPLY_SCAN_CMD] = iwl4965_rx_reply_scan;
4218 priv->rx_handlers[SCAN_START_NOTIFICATION] = iwl4965_rx_scan_start_notif;
b481de9c 4219 priv->rx_handlers[SCAN_RESULTS_NOTIFICATION] =
bb8c093b 4220 iwl4965_rx_scan_results_notif;
b481de9c 4221 priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] =
bb8c093b
CH
4222 iwl4965_rx_scan_complete_notif;
4223 priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl4965_rx_card_state_notif;
4224 priv->rx_handlers[REPLY_TX] = iwl4965_rx_reply_tx;
b481de9c 4225
9fbab516 4226 /* Set up hardware specific Rx handlers */
bb8c093b 4227 iwl4965_hw_rx_handler_setup(priv);
b481de9c
ZY
4228}
4229
4230/**
bb8c093b 4231 * iwl4965_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
b481de9c
ZY
4232 * @rxb: Rx buffer to reclaim
4233 *
4234 * If an Rx buffer has an async callback associated with it the callback
4235 * will be executed. The attached skb (if present) will only be freed
4236 * if the callback returns 1
4237 */
bb8c093b
CH
4238static void iwl4965_tx_cmd_complete(struct iwl4965_priv *priv,
4239 struct iwl4965_rx_mem_buffer *rxb)
b481de9c 4240{
bb8c093b 4241 struct iwl4965_rx_packet *pkt = (struct iwl4965_rx_packet *)rxb->skb->data;
b481de9c
ZY
4242 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
4243 int txq_id = SEQ_TO_QUEUE(sequence);
4244 int index = SEQ_TO_INDEX(sequence);
4245 int huge = sequence & SEQ_HUGE_FRAME;
4246 int cmd_index;
bb8c093b 4247 struct iwl4965_cmd *cmd;
b481de9c
ZY
4248
4249 /* If a Tx command is being handled and it isn't in the actual
 4250 	 * command queue then a command routing bug has been introduced
4251 * in the queue management code. */
4252 if (txq_id != IWL_CMD_QUEUE_NUM)
4253 IWL_ERROR("Error wrong command queue %d command id 0x%X\n",
4254 txq_id, pkt->hdr.cmd);
4255 BUG_ON(txq_id != IWL_CMD_QUEUE_NUM);
4256
4257 cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge);
4258 cmd = &priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];
4259
4260 /* Input error checking is done when commands are added to queue. */
4261 if (cmd->meta.flags & CMD_WANT_SKB) {
4262 cmd->meta.source->u.skb = rxb->skb;
4263 rxb->skb = NULL;
4264 } else if (cmd->meta.u.callback &&
4265 !cmd->meta.u.callback(priv, cmd, rxb->skb))
4266 rxb->skb = NULL;
4267
bb8c093b 4268 iwl4965_tx_queue_reclaim(priv, txq_id, index);
b481de9c
ZY
4269
4270 if (!(cmd->meta.flags & CMD_ASYNC)) {
4271 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
4272 wake_up_interruptible(&priv->wait_command_queue);
4273 }
4274}
4275
4276/************************** RX-FUNCTIONS ****************************/
4277/*
4278 * Rx theory of operation
4279 *
9fbab516
BC
4280 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
4281 * each of which point to Receive Buffers to be filled by 4965. These get
4282 * used not only for Rx frames, but for any command response or notification
4283 * from the 4965. The driver and 4965 manage the Rx buffers by means
4284 * of indexes into the circular buffer.
b481de9c
ZY
4285 *
4286 * Rx Queue Indexes
4287 * The host/firmware share two index registers for managing the Rx buffers.
4288 *
4289 * The READ index maps to the first position that the firmware may be writing
4290 * to -- the driver can read up to (but not including) this position and get
4291 * good data.
4292 * The READ index is managed by the firmware once the card is enabled.
4293 *
4294 * The WRITE index maps to the last position the driver has read from -- the
4295 * position preceding WRITE is the last slot the firmware can place a packet.
4296 *
4297 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
4298 * WRITE = READ.
4299 *
9fbab516 4300 * During initialization, the host sets up the READ queue position to the first
b481de9c
ZY
4301 * INDEX position, and WRITE to the last (READ - 1 wrapped)
4302 *
9fbab516 4303 * When the firmware places a packet in a buffer, it will advance the READ index
b481de9c
ZY
4304 * and fire the RX interrupt. The driver can then query the READ index and
4305 * process as many packets as possible, moving the WRITE index forward as it
4306 * resets the Rx queue buffers with new memory.
4307 *
4308 * The management in the driver is as follows:
4309 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
4310 * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
01ebd063 4311 * to replenish the iwl->rxq->rx_free.
bb8c093b 4312 * + In iwl4965_rx_replenish (scheduled) if 'processed' != 'read' then the
b481de9c
ZY
4313 * iwl->rxq is replenished and the READ INDEX is updated (updating the
4314 * 'processed' and 'read' driver indexes as well)
4315 * + A received packet is processed and handed to the kernel network stack,
4316 * detached from the iwl->rxq. The driver 'processed' index is updated.
4317 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
4318 * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
4319 * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
4320 * were enough free buffers and RX_STALLED is set it is cleared.
4321 *
4322 *
4323 * Driver sequence:
4324 *
9fbab516
BC
4325 * iwl4965_rx_queue_alloc() Allocates rx_free
4326 * iwl4965_rx_replenish() Replenishes rx_free list from rx_used, and calls
bb8c093b 4327 * iwl4965_rx_queue_restock
9fbab516 4328 * iwl4965_rx_queue_restock() Moves available buffers from rx_free into Rx
b481de9c
ZY
4329 * queue, updates firmware pointers, and updates
4330 * the WRITE index. If insufficient rx_free buffers
bb8c093b 4331 * are available, schedules iwl4965_rx_replenish
b481de9c
ZY
4332 *
4333 * -- enable interrupts --
9fbab516 4334 * ISR - iwl4965_rx() Detach iwl4965_rx_mem_buffers from pool up to the
b481de9c
ZY
4335 * READ INDEX, detaching the SKB from the pool.
4336 * Moves the packet buffer from queue to rx_used.
bb8c093b 4337 * Calls iwl4965_rx_queue_restock to refill any empty
b481de9c
ZY
4338 * slots.
4339 * ...
4340 *
4341 */
4342
4343/**
bb8c093b 4344 * iwl4965_rx_queue_space - Return number of free slots available in queue.
b481de9c 4345 */
bb8c093b 4346static int iwl4965_rx_queue_space(const struct iwl4965_rx_queue *q)
b481de9c
ZY
4347{
4348 int s = q->read - q->write;
4349 if (s <= 0)
4350 s += RX_QUEUE_SIZE;
 4351 	/* keep a couple of slots free so a full queue is not confused with an empty one */
4352 s -= 2;
4353 if (s < 0)
4354 s = 0;
4355 return s;
4356}
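/* Editor's note -- worked example with hypothetical indexes, not part of
 * the driver source: with RX_QUEUE_SIZE = 256, read = 10 and write = 250,
 * s = 10 - 250 = -240 wraps to 16, and the 2 guard slots leave 14 free
 * slots reported. */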
4357
4358/**
bb8c093b 4359 * iwl4965_rx_queue_update_write_ptr - Update the write pointer for the RX queue
b481de9c 4360 */
bb8c093b 4361int iwl4965_rx_queue_update_write_ptr(struct iwl4965_priv *priv, struct iwl4965_rx_queue *q)
b481de9c
ZY
4362{
4363 u32 reg = 0;
4364 int rc = 0;
4365 unsigned long flags;
4366
4367 spin_lock_irqsave(&q->lock, flags);
4368
4369 if (q->need_update == 0)
4370 goto exit_unlock;
4371
6440adb5 4372 /* If power-saving is in use, make sure device is awake */
b481de9c 4373 if (test_bit(STATUS_POWER_PMI, &priv->status)) {
bb8c093b 4374 reg = iwl4965_read32(priv, CSR_UCODE_DRV_GP1);
b481de9c
ZY
4375
4376 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
bb8c093b 4377 iwl4965_set_bit(priv, CSR_GP_CNTRL,
b481de9c
ZY
4378 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
4379 goto exit_unlock;
4380 }
4381
bb8c093b 4382 rc = iwl4965_grab_nic_access(priv);
b481de9c
ZY
4383 if (rc)
4384 goto exit_unlock;
4385
6440adb5 4386 /* Device expects a multiple of 8 */
bb8c093b 4387 iwl4965_write_direct32(priv, FH_RSCSR_CHNL0_WPTR,
b481de9c 4388 q->write & ~0x7);
bb8c093b 4389 iwl4965_release_nic_access(priv);
6440adb5
CB
4390
4391 /* Else device is assumed to be awake */
b481de9c 4392 } else
6440adb5 4393 /* Device expects a multiple of 8 */
bb8c093b 4394 iwl4965_write32(priv, FH_RSCSR_CHNL0_WPTR, q->write & ~0x7);
b481de9c
ZY
4395
4396
4397 q->need_update = 0;
4398
4399 exit_unlock:
4400 spin_unlock_irqrestore(&q->lock, flags);
4401 return rc;
4402}
4403
4404/**
9fbab516 4405 * iwl4965_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
b481de9c 4406 */
bb8c093b 4407static inline __le32 iwl4965_dma_addr2rbd_ptr(struct iwl4965_priv *priv,
b481de9c
ZY
4408 dma_addr_t dma_addr)
4409{
4410 return cpu_to_le32((u32)(dma_addr >> 8));
4411}
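/* Editor's note -- illustrative example, not part of the driver source:
 * the RBD pointer is simply dma_addr >> 8, i.e. the buffer address in
 * units of 256 bytes; a hypothetical dma_addr of 0x12345600 is stored in
 * the ring as 0x00123456. */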
4412
4413
4414/**
bb8c093b 4415 * iwl4965_rx_queue_restock - refill RX queue from pre-allocated pool
b481de9c 4416 *
9fbab516 4417 * If there are slots in the RX queue that need to be restocked,
b481de9c 4418 * and we have free pre-allocated buffers, fill the ranks as much
9fbab516 4419 * as we can, pulling from rx_free.
b481de9c
ZY
4420 *
4421 * This moves the 'write' index forward to catch up with 'processed', and
4422 * also updates the memory address in the firmware to reference the new
4423 * target buffer.
4424 */
bb8c093b 4425static int iwl4965_rx_queue_restock(struct iwl4965_priv *priv)
b481de9c 4426{
bb8c093b 4427 struct iwl4965_rx_queue *rxq = &priv->rxq;
b481de9c 4428 struct list_head *element;
bb8c093b 4429 struct iwl4965_rx_mem_buffer *rxb;
b481de9c
ZY
4430 unsigned long flags;
4431 int write, rc;
4432
4433 spin_lock_irqsave(&rxq->lock, flags);
4434 write = rxq->write & ~0x7;
bb8c093b 4435 while ((iwl4965_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
6440adb5 4436 /* Get next free Rx buffer, remove from free list */
b481de9c 4437 element = rxq->rx_free.next;
bb8c093b 4438 rxb = list_entry(element, struct iwl4965_rx_mem_buffer, list);
b481de9c 4439 list_del(element);
6440adb5
CB
4440
4441 /* Point to Rx buffer via next RBD in circular buffer */
bb8c093b 4442 rxq->bd[rxq->write] = iwl4965_dma_addr2rbd_ptr(priv, rxb->dma_addr);
b481de9c
ZY
4443 rxq->queue[rxq->write] = rxb;
4444 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
4445 rxq->free_count--;
4446 }
4447 spin_unlock_irqrestore(&rxq->lock, flags);
4448 /* If the pre-allocated buffer pool is dropping low, schedule to
4449 * refill it */
4450 if (rxq->free_count <= RX_LOW_WATERMARK)
4451 queue_work(priv->workqueue, &priv->rx_replenish);
4452
4453
6440adb5
CB
4454 /* If we've added more space for the firmware to place data, tell it.
4455 * Increment device's write pointer in multiples of 8. */
b481de9c
ZY
4456 if ((write != (rxq->write & ~0x7))
4457 || (abs(rxq->write - rxq->read) > 7)) {
4458 spin_lock_irqsave(&rxq->lock, flags);
4459 rxq->need_update = 1;
4460 spin_unlock_irqrestore(&rxq->lock, flags);
bb8c093b 4461 rc = iwl4965_rx_queue_update_write_ptr(priv, rxq);
b481de9c
ZY
4462 if (rc)
4463 return rc;
4464 }
4465
4466 return 0;
4467}
4468
4469/**
bb8c093b 4470 * iwl4965_rx_replenish - Move all used packet from rx_used to rx_free
b481de9c
ZY
4471 *
4472 * When moving to rx_free an SKB is allocated for the slot.
4473 *
bb8c093b 4474 * Also restock the Rx queue via iwl4965_rx_queue_restock.
01ebd063 4475 * This is called as a scheduled work item (except during initialization)
b481de9c 4476 */
bb8c093b 4477void iwl4965_rx_replenish(void *data)
b481de9c 4478{
bb8c093b
CH
4479 struct iwl4965_priv *priv = data;
4480 struct iwl4965_rx_queue *rxq = &priv->rxq;
b481de9c 4481 struct list_head *element;
bb8c093b 4482 struct iwl4965_rx_mem_buffer *rxb;
b481de9c
ZY
4483 unsigned long flags;
4484 spin_lock_irqsave(&rxq->lock, flags);
4485 while (!list_empty(&rxq->rx_used)) {
4486 element = rxq->rx_used.next;
bb8c093b 4487 rxb = list_entry(element, struct iwl4965_rx_mem_buffer, list);
6440adb5
CB
4488
4489 /* Alloc a new receive buffer */
b481de9c
ZY
4490 rxb->skb =
4491 alloc_skb(IWL_RX_BUF_SIZE, __GFP_NOWARN | GFP_ATOMIC);
4492 if (!rxb->skb) {
4493 if (net_ratelimit())
4494 printk(KERN_CRIT DRV_NAME
4495 ": Can not allocate SKB buffers\n");
4496 /* We don't reschedule replenish work here -- we will
4497 * call the restock method and if it still needs
4498 * more buffers it will schedule replenish */
4499 break;
4500 }
4501 priv->alloc_rxb_skb++;
4502 list_del(element);
6440adb5
CB
4503
4504 /* Get physical address of RB/SKB */
b481de9c
ZY
4505 rxb->dma_addr =
4506 pci_map_single(priv->pci_dev, rxb->skb->data,
4507 IWL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
4508 list_add_tail(&rxb->list, &rxq->rx_free);
4509 rxq->free_count++;
4510 }
4511 spin_unlock_irqrestore(&rxq->lock, flags);
4512
4513 spin_lock_irqsave(&priv->lock, flags);
bb8c093b 4514 iwl4965_rx_queue_restock(priv);
b481de9c
ZY
4515 spin_unlock_irqrestore(&priv->lock, flags);
4516}
4517
4518/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
9fbab516 4519 * If an SKB has been detached, the POOL needs to have its SKB set to NULL
b481de9c
ZY
 4520 * This free routine walks the list of POOL entries and, if an SKB is
 4521 * non-NULL, unmaps and frees it
4522 */
bb8c093b 4523static void iwl4965_rx_queue_free(struct iwl4965_priv *priv, struct iwl4965_rx_queue *rxq)
b481de9c
ZY
4524{
4525 int i;
4526 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
4527 if (rxq->pool[i].skb != NULL) {
4528 pci_unmap_single(priv->pci_dev,
4529 rxq->pool[i].dma_addr,
4530 IWL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
4531 dev_kfree_skb(rxq->pool[i].skb);
4532 }
4533 }
4534
4535 pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd,
4536 rxq->dma_addr);
4537 rxq->bd = NULL;
4538}
4539
bb8c093b 4540int iwl4965_rx_queue_alloc(struct iwl4965_priv *priv)
b481de9c 4541{
bb8c093b 4542 struct iwl4965_rx_queue *rxq = &priv->rxq;
b481de9c
ZY
4543 struct pci_dev *dev = priv->pci_dev;
4544 int i;
4545
4546 spin_lock_init(&rxq->lock);
4547 INIT_LIST_HEAD(&rxq->rx_free);
4548 INIT_LIST_HEAD(&rxq->rx_used);
6440adb5
CB
4549
4550 /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
b481de9c
ZY
4551 rxq->bd = pci_alloc_consistent(dev, 4 * RX_QUEUE_SIZE, &rxq->dma_addr);
4552 if (!rxq->bd)
4553 return -ENOMEM;
6440adb5 4554
b481de9c
ZY
4555 /* Fill the rx_used queue with _all_ of the Rx buffers */
4556 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
4557 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
6440adb5 4558
b481de9c
ZY
4559 /* Set us so that we have processed and used all buffers, but have
4560 * not restocked the Rx queue with fresh buffers */
4561 rxq->read = rxq->write = 0;
4562 rxq->free_count = 0;
4563 rxq->need_update = 0;
4564 return 0;
4565}
4566
bb8c093b 4567void iwl4965_rx_queue_reset(struct iwl4965_priv *priv, struct iwl4965_rx_queue *rxq)
b481de9c
ZY
4568{
4569 unsigned long flags;
4570 int i;
4571 spin_lock_irqsave(&rxq->lock, flags);
4572 INIT_LIST_HEAD(&rxq->rx_free);
4573 INIT_LIST_HEAD(&rxq->rx_used);
4574 /* Fill the rx_used queue with _all_ of the Rx buffers */
4575 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
4576 /* In the reset function, these buffers may have been allocated
4577 * to an SKB, so we need to unmap and free potential storage */
4578 if (rxq->pool[i].skb != NULL) {
4579 pci_unmap_single(priv->pci_dev,
4580 rxq->pool[i].dma_addr,
4581 IWL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
4582 priv->alloc_rxb_skb--;
4583 dev_kfree_skb(rxq->pool[i].skb);
4584 rxq->pool[i].skb = NULL;
4585 }
4586 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
4587 }
4588
4589 /* Set us so that we have processed and used all buffers, but have
4590 * not restocked the Rx queue with fresh buffers */
4591 rxq->read = rxq->write = 0;
4592 rxq->free_count = 0;
4593 spin_unlock_irqrestore(&rxq->lock, flags);
4594}
4595
4596/* Convert linear signal-to-noise ratio into dB */
4597static u8 ratio2dB[100] = {
4598/* 0 1 2 3 4 5 6 7 8 9 */
4599 0, 0, 6, 10, 12, 14, 16, 17, 18, 19, /* 00 - 09 */
4600 20, 21, 22, 22, 23, 23, 24, 25, 26, 26, /* 10 - 19 */
4601 26, 26, 26, 27, 27, 28, 28, 28, 29, 29, /* 20 - 29 */
4602 29, 30, 30, 30, 31, 31, 31, 31, 32, 32, /* 30 - 39 */
4603 32, 32, 32, 33, 33, 33, 33, 33, 34, 34, /* 40 - 49 */
4604 34, 34, 34, 34, 35, 35, 35, 35, 35, 35, /* 50 - 59 */
4605 36, 36, 36, 36, 36, 36, 36, 37, 37, 37, /* 60 - 69 */
4606 37, 37, 37, 37, 37, 38, 38, 38, 38, 38, /* 70 - 79 */
4607 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, /* 80 - 89 */
4608 39, 39, 39, 39, 39, 40, 40, 40, 40, 40 /* 90 - 99 */
4609};
4610
4611/* Calculates a relative dB value from a ratio of linear
4612 * (i.e. not dB) signal levels.
4613 * Conversion assumes that levels are voltages (20*log), not powers (10*log). */
bb8c093b 4614int iwl4965_calc_db_from_ratio(int sig_ratio)
b481de9c 4615{
c899a575
AB
4616 /* 1000:1 or higher just report as 60 dB */
4617 if (sig_ratio >= 1000)
b481de9c
ZY
4618 return 60;
4619
c899a575 4620 /* 100:1 or higher, divide by 10 and use table,
b481de9c 4621 * add 20 dB to make up for divide by 10 */
c899a575 4622 if (sig_ratio >= 100)
b481de9c
ZY
4623 return (20 + (int)ratio2dB[sig_ratio/10]);
4624
4625 /* We shouldn't see this */
4626 if (sig_ratio < 1)
4627 return 0;
4628
4629 /* Use table for ratios 1:1 - 99:1 */
4630 return (int)ratio2dB[sig_ratio];
4631}
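/* Editor's note -- worked examples with hypothetical ratios, not part of
 * the driver source: sig_ratio = 50 uses the table directly and returns
 * ratio2dB[50] = 34 dB; sig_ratio = 400 is scaled to
 * 20 + ratio2dB[40] = 20 + 32 = 52 dB, close to 20*log10(400) ~ 52 dB. */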
4632
4633#define PERFECT_RSSI (-20) /* dBm */
4634#define WORST_RSSI (-95) /* dBm */
4635#define RSSI_RANGE (PERFECT_RSSI - WORST_RSSI)
4636
4637/* Calculate an indication of rx signal quality (a percentage, not dBm!).
4638 * See http://www.ces.clemson.edu/linux/signal_quality.shtml for info
4639 * about formulas used below. */
bb8c093b 4640int iwl4965_calc_sig_qual(int rssi_dbm, int noise_dbm)
b481de9c
ZY
4641{
4642 int sig_qual;
4643 int degradation = PERFECT_RSSI - rssi_dbm;
4644
4645 /* If we get a noise measurement, use signal-to-noise ratio (SNR)
4646 * as indicator; formula is (signal dbm - noise dbm).
4647 * SNR at or above 40 is a great signal (100%).
4648 * Below that, scale to fit SNR of 0 - 40 dB within 0 - 100% indicator.
4649 * Weakest usable signal is usually 10 - 15 dB SNR. */
4650 if (noise_dbm) {
4651 if (rssi_dbm - noise_dbm >= 40)
4652 return 100;
4653 else if (rssi_dbm < noise_dbm)
4654 return 0;
4655 sig_qual = ((rssi_dbm - noise_dbm) * 5) / 2;
4656
4657 /* Else use just the signal level.
4658 * This formula is a least squares fit of data points collected and
4659 * compared with a reference system that had a percentage (%) display
4660 * for signal quality. */
4661 } else
4662 sig_qual = (100 * (RSSI_RANGE * RSSI_RANGE) - degradation *
4663 (15 * RSSI_RANGE + 62 * degradation)) /
4664 (RSSI_RANGE * RSSI_RANGE);
4665
4666 if (sig_qual > 100)
4667 sig_qual = 100;
4668 else if (sig_qual < 1)
4669 sig_qual = 0;
4670
4671 return sig_qual;
4672}
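/* Editor's note -- worked examples with hypothetical readings, not part
 * of the driver source: rssi_dbm = -70 with noise_dbm = -90 takes the SNR
 * branch and yields ((-70 - -90) * 5) / 2 = 50%; with no noise figure,
 * rssi_dbm = -70 gives degradation = 50 and
 * sig_qual = (100*75*75 - 50*(15*75 + 62*50)) / (75*75) = 62%. */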
4673
4674/**
9fbab516 4675 * iwl4965_rx_handle - Main entry function for receiving responses from uCode
b481de9c
ZY
4676 *
4677 * Uses the priv->rx_handlers callback function array to invoke
4678 * the appropriate handlers, including command responses,
4679 * frame-received notifications, and other notifications.
4680 */
bb8c093b 4681static void iwl4965_rx_handle(struct iwl4965_priv *priv)
b481de9c 4682{
bb8c093b
CH
4683 struct iwl4965_rx_mem_buffer *rxb;
4684 struct iwl4965_rx_packet *pkt;
4685 struct iwl4965_rx_queue *rxq = &priv->rxq;
b481de9c
ZY
4686 u32 r, i;
4687 int reclaim;
4688 unsigned long flags;
4689
6440adb5
CB
4690 /* uCode's read index (stored in shared DRAM) indicates the last Rx
4691 * buffer that the driver may process (last buffer filled by ucode). */
bb8c093b 4692 r = iwl4965_hw_get_rx_read(priv);
b481de9c
ZY
4693 i = rxq->read;
4694
4695 /* Rx interrupt, but nothing sent from uCode */
4696 if (i == r)
4697 IWL_DEBUG(IWL_DL_RX | IWL_DL_ISR, "r = %d, i = %d\n", r, i);
4698
4699 while (i != r) {
4700 rxb = rxq->queue[i];
4701
9fbab516 4702 /* If an RXB doesn't have a Rx queue slot associated with it,
b481de9c
ZY
4703 * then a bug has been introduced in the queue refilling
4704 * routines -- catch it here */
4705 BUG_ON(rxb == NULL);
4706
4707 rxq->queue[i] = NULL;
4708
4709 pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
4710 IWL_RX_BUF_SIZE,
4711 PCI_DMA_FROMDEVICE);
bb8c093b 4712 pkt = (struct iwl4965_rx_packet *)rxb->skb->data;
b481de9c
ZY
4713
4714 /* Reclaim a command buffer only if this packet is a response
4715 * to a (driver-originated) command.
4716 * If the packet (e.g. Rx frame) originated from uCode,
4717 * there is no command buffer to reclaim.
4718 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
4719 * but apparently a few don't get set; catch them here. */
4720 reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
4721 (pkt->hdr.cmd != REPLY_RX_PHY_CMD) &&
4722 (pkt->hdr.cmd != REPLY_4965_RX) &&
cfe01709 4723 (pkt->hdr.cmd != REPLY_COMPRESSED_BA) &&
b481de9c
ZY
4724 (pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
4725 (pkt->hdr.cmd != REPLY_TX);
4726
4727 /* Based on type of command response or notification,
4728 * handle those that need handling via function in
bb8c093b 4729 * rx_handlers table. See iwl4965_setup_rx_handlers() */
b481de9c
ZY
4730 if (priv->rx_handlers[pkt->hdr.cmd]) {
4731 IWL_DEBUG(IWL_DL_HOST_COMMAND | IWL_DL_RX | IWL_DL_ISR,
4732 "r = %d, i = %d, %s, 0x%02x\n", r, i,
4733 get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
4734 priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
4735 } else {
4736 /* No handling needed */
4737 IWL_DEBUG(IWL_DL_HOST_COMMAND | IWL_DL_RX | IWL_DL_ISR,
4738 "r %d i %d No handler needed for %s, 0x%02x\n",
4739 r, i, get_cmd_string(pkt->hdr.cmd),
4740 pkt->hdr.cmd);
4741 }
4742
4743 if (reclaim) {
9fbab516
BC
4744 /* Invoke any callbacks, transfer the skb to caller, and
4745 * fire off the (possibly) blocking iwl4965_send_cmd()
b481de9c
ZY
4746 * as we reclaim the driver command queue */
4747 if (rxb && rxb->skb)
bb8c093b 4748 iwl4965_tx_cmd_complete(priv, rxb);
b481de9c
ZY
4749 else
4750 IWL_WARNING("Claim null rxb?\n");
4751 }
4752
4753 /* For now we just don't re-use anything. We can tweak this
4754 * later to try and re-use notification packets and SKBs that
4755 * fail to Rx correctly */
4756 if (rxb->skb != NULL) {
4757 priv->alloc_rxb_skb--;
4758 dev_kfree_skb_any(rxb->skb);
4759 rxb->skb = NULL;
4760 }
4761
4762 pci_unmap_single(priv->pci_dev, rxb->dma_addr,
4763 IWL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
4764 spin_lock_irqsave(&rxq->lock, flags);
4765 list_add_tail(&rxb->list, &priv->rxq.rx_used);
4766 spin_unlock_irqrestore(&rxq->lock, flags);
4767 i = (i + 1) & RX_QUEUE_MASK;
4768 }
4769
4770 /* Backtrack one entry */
4771 priv->rxq.read = i;
bb8c093b 4772 iwl4965_rx_queue_restock(priv);
b481de9c
ZY
4773}
4774
6440adb5
CB
4775/**
4776 * iwl4965_tx_queue_update_write_ptr - Send new write index to hardware
4777 */
bb8c093b
CH
4778static int iwl4965_tx_queue_update_write_ptr(struct iwl4965_priv *priv,
4779 struct iwl4965_tx_queue *txq)
b481de9c
ZY
4780{
4781 u32 reg = 0;
4782 int rc = 0;
4783 int txq_id = txq->q.id;
4784
4785 if (txq->need_update == 0)
4786 return rc;
4787
4788 /* if we're trying to save power */
4789 if (test_bit(STATUS_POWER_PMI, &priv->status)) {
4790 /* wake up nic if it's powered down ...
4791 * uCode will wake up, and interrupt us again, so next
4792 * time we'll skip this part. */
bb8c093b 4793 reg = iwl4965_read32(priv, CSR_UCODE_DRV_GP1);
b481de9c
ZY
4794
4795 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
4796 IWL_DEBUG_INFO("Requesting wakeup, GP1 = 0x%x\n", reg);
bb8c093b 4797 iwl4965_set_bit(priv, CSR_GP_CNTRL,
b481de9c
ZY
4798 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
4799 return rc;
4800 }
4801
4802 /* restore this queue's parameters in nic hardware. */
bb8c093b 4803 rc = iwl4965_grab_nic_access(priv);
b481de9c
ZY
4804 if (rc)
4805 return rc;
bb8c093b 4806 iwl4965_write_direct32(priv, HBUS_TARG_WRPTR,
fc4b6853 4807 txq->q.write_ptr | (txq_id << 8));
bb8c093b 4808 iwl4965_release_nic_access(priv);
b481de9c
ZY
4809
4810 /* else not in power-save mode, uCode will never sleep when we're
4811 * trying to tx (during RFKILL, we're not trying to tx). */
4812 } else
bb8c093b 4813 iwl4965_write32(priv, HBUS_TARG_WRPTR,
fc4b6853 4814 txq->q.write_ptr | (txq_id << 8));
b481de9c
ZY
4815
4816 txq->need_update = 0;
4817
4818 return rc;
4819}
4820
c8b0e6e1 4821#ifdef CONFIG_IWL4965_DEBUG
bb8c093b 4822static void iwl4965_print_rx_config_cmd(struct iwl4965_rxon_cmd *rxon)
b481de9c 4823{
0795af57
JP
4824 DECLARE_MAC_BUF(mac);
4825
b481de9c 4826 IWL_DEBUG_RADIO("RX CONFIG:\n");
bb8c093b 4827 iwl4965_print_hex_dump(IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
b481de9c
ZY
4828 IWL_DEBUG_RADIO("u16 channel: 0x%x\n", le16_to_cpu(rxon->channel));
4829 IWL_DEBUG_RADIO("u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
4830 IWL_DEBUG_RADIO("u32 filter_flags: 0x%08x\n",
4831 le32_to_cpu(rxon->filter_flags));
4832 IWL_DEBUG_RADIO("u8 dev_type: 0x%x\n", rxon->dev_type);
4833 IWL_DEBUG_RADIO("u8 ofdm_basic_rates: 0x%02x\n",
4834 rxon->ofdm_basic_rates);
4835 IWL_DEBUG_RADIO("u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates);
0795af57
JP
4836 IWL_DEBUG_RADIO("u8[6] node_addr: %s\n",
4837 print_mac(mac, rxon->node_addr));
4838 IWL_DEBUG_RADIO("u8[6] bssid_addr: %s\n",
4839 print_mac(mac, rxon->bssid_addr));
b481de9c
ZY
4840 IWL_DEBUG_RADIO("u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id));
4841}
4842#endif
4843
bb8c093b 4844static void iwl4965_enable_interrupts(struct iwl4965_priv *priv)
b481de9c
ZY
4845{
4846 IWL_DEBUG_ISR("Enabling interrupts\n");
4847 set_bit(STATUS_INT_ENABLED, &priv->status);
bb8c093b 4848 iwl4965_write32(priv, CSR_INT_MASK, CSR_INI_SET_MASK);
b481de9c
ZY
4849}
4850
bb8c093b 4851static inline void iwl4965_disable_interrupts(struct iwl4965_priv *priv)
b481de9c
ZY
4852{
4853 clear_bit(STATUS_INT_ENABLED, &priv->status);
4854
4855 /* disable interrupts from uCode/NIC to host */
bb8c093b 4856 iwl4965_write32(priv, CSR_INT_MASK, 0x00000000);
b481de9c
ZY
4857
4858 /* acknowledge/clear/reset any interrupts still pending
4859 * from uCode or flow handler (Rx/Tx DMA) */
bb8c093b
CH
4860 iwl4965_write32(priv, CSR_INT, 0xffffffff);
4861 iwl4965_write32(priv, CSR_FH_INT_STATUS, 0xffffffff);
b481de9c
ZY
4862 IWL_DEBUG_ISR("Disabled interrupts\n");
4863}
4864
4865static const char *desc_lookup(int i)
4866{
4867 switch (i) {
4868 case 1:
4869 return "FAIL";
4870 case 2:
4871 return "BAD_PARAM";
4872 case 3:
4873 return "BAD_CHECKSUM";
4874 case 4:
4875 return "NMI_INTERRUPT";
4876 case 5:
4877 return "SYSASSERT";
4878 case 6:
4879 return "FATAL_ERROR";
4880 }
4881
4882 return "UNKNOWN";
4883}
4884
4885#define ERROR_START_OFFSET (1 * sizeof(u32))
4886#define ERROR_ELEM_SIZE (7 * sizeof(u32))
4887
bb8c093b 4888static void iwl4965_dump_nic_error_log(struct iwl4965_priv *priv)
b481de9c
ZY
4889{
4890 u32 data2, line;
4891 u32 desc, time, count, base, data1;
4892 u32 blink1, blink2, ilink1, ilink2;
4893 int rc;
4894
4895 base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
4896
bb8c093b 4897 if (!iwl4965_hw_valid_rtc_data_addr(base)) {
b481de9c
ZY
4898 IWL_ERROR("Not valid error log pointer 0x%08X\n", base);
4899 return;
4900 }
4901
bb8c093b 4902 rc = iwl4965_grab_nic_access(priv);
b481de9c
ZY
4903 if (rc) {
4904 IWL_WARNING("Can not read from adapter at this time.\n");
4905 return;
4906 }
4907
bb8c093b 4908 count = iwl4965_read_targ_mem(priv, base);
b481de9c
ZY
4909
4910 if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
4911 IWL_ERROR("Start IWL Error Log Dump:\n");
4912 IWL_ERROR("Status: 0x%08lX, Config: %08X count: %d\n",
4913 priv->status, priv->config, count);
4914 }
4915
bb8c093b
CH
4916 desc = iwl4965_read_targ_mem(priv, base + 1 * sizeof(u32));
4917 blink1 = iwl4965_read_targ_mem(priv, base + 3 * sizeof(u32));
4918 blink2 = iwl4965_read_targ_mem(priv, base + 4 * sizeof(u32));
4919 ilink1 = iwl4965_read_targ_mem(priv, base + 5 * sizeof(u32));
4920 ilink2 = iwl4965_read_targ_mem(priv, base + 6 * sizeof(u32));
4921 data1 = iwl4965_read_targ_mem(priv, base + 7 * sizeof(u32));
4922 data2 = iwl4965_read_targ_mem(priv, base + 8 * sizeof(u32));
4923 line = iwl4965_read_targ_mem(priv, base + 9 * sizeof(u32));
4924 time = iwl4965_read_targ_mem(priv, base + 11 * sizeof(u32));
b481de9c
ZY
4925
4926 IWL_ERROR("Desc Time "
4927 "data1 data2 line\n");
4928 IWL_ERROR("%-13s (#%d) %010u 0x%08X 0x%08X %u\n",
4929 desc_lookup(desc), desc, time, data1, data2, line);
4930 IWL_ERROR("blink1 blink2 ilink1 ilink2\n");
4931 IWL_ERROR("0x%05X 0x%05X 0x%05X 0x%05X\n", blink1, blink2,
4932 ilink1, ilink2);
4933
bb8c093b 4934 iwl4965_release_nic_access(priv);
b481de9c
ZY
4935}
4936
4937#define EVENT_START_OFFSET (4 * sizeof(u32))
4938
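/* The event log starts with a 4-word header (hence EVENT_START_OFFSET):
 *   0: capacity (max # of entries)
 *   1: mode (0 = no timestamp, 1 = timestamp recorded)
 *   2: num_wraps (# times uCode wrapped to top of log)
 *   3: next_entry (index uCode will write next)
 * Entries follow, 2 or 3 u32s each depending on mode (see below).
 */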
4939/**
bb8c093b 4940 * iwl4965_print_event_log - Dump error event log to syslog
b481de9c 4941 *
bb8c093b 4942 * NOTE: Must be called with iwl4965_grab_nic_access() already obtained!
b481de9c 4943 */
bb8c093b 4944static void iwl4965_print_event_log(struct iwl4965_priv *priv, u32 start_idx,
b481de9c
ZY
4945 u32 num_events, u32 mode)
4946{
4947 u32 i;
4948 u32 base; /* SRAM byte address of event log header */
4949 u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
4950 u32 ptr; /* SRAM byte address of log data */
4951 u32 ev, time, data; /* event log data */
4952
4953 if (num_events == 0)
4954 return;
4955
4956 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
4957
4958 if (mode == 0)
4959 event_size = 2 * sizeof(u32);
4960 else
4961 event_size = 3 * sizeof(u32);
4962
4963 ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
4964
4965 /* "time" is actually "data" for mode 0 (no timestamp).
4966  * Place event id # at far right for easier visual parsing. */
4967 for (i = 0; i < num_events; i++) {
bb8c093b 4968 ev = iwl4965_read_targ_mem(priv, ptr);
b481de9c 4969 ptr += sizeof(u32);
bb8c093b 4970 time = iwl4965_read_targ_mem(priv, ptr);
b481de9c
ZY
4971 ptr += sizeof(u32);
4972 if (mode == 0)
4973 IWL_ERROR("0x%08x\t%04u\n", time, ev); /* data, ev */
4974 else {
bb8c093b 4975 data = iwl4965_read_targ_mem(priv, ptr);
b481de9c
ZY
4976 ptr += sizeof(u32);
4977 IWL_ERROR("%010u\t0x%08x\t%04u\n", time, data, ev);
4978 }
4979 }
4980}
4981
bb8c093b 4982static void iwl4965_dump_nic_event_log(struct iwl4965_priv *priv)
b481de9c
ZY
4983{
4984 int rc;
4985 u32 base; /* SRAM byte address of event log header */
4986 u32 capacity; /* event log capacity in # entries */
4987 u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */
4988 u32 num_wraps; /* # times uCode wrapped to top of log */
4989 u32 next_entry; /* index of next entry to be written by uCode */
4990 u32 size; /* # entries that we'll print */
4991
4992 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
bb8c093b 4993 if (!iwl4965_hw_valid_rtc_data_addr(base)) {
b481de9c
ZY
4994 IWL_ERROR("Invalid event log pointer 0x%08X\n", base);
4995 return;
4996 }
4997
bb8c093b 4998 rc = iwl4965_grab_nic_access(priv);
b481de9c
ZY
4999 if (rc) {
5000 IWL_WARNING("Can not read from adapter at this time.\n");
5001 return;
5002 }
5003
5004 /* event log header */
bb8c093b
CH
5005 capacity = iwl4965_read_targ_mem(priv, base);
5006 mode = iwl4965_read_targ_mem(priv, base + (1 * sizeof(u32)));
5007 num_wraps = iwl4965_read_targ_mem(priv, base + (2 * sizeof(u32)));
5008 next_entry = iwl4965_read_targ_mem(priv, base + (3 * sizeof(u32)));
b481de9c
ZY
5009
5010 size = num_wraps ? capacity : next_entry;
5011
5012 /* bail out if nothing in log */
5013 if (size == 0) {
583fab37 5014 IWL_ERROR("Start IWL Event Log Dump: nothing in log\n");
bb8c093b 5015 iwl4965_release_nic_access(priv);
b481de9c
ZY
5016 return;
5017 }
5018
583fab37 5019 IWL_ERROR("Start IWL Event Log Dump: display count %d, wraps %d\n",
b481de9c
ZY
5020 size, num_wraps);
5021
5022 /* if uCode has wrapped back to top of log, start at the oldest entry,
5023 	 * i.e. the next one that uCode would fill. */
5024 if (num_wraps)
bb8c093b 5025 iwl4965_print_event_log(priv, next_entry,
b481de9c
ZY
5026 capacity - next_entry, mode);
5027
5028 /* (then/else) start at top of log */
bb8c093b 5029 iwl4965_print_event_log(priv, 0, next_entry, mode);
b481de9c 5030
bb8c093b 5031 iwl4965_release_nic_access(priv);
b481de9c
ZY
5032}
5033
5034/**
bb8c093b 5035 * iwl4965_irq_handle_error - called for HW or SW error interrupt from card
b481de9c 5036 */
bb8c093b 5037static void iwl4965_irq_handle_error(struct iwl4965_priv *priv)
b481de9c 5038{
bb8c093b 5039 /* Set the FW error flag -- cleared on iwl4965_down */
b481de9c
ZY
5040 set_bit(STATUS_FW_ERROR, &priv->status);
5041
5042 /* Cancel currently queued command. */
5043 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
5044
c8b0e6e1 5045#ifdef CONFIG_IWL4965_DEBUG
bb8c093b
CH
5046 if (iwl4965_debug_level & IWL_DL_FW_ERRORS) {
5047 iwl4965_dump_nic_error_log(priv);
5048 iwl4965_dump_nic_event_log(priv);
5049 iwl4965_print_rx_config_cmd(&priv->staging_rxon);
b481de9c
ZY
5050 }
5051#endif
5052
5053 wake_up_interruptible(&priv->wait_command_queue);
5054
5055 /* Keep the restart process from trying to send host
5056 * commands by clearing the INIT status bit */
5057 clear_bit(STATUS_READY, &priv->status);
5058
5059 if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
5060 IWL_DEBUG(IWL_DL_INFO | IWL_DL_FW_ERRORS,
5061 "Restarting adapter due to uCode error.\n");
5062
bb8c093b 5063 if (iwl4965_is_associated(priv)) {
b481de9c
ZY
5064 memcpy(&priv->recovery_rxon, &priv->active_rxon,
5065 sizeof(priv->recovery_rxon));
5066 priv->error_recovering = 1;
5067 }
5068 queue_work(priv->workqueue, &priv->restart);
5069 }
5070}
5071
bb8c093b 5072static void iwl4965_error_recovery(struct iwl4965_priv *priv)
b481de9c
ZY
5073{
5074 unsigned long flags;
5075
5076 memcpy(&priv->staging_rxon, &priv->recovery_rxon,
5077 sizeof(priv->staging_rxon));
5078 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
bb8c093b 5079 iwl4965_commit_rxon(priv);
b481de9c 5080
bb8c093b 5081 iwl4965_rxon_add_station(priv, priv->bssid, 1);
b481de9c
ZY
5082
5083 spin_lock_irqsave(&priv->lock, flags);
5084 priv->assoc_id = le16_to_cpu(priv->staging_rxon.assoc_id);
5085 priv->error_recovering = 0;
5086 spin_unlock_irqrestore(&priv->lock, flags);
5087}
5088
bb8c093b 5089static void iwl4965_irq_tasklet(struct iwl4965_priv *priv)
b481de9c
ZY
5090{
5091 u32 inta, handled = 0;
5092 u32 inta_fh;
5093 unsigned long flags;
c8b0e6e1 5094#ifdef CONFIG_IWL4965_DEBUG
b481de9c
ZY
5095 u32 inta_mask;
5096#endif
5097
5098 spin_lock_irqsave(&priv->lock, flags);
5099
5100 /* Ack/clear/reset pending uCode interrupts.
5101 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
5102 * and will clear only when CSR_FH_INT_STATUS gets cleared. */
bb8c093b
CH
5103 inta = iwl4965_read32(priv, CSR_INT);
5104 iwl4965_write32(priv, CSR_INT, inta);
b481de9c
ZY
5105
5106 /* Ack/clear/reset pending flow-handler (DMA) interrupts.
5107 * Any new interrupts that happen after this, either while we're
5108 * in this tasklet, or later, will show up in next ISR/tasklet. */
bb8c093b
CH
5109 inta_fh = iwl4965_read32(priv, CSR_FH_INT_STATUS);
5110 iwl4965_write32(priv, CSR_FH_INT_STATUS, inta_fh);
b481de9c 5111
c8b0e6e1 5112#ifdef CONFIG_IWL4965_DEBUG
bb8c093b 5113 if (iwl4965_debug_level & IWL_DL_ISR) {
9fbab516
BC
5114 /* just for debug */
5115 inta_mask = iwl4965_read32(priv, CSR_INT_MASK);
b481de9c
ZY
5116 IWL_DEBUG_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
5117 inta, inta_mask, inta_fh);
5118 }
5119#endif
5120
5121 /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
5122 * atomic, make sure that inta covers all the interrupts that
5123 * we've discovered, even if FH interrupt came in just after
5124 * reading CSR_INT. */
5125 if (inta_fh & CSR_FH_INT_RX_MASK)
5126 inta |= CSR_INT_BIT_FH_RX;
5127 if (inta_fh & CSR_FH_INT_TX_MASK)
5128 inta |= CSR_INT_BIT_FH_TX;
5129
5130 /* Now service all interrupt bits discovered above. */
5131 if (inta & CSR_INT_BIT_HW_ERR) {
5132 IWL_ERROR("Microcode HW error detected. Restarting.\n");
5133
5134 /* Tell the device to stop sending interrupts */
bb8c093b 5135 iwl4965_disable_interrupts(priv);
b481de9c 5136
bb8c093b 5137 iwl4965_irq_handle_error(priv);
b481de9c
ZY
5138
5139 handled |= CSR_INT_BIT_HW_ERR;
5140
5141 spin_unlock_irqrestore(&priv->lock, flags);
5142
5143 return;
5144 }
5145
c8b0e6e1 5146#ifdef CONFIG_IWL4965_DEBUG
bb8c093b 5147 if (iwl4965_debug_level & (IWL_DL_ISR)) {
b481de9c
ZY
5148 /* NIC fires this, but we don't use it, redundant with WAKEUP */
5149 if (inta & CSR_INT_BIT_MAC_CLK_ACTV)
5150 IWL_DEBUG_ISR("Microcode started or stopped.\n");
5151
5152 /* Alive notification via Rx interrupt will do the real work */
5153 if (inta & CSR_INT_BIT_ALIVE)
5154 IWL_DEBUG_ISR("Alive interrupt\n");
5155 }
5156#endif
5157 /* Safely ignore these bits for debug checks below */
5158 inta &= ~(CSR_INT_BIT_MAC_CLK_ACTV | CSR_INT_BIT_ALIVE);
5159
9fbab516 5160 /* HW RF KILL switch toggled */
b481de9c
ZY
5161 if (inta & CSR_INT_BIT_RF_KILL) {
5162 int hw_rf_kill = 0;
bb8c093b 5163 if (!(iwl4965_read32(priv, CSR_GP_CNTRL) &
b481de9c
ZY
5164 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
5165 hw_rf_kill = 1;
5166
5167 IWL_DEBUG(IWL_DL_INFO | IWL_DL_RF_KILL | IWL_DL_ISR,
5168 "RF_KILL bit toggled to %s.\n",
5169 hw_rf_kill ? "disable radio":"enable radio");
5170
5171 /* Queue restart only if RF_KILL switch was set to "kill"
5172 * when we loaded driver, and is now set to "enable".
5173 * After we're Alive, RF_KILL gets handled by
5174 * iwl_rx_card_state_notif() */
53e49093
ZY
5175 if (!hw_rf_kill && !test_bit(STATUS_ALIVE, &priv->status)) {
5176 clear_bit(STATUS_RF_KILL_HW, &priv->status);
b481de9c 5177 queue_work(priv->workqueue, &priv->restart);
53e49093 5178 }
b481de9c
ZY
5179
5180 handled |= CSR_INT_BIT_RF_KILL;
5181 }
5182
9fbab516 5183 /* Chip got too hot and stopped itself */
b481de9c
ZY
5184 if (inta & CSR_INT_BIT_CT_KILL) {
5185 IWL_ERROR("Microcode CT kill error detected.\n");
5186 handled |= CSR_INT_BIT_CT_KILL;
5187 }
5188
5189 /* Error detected by uCode */
5190 if (inta & CSR_INT_BIT_SW_ERR) {
5191 IWL_ERROR("Microcode SW error detected. Restarting 0x%X.\n",
5192 inta);
bb8c093b 5193 iwl4965_irq_handle_error(priv);
b481de9c
ZY
5194 handled |= CSR_INT_BIT_SW_ERR;
5195 }
5196
5197 /* uCode wakes up after power-down sleep */
5198 if (inta & CSR_INT_BIT_WAKEUP) {
5199 IWL_DEBUG_ISR("Wakeup interrupt\n");
bb8c093b
CH
5200 iwl4965_rx_queue_update_write_ptr(priv, &priv->rxq);
5201 iwl4965_tx_queue_update_write_ptr(priv, &priv->txq[0]);
5202 iwl4965_tx_queue_update_write_ptr(priv, &priv->txq[1]);
5203 iwl4965_tx_queue_update_write_ptr(priv, &priv->txq[2]);
5204 iwl4965_tx_queue_update_write_ptr(priv, &priv->txq[3]);
5205 iwl4965_tx_queue_update_write_ptr(priv, &priv->txq[4]);
5206 iwl4965_tx_queue_update_write_ptr(priv, &priv->txq[5]);
b481de9c
ZY
5207
5208 handled |= CSR_INT_BIT_WAKEUP;
5209 }
5210
5211 /* All uCode command responses, including Tx command responses,
5212 * Rx "responses" (frame-received notification), and other
5213 	 * notifications from uCode come through here. */
5214 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
bb8c093b 5215 iwl4965_rx_handle(priv);
b481de9c
ZY
5216 handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
5217 }
5218
5219 if (inta & CSR_INT_BIT_FH_TX) {
5220 IWL_DEBUG_ISR("Tx interrupt\n");
5221 handled |= CSR_INT_BIT_FH_TX;
5222 }
5223
5224 if (inta & ~handled)
5225 IWL_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
5226
5227 if (inta & ~CSR_INI_SET_MASK) {
5228 IWL_WARNING("Disabled INTA bits 0x%08x were pending\n",
5229 inta & ~CSR_INI_SET_MASK);
5230 IWL_WARNING(" with FH_INT = 0x%08x\n", inta_fh);
5231 }
5232
5233 /* Re-enable all interrupts */
bb8c093b 5234 iwl4965_enable_interrupts(priv);
b481de9c 5235
c8b0e6e1 5236#ifdef CONFIG_IWL4965_DEBUG
bb8c093b
CH
5237 if (iwl4965_debug_level & (IWL_DL_ISR)) {
5238 inta = iwl4965_read32(priv, CSR_INT);
5239 inta_mask = iwl4965_read32(priv, CSR_INT_MASK);
5240 inta_fh = iwl4965_read32(priv, CSR_FH_INT_STATUS);
b481de9c
ZY
5241 IWL_DEBUG_ISR("End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
5242 "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
5243 }
5244#endif
5245 spin_unlock_irqrestore(&priv->lock, flags);
5246}
5247
bb8c093b 5248static irqreturn_t iwl4965_isr(int irq, void *data)
b481de9c 5249{
bb8c093b 5250 struct iwl4965_priv *priv = data;
b481de9c
ZY
5251 u32 inta, inta_mask;
5252 u32 inta_fh;
5253 if (!priv)
5254 return IRQ_NONE;
5255
5256 spin_lock(&priv->lock);
5257
5258 /* Disable (but don't clear!) interrupts here to avoid
5259 * back-to-back ISRs and sporadic interrupts from our NIC.
5260 * If we have something to service, the tasklet will re-enable ints.
5261 * If we *don't* have something, we'll re-enable before leaving here. */
bb8c093b
CH
5262 inta_mask = iwl4965_read32(priv, CSR_INT_MASK); /* just for debug */
5263 iwl4965_write32(priv, CSR_INT_MASK, 0x00000000);
b481de9c
ZY
5264
5265 /* Discover which interrupts are active/pending */
bb8c093b
CH
5266 inta = iwl4965_read32(priv, CSR_INT);
5267 inta_fh = iwl4965_read32(priv, CSR_FH_INT_STATUS);
b481de9c
ZY
5268
5269 /* Ignore interrupt if there's nothing in NIC to service.
5270 * This may be due to IRQ shared with another device,
5271 * or due to sporadic interrupts thrown from our NIC. */
5272 if (!inta && !inta_fh) {
5273 IWL_DEBUG_ISR("Ignore interrupt, inta == 0, inta_fh == 0\n");
5274 goto none;
5275 }
5276
5277 if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
66fbb541
ON
5278 /* Hardware disappeared. It might have already raised
5279 * an interrupt */
b481de9c 5280 IWL_WARNING("HARDWARE GONE?? INTA == 0x%080x\n", inta);
66fbb541 5281 goto unplugged;
b481de9c
ZY
5282 }
5283
5284 IWL_DEBUG_ISR("ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
5285 inta, inta_mask, inta_fh);
5286
bb8c093b 5287 /* iwl4965_irq_tasklet() will service interrupts and re-enable them */
b481de9c 5288 tasklet_schedule(&priv->irq_tasklet);
b481de9c 5289
66fbb541
ON
5290 unplugged:
5291 spin_unlock(&priv->lock);
b481de9c
ZY
5292 return IRQ_HANDLED;
5293
5294 none:
5295 /* re-enable interrupts here since we don't have anything to service. */
bb8c093b 5296 iwl4965_enable_interrupts(priv);
b481de9c
ZY
5297 spin_unlock(&priv->lock);
5298 return IRQ_NONE;
5299}
5300
5301/************************** EEPROM BANDS ****************************
5302 *
bb8c093b 5303 * The iwl4965_eeprom_band definitions below provide the mapping from the
b481de9c
ZY
5304 * EEPROM contents to the specific channel number supported for each
5305 * band.
5306 *
bb8c093b 5307 * For example, iwl4965_priv->eeprom.band_3_channels[4] from the band_3
b481de9c
ZY
5308 * definition below maps to physical channel 42 in the 5.2GHz spectrum.
5309 * The specific geography and calibration information for that channel
5310 * is contained in the eeprom map itself.
5311 *
5312 * During init, we copy the eeprom information and channel map
5313 * information into priv->channel_info_24/52 and priv->channel_map_24/52
5314 *
5315 * channel_map_24/52 provides the index in the channel_info array for a
5316 * given channel. We have to have two separate maps as there is channel
5317 * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and
5318 * band_2
5319 *
5320 * A value of 0xff stored in the channel_map indicates that the channel
5321 * is not supported by the hardware at all.
5322 *
5323 * A value of 0xfe in the channel_map indicates that the channel is not
5324 * valid for Tx with the current hardware. This means that
5325 * while the system can tune and receive on a given channel, it may not
5326 * be able to associate or transmit any frames on that
5327 * channel. There is no corresponding channel information for that
5328 * entry.
5329 *
5330 *********************************************************************/
5331
5332/* 2.4 GHz */
bb8c093b 5333static const u8 iwl4965_eeprom_band_1[14] = {
b481de9c
ZY
5334 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
5335};
5336
5337/* 5.2 GHz bands */
9fbab516 5338static const u8 iwl4965_eeprom_band_2[] = { /* 4915-5080MHz */
b481de9c
ZY
5339 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
5340};
5341
9fbab516 5342static const u8 iwl4965_eeprom_band_3[] = { /* 5170-5320MHz */
b481de9c
ZY
5343 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
5344};
5345
bb8c093b 5346static const u8 iwl4965_eeprom_band_4[] = { /* 5500-5700MHz */
b481de9c
ZY
5347 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
5348};
5349
bb8c093b 5350static const u8 iwl4965_eeprom_band_5[] = { /* 5725-5825MHz */
b481de9c
ZY
5351 145, 149, 153, 157, 161, 165
5352};
5353
bb8c093b 5354static u8 iwl4965_eeprom_band_6[] = { /* 2.4 FAT channel */
b481de9c
ZY
5355 1, 2, 3, 4, 5, 6, 7
5356};
5357
bb8c093b 5358static u8 iwl4965_eeprom_band_7[] = { /* 5.2 FAT channel */
b481de9c
ZY
5359 36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
5360};
5361
9fbab516
BC
5362static void iwl4965_init_band_reference(const struct iwl4965_priv *priv,
5363 int band,
b481de9c 5364 int *eeprom_ch_count,
bb8c093b 5365 const struct iwl4965_eeprom_channel
b481de9c
ZY
5366 **eeprom_ch_info,
5367 const u8 **eeprom_ch_index)
5368{
5369 switch (band) {
5370 case 1: /* 2.4GHz band */
bb8c093b 5371 *eeprom_ch_count = ARRAY_SIZE(iwl4965_eeprom_band_1);
b481de9c 5372 *eeprom_ch_info = priv->eeprom.band_1_channels;
bb8c093b 5373 *eeprom_ch_index = iwl4965_eeprom_band_1;
b481de9c 5374 break;
9fbab516 5375 case 2: /* 4.9GHz band */
bb8c093b 5376 *eeprom_ch_count = ARRAY_SIZE(iwl4965_eeprom_band_2);
b481de9c 5377 *eeprom_ch_info = priv->eeprom.band_2_channels;
bb8c093b 5378 *eeprom_ch_index = iwl4965_eeprom_band_2;
b481de9c
ZY
5379 break;
5380 case 3: /* 5.2GHz band */
bb8c093b 5381 *eeprom_ch_count = ARRAY_SIZE(iwl4965_eeprom_band_3);
b481de9c 5382 *eeprom_ch_info = priv->eeprom.band_3_channels;
bb8c093b 5383 *eeprom_ch_index = iwl4965_eeprom_band_3;
b481de9c 5384 break;
9fbab516 5385 case 4: /* 5.5GHz band */
bb8c093b 5386 *eeprom_ch_count = ARRAY_SIZE(iwl4965_eeprom_band_4);
b481de9c 5387 *eeprom_ch_info = priv->eeprom.band_4_channels;
bb8c093b 5388 *eeprom_ch_index = iwl4965_eeprom_band_4;
b481de9c 5389 break;
9fbab516 5390 case 5: /* 5.7GHz band */
bb8c093b 5391 *eeprom_ch_count = ARRAY_SIZE(iwl4965_eeprom_band_5);
b481de9c 5392 *eeprom_ch_info = priv->eeprom.band_5_channels;
bb8c093b 5393 *eeprom_ch_index = iwl4965_eeprom_band_5;
b481de9c 5394 break;
9fbab516 5395 case 6: /* 2.4GHz FAT channels */
bb8c093b 5396 *eeprom_ch_count = ARRAY_SIZE(iwl4965_eeprom_band_6);
b481de9c 5397 *eeprom_ch_info = priv->eeprom.band_24_channels;
bb8c093b 5398 *eeprom_ch_index = iwl4965_eeprom_band_6;
b481de9c 5399 break;
9fbab516 5400 case 7: /* 5 GHz FAT channels */
bb8c093b 5401 *eeprom_ch_count = ARRAY_SIZE(iwl4965_eeprom_band_7);
b481de9c 5402 *eeprom_ch_info = priv->eeprom.band_52_channels;
bb8c093b 5403 *eeprom_ch_index = iwl4965_eeprom_band_7;
b481de9c
ZY
5404 break;
5405 default:
5406 BUG();
5407 return;
5408 }
5409}
5410
6440adb5
CB
5411/**
5412 * iwl4965_get_channel_info - Find driver's private channel info
5413 *
5414 * Based on band and channel number.
5415 */
bb8c093b 5416const struct iwl4965_channel_info *iwl4965_get_channel_info(const struct iwl4965_priv *priv,
b481de9c
ZY
5417 int phymode, u16 channel)
5418{
5419 int i;
5420
5421 switch (phymode) {
5422 case MODE_IEEE80211A:
5423 for (i = 14; i < priv->channel_count; i++) {
5424 if (priv->channel_info[i].channel == channel)
5425 return &priv->channel_info[i];
5426 }
5427 break;
5428
5429 case MODE_IEEE80211B:
5430 case MODE_IEEE80211G:
5431 if (channel >= 1 && channel <= 14)
5432 return &priv->channel_info[channel - 1];
5433 break;
5434
5435 }
5436
5437 return NULL;
5438}
5439
5440#define CHECK_AND_PRINT(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \
5441 ? # x " " : "")
5442
6440adb5
CB
5443/**
5444 * iwl4965_init_channel_map - Set up driver's info for all possible channels
5445 */
bb8c093b 5446static int iwl4965_init_channel_map(struct iwl4965_priv *priv)
b481de9c
ZY
5447{
5448 int eeprom_ch_count = 0;
5449 const u8 *eeprom_ch_index = NULL;
bb8c093b 5450 const struct iwl4965_eeprom_channel *eeprom_ch_info = NULL;
b481de9c 5451 int band, ch;
bb8c093b 5452 struct iwl4965_channel_info *ch_info;
b481de9c
ZY
5453
5454 if (priv->channel_count) {
5455 IWL_DEBUG_INFO("Channel map already initialized.\n");
5456 return 0;
5457 }
5458
5459 if (priv->eeprom.version < 0x2f) {
5460 IWL_WARNING("Unsupported EEPROM version: 0x%04X\n",
5461 priv->eeprom.version);
5462 return -EINVAL;
5463 }
5464
5465 IWL_DEBUG_INFO("Initializing regulatory info from EEPROM\n");
5466
5467 priv->channel_count =
bb8c093b
CH
5468 ARRAY_SIZE(iwl4965_eeprom_band_1) +
5469 ARRAY_SIZE(iwl4965_eeprom_band_2) +
5470 ARRAY_SIZE(iwl4965_eeprom_band_3) +
5471 ARRAY_SIZE(iwl4965_eeprom_band_4) +
5472 ARRAY_SIZE(iwl4965_eeprom_band_5);
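	/* With the band tables above, this works out to
	 * 14 + 13 + 12 + 11 + 6 = 56 channel entries. */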
b481de9c
ZY
5473
5474 IWL_DEBUG_INFO("Parsing data for %d channels.\n", priv->channel_count);
5475
bb8c093b 5476 priv->channel_info = kzalloc(sizeof(struct iwl4965_channel_info) *
b481de9c
ZY
5477 priv->channel_count, GFP_KERNEL);
5478 if (!priv->channel_info) {
5479 IWL_ERROR("Could not allocate channel_info\n");
5480 priv->channel_count = 0;
5481 return -ENOMEM;
5482 }
5483
5484 ch_info = priv->channel_info;
5485
5486 /* Loop through the 5 EEPROM bands adding them in order to the
5487 	 * channel map we maintain (which contains more information than
5488 	 * just what is in the EEPROM) */
5489 for (band = 1; band <= 5; band++) {
5490
bb8c093b 5491 iwl4965_init_band_reference(priv, band, &eeprom_ch_count,
b481de9c
ZY
5492 &eeprom_ch_info, &eeprom_ch_index);
5493
5494 /* Loop through each band adding each of the channels */
5495 for (ch = 0; ch < eeprom_ch_count; ch++) {
5496 ch_info->channel = eeprom_ch_index[ch];
5497 ch_info->phymode = (band == 1) ? MODE_IEEE80211B :
5498 MODE_IEEE80211A;
5499
5500 /* permanently store EEPROM's channel regulatory flags
5501 * and max power in channel info database. */
5502 ch_info->eeprom = eeprom_ch_info[ch];
5503
5504 /* Copy the run-time flags so they are there even on
5505 * invalid channels */
5506 ch_info->flags = eeprom_ch_info[ch].flags;
5507
5508 if (!(is_channel_valid(ch_info))) {
5509 IWL_DEBUG_INFO("Ch. %d Flags %x [%sGHz] - "
5510 "No traffic\n",
5511 ch_info->channel,
5512 ch_info->flags,
5513 is_channel_a_band(ch_info) ?
5514 "5.2" : "2.4");
5515 ch_info++;
5516 continue;
5517 }
5518
5519 /* Initialize regulatory-based run-time data */
5520 ch_info->max_power_avg = ch_info->curr_txpow =
5521 eeprom_ch_info[ch].max_power_avg;
5522 ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
5523 ch_info->min_power = 0;
5524
5525 IWL_DEBUG_INFO("Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x"
5526 " %ddBm): Ad-Hoc %ssupported\n",
5527 ch_info->channel,
5528 is_channel_a_band(ch_info) ?
5529 "5.2" : "2.4",
5530 CHECK_AND_PRINT(IBSS),
5531 CHECK_AND_PRINT(ACTIVE),
5532 CHECK_AND_PRINT(RADAR),
5533 CHECK_AND_PRINT(WIDE),
5534 CHECK_AND_PRINT(NARROW),
5535 CHECK_AND_PRINT(DFS),
5536 eeprom_ch_info[ch].flags,
5537 eeprom_ch_info[ch].max_power_avg,
5538 ((eeprom_ch_info[ch].
5539 flags & EEPROM_CHANNEL_IBSS)
5540 && !(eeprom_ch_info[ch].
5541 flags & EEPROM_CHANNEL_RADAR))
5542 ? "" : "not ");
5543
5544 /* Set the user_txpower_limit to the highest power
5545 * supported by any channel */
5546 if (eeprom_ch_info[ch].max_power_avg >
5547 priv->user_txpower_limit)
5548 priv->user_txpower_limit =
5549 eeprom_ch_info[ch].max_power_avg;
5550
5551 ch_info++;
5552 }
5553 }
5554
6440adb5 5555 /* Two additional EEPROM bands for 2.4 and 5 GHz FAT channels */
b481de9c
ZY
5556 for (band = 6; band <= 7; band++) {
5557 int phymode;
5558 u8 fat_extension_chan;
5559
bb8c093b 5560 iwl4965_init_band_reference(priv, band, &eeprom_ch_count,
b481de9c
ZY
5561 &eeprom_ch_info, &eeprom_ch_index);
5562
6440adb5 5563 /* EEPROM band 6 is 2.4, band 7 is 5 GHz */
b481de9c 5564 phymode = (band == 6) ? MODE_IEEE80211B : MODE_IEEE80211A;
6440adb5 5565
b481de9c
ZY
5566 /* Loop through each band adding each of the channels */
5567 for (ch = 0; ch < eeprom_ch_count; ch++) {
5568
5569 if ((band == 6) &&
5570 ((eeprom_ch_index[ch] == 5) ||
5571 (eeprom_ch_index[ch] == 6) ||
5572 (eeprom_ch_index[ch] == 7)))
5573 fat_extension_chan = HT_IE_EXT_CHANNEL_MAX;
5574 else
5575 fat_extension_chan = HT_IE_EXT_CHANNEL_ABOVE;
5576
6440adb5 5577 /* Set up driver's info for lower half */
b481de9c
ZY
5578 iwl4965_set_fat_chan_info(priv, phymode,
5579 eeprom_ch_index[ch],
5580 &(eeprom_ch_info[ch]),
5581 fat_extension_chan);
5582
6440adb5 5583 /* Set up driver's info for upper half */
b481de9c
ZY
5584 iwl4965_set_fat_chan_info(priv, phymode,
5585 (eeprom_ch_index[ch] + 4),
5586 &(eeprom_ch_info[ch]),
5587 HT_IE_EXT_CHANNEL_BELOW);
5588 }
5589 }
5590
5591 return 0;
5592}
5593
5594/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after
5595 * sending probe req. This should be set long enough to hear probe responses
5596 * from more than one AP. */
5597#define IWL_ACTIVE_DWELL_TIME_24 (20) /* all times in msec */
5598#define IWL_ACTIVE_DWELL_TIME_52 (10)
5599
5600/* For faster active scanning, scan will move to the next channel if fewer than
5601 * PLCP_QUIET_THRESH packets are heard on this channel within
5602 * ACTIVE_QUIET_TIME after sending probe request. This shortens the dwell
5603 * time if it's a quiet channel (nothing responded to our probe, and there's
5604 * no other traffic).
5605 * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */
5606#define IWL_PLCP_QUIET_THRESH __constant_cpu_to_le16(1) /* packets */
5607#define IWL_ACTIVE_QUIET_TIME __constant_cpu_to_le16(5) /* msec */
5608
5609/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel.
5610 * Must be set longer than active dwell time.
5611 * For the most reliable scan, set > AP beacon interval (typically 100msec). */
5612#define IWL_PASSIVE_DWELL_TIME_24 (20) /* all times in msec */
5613#define IWL_PASSIVE_DWELL_TIME_52 (10)
5614#define IWL_PASSIVE_DWELL_BASE (100)
5615#define IWL_CHANNEL_TUNE_TIME 5
5616
bb8c093b 5617static inline u16 iwl4965_get_active_dwell_time(struct iwl4965_priv *priv, int phymode)
b481de9c
ZY
5618{
5619 if (phymode == MODE_IEEE80211A)
5620 return IWL_ACTIVE_DWELL_TIME_52;
5621 else
5622 return IWL_ACTIVE_DWELL_TIME_24;
5623}
5624
bb8c093b 5625static u16 iwl4965_get_passive_dwell_time(struct iwl4965_priv *priv, int phymode)
b481de9c 5626{
bb8c093b 5627 u16 active = iwl4965_get_active_dwell_time(priv, phymode);
b481de9c
ZY
5628 u16 passive = (phymode != MODE_IEEE80211A) ?
5629 IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 :
5630 IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52;
5631
bb8c093b 5632 if (iwl4965_is_associated(priv)) {
b481de9c
ZY
5633 /* If we're associated, we clamp the maximum passive
5634 * dwell time to be 98% of the beacon interval (minus
5635 * 2 * channel tune time) */
5636 passive = priv->beacon_int;
5637 if ((passive > IWL_PASSIVE_DWELL_BASE) || !passive)
5638 passive = IWL_PASSIVE_DWELL_BASE;
5639 passive = (passive * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
5640 }
5641
5642 if (passive <= active)
5643 passive = active + 1;
5644
5645 return passive;
5646}
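/* Example, using the defines above: when not associated, the 2.4 GHz
 * passive dwell is 100 + 20 = 120 ms.  When associated with a typical
 * 100 ms beacon interval, it becomes (100 * 98) / 100 - 2 * 5 = 88 ms. */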
5647
bb8c093b 5648static int iwl4965_get_channels_for_scan(struct iwl4965_priv *priv, int phymode,
b481de9c 5649 u8 is_active, u8 direct_mask,
bb8c093b 5650 struct iwl4965_scan_channel *scan_ch)
b481de9c
ZY
5651{
5652 const struct ieee80211_channel *channels = NULL;
5653 const struct ieee80211_hw_mode *hw_mode;
bb8c093b 5654 const struct iwl4965_channel_info *ch_info;
b481de9c
ZY
5655 u16 passive_dwell = 0;
5656 u16 active_dwell = 0;
5657 int added, i;
5658
bb8c093b 5659 hw_mode = iwl4965_get_hw_mode(priv, phymode);
b481de9c
ZY
5660 if (!hw_mode)
5661 return 0;
5662
5663 channels = hw_mode->channels;
5664
bb8c093b
CH
5665 active_dwell = iwl4965_get_active_dwell_time(priv, phymode);
5666 passive_dwell = iwl4965_get_passive_dwell_time(priv, phymode);
b481de9c
ZY
5667
5668 for (i = 0, added = 0; i < hw_mode->num_channels; i++) {
5669 if (channels[i].chan ==
5670 le16_to_cpu(priv->active_rxon.channel)) {
bb8c093b 5671 if (iwl4965_is_associated(priv)) {
b481de9c
ZY
5672 IWL_DEBUG_SCAN
5673 ("Skipping current channel %d\n",
5674 le16_to_cpu(priv->active_rxon.channel));
5675 continue;
5676 }
5677 } else if (priv->only_active_channel)
5678 continue;
5679
5680 scan_ch->channel = channels[i].chan;
5681
9fbab516
BC
5682 ch_info = iwl4965_get_channel_info(priv, phymode,
5683 scan_ch->channel);
b481de9c
ZY
5684 if (!is_channel_valid(ch_info)) {
5685 IWL_DEBUG_SCAN("Channel %d is INVALID for this SKU.\n",
5686 scan_ch->channel);
5687 continue;
5688 }
5689
5690 if (!is_active || is_channel_passive(ch_info) ||
5691 !(channels[i].flag & IEEE80211_CHAN_W_ACTIVE_SCAN))
5692 scan_ch->type = 0; /* passive */
5693 else
5694 scan_ch->type = 1; /* active */
5695
5696 if (scan_ch->type & 1)
5697 scan_ch->type |= (direct_mask << 1);
5698
5699 if (is_channel_narrow(ch_info))
5700 scan_ch->type |= (1 << 7);
5701
5702 scan_ch->active_dwell = cpu_to_le16(active_dwell);
5703 scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
5704
9fbab516 5705 /* Set txpower levels to defaults */
b481de9c
ZY
5706 scan_ch->tpc.dsp_atten = 110;
5707 /* scan_pwr_info->tpc.dsp_atten; */
5708
5709 /*scan_pwr_info->tpc.tx_gain; */
5710 if (phymode == MODE_IEEE80211A)
5711 scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3;
5712 else {
5713 scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3));
5714 /* NOTE: if we were doing 6Mb OFDM for scans we'd use
9fbab516
BC
5715 * power level:
5716 * scan_ch->tpc.tx_gain = ((1<<5) | (2 << 3)) | 3;
b481de9c
ZY
5717 */
5718 }
5719
5720 IWL_DEBUG_SCAN("Scanning %d [%s %d]\n",
5721 scan_ch->channel,
5722 (scan_ch->type & 1) ? "ACTIVE" : "PASSIVE",
5723 (scan_ch->type & 1) ?
5724 active_dwell : passive_dwell);
5725
5726 scan_ch++;
5727 added++;
5728 }
5729
5730 IWL_DEBUG_SCAN("total channels to scan %d \n", added);
5731 return added;
5732}
5733
bb8c093b 5734static void iwl4965_reset_channel_flag(struct iwl4965_priv *priv)
b481de9c
ZY
5735{
5736 int i, j;
5737 for (i = 0; i < 3; i++) {
5738 struct ieee80211_hw_mode *hw_mode = (void *)&priv->modes[i];
5739 for (j = 0; j < hw_mode->num_channels; j++)
5740 hw_mode->channels[j].flag = hw_mode->channels[j].val;
5741 }
5742}
5743
bb8c093b 5744static void iwl4965_init_hw_rates(struct iwl4965_priv *priv,
b481de9c
ZY
5745 struct ieee80211_rate *rates)
5746{
5747 int i;
5748
5749 for (i = 0; i < IWL_RATE_COUNT; i++) {
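		/* mac80211 expects rates in 100 kb/s units; the "ieee"
		 * value is in 500 kb/s units, hence the multiply by 5. */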
bb8c093b 5750 rates[i].rate = iwl4965_rates[i].ieee * 5;
b481de9c
ZY
5751 rates[i].val = i; /* Rate scaling will work on indexes */
5752 rates[i].val2 = i;
5753 rates[i].flags = IEEE80211_RATE_SUPPORTED;
5754 /* Only OFDM have the bits-per-symbol set */
5755 if ((i <= IWL_LAST_OFDM_RATE) && (i >= IWL_FIRST_OFDM_RATE))
5756 rates[i].flags |= IEEE80211_RATE_OFDM;
5757 else {
5758 /*
5759 			 * CCK 1M (plcp == 10) gets the plain CCK flag; all
5760 			 * other CCK rates get CCK_2, which is CCK | PREAMBLE2
5761 */
bb8c093b 5762 rates[i].flags |= (iwl4965_rates[i].plcp == 10) ?
b481de9c
ZY
5763 IEEE80211_RATE_CCK : IEEE80211_RATE_CCK_2;
5764 }
5765
5766 /* Set up which ones are basic rates... */
5767 if (IWL_BASIC_RATES_MASK & (1 << i))
5768 rates[i].flags |= IEEE80211_RATE_BASIC;
5769 }
b481de9c
ZY
5770}
5771
5772/**
bb8c093b 5773 * iwl4965_init_geos - Initialize mac80211's geo/channel info based from eeprom
b481de9c 5774 */
bb8c093b 5775static int iwl4965_init_geos(struct iwl4965_priv *priv)
b481de9c 5776{
bb8c093b 5777 struct iwl4965_channel_info *ch;
b481de9c
ZY
5778 struct ieee80211_hw_mode *modes;
5779 struct ieee80211_channel *channels;
5780 struct ieee80211_channel *geo_ch;
5781 struct ieee80211_rate *rates;
5782 int i = 0;
5783 enum {
5784 A = 0,
5785 B = 1,
5786 G = 2,
5787 A_11N = 3,
5788 G_11N = 4,
5789 };
5790 int mode_count = 5;
5791
5792 if (priv->modes) {
5793 IWL_DEBUG_INFO("Geography modes already initialized.\n");
5794 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
5795 return 0;
5796 }
5797
5798 modes = kzalloc(sizeof(struct ieee80211_hw_mode) * mode_count,
5799 GFP_KERNEL);
5800 if (!modes)
5801 return -ENOMEM;
5802
5803 channels = kzalloc(sizeof(struct ieee80211_channel) *
5804 priv->channel_count, GFP_KERNEL);
5805 if (!channels) {
5806 kfree(modes);
5807 return -ENOMEM;
5808 }
5809
5810 rates = kzalloc((sizeof(struct ieee80211_rate) * (IWL_MAX_RATES + 1)),
5811 GFP_KERNEL);
5812 if (!rates) {
5813 kfree(modes);
5814 kfree(channels);
5815 return -ENOMEM;
5816 }
5817
5818 /* 0 = 802.11a
5819 * 1 = 802.11b
5820 * 2 = 802.11g
5821 */
5822
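	/* The rates[] table is laid out CCK first, then OFDM: B mode uses
	 * the first 4 (CCK) entries, while the A modes start at &rates[4]
	 * (OFDM only), as set up below. */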
5823 /* 5.2GHz channels start after the 2.4GHz channels */
5824 modes[A].mode = MODE_IEEE80211A;
bb8c093b 5825 modes[A].channels = &channels[ARRAY_SIZE(iwl4965_eeprom_band_1)];
b481de9c
ZY
5826 modes[A].rates = rates;
5827 modes[A].num_rates = 8; /* just OFDM */
5828 modes[A].rates = &rates[4];
5829 modes[A].num_channels = 0;
5830
5831 modes[B].mode = MODE_IEEE80211B;
5832 modes[B].channels = channels;
5833 modes[B].rates = rates;
5834 modes[B].num_rates = 4; /* just CCK */
5835 modes[B].num_channels = 0;
5836
5837 modes[G].mode = MODE_IEEE80211G;
5838 modes[G].channels = channels;
5839 modes[G].rates = rates;
5840 modes[G].num_rates = 12; /* OFDM & CCK */
5841 modes[G].num_channels = 0;
5842
5843 modes[G_11N].mode = MODE_IEEE80211G;
5844 modes[G_11N].channels = channels;
5845 modes[G_11N].num_rates = 13; /* OFDM & CCK */
5846 modes[G_11N].rates = rates;
5847 modes[G_11N].num_channels = 0;
5848
5849 modes[A_11N].mode = MODE_IEEE80211A;
bb8c093b 5850 modes[A_11N].channels = &channels[ARRAY_SIZE(iwl4965_eeprom_band_1)];
b481de9c
ZY
5851 modes[A_11N].rates = &rates[4];
5852 modes[A_11N].num_rates = 9; /* just OFDM */
5853 modes[A_11N].num_channels = 0;
5854
5855 priv->ieee_channels = channels;
5856 priv->ieee_rates = rates;
5857
bb8c093b 5858 iwl4965_init_hw_rates(priv, rates);
b481de9c
ZY
5859
5860 for (i = 0, geo_ch = channels; i < priv->channel_count; i++) {
5861 ch = &priv->channel_info[i];
5862
5863 if (!is_channel_valid(ch)) {
5864 IWL_DEBUG_INFO("Channel %d [%sGHz] is restricted -- "
5865 "skipping.\n",
5866 ch->channel, is_channel_a_band(ch) ?
5867 "5.2" : "2.4");
5868 continue;
5869 }
5870
5871 if (is_channel_a_band(ch)) {
5872 geo_ch = &modes[A].channels[modes[A].num_channels++];
5873 modes[A_11N].num_channels++;
5874 } else {
5875 geo_ch = &modes[B].channels[modes[B].num_channels++];
5876 modes[G].num_channels++;
5877 modes[G_11N].num_channels++;
5878 }
5879
5880 geo_ch->freq = ieee80211chan2mhz(ch->channel);
5881 geo_ch->chan = ch->channel;
5882 geo_ch->power_level = ch->max_power_avg;
5883 geo_ch->antenna_max = 0xff;
5884
5885 if (is_channel_valid(ch)) {
5886 geo_ch->flag = IEEE80211_CHAN_W_SCAN;
5887 if (ch->flags & EEPROM_CHANNEL_IBSS)
5888 geo_ch->flag |= IEEE80211_CHAN_W_IBSS;
5889
5890 if (ch->flags & EEPROM_CHANNEL_ACTIVE)
5891 geo_ch->flag |= IEEE80211_CHAN_W_ACTIVE_SCAN;
5892
5893 if (ch->flags & EEPROM_CHANNEL_RADAR)
5894 geo_ch->flag |= IEEE80211_CHAN_W_RADAR_DETECT;
5895
5896 if (ch->max_power_avg > priv->max_channel_txpower_limit)
5897 priv->max_channel_txpower_limit =
5898 ch->max_power_avg;
5899 }
5900
5901 geo_ch->val = geo_ch->flag;
5902 }
5903
5904 if ((modes[A].num_channels == 0) && priv->is_abg) {
5905 printk(KERN_INFO DRV_NAME
5906 ": Incorrectly detected BG card as ABG. Please send "
5907 "your PCI ID 0x%04X:0x%04X to maintainer.\n",
5908 priv->pci_dev->device, priv->pci_dev->subsystem_device);
5909 priv->is_abg = 0;
5910 }
5911
5912 printk(KERN_INFO DRV_NAME
5913 ": Tunable channels: %d 802.11bg, %d 802.11a channels\n",
5914 modes[G].num_channels, modes[A].num_channels);
5915
5916 /*
5917 	 * NOTE: We register these in order of preference -- the
5918 	 * stack doesn't currently (as of 7.0.6 / Apr 24 '07) pick
5919 	 * a phymode based on rates or AP capabilities, but seems to
5920 	 * configure it purely on whether the channel being configured
5921 	 * is supported by a mode -- and the first match is taken
5922 */
5923
5924 if (modes[G].num_channels)
5925 ieee80211_register_hwmode(priv->hw, &modes[G]);
5926 if (modes[B].num_channels)
5927 ieee80211_register_hwmode(priv->hw, &modes[B]);
5928 if (modes[A].num_channels)
5929 ieee80211_register_hwmode(priv->hw, &modes[A]);
5930
5931 priv->modes = modes;
5932 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
5933
5934 return 0;
5935}
5936
5937/******************************************************************************
5938 *
5939 * uCode download functions
5940 *
5941 ******************************************************************************/
5942
bb8c093b 5943static void iwl4965_dealloc_ucode_pci(struct iwl4965_priv *priv)
b481de9c
ZY
5944{
5945 if (priv->ucode_code.v_addr != NULL) {
5946 pci_free_consistent(priv->pci_dev,
5947 priv->ucode_code.len,
5948 priv->ucode_code.v_addr,
5949 priv->ucode_code.p_addr);
5950 priv->ucode_code.v_addr = NULL;
5951 }
5952 if (priv->ucode_data.v_addr != NULL) {
5953 pci_free_consistent(priv->pci_dev,
5954 priv->ucode_data.len,
5955 priv->ucode_data.v_addr,
5956 priv->ucode_data.p_addr);
5957 priv->ucode_data.v_addr = NULL;
5958 }
5959 if (priv->ucode_data_backup.v_addr != NULL) {
5960 pci_free_consistent(priv->pci_dev,
5961 priv->ucode_data_backup.len,
5962 priv->ucode_data_backup.v_addr,
5963 priv->ucode_data_backup.p_addr);
5964 priv->ucode_data_backup.v_addr = NULL;
5965 }
5966 if (priv->ucode_init.v_addr != NULL) {
5967 pci_free_consistent(priv->pci_dev,
5968 priv->ucode_init.len,
5969 priv->ucode_init.v_addr,
5970 priv->ucode_init.p_addr);
5971 priv->ucode_init.v_addr = NULL;
5972 }
5973 if (priv->ucode_init_data.v_addr != NULL) {
5974 pci_free_consistent(priv->pci_dev,
5975 priv->ucode_init_data.len,
5976 priv->ucode_init_data.v_addr,
5977 priv->ucode_init_data.p_addr);
5978 priv->ucode_init_data.v_addr = NULL;
5979 }
5980 if (priv->ucode_boot.v_addr != NULL) {
5981 pci_free_consistent(priv->pci_dev,
5982 priv->ucode_boot.len,
5983 priv->ucode_boot.v_addr,
5984 priv->ucode_boot.p_addr);
5985 priv->ucode_boot.v_addr = NULL;
5986 }
5987}
5988
5989/**
bb8c093b 5990 * iwl4965_verify_inst_full - verify runtime uCode image in card vs. host,
b481de9c
ZY
5991 * looking at all data.
5992 */
9fbab516
BC
5993static int iwl4965_verify_inst_full(struct iwl4965_priv *priv, __le32 * image,
5994 u32 len)
b481de9c
ZY
5995{
5996 u32 val;
5997 u32 save_len = len;
5998 int rc = 0;
5999 u32 errcnt;
6000
6001 IWL_DEBUG_INFO("ucode inst image size is %u\n", len);
6002
bb8c093b 6003 rc = iwl4965_grab_nic_access(priv);
b481de9c
ZY
6004 if (rc)
6005 return rc;
6006
bb8c093b 6007 iwl4965_write_direct32(priv, HBUS_TARG_MEM_RADDR, RTC_INST_LOWER_BOUND);
b481de9c
ZY
6008
6009 errcnt = 0;
6010 for (; len > 0; len -= sizeof(u32), image++) {
6011 /* read data comes through single port, auto-incr addr */
6012 /* NOTE: Use the debugless read so we don't flood kernel log
6013 * if IWL_DL_IO is set */
bb8c093b 6014 val = _iwl4965_read_direct32(priv, HBUS_TARG_MEM_RDAT);
b481de9c
ZY
6015 if (val != le32_to_cpu(*image)) {
6016 IWL_ERROR("uCode INST section is invalid at "
6017 "offset 0x%x, is 0x%x, s/b 0x%x\n",
6018 save_len - len, val, le32_to_cpu(*image));
6019 rc = -EIO;
6020 errcnt++;
6021 if (errcnt >= 20)
6022 break;
6023 }
6024 }
6025
bb8c093b 6026 iwl4965_release_nic_access(priv);
b481de9c
ZY
6027
6028 if (!errcnt)
6029 IWL_DEBUG_INFO
6030 ("ucode image in INSTRUCTION memory is good\n");
6031
6032 return rc;
6033}
6034
6035
6036/**
bb8c093b 6037 * iwl4965_verify_inst_sparse - verify runtime uCode image in card vs. host,
b481de9c
ZY
6038 * using sample data 100 bytes apart. If these sample points are good,
6039 * it's a pretty good bet that everything between them is good, too.
6040 */
bb8c093b 6041static int iwl4965_verify_inst_sparse(struct iwl4965_priv *priv, __le32 *image, u32 len)
b481de9c
ZY
6042{
6043 u32 val;
6044 int rc = 0;
6045 u32 errcnt = 0;
6046 u32 i;
6047
6048 IWL_DEBUG_INFO("ucode inst image size is %u\n", len);
6049
bb8c093b 6050 rc = iwl4965_grab_nic_access(priv);
b481de9c
ZY
6051 if (rc)
6052 return rc;
6053
6054 for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
6055 /* read data comes through single port, auto-incr addr */
6056 /* NOTE: Use the debugless read so we don't flood kernel log
6057 * if IWL_DL_IO is set */
bb8c093b 6058 iwl4965_write_direct32(priv, HBUS_TARG_MEM_RADDR,
b481de9c 6059 i + RTC_INST_LOWER_BOUND);
bb8c093b 6060 val = _iwl4965_read_direct32(priv, HBUS_TARG_MEM_RDAT);
b481de9c
ZY
6061 if (val != le32_to_cpu(*image)) {
6062#if 0 /* Enable this if you want to see details */
6063 IWL_ERROR("uCode INST section is invalid at "
6064 "offset 0x%x, is 0x%x, s/b 0x%x\n",
6065 i, val, *image);
6066#endif
6067 rc = -EIO;
6068 errcnt++;
6069 if (errcnt >= 3)
6070 break;
6071 }
6072 }
6073
bb8c093b 6074 iwl4965_release_nic_access(priv);
b481de9c
ZY
6075
6076 return rc;
6077}
6078
6079
6080/**
bb8c093b 6081 * iwl4965_verify_ucode - determine which instruction image is in SRAM,
b481de9c
ZY
6082 * and verify its contents
6083 */
bb8c093b 6084static int iwl4965_verify_ucode(struct iwl4965_priv *priv)
b481de9c
ZY
6085{
6086 __le32 *image;
6087 u32 len;
6088 int rc = 0;
6089
6090 /* Try bootstrap */
6091 image = (__le32 *)priv->ucode_boot.v_addr;
6092 len = priv->ucode_boot.len;
bb8c093b 6093 rc = iwl4965_verify_inst_sparse(priv, image, len);
b481de9c
ZY
6094 if (rc == 0) {
6095 IWL_DEBUG_INFO("Bootstrap uCode is good in inst SRAM\n");
6096 return 0;
6097 }
6098
6099 /* Try initialize */
6100 image = (__le32 *)priv->ucode_init.v_addr;
6101 len = priv->ucode_init.len;
bb8c093b 6102 rc = iwl4965_verify_inst_sparse(priv, image, len);
b481de9c
ZY
6103 if (rc == 0) {
6104 IWL_DEBUG_INFO("Initialize uCode is good in inst SRAM\n");
6105 return 0;
6106 }
6107
6108 /* Try runtime/protocol */
6109 image = (__le32 *)priv->ucode_code.v_addr;
6110 len = priv->ucode_code.len;
bb8c093b 6111 rc = iwl4965_verify_inst_sparse(priv, image, len);
b481de9c
ZY
6112 if (rc == 0) {
6113 IWL_DEBUG_INFO("Runtime uCode is good in inst SRAM\n");
6114 return 0;
6115 }
6116
6117 IWL_ERROR("NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
6118
9fbab516
BC
6119 /* Since nothing seems to match, show first several data entries in
6120 * instruction SRAM, so maybe visual inspection will give a clue.
6121 * Selection of bootstrap image (vs. other images) is arbitrary. */
b481de9c
ZY
6122 image = (__le32 *)priv->ucode_boot.v_addr;
6123 len = priv->ucode_boot.len;
bb8c093b 6124 rc = iwl4965_verify_inst_full(priv, image, len);
b481de9c
ZY
6125
6126 return rc;
6127}
6128
6129
6130/* check contents of special bootstrap uCode SRAM */
bb8c093b 6131static int iwl4965_verify_bsm(struct iwl4965_priv *priv)
b481de9c
ZY
6132{
6133 __le32 *image = priv->ucode_boot.v_addr;
6134 u32 len = priv->ucode_boot.len;
6135 u32 reg;
6136 u32 val;
6137
6138 IWL_DEBUG_INFO("Begin verify bsm\n");
6139
6140 /* verify BSM SRAM contents */
bb8c093b 6141 val = iwl4965_read_prph(priv, BSM_WR_DWCOUNT_REG);
b481de9c
ZY
6142 for (reg = BSM_SRAM_LOWER_BOUND;
6143 reg < BSM_SRAM_LOWER_BOUND + len;
6144 reg += sizeof(u32), image ++) {
bb8c093b 6145 val = iwl4965_read_prph(priv, reg);
b481de9c
ZY
6146 if (val != le32_to_cpu(*image)) {
6147 IWL_ERROR("BSM uCode verification failed at "
6148 "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
6149 BSM_SRAM_LOWER_BOUND,
6150 reg - BSM_SRAM_LOWER_BOUND, len,
6151 val, le32_to_cpu(*image));
6152 return -EIO;
6153 }
6154 }
6155
6156 IWL_DEBUG_INFO("BSM bootstrap uCode image OK\n");
6157
6158 return 0;
6159}
6160
6161/**
bb8c093b 6162 * iwl4965_load_bsm - Load bootstrap instructions
b481de9c
ZY
6163 *
6164 * BSM operation:
6165 *
6166 * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
6167 * in special SRAM that does not power down during RFKILL. When powering back
6168 * up after power-saving sleeps (or during initial uCode load), the BSM loads
6169 * the bootstrap program into the on-board processor, and starts it.
6170 *
6171 * The bootstrap program loads (via DMA) instructions and data for a new
6172 * program from host DRAM locations indicated by the host driver in the
6173 * BSM_DRAM_* registers. Once the new program is loaded, it starts
6174 * automatically.
6175 *
6176 * When initializing the NIC, the host driver points the BSM to the
6177 * "initialize" uCode image. This uCode sets up some internal data, then
6178 * notifies host via "initialize alive" that it is complete.
6179 *
6180 * The host then replaces the BSM_DRAM_* pointer values to point to the
6181 * normal runtime uCode instructions and a backup uCode data cache buffer
6182 * (filled initially with starting data values for the on-board processor),
6183 * then triggers the "initialize" uCode to load and launch the runtime uCode,
6184 * which begins normal operation.
6185 *
6186 * When doing a power-save shutdown, runtime uCode saves data SRAM into
6187 * the backup data cache in DRAM before SRAM is powered down.
6188 *
6189 * When powering back up, the BSM loads the bootstrap program. This reloads
6190 * the runtime uCode instructions and the backup data cache into SRAM,
6191 * and re-launches the runtime uCode from where it left off.
6192 */
bb8c093b 6193static int iwl4965_load_bsm(struct iwl4965_priv *priv)
b481de9c
ZY
6194{
6195 __le32 *image = priv->ucode_boot.v_addr;
6196 u32 len = priv->ucode_boot.len;
6197 dma_addr_t pinst;
6198 dma_addr_t pdata;
6199 u32 inst_len;
6200 u32 data_len;
6201 int rc;
6202 int i;
6203 u32 done;
6204 u32 reg_offset;
6205
6206 IWL_DEBUG_INFO("Begin load bsm\n");
6207
6208 /* make sure bootstrap program is no larger than BSM's SRAM size */
6209 if (len > IWL_MAX_BSM_SIZE)
6210 return -EINVAL;
6211
6212 /* Tell bootstrap uCode where to find the "Initialize" uCode
9fbab516 6213 * in host DRAM ... host DRAM physical address bits 35:4 for 4965.
bb8c093b 6214 * NOTE: iwl4965_initialize_alive_start() will replace these values,
b481de9c
ZY
6215 * after the "initialize" uCode has run, to point to
6216 * runtime/protocol instructions and backup data cache. */
6217 pinst = priv->ucode_init.p_addr >> 4;
6218 pdata = priv->ucode_init_data.p_addr >> 4;
6219 inst_len = priv->ucode_init.len;
6220 data_len = priv->ucode_init_data.len;
6221
bb8c093b 6222 rc = iwl4965_grab_nic_access(priv);
b481de9c
ZY
6223 if (rc)
6224 return rc;
6225
bb8c093b
CH
6226 iwl4965_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
6227 iwl4965_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
6228 iwl4965_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
6229 iwl4965_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
b481de9c
ZY
6230
6231 /* Fill BSM memory with bootstrap instructions */
6232 for (reg_offset = BSM_SRAM_LOWER_BOUND;
6233 reg_offset < BSM_SRAM_LOWER_BOUND + len;
6234 reg_offset += sizeof(u32), image++)
bb8c093b 6235 _iwl4965_write_prph(priv, reg_offset,
b481de9c
ZY
6236 le32_to_cpu(*image));
6237
bb8c093b 6238 rc = iwl4965_verify_bsm(priv);
b481de9c 6239 if (rc) {
bb8c093b 6240 iwl4965_release_nic_access(priv);
b481de9c
ZY
6241 return rc;
6242 }
6243
6244 /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
bb8c093b
CH
6245 iwl4965_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
6246 iwl4965_write_prph(priv, BSM_WR_MEM_DST_REG,
b481de9c 6247 RTC_INST_LOWER_BOUND);
bb8c093b 6248 iwl4965_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
b481de9c
ZY
6249
6250 /* Load bootstrap code into instruction SRAM now,
6251 * to prepare to load "initialize" uCode */
bb8c093b 6252 iwl4965_write_prph(priv, BSM_WR_CTRL_REG,
b481de9c
ZY
6253 BSM_WR_CTRL_REG_BIT_START);
6254
6255 /* Wait for load of bootstrap uCode to finish */
6256 for (i = 0; i < 100; i++) {
bb8c093b 6257 done = iwl4965_read_prph(priv, BSM_WR_CTRL_REG);
b481de9c
ZY
6258 if (!(done & BSM_WR_CTRL_REG_BIT_START))
6259 break;
6260 udelay(10);
6261 }
6262 if (i < 100)
6263 IWL_DEBUG_INFO("BSM write complete, poll %d iterations\n", i);
6264 else {
6265 IWL_ERROR("BSM write did not complete!\n");
6266 return -EIO;
6267 }
6268
6269 /* Enable future boot loads whenever power management unit triggers it
6270 * (e.g. when powering back up after power-save shutdown) */
bb8c093b 6271 iwl4965_write_prph(priv, BSM_WR_CTRL_REG,
b481de9c
ZY
6272 BSM_WR_CTRL_REG_BIT_START_EN);
6273
bb8c093b 6274 iwl4965_release_nic_access(priv);
b481de9c
ZY
6275
6276 return 0;
6277}
6278
bb8c093b 6279static void iwl4965_nic_start(struct iwl4965_priv *priv)
b481de9c
ZY
6280{
6281 /* Remove all resets to allow NIC to operate */
bb8c093b 6282 iwl4965_write32(priv, CSR_RESET, 0);
b481de9c
ZY
6283}
6284
90e759d1
TW
6285static int iwl4965_alloc_fw_desc(struct pci_dev *pci_dev, struct fw_desc *desc)
6286{
6287 desc->v_addr = pci_alloc_consistent(pci_dev, desc->len, &desc->p_addr);
6288 return (desc->v_addr != NULL) ? 0 : -ENOMEM;
6289}
6290
b481de9c 6291/**
bb8c093b 6292 * iwl4965_read_ucode - Read uCode images from disk file.
b481de9c
ZY
6293 *
6294 * Copy into buffers for card to fetch via bus-mastering
6295 */
bb8c093b 6296static int iwl4965_read_ucode(struct iwl4965_priv *priv)
b481de9c 6297{
bb8c093b 6298 struct iwl4965_ucode *ucode;
90e759d1 6299 int ret;
b481de9c
ZY
6300 const struct firmware *ucode_raw;
6301 const char *name = "iwlwifi-4965" IWL4965_UCODE_API ".ucode";
6302 u8 *src;
6303 size_t len;
6304 u32 ver, inst_size, data_size, init_size, init_data_size, boot_size;
6305
6306 /* Ask kernel firmware_class module to get the boot firmware off disk.
6307 * request_firmware() is synchronous, file is in memory on return. */
90e759d1
TW
6308 ret = request_firmware(&ucode_raw, name, &priv->pci_dev->dev);
6309 if (ret < 0) {
6310 IWL_ERROR("%s firmware file req failed: Reason %d\n",
6311 name, ret);
b481de9c
ZY
6312 goto error;
6313 }
6314
6315 IWL_DEBUG_INFO("Got firmware '%s' file (%zd bytes) from disk\n",
6316 name, ucode_raw->size);
6317
6318 /* Make sure that we got at least our header! */
6319 if (ucode_raw->size < sizeof(*ucode)) {
6320 IWL_ERROR("File size way too small!\n");
90e759d1 6321 ret = -EINVAL;
b481de9c
ZY
6322 goto err_release;
6323 }
6324
6325 /* Data from ucode file: header followed by uCode images */
6326 ucode = (void *)ucode_raw->data;
6327
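	/* Layout of the .ucode file, as consumed here: a small header
	 * (ver plus the five image sizes read below), followed by the
	 * images back-to-back in this order: runtime inst, runtime data,
	 * init inst, init data, bootstrap inst. */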
6328 ver = le32_to_cpu(ucode->ver);
6329 inst_size = le32_to_cpu(ucode->inst_size);
6330 data_size = le32_to_cpu(ucode->data_size);
6331 init_size = le32_to_cpu(ucode->init_size);
6332 init_data_size = le32_to_cpu(ucode->init_data_size);
6333 boot_size = le32_to_cpu(ucode->boot_size);
6334
6335 IWL_DEBUG_INFO("f/w package hdr ucode version = 0x%x\n", ver);
6336 IWL_DEBUG_INFO("f/w package hdr runtime inst size = %u\n",
6337 inst_size);
6338 IWL_DEBUG_INFO("f/w package hdr runtime data size = %u\n",
6339 data_size);
6340 IWL_DEBUG_INFO("f/w package hdr init inst size = %u\n",
6341 init_size);
6342 IWL_DEBUG_INFO("f/w package hdr init data size = %u\n",
6343 init_data_size);
6344 IWL_DEBUG_INFO("f/w package hdr boot inst size = %u\n",
6345 boot_size);
6346
6347 /* Verify size of file vs. image size info in file's header */
6348 if (ucode_raw->size < sizeof(*ucode) +
6349 inst_size + data_size + init_size +
6350 init_data_size + boot_size) {
6351
6352 IWL_DEBUG_INFO("uCode file size %d too small\n",
6353 (int)ucode_raw->size);
90e759d1 6354 ret = -EINVAL;
b481de9c
ZY
6355 goto err_release;
6356 }
6357
6358 /* Verify that uCode images will fit in card's SRAM */
6359 if (inst_size > IWL_MAX_INST_SIZE) {
90e759d1
TW
6360 IWL_DEBUG_INFO("uCode instr len %d too large to fit in\n",
6361 inst_size);
6362 ret = -EINVAL;
b481de9c
ZY
6363 goto err_release;
6364 }
6365
6366 if (data_size > IWL_MAX_DATA_SIZE) {
90e759d1
TW
6367 IWL_DEBUG_INFO("uCode data len %d too large to fit in\n",
6368 data_size);
6369 ret = -EINVAL;
b481de9c
ZY
6370 goto err_release;
6371 }
6372 if (init_size > IWL_MAX_INST_SIZE) {
6373 IWL_DEBUG_INFO
90e759d1
TW
6374 ("uCode init instr len %d too large to fit in\n",
6375 init_size);
6376 ret = -EINVAL;
b481de9c
ZY
6377 goto err_release;
6378 }
6379 if (init_data_size > IWL_MAX_DATA_SIZE) {
6380 IWL_DEBUG_INFO
90e759d1
TW
6381 ("uCode init data len %d too large to fit in\n",
6382 init_data_size);
6383 ret = -EINVAL;
b481de9c
ZY
6384 goto err_release;
6385 }
6386 if (boot_size > IWL_MAX_BSM_SIZE) {
6387 IWL_DEBUG_INFO
90e759d1
TW
6388 ("uCode boot instr len %d too large to fit in\n",
6389 boot_size);
6390 ret = -EINVAL;
b481de9c
ZY
6391 goto err_release;
6392 }
6393
6394 /* Allocate ucode buffers for card's bus-master loading ... */
6395
6396 /* Runtime instructions and 2 copies of data:
6397 * 1) unmodified from disk
6398 * 2) backup cache for save/restore during power-downs */
6399 priv->ucode_code.len = inst_size;
90e759d1 6400 iwl4965_alloc_fw_desc(priv->pci_dev, &priv->ucode_code);
b481de9c
ZY
6401
6402 priv->ucode_data.len = data_size;
90e759d1 6403 iwl4965_alloc_fw_desc(priv->pci_dev, &priv->ucode_data);
b481de9c
ZY
6404
6405 priv->ucode_data_backup.len = data_size;
90e759d1 6406 iwl4965_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
b481de9c
ZY
6407
6408 /* Initialization instructions and data */
90e759d1
TW
6409 if (init_size && init_data_size) {
6410 priv->ucode_init.len = init_size;
6411 iwl4965_alloc_fw_desc(priv->pci_dev, &priv->ucode_init);
6412
6413 priv->ucode_init_data.len = init_data_size;
6414 iwl4965_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data);
6415
6416 if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr)
6417 goto err_pci_alloc;
6418 }
b481de9c
ZY
6419
6420 /* Bootstrap (instructions only, no data) */
90e759d1
TW
6421 if (boot_size) {
6422 priv->ucode_boot.len = boot_size;
6423 iwl4965_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot);
b481de9c 6424
90e759d1
TW
6425 if (!priv->ucode_boot.v_addr)
6426 goto err_pci_alloc;
6427 }
b481de9c
ZY
6428
6429 /* Copy images into buffers for card's bus-master reads ... */
6430
6431 /* Runtime instructions (first block of data in file) */
6432 src = &ucode->data[0];
6433 len = priv->ucode_code.len;
90e759d1 6434 IWL_DEBUG_INFO("Copying (but not loading) uCode instr len %Zd\n", len);
b481de9c
ZY
6435 memcpy(priv->ucode_code.v_addr, src, len);
6436 IWL_DEBUG_INFO("uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
6437 priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr);
6438
6439 /* Runtime data (2nd block)
bb8c093b 6440 * NOTE: Copy into backup buffer will be done in iwl4965_up() */
b481de9c
ZY
6441 src = &ucode->data[inst_size];
6442 len = priv->ucode_data.len;
90e759d1 6443 IWL_DEBUG_INFO("Copying (but not loading) uCode data len %Zd\n", len);
b481de9c
ZY
6444 memcpy(priv->ucode_data.v_addr, src, len);
6445 memcpy(priv->ucode_data_backup.v_addr, src, len);
6446
6447 /* Initialization instructions (3rd block) */
6448 if (init_size) {
6449 src = &ucode->data[inst_size + data_size];
6450 len = priv->ucode_init.len;
90e759d1
TW
6451 IWL_DEBUG_INFO("Copying (but not loading) init instr len %Zd\n",
6452 len);
b481de9c
ZY
6453 memcpy(priv->ucode_init.v_addr, src, len);
6454 }
6455
6456 /* Initialization data (4th block) */
6457 if (init_data_size) {
6458 src = &ucode->data[inst_size + data_size + init_size];
6459 len = priv->ucode_init_data.len;
90e759d1
TW
6460 IWL_DEBUG_INFO("Copying (but not loading) init data len %Zd\n",
6461 len);
b481de9c
ZY
6462 memcpy(priv->ucode_init_data.v_addr, src, len);
6463 }
6464
6465 /* Bootstrap instructions (5th block) */
6466 src = &ucode->data[inst_size + data_size + init_size + init_data_size];
6467 len = priv->ucode_boot.len;
90e759d1 6468 IWL_DEBUG_INFO("Copying (but not loading) boot instr len %Zd\n", len);
b481de9c
ZY
6469 memcpy(priv->ucode_boot.v_addr, src, len);
6470
6471 	/* We have our copies now, allow the OS to release its copy */
6472 release_firmware(ucode_raw);
6473 return 0;
6474
6475 err_pci_alloc:
6476 IWL_ERROR("failed to allocate pci memory\n");
90e759d1 6477 ret = -ENOMEM;
bb8c093b 6478 iwl4965_dealloc_ucode_pci(priv);
b481de9c
ZY
6479
6480 err_release:
6481 release_firmware(ucode_raw);
6482
6483 error:
90e759d1 6484 return ret;
b481de9c
ZY
6485}
6486
6487
6488/**
bb8c093b 6489 * iwl4965_set_ucode_ptrs - Set uCode address location
b481de9c
ZY
6490 *
6491 * Tell initialization uCode where to find runtime uCode.
6492 *
6493 * BSM registers initially contain pointers to initialization uCode.
6494 * We need to replace them to load runtime uCode inst and data,
6495 * and to save runtime data when powering down.
6496 */
bb8c093b 6497static int iwl4965_set_ucode_ptrs(struct iwl4965_priv *priv)
b481de9c
ZY
6498{
6499 dma_addr_t pinst;
6500 dma_addr_t pdata;
6501 int rc = 0;
6502 unsigned long flags;
6503
6504 /* bits 35:4 for 4965 */
6505 pinst = priv->ucode_code.p_addr >> 4;
6506 pdata = priv->ucode_data_backup.p_addr >> 4;
6507
6508 spin_lock_irqsave(&priv->lock, flags);
bb8c093b 6509 rc = iwl4965_grab_nic_access(priv);
b481de9c
ZY
6510 if (rc) {
6511 spin_unlock_irqrestore(&priv->lock, flags);
6512 return rc;
6513 }
6514
6515 /* Tell bootstrap uCode where to find image to load */
bb8c093b
CH
6516 iwl4965_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
6517 iwl4965_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
6518 iwl4965_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
b481de9c
ZY
6519 priv->ucode_data.len);
6520
6521 /* Inst bytecount must be last to set up, bit 31 signals uCode
6522 * that all new ptr/size info is in place */
bb8c093b 6523 iwl4965_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
b481de9c
ZY
6524 priv->ucode_code.len | BSM_DRAM_INST_LOAD);
6525
bb8c093b 6526 iwl4965_release_nic_access(priv);
b481de9c
ZY
6527
6528 spin_unlock_irqrestore(&priv->lock, flags);
6529
6530 IWL_DEBUG_INFO("Runtime uCode pointers are set.\n");
6531
6532 return rc;
6533}
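/*
 * Editor's note -- illustrative sketch only, not part of the driver.
 * The function above fits a DMA address of up to 36 bits into a
 * 32-bit BSM register by dropping the low 4 bits ("bits 35:4"),
 * presumably because the buffers are at least 16-byte aligned, and it
 * uses the high bit of the instruction byte-count register as the
 * "pointers are valid, start loading" signal.  Roughly (standalone C,
 * hypothetical helper names; the BIT(31) value matches the "bit 31"
 * comment above but is otherwise an assumption):
 *
 *	#include <stdint.h>
 *
 *	static uint32_t bsm_encode_addr(uint64_t dma_addr)
 *	{
 *		return (uint32_t)(dma_addr >> 4);	// keep bits 35:4
 *	}
 *
 *	static uint32_t bsm_encode_inst_count(uint32_t len)
 *	{
 *		return len | (1U << 31);	// "load now" flag
 *	}
 */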
6534
6535/**
bb8c093b 6536 * iwl4965_init_alive_start - Called after REPLY_ALIVE notification received
b481de9c
ZY
6537 *
6538 * Called after REPLY_ALIVE notification received from "initialize" uCode.
6539 *
6540 * The 4965 "initialize" ALIVE reply contains calibration data for:
6541 * Voltage, temperature, and MIMO tx gain correction, now stored in priv
6542 * (3945 does not contain this data).
6543 *
6544 * Tell "initialize" uCode to go ahead and load the runtime uCode.
6545*/
bb8c093b 6546static void iwl4965_init_alive_start(struct iwl4965_priv *priv)
b481de9c
ZY
6547{
6548 /* Check alive response for "valid" sign from uCode */
6549 if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
6550 /* We had an error bringing up the hardware, so take it
6551 * all the way back down so we can try again */
6552 IWL_DEBUG_INFO("Initialize Alive failed.\n");
6553 goto restart;
6554 }
6555
6556 /* Bootstrap uCode has loaded initialize uCode ... verify inst image.
6557 * This is a paranoid check, because we would not have gotten the
6558 * "initialize" alive if code weren't properly loaded. */
bb8c093b 6559 if (iwl4965_verify_ucode(priv)) {
b481de9c
ZY
6560 /* Runtime instruction load was bad;
6561 * take it all the way back down so we can try again */
6562 IWL_DEBUG_INFO("Bad \"initialize\" uCode load.\n");
6563 goto restart;
6564 }
6565
6566 /* Calculate temperature */
6567 priv->temperature = iwl4965_get_temperature(priv);
6568
6569 /* Send pointers to protocol/runtime uCode image ... init code will
6570 * load and launch runtime uCode, which will send us another "Alive"
6571 * notification. */
6572 IWL_DEBUG_INFO("Initialization Alive received.\n");
bb8c093b 6573 if (iwl4965_set_ucode_ptrs(priv)) {
b481de9c
ZY
6574 /* Runtime instruction load won't happen;
6575 * take it all the way back down so we can try again */
6576 IWL_DEBUG_INFO("Couldn't set up uCode pointers.\n");
6577 goto restart;
6578 }
6579 return;
6580
6581 restart:
6582 queue_work(priv->workqueue, &priv->restart);
6583}
6584
6585
6586/**
bb8c093b 6587 * iwl4965_alive_start - called after REPLY_ALIVE notification received
b481de9c 6588 * from protocol/runtime uCode (initialization uCode's
bb8c093b 6589 * Alive gets handled by iwl4965_init_alive_start()).
b481de9c 6590 */
bb8c093b 6591static void iwl4965_alive_start(struct iwl4965_priv *priv)
b481de9c
ZY
6592{
6593 int rc = 0;
6594
6595 IWL_DEBUG_INFO("Runtime Alive received.\n");
6596
6597 if (priv->card_alive.is_valid != UCODE_VALID_OK) {
6598 /* We had an error bringing up the hardware, so take it
6599 * all the way back down so we can try again */
6600 IWL_DEBUG_INFO("Alive failed.\n");
6601 goto restart;
6602 }
6603
6604 /* Initialize uCode has loaded Runtime uCode ... verify inst image.
6605 * This is a paranoid check, because we would not have gotten the
6606 * "runtime" alive if code weren't properly loaded. */
bb8c093b 6607 if (iwl4965_verify_ucode(priv)) {
b481de9c
ZY
6608 /* Runtime instruction load was bad;
6609 * take it all the way back down so we can try again */
6610 IWL_DEBUG_INFO("Bad runtime uCode load.\n");
6611 goto restart;
6612 }
6613
bb8c093b 6614 iwl4965_clear_stations_table(priv);
b481de9c
ZY
6615
6616 rc = iwl4965_alive_notify(priv);
6617 if (rc) {
6618 IWL_WARNING("Could not complete ALIVE transition [ntf]: %d\n",
6619 rc);
6620 goto restart;
6621 }
6622
9fbab516 6623 /* After the ALIVE response, we can send host commands to 4965 uCode */
b481de9c
ZY
6624 set_bit(STATUS_ALIVE, &priv->status);
6625
6626 /* Clear out the uCode error bit if it is set */
6627 clear_bit(STATUS_FW_ERROR, &priv->status);
6628
bb8c093b 6629 rc = iwl4965_init_channel_map(priv);
b481de9c
ZY
6630 if (rc) {
6631 IWL_ERROR("initializing regulatory failed: %d\n", rc);
6632 return;
6633 }
6634
bb8c093b 6635 iwl4965_init_geos(priv);
b481de9c 6636
bb8c093b 6637 if (iwl4965_is_rfkill(priv))
b481de9c
ZY
6638 return;
6639
6640 if (!priv->mac80211_registered) {
6641 /* Unlock so any user space entry points can call back into
6642 * the driver without a deadlock... */
6643 mutex_unlock(&priv->mutex);
bb8c093b 6644 iwl4965_rate_control_register(priv->hw);
b481de9c
ZY
6645 rc = ieee80211_register_hw(priv->hw);
6646 priv->hw->conf.beacon_int = 100;
6647 mutex_lock(&priv->mutex);
6648
6649 if (rc) {
bb8c093b 6650 iwl4965_rate_control_unregister(priv->hw);
b481de9c
ZY
6651 IWL_ERROR("Failed to register network "
6652 "device (error %d)\n", rc);
6653 return;
6654 }
6655
6656 priv->mac80211_registered = 1;
6657
bb8c093b 6658 iwl4965_reset_channel_flag(priv);
b481de9c
ZY
6659 } else
6660 ieee80211_start_queues(priv->hw);
6661
6662 priv->active_rate = priv->rates_mask;
6663 priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK;
6664
bb8c093b 6665 iwl4965_send_power_mode(priv, IWL_POWER_LEVEL(priv->power_mode));
b481de9c 6666
bb8c093b
CH
6667 if (iwl4965_is_associated(priv)) {
6668 struct iwl4965_rxon_cmd *active_rxon =
6669 (struct iwl4965_rxon_cmd *)(&priv->active_rxon);
b481de9c
ZY
6670
6671 memcpy(&priv->staging_rxon, &priv->active_rxon,
6672 sizeof(priv->staging_rxon));
6673 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
6674 } else {
6675 /* Initialize our rx_config data */
bb8c093b 6676 iwl4965_connection_init_rx_config(priv);
b481de9c
ZY
6677 memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);
6678 }
6679
9fbab516 6680 /* Configure Bluetooth device coexistence support */
bb8c093b 6681 iwl4965_send_bt_config(priv);
b481de9c
ZY
6682
6683 /* Configure the adapter for unassociated operation */
bb8c093b 6684 iwl4965_commit_rxon(priv);
b481de9c
ZY
6685
6686 /* At this point, the NIC is initialized and operational */
6687 priv->notif_missed_beacons = 0;
6688 set_bit(STATUS_READY, &priv->status);
6689
6690 iwl4965_rf_kill_ct_config(priv);
6691 IWL_DEBUG_INFO("ALIVE processing complete.\n");
6692
6693 if (priv->error_recovering)
bb8c093b 6694 iwl4965_error_recovery(priv);
b481de9c
ZY
6695
6696 return;
6697
6698 restart:
6699 queue_work(priv->workqueue, &priv->restart);
6700}
6701
bb8c093b 6702static void iwl4965_cancel_deferred_work(struct iwl4965_priv *priv);
b481de9c 6703
bb8c093b 6704static void __iwl4965_down(struct iwl4965_priv *priv)
b481de9c
ZY
6705{
6706 unsigned long flags;
6707 int exit_pending = test_bit(STATUS_EXIT_PENDING, &priv->status);
6708 struct ieee80211_conf *conf = NULL;
6709
6710 IWL_DEBUG_INFO(DRV_NAME " is going down\n");
6711
6712 conf = ieee80211_get_hw_conf(priv->hw);
6713
6714 if (!exit_pending)
6715 set_bit(STATUS_EXIT_PENDING, &priv->status);
6716
bb8c093b 6717 iwl4965_clear_stations_table(priv);
b481de9c
ZY
6718
6719 /* Unblock any waiting calls */
6720 wake_up_interruptible_all(&priv->wait_command_queue);
6721
b481de9c
ZY
6722 /* Wipe out the EXIT_PENDING status bit if we are not actually
6723 * exiting the module */
6724 if (!exit_pending)
6725 clear_bit(STATUS_EXIT_PENDING, &priv->status);
6726
6727 /* stop and reset the on-board processor */
bb8c093b 6728 iwl4965_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
b481de9c
ZY
6729
6730 /* tell the device to stop sending interrupts */
bb8c093b 6731 iwl4965_disable_interrupts(priv);
b481de9c
ZY
6732
6733 if (priv->mac80211_registered)
6734 ieee80211_stop_queues(priv->hw);
6735
bb8c093b 6736 /* If we have not previously called iwl4965_init() then
b481de9c 6737 * clear all bits but the RF Kill and SUSPEND bits and return */
bb8c093b 6738 if (!iwl4965_is_init(priv)) {
b481de9c
ZY
6739 priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) <<
6740 STATUS_RF_KILL_HW |
6741 test_bit(STATUS_RF_KILL_SW, &priv->status) <<
6742 STATUS_RF_KILL_SW |
6743 test_bit(STATUS_IN_SUSPEND, &priv->status) <<
6744 STATUS_IN_SUSPEND;
6745 goto exit;
6746 }
6747
6748 /* ...otherwise clear out all the status bits but the RF Kill and
6749 * SUSPEND bits and continue taking the NIC down. */
6750 priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
6751 STATUS_RF_KILL_HW |
6752 test_bit(STATUS_RF_KILL_SW, &priv->status) <<
6753 STATUS_RF_KILL_SW |
6754 test_bit(STATUS_IN_SUSPEND, &priv->status) <<
6755 STATUS_IN_SUSPEND |
6756 test_bit(STATUS_FW_ERROR, &priv->status) <<
6757 STATUS_FW_ERROR;
6758
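/*
 * Editor's note -- illustrative sketch only, not part of the driver.
 * Both status assignments above (here and in the early-exit branch)
 * rebuild priv->status from scratch while keeping only selected
 * flags: test_bit() yields 0 or 1, and shifting that back by the same
 * bit number re-creates the flag in an otherwise zeroed word.  An
 * equivalent standalone helper:
 *
 *	static unsigned long keep_bit(unsigned long status, int bit)
 *	{
 *		return ((status >> bit) & 1UL) << bit;
 *	}
 *
 *	// new_status = keep_bit(old, STATUS_RF_KILL_HW) |
 *	//              keep_bit(old, STATUS_RF_KILL_SW) | ...
 */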
6759 spin_lock_irqsave(&priv->lock, flags);
9fbab516
BC
6760 iwl4965_clear_bit(priv, CSR_GP_CNTRL,
6761 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
b481de9c
ZY
6762 spin_unlock_irqrestore(&priv->lock, flags);
6763
bb8c093b
CH
6764 iwl4965_hw_txq_ctx_stop(priv);
6765 iwl4965_hw_rxq_stop(priv);
b481de9c
ZY
6766
6767 spin_lock_irqsave(&priv->lock, flags);
bb8c093b
CH
6768 if (!iwl4965_grab_nic_access(priv)) {
6769 iwl4965_write_prph(priv, APMG_CLK_DIS_REG,
b481de9c 6770 APMG_CLK_VAL_DMA_CLK_RQT);
bb8c093b 6771 iwl4965_release_nic_access(priv);
b481de9c
ZY
6772 }
6773 spin_unlock_irqrestore(&priv->lock, flags);
6774
6775 udelay(5);
6776
bb8c093b
CH
6777 iwl4965_hw_nic_stop_master(priv);
6778 iwl4965_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
6779 iwl4965_hw_nic_reset(priv);
b481de9c
ZY
6780
6781 exit:
bb8c093b 6782 memset(&priv->card_alive, 0, sizeof(struct iwl4965_alive_resp));
b481de9c
ZY
6783
6784 if (priv->ibss_beacon)
6785 dev_kfree_skb(priv->ibss_beacon);
6786 priv->ibss_beacon = NULL;
6787
6788 /* clear out any free frames */
bb8c093b 6789 iwl4965_clear_free_frames(priv);
b481de9c
ZY
6790}
6791
bb8c093b 6792static void iwl4965_down(struct iwl4965_priv *priv)
b481de9c
ZY
6793{
6794 mutex_lock(&priv->mutex);
bb8c093b 6795 __iwl4965_down(priv);
b481de9c 6796 mutex_unlock(&priv->mutex);
b24d22b1 6797
bb8c093b 6798 iwl4965_cancel_deferred_work(priv);
b481de9c
ZY
6799}
6800
6801#define MAX_HW_RESTARTS 5
6802
bb8c093b 6803static int __iwl4965_up(struct iwl4965_priv *priv)
b481de9c 6804{
0795af57 6805 DECLARE_MAC_BUF(mac);
b481de9c
ZY
6806 int rc, i;
6807 u32 hw_rf_kill = 0;
6808
6809 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
6810 IWL_WARNING("Exit pending; will not bring the NIC up\n");
6811 return -EIO;
6812 }
6813
6814 if (test_bit(STATUS_RF_KILL_SW, &priv->status)) {
6815 IWL_WARNING("Radio disabled by SW RF kill (module "
6816 "parameter)\n");
6817 return 0;
6818 }
6819
a781cf94
RC
6820 if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) {
6821 IWL_ERROR("ucode not available for device bringup\n");
6822 return -EIO;
6823 }
6824
bb8c093b 6825 iwl4965_write32(priv, CSR_INT, 0xFFFFFFFF);
b481de9c 6826
bb8c093b 6827 rc = iwl4965_hw_nic_init(priv);
b481de9c
ZY
6828 if (rc) {
6829 IWL_ERROR("Unable to int nic\n");
6830 return rc;
6831 }
6832
6833 /* make sure rfkill handshake bits are cleared */
bb8c093b
CH
6834 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
6835 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_CLR,
b481de9c
ZY
6836 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
6837
6838 /* clear (again), then enable host interrupts */
bb8c093b
CH
6839 iwl4965_write32(priv, CSR_INT, 0xFFFFFFFF);
6840 iwl4965_enable_interrupts(priv);
b481de9c
ZY
6841
6842 /* really make sure rfkill handshake bits are cleared */
bb8c093b
CH
6843 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
6844 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
b481de9c
ZY
6845
6846 /* Copy original ucode data image from disk into backup cache.
6847 * This will be used to initialize the on-board processor's
6848 * data SRAM for a clean start when the runtime program first loads. */
6849 memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr,
6850 priv->ucode_data.len);
6851
6852 /* If platform's RF_KILL switch is set to KILL,
6853 * wait for BIT_INT_RF_KILL interrupt before loading uCode
6854 * and getting things started */
bb8c093b 6855 if (!(iwl4965_read32(priv, CSR_GP_CNTRL) &
b481de9c
ZY
6856 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
6857 hw_rf_kill = 1;
6858
6859 if (test_bit(STATUS_RF_KILL_HW, &priv->status) || hw_rf_kill) {
6860 IWL_WARNING("Radio disabled by HW RF Kill switch\n");
6861 return 0;
6862 }
6863
6864 for (i = 0; i < MAX_HW_RESTARTS; i++) {
6865
bb8c093b 6866 iwl4965_clear_stations_table(priv);
b481de9c
ZY
6867
6868 /* load bootstrap state machine,
6869 * load bootstrap program into processor's memory,
6870 * prepare to load the "initialize" uCode */
bb8c093b 6871 rc = iwl4965_load_bsm(priv);
b481de9c
ZY
6872
6873 if (rc) {
6874 IWL_ERROR("Unable to set up bootstrap uCode: %d\n", rc);
6875 continue;
6876 }
6877
6878 /* start card; "initialize" will load runtime ucode */
bb8c093b 6879 iwl4965_nic_start(priv);
b481de9c 6880
9fbab516 6881 /* MAC Address location in EEPROM is same for 3945/4965 */
b481de9c 6882 get_eeprom_mac(priv, priv->mac_addr);
0795af57
JP
6883 IWL_DEBUG_INFO("MAC address: %s\n",
6884 print_mac(mac, priv->mac_addr));
b481de9c
ZY
6885
6886 SET_IEEE80211_PERM_ADDR(priv->hw, priv->mac_addr);
6887
6888 IWL_DEBUG_INFO(DRV_NAME " is coming up\n");
6889
6890 return 0;
6891 }
6892
6893 set_bit(STATUS_EXIT_PENDING, &priv->status);
bb8c093b 6894 __iwl4965_down(priv);
b481de9c
ZY
6895
6896 /* tried to restart and config the device for as long as our
6897 * patience could withstand */
6898 IWL_ERROR("Unable to initialize device after %d attempts.\n", i);
6899 return -EIO;
6900}
6901
6902
6903/*****************************************************************************
6904 *
6905 * Workqueue callbacks
6906 *
6907 *****************************************************************************/
6908
bb8c093b 6909static void iwl4965_bg_init_alive_start(struct work_struct *data)
b481de9c 6910{
bb8c093b
CH
6911 struct iwl4965_priv *priv =
6912 container_of(data, struct iwl4965_priv, init_alive_start.work);
b481de9c
ZY
6913
6914 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6915 return;
6916
6917 mutex_lock(&priv->mutex);
bb8c093b 6918 iwl4965_init_alive_start(priv);
b481de9c
ZY
6919 mutex_unlock(&priv->mutex);
6920}
6921
bb8c093b 6922static void iwl4965_bg_alive_start(struct work_struct *data)
b481de9c 6923{
bb8c093b
CH
6924 struct iwl4965_priv *priv =
6925 container_of(data, struct iwl4965_priv, alive_start.work);
b481de9c
ZY
6926
6927 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6928 return;
6929
6930 mutex_lock(&priv->mutex);
bb8c093b 6931 iwl4965_alive_start(priv);
b481de9c
ZY
6932 mutex_unlock(&priv->mutex);
6933}
6934
bb8c093b 6935static void iwl4965_bg_rf_kill(struct work_struct *work)
b481de9c 6936{
bb8c093b 6937 struct iwl4965_priv *priv = container_of(work, struct iwl4965_priv, rf_kill);
b481de9c
ZY
6938
6939 wake_up_interruptible(&priv->wait_command_queue);
6940
6941 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6942 return;
6943
6944 mutex_lock(&priv->mutex);
6945
bb8c093b 6946 if (!iwl4965_is_rfkill(priv)) {
b481de9c
ZY
6947 IWL_DEBUG(IWL_DL_INFO | IWL_DL_RF_KILL,
6948 "HW and/or SW RF Kill no longer active, restarting "
6949 "device\n");
6950 if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
6951 queue_work(priv->workqueue, &priv->restart);
6952 } else {
6953
6954 if (!test_bit(STATUS_RF_KILL_HW, &priv->status))
6955 IWL_DEBUG_RF_KILL("Can not turn radio back on - "
6956 "disabled by SW switch\n");
6957 else
6958 IWL_WARNING("Radio Frequency Kill Switch is On:\n"
6959 "Kill switch must be turned off for "
6960 "wireless networking to work.\n");
6961 }
6962 mutex_unlock(&priv->mutex);
6963}
6964
6965#define IWL_SCAN_CHECK_WATCHDOG (7 * HZ)
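/*
 * Editor's note -- illustrative note only, not part of the driver.
 * HZ is the kernel tick rate, so (7 * HZ) is "7 seconds expressed in
 * jiffies".  That is the delay handed to queue_delayed_work() below,
 * and jiffies_to_msecs(IWL_SCAN_CHECK_WATCHDOG) in the log message
 * therefore prints 7000 (ms) regardless of the configured HZ.
 */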
6966
bb8c093b 6967static void iwl4965_bg_scan_check(struct work_struct *data)
b481de9c 6968{
bb8c093b
CH
6969 struct iwl4965_priv *priv =
6970 container_of(data, struct iwl4965_priv, scan_check.work);
b481de9c
ZY
6971
6972 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6973 return;
6974
6975 mutex_lock(&priv->mutex);
6976 if (test_bit(STATUS_SCANNING, &priv->status) ||
6977 test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
6978 IWL_DEBUG(IWL_DL_INFO | IWL_DL_SCAN,
6979 "Scan completion watchdog resetting adapter (%dms)\n",
6980 jiffies_to_msecs(IWL_SCAN_CHECK_WATCHDOG));
052c4b9f 6981
b481de9c 6982 if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
bb8c093b 6983 iwl4965_send_scan_abort(priv);
b481de9c
ZY
6984 }
6985 mutex_unlock(&priv->mutex);
6986}
6987
bb8c093b 6988static void iwl4965_bg_request_scan(struct work_struct *data)
b481de9c 6989{
bb8c093b
CH
6990 struct iwl4965_priv *priv =
6991 container_of(data, struct iwl4965_priv, request_scan);
6992 struct iwl4965_host_cmd cmd = {
b481de9c 6993 .id = REPLY_SCAN_CMD,
bb8c093b 6994 .len = sizeof(struct iwl4965_scan_cmd),
b481de9c
ZY
6995 .meta.flags = CMD_SIZE_HUGE,
6996 };
6997 int rc = 0;
bb8c093b 6998 struct iwl4965_scan_cmd *scan;
b481de9c
ZY
6999 struct ieee80211_conf *conf = NULL;
7000 u8 direct_mask;
7001 int phymode;
7002
7003 conf = ieee80211_get_hw_conf(priv->hw);
7004
7005 mutex_lock(&priv->mutex);
7006
bb8c093b 7007 if (!iwl4965_is_ready(priv)) {
b481de9c
ZY
7008 IWL_WARNING("request scan called when driver not ready.\n");
7009 goto done;
7010 }
7011
7012 /* Make sure the scan wasn't cancelled before this queued work
7013 * was given the chance to run... */
7014 if (!test_bit(STATUS_SCANNING, &priv->status))
7015 goto done;
7016
7017 /* This should never be called or scheduled if there is currently
7018 * a scan active in the hardware. */
7019 if (test_bit(STATUS_SCAN_HW, &priv->status)) {
7020 IWL_DEBUG_INFO("Multiple concurrent scan requests in parallel. "
7021 "Ignoring second request.\n");
7022 rc = -EIO;
7023 goto done;
7024 }
7025
7026 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
7027 IWL_DEBUG_SCAN("Aborting scan due to device shutdown\n");
7028 goto done;
7029 }
7030
7031 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
7032 IWL_DEBUG_HC("Scan request while abort pending. Queuing.\n");
7033 goto done;
7034 }
7035
bb8c093b 7036 if (iwl4965_is_rfkill(priv)) {
b481de9c
ZY
7037 IWL_DEBUG_HC("Aborting scan due to RF Kill activation\n");
7038 goto done;
7039 }
7040
7041 if (!test_bit(STATUS_READY, &priv->status)) {
7042 IWL_DEBUG_HC("Scan request while uninitialized. Queuing.\n");
7043 goto done;
7044 }
7045
7046 if (!priv->scan_bands) {
7047 IWL_DEBUG_HC("Aborting scan due to no requested bands\n");
7048 goto done;
7049 }
7050
7051 if (!priv->scan) {
bb8c093b 7052 priv->scan = kmalloc(sizeof(struct iwl4965_scan_cmd) +
b481de9c
ZY
7053 IWL_MAX_SCAN_SIZE, GFP_KERNEL);
7054 if (!priv->scan) {
7055 rc = -ENOMEM;
7056 goto done;
7057 }
7058 }
7059 scan = priv->scan;
bb8c093b 7060 memset(scan, 0, sizeof(struct iwl4965_scan_cmd) + IWL_MAX_SCAN_SIZE);
b481de9c
ZY
7061
7062 scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
7063 scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
7064
bb8c093b 7065 if (iwl4965_is_associated(priv)) {
b481de9c
ZY
7066 u16 interval = 0;
7067 u32 extra;
7068 u32 suspend_time = 100;
7069 u32 scan_suspend_time = 100;
7070 unsigned long flags;
7071
7072 IWL_DEBUG_INFO("Scanning while associated...\n");
7073
7074 spin_lock_irqsave(&priv->lock, flags);
7075 interval = priv->beacon_int;
7076 spin_unlock_irqrestore(&priv->lock, flags);
7077
7078 scan->suspend_time = 0;
052c4b9f 7079 scan->max_out_time = cpu_to_le32(200 * 1024);
b481de9c
ZY
7080 if (!interval)
7081 interval = suspend_time;
7082
7083 extra = (suspend_time / interval) << 22;
7084 scan_suspend_time = (extra |
7085 ((suspend_time % interval) * 1024));
7086 scan->suspend_time = cpu_to_le32(scan_suspend_time);
7087 IWL_DEBUG_SCAN("suspend_time 0x%X beacon interval %d\n",
7088 scan_suspend_time, interval);
7089 }
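/*
 * Editor's note -- worked example only, not part of the driver.
 * The block above splits the 100-unit suspend budget against the
 * beacon interval: whole beacon periods go into bits 22 and up, and
 * the remainder is scaled by 1024 (microseconds per TU) into the low
 * bits.  For a beacon interval of 30:
 *
 *	suspend_time = 100
 *	100 / 30 = 3   ->  3 << 22    = 0x00C00000
 *	100 % 30 = 10  ->  10 * 1024  = 0x00002800
 *	scan->suspend_time = cpu_to_le32(0x00C02800)
 *
 * (The meaning of the two fields is inferred from this arithmetic,
 * not from firmware documentation.)
 */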
7090
7091 /* We should add the ability for user to lock to PASSIVE ONLY */
7092 if (priv->one_direct_scan) {
7093 IWL_DEBUG_SCAN
7094 ("Kicking off one direct scan for '%s'\n",
bb8c093b 7095 iwl4965_escape_essid(priv->direct_ssid,
b481de9c
ZY
7096 priv->direct_ssid_len));
7097 scan->direct_scan[0].id = WLAN_EID_SSID;
7098 scan->direct_scan[0].len = priv->direct_ssid_len;
7099 memcpy(scan->direct_scan[0].ssid,
7100 priv->direct_ssid, priv->direct_ssid_len);
7101 direct_mask = 1;
bb8c093b 7102 } else if (!iwl4965_is_associated(priv) && priv->essid_len) {
b481de9c
ZY
7103 scan->direct_scan[0].id = WLAN_EID_SSID;
7104 scan->direct_scan[0].len = priv->essid_len;
7105 memcpy(scan->direct_scan[0].ssid, priv->essid, priv->essid_len);
7106 direct_mask = 1;
7107 } else
7108 direct_mask = 0;
7109
7110 /* We don't build a direct scan probe request; the uCode will do
7111 * that based on the direct_mask added to each channel entry */
7112 scan->tx_cmd.len = cpu_to_le16(
bb8c093b 7113 iwl4965_fill_probe_req(priv, (struct ieee80211_mgmt *)scan->data,
b481de9c
ZY
7114 IWL_MAX_SCAN_SIZE - sizeof(scan), 0));
7115 scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
7116 scan->tx_cmd.sta_id = priv->hw_setting.bcast_sta_id;
7117 scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
7118
7119 /* flags + rate selection */
7120
7121 scan->tx_cmd.tx_flags |= cpu_to_le32(0x200);
7122
7123 switch (priv->scan_bands) {
7124 case 2:
7125 scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
7126 scan->tx_cmd.rate_n_flags =
bb8c093b 7127 iwl4965_hw_set_rate_n_flags(IWL_RATE_1M_PLCP,
b481de9c
ZY
7128 RATE_MCS_ANT_B_MSK|RATE_MCS_CCK_MSK);
7129
7130 scan->good_CRC_th = 0;
7131 phymode = MODE_IEEE80211G;
7132 break;
7133
7134 case 1:
7135 scan->tx_cmd.rate_n_flags =
bb8c093b 7136 iwl4965_hw_set_rate_n_flags(IWL_RATE_6M_PLCP,
b481de9c
ZY
7137 RATE_MCS_ANT_B_MSK);
7138 scan->good_CRC_th = IWL_GOOD_CRC_TH;
7139 phymode = MODE_IEEE80211A;
7140 break;
7141
7142 default:
7143 IWL_WARNING("Invalid scan band count\n");
7144 goto done;
7145 }
7146
7147 /* select Rx chains */
7148
7149 /* Force use of chains B and C (0x6) for scan Rx.
7150 * Avoid A (0x1) because of its off-channel reception on A-band.
7151 * MIMO is not used here, but value is required to make uCode happy. */
7152 scan->rx_chain = RXON_RX_CHAIN_DRIVER_FORCE_MSK |
7153 cpu_to_le16((0x7 << RXON_RX_CHAIN_VALID_POS) |
7154 (0x6 << RXON_RX_CHAIN_FORCE_SEL_POS) |
7155 (0x7 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS));
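/*
 * Editor's note -- illustrative sketch only, not part of the driver.
 * In the chain masks used above each bit selects one receiver:
 * bit 0 = chain A, bit 1 = chain B, bit 2 = chain C.  So 0x7 marks
 * all three chains valid, while 0x6 forces reception onto B and C
 * only, as the comment explains.  A standalone helper might read:
 *
 *	enum rx_chain { CHAIN_A = 1 << 0, CHAIN_B = 1 << 1, CHAIN_C = 1 << 2 };
 *
 *	static unsigned int chain_mask(int use_a, int use_b, int use_c)
 *	{
 *		return (use_a ? CHAIN_A : 0) |
 *		       (use_b ? CHAIN_B : 0) |
 *		       (use_c ? CHAIN_C : 0);	// chain_mask(0, 1, 1) == 0x6
 *	}
 */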
7156
7157 if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR)
7158 scan->filter_flags = RXON_FILTER_PROMISC_MSK;
7159
7160 if (direct_mask)
7161 IWL_DEBUG_SCAN
7162 ("Initiating direct scan for %s.\n",
bb8c093b 7163 iwl4965_escape_essid(priv->essid, priv->essid_len));
b481de9c
ZY
7164 else
7165 IWL_DEBUG_SCAN("Initiating indirect scan.\n");
7166
7167 scan->channel_count =
bb8c093b 7168 iwl4965_get_channels_for_scan(
b481de9c
ZY
7169 priv, phymode, 1, /* active */
7170 direct_mask,
7171 (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]);
7172
7173 cmd.len += le16_to_cpu(scan->tx_cmd.len) +
bb8c093b 7174 scan->channel_count * sizeof(struct iwl4965_scan_channel);
b481de9c
ZY
7175 cmd.data = scan;
7176 scan->len = cpu_to_le16(cmd.len);
7177
7178 set_bit(STATUS_SCAN_HW, &priv->status);
bb8c093b 7179 rc = iwl4965_send_cmd_sync(priv, &cmd);
b481de9c
ZY
7180 if (rc)
7181 goto done;
7182
7183 queue_delayed_work(priv->workqueue, &priv->scan_check,
7184 IWL_SCAN_CHECK_WATCHDOG);
7185
7186 mutex_unlock(&priv->mutex);
7187 return;
7188
7189 done:
01ebd063 7190 /* inform mac80211 scan aborted */
b481de9c
ZY
7191 queue_work(priv->workqueue, &priv->scan_completed);
7192 mutex_unlock(&priv->mutex);
7193}
7194
bb8c093b 7195static void iwl4965_bg_up(struct work_struct *data)
b481de9c 7196{
bb8c093b 7197 struct iwl4965_priv *priv = container_of(data, struct iwl4965_priv, up);
b481de9c
ZY
7198
7199 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
7200 return;
7201
7202 mutex_lock(&priv->mutex);
bb8c093b 7203 __iwl4965_up(priv);
b481de9c
ZY
7204 mutex_unlock(&priv->mutex);
7205}
7206
bb8c093b 7207static void iwl4965_bg_restart(struct work_struct *data)
b481de9c 7208{
bb8c093b 7209 struct iwl4965_priv *priv = container_of(data, struct iwl4965_priv, restart);
b481de9c
ZY
7210
7211 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
7212 return;
7213
bb8c093b 7214 iwl4965_down(priv);
b481de9c
ZY
7215 queue_work(priv->workqueue, &priv->up);
7216}
7217
bb8c093b 7218static void iwl4965_bg_rx_replenish(struct work_struct *data)
b481de9c 7219{
bb8c093b
CH
7220 struct iwl4965_priv *priv =
7221 container_of(data, struct iwl4965_priv, rx_replenish);
b481de9c
ZY
7222
7223 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
7224 return;
7225
7226 mutex_lock(&priv->mutex);
bb8c093b 7227 iwl4965_rx_replenish(priv);
b481de9c
ZY
7228 mutex_unlock(&priv->mutex);
7229}
7230
bb8c093b 7231static void iwl4965_bg_post_associate(struct work_struct *data)
b481de9c 7232{
bb8c093b 7233 struct iwl4965_priv *priv = container_of(data, struct iwl4965_priv,
b481de9c
ZY
7234 post_associate.work);
7235
7236 int rc = 0;
7237 struct ieee80211_conf *conf = NULL;
0795af57 7238 DECLARE_MAC_BUF(mac);
b481de9c
ZY
7239
7240 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
7241 IWL_ERROR("%s Should not be called in AP mode\n", __FUNCTION__);
7242 return;
7243 }
7244
0795af57
JP
7245 IWL_DEBUG_ASSOC("Associated as %d to: %s\n",
7246 priv->assoc_id,
7247 print_mac(mac, priv->active_rxon.bssid_addr));
b481de9c
ZY
7248
7249
7250 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
7251 return;
7252
7253 mutex_lock(&priv->mutex);
7254
948c171c
MA
7255 if (!priv->interface_id || !priv->is_open) {
7256 mutex_unlock(&priv->mutex);
7257 return;
7258 }
bb8c093b 7259 iwl4965_scan_cancel_timeout(priv, 200);
052c4b9f 7260
b481de9c
ZY
7261 conf = ieee80211_get_hw_conf(priv->hw);
7262
7263 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
bb8c093b 7264 iwl4965_commit_rxon(priv);
b481de9c 7265
bb8c093b
CH
7266 memset(&priv->rxon_timing, 0, sizeof(struct iwl4965_rxon_time_cmd));
7267 iwl4965_setup_rxon_timing(priv);
7268 rc = iwl4965_send_cmd_pdu(priv, REPLY_RXON_TIMING,
b481de9c
ZY
7269 sizeof(priv->rxon_timing), &priv->rxon_timing);
7270 if (rc)
7271 IWL_WARNING("REPLY_RXON_TIMING failed - "
7272 "Attempting to continue.\n");
7273
7274 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
7275
c8b0e6e1 7276#ifdef CONFIG_IWL4965_HT
b481de9c
ZY
7277 if (priv->is_ht_enabled && priv->current_assoc_ht.is_ht)
7278 iwl4965_set_rxon_ht(priv, &priv->current_assoc_ht);
7279 else {
7280 priv->active_rate_ht[0] = 0;
7281 priv->active_rate_ht[1] = 0;
7282 priv->current_channel_width = IWL_CHANNEL_WIDTH_20MHZ;
7283 }
c8b0e6e1 7284#endif /* CONFIG_IWL4965_HT*/
b481de9c
ZY
7285 iwl4965_set_rxon_chain(priv);
7286 priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id);
7287
7288 IWL_DEBUG_ASSOC("assoc id %d beacon interval %d\n",
7289 priv->assoc_id, priv->beacon_int);
7290
7291 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
7292 priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
7293 else
7294 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
7295
7296 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) {
7297 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
7298 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
7299 else
7300 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
7301
7302 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)
7303 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
7304
7305 }
7306
bb8c093b 7307 iwl4965_commit_rxon(priv);
b481de9c
ZY
7308
7309 switch (priv->iw_mode) {
7310 case IEEE80211_IF_TYPE_STA:
bb8c093b 7311 iwl4965_rate_scale_init(priv->hw, IWL_AP_ID);
b481de9c
ZY
7312 break;
7313
7314 case IEEE80211_IF_TYPE_IBSS:
7315
7316 /* clear out the station table */
bb8c093b 7317 iwl4965_clear_stations_table(priv);
b481de9c 7318
bb8c093b
CH
7319 iwl4965_rxon_add_station(priv, iwl4965_broadcast_addr, 0);
7320 iwl4965_rxon_add_station(priv, priv->bssid, 0);
7321 iwl4965_rate_scale_init(priv->hw, IWL_STA_ID);
7322 iwl4965_send_beacon_cmd(priv);
b481de9c
ZY
7323
7324 break;
7325
7326 default:
7327 IWL_ERROR("%s Should not be called in %d mode\n",
7328 __FUNCTION__, priv->iw_mode);
7329 break;
7330 }
7331
bb8c093b 7332 iwl4965_sequence_reset(priv);
b481de9c 7333
c8b0e6e1 7334#ifdef CONFIG_IWL4965_SENSITIVITY
b481de9c
ZY
7335 /* Enable Rx differential gain and sensitivity calibrations */
7336 iwl4965_chain_noise_reset(priv);
7337 priv->start_calib = 1;
c8b0e6e1 7338#endif /* CONFIG_IWL4965_SENSITIVITY */
b481de9c
ZY
7339
7340 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)
7341 priv->assoc_station_added = 1;
7342
c8b0e6e1 7343#ifdef CONFIG_IWL4965_QOS
bb8c093b 7344 iwl4965_activate_qos(priv, 0);
c8b0e6e1 7345#endif /* CONFIG_IWL4965_QOS */
b481de9c
ZY
7346 mutex_unlock(&priv->mutex);
7347}
7348
bb8c093b 7349static void iwl4965_bg_abort_scan(struct work_struct *work)
b481de9c 7350{
bb8c093b 7351 struct iwl4965_priv *priv = container_of(work, struct iwl4965_priv, abort_scan);
b481de9c 7352
bb8c093b 7353 if (!iwl4965_is_ready(priv))
b481de9c
ZY
7354 return;
7355
7356 mutex_lock(&priv->mutex);
7357
7358 set_bit(STATUS_SCAN_ABORTING, &priv->status);
bb8c093b 7359 iwl4965_send_scan_abort(priv);
b481de9c
ZY
7360
7361 mutex_unlock(&priv->mutex);
7362}
7363
bb8c093b 7364static void iwl4965_bg_scan_completed(struct work_struct *work)
b481de9c 7365{
bb8c093b
CH
7366 struct iwl4965_priv *priv =
7367 container_of(work, struct iwl4965_priv, scan_completed);
b481de9c
ZY
7368
7369 IWL_DEBUG(IWL_DL_INFO | IWL_DL_SCAN, "SCAN complete scan\n");
7370
7371 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
7372 return;
7373
7374 ieee80211_scan_completed(priv->hw);
7375
7376 /* Since setting the TXPOWER may have been deferred while
7377 * performing the scan, fire one off */
7378 mutex_lock(&priv->mutex);
bb8c093b 7379 iwl4965_hw_reg_send_txpower(priv);
b481de9c
ZY
7380 mutex_unlock(&priv->mutex);
7381}
7382
7383/*****************************************************************************
7384 *
7385 * mac80211 entry point functions
7386 *
7387 *****************************************************************************/
7388
bb8c093b 7389static int iwl4965_mac_start(struct ieee80211_hw *hw)
b481de9c 7390{
bb8c093b 7391 struct iwl4965_priv *priv = hw->priv;
b481de9c
ZY
7392
7393 IWL_DEBUG_MAC80211("enter\n");
7394
7395 /* we should be verifying the device is ready to be opened */
7396 mutex_lock(&priv->mutex);
7397
7398 priv->is_open = 1;
7399
bb8c093b 7400 if (!iwl4965_is_rfkill(priv))
b481de9c
ZY
7401 ieee80211_start_queues(priv->hw);
7402
7403 mutex_unlock(&priv->mutex);
7404 IWL_DEBUG_MAC80211("leave\n");
7405 return 0;
7406}
7407
bb8c093b 7408static void iwl4965_mac_stop(struct ieee80211_hw *hw)
b481de9c 7409{
bb8c093b 7410 struct iwl4965_priv *priv = hw->priv;
b481de9c
ZY
7411
7412 IWL_DEBUG_MAC80211("enter\n");
948c171c
MA
7413
7414
7415 mutex_lock(&priv->mutex);
7416 /* stop mac, cancel any scan request and clear
7417 * RXON_FILTER_ASSOC_MSK BIT
7418 */
b481de9c 7419 priv->is_open = 0;
bb8c093b 7420 iwl4965_scan_cancel_timeout(priv, 100);
948c171c
MA
7421 cancel_delayed_work(&priv->post_associate);
7422 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
bb8c093b 7423 iwl4965_commit_rxon(priv);
948c171c
MA
7424 mutex_unlock(&priv->mutex);
7425
b481de9c 7426 IWL_DEBUG_MAC80211("leave\n");
b481de9c
ZY
7427}
7428
bb8c093b 7429static int iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
b481de9c
ZY
7430 struct ieee80211_tx_control *ctl)
7431{
bb8c093b 7432 struct iwl4965_priv *priv = hw->priv;
b481de9c
ZY
7433
7434 IWL_DEBUG_MAC80211("enter\n");
7435
7436 if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) {
7437 IWL_DEBUG_MAC80211("leave - monitor\n");
7438 return -1;
7439 }
7440
7441 IWL_DEBUG_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
7442 ctl->tx_rate);
7443
bb8c093b 7444 if (iwl4965_tx_skb(priv, skb, ctl))
b481de9c
ZY
7445 dev_kfree_skb_any(skb);
7446
7447 IWL_DEBUG_MAC80211("leave\n");
7448 return 0;
7449}
7450
bb8c093b 7451static int iwl4965_mac_add_interface(struct ieee80211_hw *hw,
b481de9c
ZY
7452 struct ieee80211_if_init_conf *conf)
7453{
bb8c093b 7454 struct iwl4965_priv *priv = hw->priv;
b481de9c 7455 unsigned long flags;
0795af57 7456 DECLARE_MAC_BUF(mac);
b481de9c
ZY
7457
7458 IWL_DEBUG_MAC80211("enter: id %d, type %d\n", conf->if_id, conf->type);
b481de9c
ZY
7459
7460 if (priv->interface_id) {
7461 IWL_DEBUG_MAC80211("leave - interface_id != 0\n");
7462 return 0;
7463 }
7464
7465 spin_lock_irqsave(&priv->lock, flags);
7466 priv->interface_id = conf->if_id;
7467
7468 spin_unlock_irqrestore(&priv->lock, flags);
7469
7470 mutex_lock(&priv->mutex);
864792e3
TW
7471
7472 if (conf->mac_addr) {
7473 IWL_DEBUG_MAC80211("Set %s\n", print_mac(mac, conf->mac_addr));
7474 memcpy(priv->mac_addr, conf->mac_addr, ETH_ALEN);
7475 }
bb8c093b 7476 iwl4965_set_mode(priv, conf->type);
b481de9c
ZY
7477
7478 IWL_DEBUG_MAC80211("leave\n");
7479 mutex_unlock(&priv->mutex);
7480
7481 return 0;
7482}
7483
7484/**
bb8c093b 7485 * iwl4965_mac_config - mac80211 config callback
b481de9c
ZY
7486 *
7487 * We ignore conf->flags & IEEE80211_CONF_SHORT_SLOT_TIME since it seems to
7488 * be set inappropriately and the driver currently sets the hardware up to
7489 * use it whenever needed.
7490 */
bb8c093b 7491static int iwl4965_mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf)
b481de9c 7492{
bb8c093b
CH
7493 struct iwl4965_priv *priv = hw->priv;
7494 const struct iwl4965_channel_info *ch_info;
b481de9c
ZY
7495 unsigned long flags;
7496
7497 mutex_lock(&priv->mutex);
7498 IWL_DEBUG_MAC80211("enter to channel %d\n", conf->channel);
7499
bb8c093b 7500 if (!iwl4965_is_ready(priv)) {
b481de9c
ZY
7501 IWL_DEBUG_MAC80211("leave - not ready\n");
7502 mutex_unlock(&priv->mutex);
7503 return -EIO;
7504 }
7505
7506 /* TODO: Figure out how to get ieee80211_local->sta_scanning w/ only
01ebd063 7507 * what is exposed through include/ declarations */
bb8c093b 7508 if (unlikely(!iwl4965_param_disable_hw_scan &&
b481de9c
ZY
7509 test_bit(STATUS_SCANNING, &priv->status))) {
7510 IWL_DEBUG_MAC80211("leave - scanning\n");
7511 mutex_unlock(&priv->mutex);
7512 return 0;
7513 }
7514
7515 spin_lock_irqsave(&priv->lock, flags);
7516
bb8c093b 7517 ch_info = iwl4965_get_channel_info(priv, conf->phymode, conf->channel);
b481de9c
ZY
7518 if (!is_channel_valid(ch_info)) {
7519 IWL_DEBUG_SCAN("Channel %d [%d] is INVALID for this SKU.\n",
7520 conf->channel, conf->phymode);
7521 IWL_DEBUG_MAC80211("leave - invalid channel\n");
7522 spin_unlock_irqrestore(&priv->lock, flags);
7523 mutex_unlock(&priv->mutex);
7524 return -EINVAL;
7525 }
7526
c8b0e6e1 7527#ifdef CONFIG_IWL4965_HT
b481de9c
ZY
 7528	/* if we are switching from HT to 2.4 GHz, clear flags
 7529	 * from any HT-related info since 2.4 GHz does not
 7530	 * support HT */
7531 if ((le16_to_cpu(priv->staging_rxon.channel) != conf->channel)
7532#ifdef IEEE80211_CONF_CHANNEL_SWITCH
7533 && !(conf->flags & IEEE80211_CONF_CHANNEL_SWITCH)
7534#endif
7535 )
7536 priv->staging_rxon.flags = 0;
c8b0e6e1 7537#endif /* CONFIG_IWL4965_HT */
b481de9c 7538
bb8c093b 7539 iwl4965_set_rxon_channel(priv, conf->phymode, conf->channel);
b481de9c 7540
bb8c093b 7541 iwl4965_set_flags_for_phymode(priv, conf->phymode);
b481de9c
ZY
7542
7543 /* The list of supported rates and rate mask can be different
7544 * for each phymode; since the phymode may have changed, reset
7545 * the rate mask to what mac80211 lists */
bb8c093b 7546 iwl4965_set_rate(priv);
b481de9c
ZY
7547
7548 spin_unlock_irqrestore(&priv->lock, flags);
7549
7550#ifdef IEEE80211_CONF_CHANNEL_SWITCH
7551 if (conf->flags & IEEE80211_CONF_CHANNEL_SWITCH) {
bb8c093b 7552 iwl4965_hw_channel_switch(priv, conf->channel);
b481de9c
ZY
7553 mutex_unlock(&priv->mutex);
7554 return 0;
7555 }
7556#endif
7557
bb8c093b 7558 iwl4965_radio_kill_sw(priv, !conf->radio_enabled);
b481de9c
ZY
7559
7560 if (!conf->radio_enabled) {
7561 IWL_DEBUG_MAC80211("leave - radio disabled\n");
7562 mutex_unlock(&priv->mutex);
7563 return 0;
7564 }
7565
bb8c093b 7566 if (iwl4965_is_rfkill(priv)) {
b481de9c
ZY
7567 IWL_DEBUG_MAC80211("leave - RF kill\n");
7568 mutex_unlock(&priv->mutex);
7569 return -EIO;
7570 }
7571
bb8c093b 7572 iwl4965_set_rate(priv);
b481de9c
ZY
7573
7574 if (memcmp(&priv->active_rxon,
7575 &priv->staging_rxon, sizeof(priv->staging_rxon)))
bb8c093b 7576 iwl4965_commit_rxon(priv);
b481de9c
ZY
7577 else
7578 IWL_DEBUG_INFO("No re-sending same RXON configuration.\n");
7579
7580 IWL_DEBUG_MAC80211("leave\n");
7581
7582 mutex_unlock(&priv->mutex);
7583
7584 return 0;
7585}
7586
bb8c093b 7587static void iwl4965_config_ap(struct iwl4965_priv *priv)
b481de9c
ZY
7588{
7589 int rc = 0;
7590
7591 if (priv->status & STATUS_EXIT_PENDING)
7592 return;
7593
7594 /* The following should be done only at AP bring up */
7595 if ((priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) == 0) {
7596
7597 /* RXON - unassoc (to set timing command) */
7598 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
bb8c093b 7599 iwl4965_commit_rxon(priv);
b481de9c
ZY
7600
7601 /* RXON Timing */
bb8c093b
CH
7602 memset(&priv->rxon_timing, 0, sizeof(struct iwl4965_rxon_time_cmd));
7603 iwl4965_setup_rxon_timing(priv);
7604 rc = iwl4965_send_cmd_pdu(priv, REPLY_RXON_TIMING,
b481de9c
ZY
7605 sizeof(priv->rxon_timing), &priv->rxon_timing);
7606 if (rc)
7607 IWL_WARNING("REPLY_RXON_TIMING failed - "
7608 "Attempting to continue.\n");
7609
7610 iwl4965_set_rxon_chain(priv);
7611
7612 /* FIXME: what should be the assoc_id for AP? */
7613 priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id);
7614 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
7615 priv->staging_rxon.flags |=
7616 RXON_FLG_SHORT_PREAMBLE_MSK;
7617 else
7618 priv->staging_rxon.flags &=
7619 ~RXON_FLG_SHORT_PREAMBLE_MSK;
7620
7621 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) {
7622 if (priv->assoc_capability &
7623 WLAN_CAPABILITY_SHORT_SLOT_TIME)
7624 priv->staging_rxon.flags |=
7625 RXON_FLG_SHORT_SLOT_MSK;
7626 else
7627 priv->staging_rxon.flags &=
7628 ~RXON_FLG_SHORT_SLOT_MSK;
7629
7630 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)
7631 priv->staging_rxon.flags &=
7632 ~RXON_FLG_SHORT_SLOT_MSK;
7633 }
7634 /* restore RXON assoc */
7635 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
bb8c093b 7636 iwl4965_commit_rxon(priv);
c8b0e6e1 7637#ifdef CONFIG_IWL4965_QOS
bb8c093b 7638 iwl4965_activate_qos(priv, 1);
b481de9c 7639#endif
bb8c093b 7640 iwl4965_rxon_add_station(priv, iwl4965_broadcast_addr, 0);
e1493deb 7641 }
bb8c093b 7642 iwl4965_send_beacon_cmd(priv);
b481de9c
ZY
7643
7644 /* FIXME - we need to add code here to detect a totally new
7645 * configuration, reset the AP, unassoc, rxon timing, assoc,
7646 * clear sta table, add BCAST sta... */
7647}
7648
bb8c093b 7649static int iwl4965_mac_config_interface(struct ieee80211_hw *hw, int if_id,
b481de9c
ZY
7650 struct ieee80211_if_conf *conf)
7651{
bb8c093b 7652 struct iwl4965_priv *priv = hw->priv;
0795af57 7653 DECLARE_MAC_BUF(mac);
b481de9c
ZY
7654 unsigned long flags;
7655 int rc;
7656
7657 if (conf == NULL)
7658 return -EIO;
7659
7660 if ((priv->iw_mode == IEEE80211_IF_TYPE_AP) &&
7661 (!conf->beacon || !conf->ssid_len)) {
7662 IWL_DEBUG_MAC80211
7663 ("Leaving in AP mode because HostAPD is not ready.\n");
7664 return 0;
7665 }
7666
7667 mutex_lock(&priv->mutex);
7668
7669 IWL_DEBUG_MAC80211("enter: interface id %d\n", if_id);
7670 if (conf->bssid)
0795af57
JP
7671 IWL_DEBUG_MAC80211("bssid: %s\n",
7672 print_mac(mac, conf->bssid));
b481de9c 7673
4150c572
JB
7674/*
7675 * very dubious code was here; the probe filtering flag is never set:
7676 *
b481de9c
ZY
7677 if (unlikely(test_bit(STATUS_SCANNING, &priv->status)) &&
7678 !(priv->hw->flags & IEEE80211_HW_NO_PROBE_FILTERING)) {
4150c572
JB
7679 */
7680 if (unlikely(test_bit(STATUS_SCANNING, &priv->status))) {
b481de9c
ZY
7681 IWL_DEBUG_MAC80211("leave - scanning\n");
7682 mutex_unlock(&priv->mutex);
7683 return 0;
7684 }
7685
7686 if (priv->interface_id != if_id) {
7687 IWL_DEBUG_MAC80211("leave - interface_id != if_id\n");
7688 mutex_unlock(&priv->mutex);
7689 return 0;
7690 }
7691
7692 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
7693 if (!conf->bssid) {
7694 conf->bssid = priv->mac_addr;
7695 memcpy(priv->bssid, priv->mac_addr, ETH_ALEN);
0795af57
JP
7696 IWL_DEBUG_MAC80211("bssid was set to: %s\n",
7697 print_mac(mac, conf->bssid));
b481de9c
ZY
7698 }
7699 if (priv->ibss_beacon)
7700 dev_kfree_skb(priv->ibss_beacon);
7701
7702 priv->ibss_beacon = conf->beacon;
7703 }
7704
7705 if (conf->bssid && !is_zero_ether_addr(conf->bssid) &&
7706 !is_multicast_ether_addr(conf->bssid)) {
7707 /* If there is currently a HW scan going on in the background
7708 * then we need to cancel it else the RXON below will fail. */
bb8c093b 7709 if (iwl4965_scan_cancel_timeout(priv, 100)) {
b481de9c
ZY
7710 IWL_WARNING("Aborted scan still in progress "
7711 "after 100ms\n");
7712 IWL_DEBUG_MAC80211("leaving - scan abort failed.\n");
7713 mutex_unlock(&priv->mutex);
7714 return -EAGAIN;
7715 }
7716 memcpy(priv->staging_rxon.bssid_addr, conf->bssid, ETH_ALEN);
7717
7718 /* TODO: Audit driver for usage of these members and see
7719 * if mac80211 deprecates them (priv->bssid looks like it
7720 * shouldn't be there, but I haven't scanned the IBSS code
7721 * to verify) - jpk */
7722 memcpy(priv->bssid, conf->bssid, ETH_ALEN);
7723
7724 if (priv->iw_mode == IEEE80211_IF_TYPE_AP)
bb8c093b 7725 iwl4965_config_ap(priv);
b481de9c 7726 else {
bb8c093b 7727 rc = iwl4965_commit_rxon(priv);
b481de9c 7728 if ((priv->iw_mode == IEEE80211_IF_TYPE_STA) && rc)
bb8c093b 7729 iwl4965_rxon_add_station(
b481de9c
ZY
7730 priv, priv->active_rxon.bssid_addr, 1);
7731 }
7732
7733 } else {
bb8c093b 7734 iwl4965_scan_cancel_timeout(priv, 100);
b481de9c 7735 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
bb8c093b 7736 iwl4965_commit_rxon(priv);
b481de9c
ZY
7737 }
7738
7739 spin_lock_irqsave(&priv->lock, flags);
7740 if (!conf->ssid_len)
7741 memset(priv->essid, 0, IW_ESSID_MAX_SIZE);
7742 else
7743 memcpy(priv->essid, conf->ssid, conf->ssid_len);
7744
7745 priv->essid_len = conf->ssid_len;
7746 spin_unlock_irqrestore(&priv->lock, flags);
7747
7748 IWL_DEBUG_MAC80211("leave\n");
7749 mutex_unlock(&priv->mutex);
7750
7751 return 0;
7752}
7753
bb8c093b 7754static void iwl4965_configure_filter(struct ieee80211_hw *hw,
4150c572
JB
7755 unsigned int changed_flags,
7756 unsigned int *total_flags,
7757 int mc_count, struct dev_addr_list *mc_list)
7758{
7759 /*
7760 * XXX: dummy
bb8c093b 7761 * see also iwl4965_connection_init_rx_config
4150c572
JB
7762 */
7763 *total_flags = 0;
7764}
7765
bb8c093b 7766static void iwl4965_mac_remove_interface(struct ieee80211_hw *hw,
b481de9c
ZY
7767 struct ieee80211_if_init_conf *conf)
7768{
bb8c093b 7769 struct iwl4965_priv *priv = hw->priv;
b481de9c
ZY
7770
7771 IWL_DEBUG_MAC80211("enter\n");
7772
7773 mutex_lock(&priv->mutex);
948c171c 7774
bb8c093b 7775 iwl4965_scan_cancel_timeout(priv, 100);
948c171c
MA
7776 cancel_delayed_work(&priv->post_associate);
7777 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
bb8c093b 7778 iwl4965_commit_rxon(priv);
948c171c 7779
b481de9c
ZY
7780 if (priv->interface_id == conf->if_id) {
7781 priv->interface_id = 0;
7782 memset(priv->bssid, 0, ETH_ALEN);
7783 memset(priv->essid, 0, IW_ESSID_MAX_SIZE);
7784 priv->essid_len = 0;
7785 }
7786 mutex_unlock(&priv->mutex);
7787
7788 IWL_DEBUG_MAC80211("leave\n");
7789
7790}
bb8c093b 7791static void iwl4965_mac_erp_ie_changed(struct ieee80211_hw *hw,
220173b0
TW
7792 u8 changes, int cts_protection, int preamble)
7793{
bb8c093b 7794 struct iwl4965_priv *priv = hw->priv;
220173b0
TW
7795
7796 if (changes & IEEE80211_ERP_CHANGE_PREAMBLE) {
7797 if (preamble == WLAN_ERP_PREAMBLE_SHORT)
7798 priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
7799 else
7800 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
7801 }
7802
7803 if (changes & IEEE80211_ERP_CHANGE_PROTECTION) {
797a54c6 7804 if (cts_protection && (priv->phymode != MODE_IEEE80211A))
220173b0
TW
7805 priv->staging_rxon.flags |= RXON_FLG_TGG_PROTECT_MSK;
7806 else
7807 priv->staging_rxon.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
7808 }
7809
bb8c093b
CH
7810 if (iwl4965_is_associated(priv))
7811 iwl4965_send_rxon_assoc(priv);
220173b0 7812}
b481de9c
ZY
7813
7814#define IWL_DELAY_NEXT_SCAN (HZ*2)
bb8c093b 7815static int iwl4965_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t len)
b481de9c
ZY
7816{
7817 int rc = 0;
7818 unsigned long flags;
bb8c093b 7819 struct iwl4965_priv *priv = hw->priv;
b481de9c
ZY
7820
7821 IWL_DEBUG_MAC80211("enter\n");
7822
052c4b9f 7823 mutex_lock(&priv->mutex);
b481de9c
ZY
7824 spin_lock_irqsave(&priv->lock, flags);
7825
bb8c093b 7826 if (!iwl4965_is_ready_rf(priv)) {
b481de9c
ZY
7827 rc = -EIO;
7828 IWL_DEBUG_MAC80211("leave - not ready or exit pending\n");
7829 goto out_unlock;
7830 }
7831
7832 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { /* APs don't scan */
7833 rc = -EIO;
7834 IWL_ERROR("ERROR: APs don't scan\n");
7835 goto out_unlock;
7836 }
7837
7838 /* if we just finished scan ask for delay */
7839 if (priv->last_scan_jiffies &&
7840 time_after(priv->last_scan_jiffies + IWL_DELAY_NEXT_SCAN,
7841 jiffies)) {
7842 rc = -EAGAIN;
7843 goto out_unlock;
7844 }
7845 if (len) {
7846 IWL_DEBUG_SCAN("direct scan for "
7847 "%s [%d]\n ",
bb8c093b 7848 iwl4965_escape_essid(ssid, len), (int)len);
b481de9c
ZY
7849
7850 priv->one_direct_scan = 1;
7851 priv->direct_ssid_len = (u8)
7852 min((u8) len, (u8) IW_ESSID_MAX_SIZE);
7853 memcpy(priv->direct_ssid, ssid, priv->direct_ssid_len);
948c171c
MA
7854 } else
7855 priv->one_direct_scan = 0;
b481de9c 7856
bb8c093b 7857 rc = iwl4965_scan_initiate(priv);
b481de9c
ZY
7858
7859 IWL_DEBUG_MAC80211("leave\n");
7860
7861out_unlock:
7862 spin_unlock_irqrestore(&priv->lock, flags);
052c4b9f 7863 mutex_unlock(&priv->mutex);
b481de9c
ZY
7864
7865 return rc;
7866}
7867
bb8c093b 7868static int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
b481de9c
ZY
7869 const u8 *local_addr, const u8 *addr,
7870 struct ieee80211_key_conf *key)
7871{
bb8c093b 7872 struct iwl4965_priv *priv = hw->priv;
0795af57 7873 DECLARE_MAC_BUF(mac);
b481de9c
ZY
7874 int rc = 0;
7875 u8 sta_id;
7876
7877 IWL_DEBUG_MAC80211("enter\n");
7878
bb8c093b 7879 if (!iwl4965_param_hwcrypto) {
b481de9c
ZY
7880 IWL_DEBUG_MAC80211("leave - hwcrypto disabled\n");
7881 return -EOPNOTSUPP;
7882 }
7883
7884 if (is_zero_ether_addr(addr))
7885 /* only support pairwise keys */
7886 return -EOPNOTSUPP;
7887
bb8c093b 7888 sta_id = iwl4965_hw_find_station(priv, addr);
b481de9c 7889 if (sta_id == IWL_INVALID_STATION) {
0795af57
JP
7890 IWL_DEBUG_MAC80211("leave - %s not in station map.\n",
7891 print_mac(mac, addr));
b481de9c
ZY
7892 return -EINVAL;
7893 }
7894
7895 mutex_lock(&priv->mutex);
7896
bb8c093b 7897 iwl4965_scan_cancel_timeout(priv, 100);
052c4b9f 7898
b481de9c
ZY
7899 switch (cmd) {
7900 case SET_KEY:
bb8c093b 7901 rc = iwl4965_update_sta_key_info(priv, key, sta_id);
b481de9c 7902 if (!rc) {
bb8c093b
CH
7903 iwl4965_set_rxon_hwcrypto(priv, 1);
7904 iwl4965_commit_rxon(priv);
b481de9c
ZY
7905 key->hw_key_idx = sta_id;
7906 IWL_DEBUG_MAC80211("set_key success, using hwcrypto\n");
7907 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
7908 }
7909 break;
7910 case DISABLE_KEY:
bb8c093b 7911 rc = iwl4965_clear_sta_key_info(priv, sta_id);
b481de9c 7912 if (!rc) {
bb8c093b
CH
7913 iwl4965_set_rxon_hwcrypto(priv, 0);
7914 iwl4965_commit_rxon(priv);
b481de9c
ZY
7915 IWL_DEBUG_MAC80211("disable hwcrypto key\n");
7916 }
7917 break;
7918 default:
7919 rc = -EINVAL;
7920 }
7921
7922 IWL_DEBUG_MAC80211("leave\n");
7923 mutex_unlock(&priv->mutex);
7924
7925 return rc;
7926}
7927
bb8c093b 7928static int iwl4965_mac_conf_tx(struct ieee80211_hw *hw, int queue,
b481de9c
ZY
7929 const struct ieee80211_tx_queue_params *params)
7930{
bb8c093b 7931 struct iwl4965_priv *priv = hw->priv;
c8b0e6e1 7932#ifdef CONFIG_IWL4965_QOS
b481de9c
ZY
7933 unsigned long flags;
7934 int q;
0054b34d 7935#endif /* CONFIG_IWL4965_QOS */
b481de9c
ZY
7936
7937 IWL_DEBUG_MAC80211("enter\n");
7938
bb8c093b 7939 if (!iwl4965_is_ready_rf(priv)) {
b481de9c
ZY
7940 IWL_DEBUG_MAC80211("leave - RF not ready\n");
7941 return -EIO;
7942 }
7943
7944 if (queue >= AC_NUM) {
7945 IWL_DEBUG_MAC80211("leave - queue >= AC_NUM %d\n", queue);
7946 return 0;
7947 }
7948
c8b0e6e1 7949#ifdef CONFIG_IWL4965_QOS
b481de9c
ZY
7950 if (!priv->qos_data.qos_enable) {
7951 priv->qos_data.qos_active = 0;
7952 IWL_DEBUG_MAC80211("leave - qos not enabled\n");
7953 return 0;
7954 }
7955 q = AC_NUM - 1 - queue;
7956
7957 spin_lock_irqsave(&priv->lock, flags);
7958
7959 priv->qos_data.def_qos_parm.ac[q].cw_min = cpu_to_le16(params->cw_min);
7960 priv->qos_data.def_qos_parm.ac[q].cw_max = cpu_to_le16(params->cw_max);
7961 priv->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
7962 priv->qos_data.def_qos_parm.ac[q].edca_txop =
7963 cpu_to_le16((params->burst_time * 100));
7964
7965 priv->qos_data.def_qos_parm.ac[q].reserved1 = 0;
7966 priv->qos_data.qos_active = 1;
7967
7968 spin_unlock_irqrestore(&priv->lock, flags);
7969
7970 mutex_lock(&priv->mutex);
7971 if (priv->iw_mode == IEEE80211_IF_TYPE_AP)
bb8c093b
CH
7972 iwl4965_activate_qos(priv, 1);
7973 else if (priv->assoc_id && iwl4965_is_associated(priv))
7974 iwl4965_activate_qos(priv, 0);
b481de9c
ZY
7975
7976 mutex_unlock(&priv->mutex);
7977
c8b0e6e1 7978#endif /*CONFIG_IWL4965_QOS */
b481de9c
ZY
7979
7980 IWL_DEBUG_MAC80211("leave\n");
7981 return 0;
7982}
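/*
 * Editor's note -- illustrative note only, not part of the driver.
 * The "q = AC_NUM - 1 - queue" line above simply reverses the index:
 * mac80211 and the uCode QoS table number the four access categories
 * in opposite orders (an inference from the mapping, not from
 * firmware documentation).  Assuming the usual AC_NUM == 4:
 *
 *	mac80211 queue :  0  1  2  3
 *	uCode ac[] idx :  3  2  1  0
 *
 * so queue 0 lands in ac[3] and queue 3 in ac[0].
 */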
7983
bb8c093b 7984static int iwl4965_mac_get_tx_stats(struct ieee80211_hw *hw,
b481de9c
ZY
7985 struct ieee80211_tx_queue_stats *stats)
7986{
bb8c093b 7987 struct iwl4965_priv *priv = hw->priv;
b481de9c 7988 int i, avail;
bb8c093b
CH
7989 struct iwl4965_tx_queue *txq;
7990 struct iwl4965_queue *q;
b481de9c
ZY
7991 unsigned long flags;
7992
7993 IWL_DEBUG_MAC80211("enter\n");
7994
bb8c093b 7995 if (!iwl4965_is_ready_rf(priv)) {
b481de9c
ZY
7996 IWL_DEBUG_MAC80211("leave - RF not ready\n");
7997 return -EIO;
7998 }
7999
8000 spin_lock_irqsave(&priv->lock, flags);
8001
8002 for (i = 0; i < AC_NUM; i++) {
8003 txq = &priv->txq[i];
8004 q = &txq->q;
bb8c093b 8005 avail = iwl4965_queue_space(q);
b481de9c
ZY
8006
8007 stats->data[i].len = q->n_window - avail;
8008 stats->data[i].limit = q->n_window - q->high_mark;
8009 stats->data[i].count = q->n_window;
8010
8011 }
8012 spin_unlock_irqrestore(&priv->lock, flags);
8013
8014 IWL_DEBUG_MAC80211("leave\n");
8015
8016 return 0;
8017}
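/*
 * Editor's note -- worked example only, not part of the driver.
 * The loop above turns the driver's ring bookkeeping into mac80211's
 * per-queue stats: with a window of n_window descriptors and "avail"
 * free slots, the difference is the number of queued frames.  Using
 * hypothetical values q->n_window = 64, iwl4965_queue_space(q) = 40,
 * q->high_mark = 8:
 *
 *	stats->data[i].len   = 64 - 40 = 24	// frames currently queued
 *	stats->data[i].limit = 64 -  8 = 56	// queueing limit reported
 *	stats->data[i].count = 64		// ring size
 */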
8018
bb8c093b 8019static int iwl4965_mac_get_stats(struct ieee80211_hw *hw,
b481de9c
ZY
8020 struct ieee80211_low_level_stats *stats)
8021{
8022 IWL_DEBUG_MAC80211("enter\n");
8023 IWL_DEBUG_MAC80211("leave\n");
8024
8025 return 0;
8026}
8027
bb8c093b 8028static u64 iwl4965_mac_get_tsf(struct ieee80211_hw *hw)
b481de9c
ZY
8029{
8030 IWL_DEBUG_MAC80211("enter\n");
8031 IWL_DEBUG_MAC80211("leave\n");
8032
8033 return 0;
8034}
8035
bb8c093b 8036static void iwl4965_mac_reset_tsf(struct ieee80211_hw *hw)
b481de9c 8037{
bb8c093b 8038 struct iwl4965_priv *priv = hw->priv;
b481de9c
ZY
8039 unsigned long flags;
8040
8041 mutex_lock(&priv->mutex);
8042 IWL_DEBUG_MAC80211("enter\n");
8043
8044 priv->lq_mngr.lq_ready = 0;
c8b0e6e1 8045#ifdef CONFIG_IWL4965_HT
b481de9c
ZY
8046 spin_lock_irqsave(&priv->lock, flags);
8047 memset(&priv->current_assoc_ht, 0, sizeof(struct sta_ht_info));
8048 spin_unlock_irqrestore(&priv->lock, flags);
c8b0e6e1 8049#ifdef CONFIG_IWL4965_HT_AGG
b481de9c
ZY
8050/* if (priv->lq_mngr.agg_ctrl.granted_ba)
8051 iwl4965_turn_off_agg(priv, TID_ALL_SPECIFIED);*/
8052
bb8c093b 8053 memset(&(priv->lq_mngr.agg_ctrl), 0, sizeof(struct iwl4965_agg_control));
b481de9c
ZY
8054 priv->lq_mngr.agg_ctrl.tid_traffic_load_threshold = 10;
8055 priv->lq_mngr.agg_ctrl.ba_timeout = 5000;
8056 priv->lq_mngr.agg_ctrl.auto_agg = 1;
8057
8058 if (priv->lq_mngr.agg_ctrl.auto_agg)
8059 priv->lq_mngr.agg_ctrl.requested_ba = TID_ALL_ENABLED;
c8b0e6e1
CH
8060#endif /*CONFIG_IWL4965_HT_AGG */
8061#endif /* CONFIG_IWL4965_HT */
b481de9c 8062
c8b0e6e1 8063#ifdef CONFIG_IWL4965_QOS
bb8c093b 8064 iwl4965_reset_qos(priv);
b481de9c
ZY
8065#endif
8066
8067 cancel_delayed_work(&priv->post_associate);
8068
8069 spin_lock_irqsave(&priv->lock, flags);
8070 priv->assoc_id = 0;
8071 priv->assoc_capability = 0;
8072 priv->call_post_assoc_from_beacon = 0;
8073 priv->assoc_station_added = 0;
8074
8075 /* new association get rid of ibss beacon skb */
8076 if (priv->ibss_beacon)
8077 dev_kfree_skb(priv->ibss_beacon);
8078
8079 priv->ibss_beacon = NULL;
8080
8081 priv->beacon_int = priv->hw->conf.beacon_int;
8082 priv->timestamp1 = 0;
8083 priv->timestamp0 = 0;
8084 if ((priv->iw_mode == IEEE80211_IF_TYPE_STA))
8085 priv->beacon_int = 0;
8086
8087 spin_unlock_irqrestore(&priv->lock, flags);
8088
052c4b9f 8089 /* we are restarting association process
8090 * clear RXON_FILTER_ASSOC_MSK bit
8091 */
8092 if (priv->iw_mode != IEEE80211_IF_TYPE_AP) {
bb8c093b 8093 iwl4965_scan_cancel_timeout(priv, 100);
052c4b9f 8094 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
bb8c093b 8095 iwl4965_commit_rxon(priv);
052c4b9f 8096 }
8097
b481de9c
ZY
8098 /* Per mac80211.h: This is only used in IBSS mode... */
8099 if (priv->iw_mode != IEEE80211_IF_TYPE_IBSS) {
052c4b9f 8100
b481de9c
ZY
8101 IWL_DEBUG_MAC80211("leave - not in IBSS\n");
8102 mutex_unlock(&priv->mutex);
8103 return;
8104 }
8105
bb8c093b 8106 if (!iwl4965_is_ready_rf(priv)) {
b481de9c
ZY
8107 IWL_DEBUG_MAC80211("leave - not ready\n");
8108 mutex_unlock(&priv->mutex);
8109 return;
8110 }
8111
8112 priv->only_active_channel = 0;
8113
bb8c093b 8114 iwl4965_set_rate(priv);
b481de9c
ZY
8115
8116 mutex_unlock(&priv->mutex);
8117
8118 IWL_DEBUG_MAC80211("leave\n");
8119
8120}
8121
bb8c093b 8122static int iwl4965_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb,
b481de9c
ZY
8123 struct ieee80211_tx_control *control)
8124{
bb8c093b 8125 struct iwl4965_priv *priv = hw->priv;
b481de9c
ZY
8126 unsigned long flags;
8127
8128 mutex_lock(&priv->mutex);
8129 IWL_DEBUG_MAC80211("enter\n");
8130
bb8c093b 8131 if (!iwl4965_is_ready_rf(priv)) {
b481de9c
ZY
8132 IWL_DEBUG_MAC80211("leave - RF not ready\n");
8133 mutex_unlock(&priv->mutex);
8134 return -EIO;
8135 }
8136
8137 if (priv->iw_mode != IEEE80211_IF_TYPE_IBSS) {
8138 IWL_DEBUG_MAC80211("leave - not IBSS\n");
8139 mutex_unlock(&priv->mutex);
8140 return -EIO;
8141 }
8142
8143 spin_lock_irqsave(&priv->lock, flags);
8144
8145 if (priv->ibss_beacon)
8146 dev_kfree_skb(priv->ibss_beacon);
8147
8148 priv->ibss_beacon = skb;
8149
8150 priv->assoc_id = 0;
8151
8152 IWL_DEBUG_MAC80211("leave\n");
8153 spin_unlock_irqrestore(&priv->lock, flags);
8154
c8b0e6e1 8155#ifdef CONFIG_IWL4965_QOS
bb8c093b 8156 iwl4965_reset_qos(priv);
b481de9c
ZY
8157#endif
8158
8159 queue_work(priv->workqueue, &priv->post_associate.work);
8160
8161 mutex_unlock(&priv->mutex);
8162
8163 return 0;
8164}
8165
c8b0e6e1 8166#ifdef CONFIG_IWL4965_HT
b481de9c
ZY
8167union ht_cap_info {
8168 struct {
8169 u16 advanced_coding_cap :1;
8170 u16 supported_chan_width_set :1;
8171 u16 mimo_power_save_mode :2;
8172 u16 green_field :1;
8173 u16 short_GI20 :1;
8174 u16 short_GI40 :1;
8175 u16 tx_stbc :1;
8176 u16 rx_stbc :1;
8177 u16 beam_forming :1;
8178 u16 delayed_ba :1;
8179 u16 maximal_amsdu_size :1;
8180 u16 cck_mode_at_40MHz :1;
8181 u16 psmp_support :1;
8182 u16 stbc_ctrl_frame_support :1;
8183 u16 sig_txop_protection_support :1;
8184 };
8185 u16 val;
8186} __attribute__ ((packed));
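/*
 * Editor's note -- illustrative sketch only, not part of the driver.
 * This union lets the driver load the over-the-air 16-bit HT
 * capability field once and then test individual flags as bitfields,
 * exactly as sta_ht_info_init() does below:
 *
 *	union ht_cap_info cap;
 *
 *	cap.val = (u16) le16_to_cpu(ht_cap->capabilities_info);
 *	if (cap.short_GI20)
 *		ht_info->sgf |= 0x1;	// peer supports short GI at 20 MHz
 *
 * Bitfield layout is compiler and endian dependent, so the field
 * order only holds for the targets this driver was built for.
 */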
8187
8188union ht_param_info{
8189 struct {
8190 u8 max_rx_ampdu_factor :2;
8191 u8 mpdu_density :3;
8192 u8 reserved :3;
8193 };
8194 u8 val;
8195} __attribute__ ((packed));
8196
8197union ht_exra_param_info {
8198 struct {
8199 u8 ext_chan_offset :2;
8200 u8 tx_chan_width :1;
8201 u8 rifs_mode :1;
8202 u8 controlled_access_only :1;
8203 u8 service_interval_granularity :3;
8204 };
8205 u8 val;
8206} __attribute__ ((packed));
8207
8208union ht_operation_mode{
8209 struct {
8210 u16 op_mode :2;
8211 u16 non_GF :1;
8212 u16 reserved :13;
8213 };
8214 u16 val;
8215} __attribute__ ((packed));
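/*
 * Note: these packed bitfield unions mirror the bit layout of the 802.11n
 * HT capability / HT operation information elements, so each field can be
 * read either by name or as the raw value through .val (see
 * sta_ht_info_init() and iwl4965_set_ht_capab() below).
 */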
8216
8217
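/*
 * sta_ht_info_init - fill a driver sta_ht_info structure from the HT
 * capability and HT additional-information elements (and, optionally, the
 * AP's own sta_ht_info).  Returns 0 on success, -1 if no output structure
 * was supplied.
 */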
8218static int sta_ht_info_init(struct ieee80211_ht_capability *ht_cap,
8219 struct ieee80211_ht_additional_info *ht_extra,
8220 struct sta_ht_info *ht_info_ap,
8221 struct sta_ht_info *ht_info)
8222{
8223 union ht_cap_info cap;
8224 union ht_operation_mode op_mode;
8225 union ht_param_info param_info;
 8226 	union ht_extra_param_info extra_param_info;
8227
8228 IWL_DEBUG_MAC80211("enter: \n");
8229
8230 if (!ht_info) {
8231 IWL_DEBUG_MAC80211("leave: ht_info is NULL\n");
8232 return -1;
8233 }
8234
8235 if (ht_cap) {
8236 cap.val = (u16) le16_to_cpu(ht_cap->capabilities_info);
8237 param_info.val = ht_cap->mac_ht_params_info;
8238 ht_info->is_ht = 1;
8239 if (cap.short_GI20)
8240 ht_info->sgf |= 0x1;
8241 if (cap.short_GI40)
8242 ht_info->sgf |= 0x2;
8243 ht_info->is_green_field = cap.green_field;
8244 ht_info->max_amsdu_size = cap.maximal_amsdu_size;
8245 ht_info->supported_chan_width = cap.supported_chan_width_set;
8246 ht_info->tx_mimo_ps_mode = cap.mimo_power_save_mode;
8247 memcpy(ht_info->supp_rates, ht_cap->supported_mcs_set, 16);
8248
8249 ht_info->ampdu_factor = param_info.max_rx_ampdu_factor;
8250 ht_info->mpdu_density = param_info.mpdu_density;
8251
8252 IWL_DEBUG_MAC80211("SISO mask 0x%X MIMO mask 0x%X \n",
8253 ht_cap->supported_mcs_set[0],
8254 ht_cap->supported_mcs_set[1]);
8255
8256 if (ht_info_ap) {
8257 ht_info->control_channel = ht_info_ap->control_channel;
8258 ht_info->extension_chan_offset =
8259 ht_info_ap->extension_chan_offset;
8260 ht_info->tx_chan_width = ht_info_ap->tx_chan_width;
8261 ht_info->operating_mode = ht_info_ap->operating_mode;
8262 }
8263
8264 if (ht_extra) {
8265 extra_param_info.val = ht_extra->ht_param;
8266 ht_info->control_channel = ht_extra->control_chan;
8267 ht_info->extension_chan_offset =
8268 extra_param_info.ext_chan_offset;
8269 ht_info->tx_chan_width = extra_param_info.tx_chan_width;
8270 op_mode.val = (u16)
8271 le16_to_cpu(ht_extra->operation_mode);
8272 ht_info->operating_mode = op_mode.op_mode;
8273 IWL_DEBUG_MAC80211("control channel %d\n",
8274 ht_extra->control_chan);
8275 }
8276 } else
8277 ht_info->is_ht = 0;
8278
8279 IWL_DEBUG_MAC80211("leave\n");
8280 return 0;
8281}
8282
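/*
 * iwl4965_mac_conf_ht - mac80211 HT configuration callback.  Records the
 * negotiated HT parameters in priv->current_assoc_ht, re-selects the RX
 * chain, and (when already associated as a station) schedules the
 * post_associate work so the new settings take effect.
 */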
bb8c093b 8283static int iwl4965_mac_conf_ht(struct ieee80211_hw *hw,
b481de9c
ZY
8284 struct ieee80211_ht_capability *ht_cap,
8285 struct ieee80211_ht_additional_info *ht_extra)
8286{
bb8c093b 8287 struct iwl4965_priv *priv = hw->priv;
b481de9c
ZY
8288 int rs;
8289
8290 IWL_DEBUG_MAC80211("enter: \n");
8291
8292 rs = sta_ht_info_init(ht_cap, ht_extra, NULL, &priv->current_assoc_ht);
8293 iwl4965_set_rxon_chain(priv);
8294
8295 if (priv && priv->assoc_id &&
8296 (priv->iw_mode == IEEE80211_IF_TYPE_STA)) {
8297 unsigned long flags;
8298
8299 spin_lock_irqsave(&priv->lock, flags);
8300 if (priv->beacon_int)
8301 queue_work(priv->workqueue, &priv->post_associate.work);
8302 else
8303 priv->call_post_assoc_from_beacon = 1;
8304 spin_unlock_irqrestore(&priv->lock, flags);
8305 }
8306
8307 IWL_DEBUG_MAC80211("leave: control channel %d\n",
8308 ht_extra->control_chan);
8309 return rs;
8310
8311}
8312
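/*
 * iwl4965_set_ht_capab / iwl4965_mac_get_ht_capab - build the HT
 * capability element this driver advertises: 4K A-MSDU, greenfield,
 * short GI for 20 and 40 MHz, MIMO power save disabled (0x3), MCS 0-15,
 * and supported_mcs_set[4] set only when a wide (40 MHz / FAT) channel
 * may be used, which in turn is allowed only on 5.2 GHz (802.11a).
 */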
bb8c093b 8313static void iwl4965_set_ht_capab(struct ieee80211_hw *hw,
b481de9c
ZY
8314 struct ieee80211_ht_capability *ht_cap,
8315 u8 use_wide_chan)
8316{
8317 union ht_cap_info cap;
8318 union ht_param_info param_info;
8319
8320 memset(&cap, 0, sizeof(union ht_cap_info));
8321 memset(&param_info, 0, sizeof(union ht_param_info));
8322
8323 cap.maximal_amsdu_size = HT_IE_MAX_AMSDU_SIZE_4K;
8324 cap.green_field = 1;
8325 cap.short_GI20 = 1;
8326 cap.short_GI40 = 1;
8327 cap.supported_chan_width_set = use_wide_chan;
8328 cap.mimo_power_save_mode = 0x3;
8329
8330 param_info.max_rx_ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
8331 param_info.mpdu_density = CFG_HT_MPDU_DENSITY_DEF;
8332 ht_cap->capabilities_info = (__le16) cpu_to_le16(cap.val);
8333 ht_cap->mac_ht_params_info = (u8) param_info.val;
8334
8335 ht_cap->supported_mcs_set[0] = 0xff;
8336 ht_cap->supported_mcs_set[1] = 0xff;
8337 ht_cap->supported_mcs_set[4] =
 8338 		(cap.supported_chan_width_set) ? 0x1 : 0x0;
8339}
8340
bb8c093b 8341static void iwl4965_mac_get_ht_capab(struct ieee80211_hw *hw,
b481de9c
ZY
8342 struct ieee80211_ht_capability *ht_cap)
8343{
8344 u8 use_wide_channel = 1;
bb8c093b 8345 struct iwl4965_priv *priv = hw->priv;
b481de9c
ZY
8346
8347 IWL_DEBUG_MAC80211("enter: \n");
8348 if (priv->channel_width != IWL_CHANNEL_WIDTH_40MHZ)
8349 use_wide_channel = 0;
8350
 8351 	/* no FAT (40 MHz) Tx allowed in the 2.4 GHz band */
8352 if (priv->phymode != MODE_IEEE80211A)
8353 use_wide_channel = 0;
8354
bb8c093b 8355 iwl4965_set_ht_capab(hw, ht_cap, use_wide_channel);
b481de9c
ZY
8356 IWL_DEBUG_MAC80211("leave: \n");
8357}
c8b0e6e1 8358#endif /*CONFIG_IWL4965_HT*/
b481de9c
ZY
8359
8360/*****************************************************************************
8361 *
8362 * sysfs attributes
8363 *
8364 *****************************************************************************/
8365
c8b0e6e1 8366#ifdef CONFIG_IWL4965_DEBUG
b481de9c
ZY
8367
8368/*
8369 * The following adds a new attribute to the sysfs representation
8370 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/iwl/)
8371 * used for controlling the debug level.
8372 *
8373 * See the level definitions in iwl for details.
8374 */
8375
8376static ssize_t show_debug_level(struct device_driver *d, char *buf)
8377{
bb8c093b 8378 return sprintf(buf, "0x%08X\n", iwl4965_debug_level);
b481de9c
ZY
8379}
8380static ssize_t store_debug_level(struct device_driver *d,
8381 const char *buf, size_t count)
8382{
8383 char *p = (char *)buf;
8384 u32 val;
8385
8386 val = simple_strtoul(p, &p, 0);
8387 if (p == buf)
8388 printk(KERN_INFO DRV_NAME
8389 ": %s is not in hex or decimal form.\n", buf);
8390 else
bb8c093b 8391 iwl4965_debug_level = val;
b481de9c
ZY
8392
8393 return strnlen(buf, count);
8394}
8395
8396static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO,
8397 show_debug_level, store_debug_level);
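/*
 * Illustrative usage from userspace (exact directory depends on DRV_NAME,
 * e.g. /sys/bus/pci/drivers/iwl4965/debug_level; mask value is only an
 * example):
 *
 *   cat debug_level              - read the current mask
 *   echo 0x43fff > debug_level   - set a new mask
 *
 * The value is parsed with simple_strtoul(), so hex and decimal both work.
 */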
8398
c8b0e6e1 8399#endif /* CONFIG_IWL4965_DEBUG */
b481de9c
ZY
8400
8401static ssize_t show_rf_kill(struct device *d,
8402 struct device_attribute *attr, char *buf)
8403{
8404 /*
8405 * 0 - RF kill not enabled
8406 * 1 - SW based RF kill active (sysfs)
8407 * 2 - HW based RF kill active
8408 * 3 - Both HW and SW based RF kill active
8409 */
bb8c093b 8410 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
b481de9c
ZY
8411 int val = (test_bit(STATUS_RF_KILL_SW, &priv->status) ? 0x1 : 0x0) |
8412 (test_bit(STATUS_RF_KILL_HW, &priv->status) ? 0x2 : 0x0);
8413
8414 return sprintf(buf, "%i\n", val);
8415}
8416
8417static ssize_t store_rf_kill(struct device *d,
8418 struct device_attribute *attr,
8419 const char *buf, size_t count)
8420{
bb8c093b 8421 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
b481de9c
ZY
8422
8423 mutex_lock(&priv->mutex);
bb8c093b 8424 iwl4965_radio_kill_sw(priv, buf[0] == '1');
b481de9c
ZY
8425 mutex_unlock(&priv->mutex);
8426
8427 return count;
8428}
8429
8430static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill);
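/*
 * Illustrative usage: writing '1' to rf_kill asserts the software RF kill,
 * any other leading character de-asserts it; reading the file back reports
 * the combined SW/HW state using the 0-3 encoding described above.
 */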
8431
8432static ssize_t show_temperature(struct device *d,
8433 struct device_attribute *attr, char *buf)
8434{
bb8c093b 8435 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
b481de9c 8436
bb8c093b 8437 if (!iwl4965_is_alive(priv))
b481de9c
ZY
8438 return -EAGAIN;
8439
bb8c093b 8440 return sprintf(buf, "%d\n", iwl4965_hw_get_temperature(priv));
b481de9c
ZY
8441}
8442
8443static DEVICE_ATTR(temperature, S_IRUGO, show_temperature, NULL);
8444
8445static ssize_t show_rs_window(struct device *d,
8446 struct device_attribute *attr,
8447 char *buf)
8448{
bb8c093b
CH
8449 struct iwl4965_priv *priv = d->driver_data;
8450 return iwl4965_fill_rs_info(priv->hw, buf, IWL_AP_ID);
b481de9c
ZY
8451}
8452static DEVICE_ATTR(rs_window, S_IRUGO, show_rs_window, NULL);
8453
8454static ssize_t show_tx_power(struct device *d,
8455 struct device_attribute *attr, char *buf)
8456{
bb8c093b 8457 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
b481de9c
ZY
8458 return sprintf(buf, "%d\n", priv->user_txpower_limit);
8459}
8460
8461static ssize_t store_tx_power(struct device *d,
8462 struct device_attribute *attr,
8463 const char *buf, size_t count)
8464{
bb8c093b 8465 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
b481de9c
ZY
8466 char *p = (char *)buf;
8467 u32 val;
8468
8469 val = simple_strtoul(p, &p, 10);
8470 if (p == buf)
8471 printk(KERN_INFO DRV_NAME
8472 ": %s is not in decimal form.\n", buf);
8473 else
bb8c093b 8474 iwl4965_hw_reg_set_txpower(priv, val);
b481de9c
ZY
8475
8476 return count;
8477}
8478
8479static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, show_tx_power, store_tx_power);
8480
8481static ssize_t show_flags(struct device *d,
8482 struct device_attribute *attr, char *buf)
8483{
bb8c093b 8484 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
b481de9c
ZY
8485
8486 return sprintf(buf, "0x%04X\n", priv->active_rxon.flags);
8487}
8488
8489static ssize_t store_flags(struct device *d,
8490 struct device_attribute *attr,
8491 const char *buf, size_t count)
8492{
bb8c093b 8493 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
b481de9c
ZY
8494 u32 flags = simple_strtoul(buf, NULL, 0);
8495
8496 mutex_lock(&priv->mutex);
8497 if (le32_to_cpu(priv->staging_rxon.flags) != flags) {
8498 /* Cancel any currently running scans... */
bb8c093b 8499 if (iwl4965_scan_cancel_timeout(priv, 100))
b481de9c
ZY
8500 IWL_WARNING("Could not cancel scan.\n");
8501 else {
8502 IWL_DEBUG_INFO("Committing rxon.flags = 0x%04X\n",
8503 flags);
8504 priv->staging_rxon.flags = cpu_to_le32(flags);
bb8c093b 8505 iwl4965_commit_rxon(priv);
b481de9c
ZY
8506 }
8507 }
8508 mutex_unlock(&priv->mutex);
8509
8510 return count;
8511}
8512
8513static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, show_flags, store_flags);
8514
8515static ssize_t show_filter_flags(struct device *d,
8516 struct device_attribute *attr, char *buf)
8517{
bb8c093b 8518 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
b481de9c
ZY
8519
8520 return sprintf(buf, "0x%04X\n",
8521 le32_to_cpu(priv->active_rxon.filter_flags));
8522}
8523
8524static ssize_t store_filter_flags(struct device *d,
8525 struct device_attribute *attr,
8526 const char *buf, size_t count)
8527{
bb8c093b 8528 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
b481de9c
ZY
8529 u32 filter_flags = simple_strtoul(buf, NULL, 0);
8530
8531 mutex_lock(&priv->mutex);
8532 if (le32_to_cpu(priv->staging_rxon.filter_flags) != filter_flags) {
8533 /* Cancel any currently running scans... */
bb8c093b 8534 if (iwl4965_scan_cancel_timeout(priv, 100))
b481de9c
ZY
8535 IWL_WARNING("Could not cancel scan.\n");
8536 else {
8537 IWL_DEBUG_INFO("Committing rxon.filter_flags = "
8538 "0x%04X\n", filter_flags);
8539 priv->staging_rxon.filter_flags =
8540 cpu_to_le32(filter_flags);
bb8c093b 8541 iwl4965_commit_rxon(priv);
b481de9c
ZY
8542 }
8543 }
8544 mutex_unlock(&priv->mutex);
8545
8546 return count;
8547}
8548
8549static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, show_filter_flags,
8550 store_filter_flags);
8551
8552static ssize_t show_tune(struct device *d,
8553 struct device_attribute *attr, char *buf)
8554{
bb8c093b 8555 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
b481de9c
ZY
8556
8557 return sprintf(buf, "0x%04X\n",
8558 (priv->phymode << 8) |
8559 le16_to_cpu(priv->active_rxon.channel));
8560}
8561
bb8c093b 8562static void iwl4965_set_flags_for_phymode(struct iwl4965_priv *priv, u8 phymode);
b481de9c
ZY
8563
8564static ssize_t store_tune(struct device *d,
8565 struct device_attribute *attr,
8566 const char *buf, size_t count)
8567{
bb8c093b 8568 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
b481de9c
ZY
8569 char *p = (char *)buf;
8570 u16 tune = simple_strtoul(p, &p, 0);
8571 u8 phymode = (tune >> 8) & 0xff;
8572 u16 channel = tune & 0xff;
8573
8574 IWL_DEBUG_INFO("Tune request to:%d channel:%d\n", phymode, channel);
8575
8576 mutex_lock(&priv->mutex);
8577 if ((le16_to_cpu(priv->staging_rxon.channel) != channel) ||
8578 (priv->phymode != phymode)) {
bb8c093b 8579 const struct iwl4965_channel_info *ch_info;
b481de9c 8580
bb8c093b 8581 ch_info = iwl4965_get_channel_info(priv, phymode, channel);
b481de9c
ZY
8582 if (!ch_info) {
8583 IWL_WARNING("Requested invalid phymode/channel "
8584 "combination: %d %d\n", phymode, channel);
8585 mutex_unlock(&priv->mutex);
8586 return -EINVAL;
8587 }
8588
8589 /* Cancel any currently running scans... */
bb8c093b 8590 if (iwl4965_scan_cancel_timeout(priv, 100))
b481de9c
ZY
8591 IWL_WARNING("Could not cancel scan.\n");
8592 else {
8593 IWL_DEBUG_INFO("Committing phymode and "
8594 "rxon.channel = %d %d\n",
8595 phymode, channel);
8596
bb8c093b
CH
8597 iwl4965_set_rxon_channel(priv, phymode, channel);
8598 iwl4965_set_flags_for_phymode(priv, phymode);
b481de9c 8599
bb8c093b
CH
8600 iwl4965_set_rate(priv);
8601 iwl4965_commit_rxon(priv);
b481de9c
ZY
8602 }
8603 }
8604 mutex_unlock(&priv->mutex);
8605
8606 return count;
8607}
8608
8609static DEVICE_ATTR(tune, S_IWUSR | S_IRUGO, show_tune, store_tune);
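/*
 * The tune value packs (phymode << 8) | channel, mirroring show_tune()
 * above, so a single write retunes both phymode and channel at once;
 * invalid combinations are rejected via iwl4965_get_channel_info().
 */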
8610
c8b0e6e1 8611#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT
b481de9c
ZY
8612
8613static ssize_t show_measurement(struct device *d,
8614 struct device_attribute *attr, char *buf)
8615{
bb8c093b
CH
8616 struct iwl4965_priv *priv = dev_get_drvdata(d);
8617 struct iwl4965_spectrum_notification measure_report;
b481de9c
ZY
8618 u32 size = sizeof(measure_report), len = 0, ofs = 0;
 8619 	u8 *data = (u8 *)&measure_report;
8620 unsigned long flags;
8621
8622 spin_lock_irqsave(&priv->lock, flags);
8623 if (!(priv->measurement_status & MEASUREMENT_READY)) {
8624 spin_unlock_irqrestore(&priv->lock, flags);
8625 return 0;
8626 }
8627 memcpy(&measure_report, &priv->measure_report, size);
8628 priv->measurement_status = 0;
8629 spin_unlock_irqrestore(&priv->lock, flags);
8630
8631 while (size && (PAGE_SIZE - len)) {
8632 hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
8633 PAGE_SIZE - len, 1);
8634 len = strlen(buf);
8635 if (PAGE_SIZE - len)
8636 buf[len++] = '\n';
8637
8638 ofs += 16;
8639 size -= min(size, 16U);
8640 }
8641
8642 return len;
8643}
8644
8645static ssize_t store_measurement(struct device *d,
8646 struct device_attribute *attr,
8647 const char *buf, size_t count)
8648{
bb8c093b 8649 struct iwl4965_priv *priv = dev_get_drvdata(d);
b481de9c
ZY
8650 struct ieee80211_measurement_params params = {
8651 .channel = le16_to_cpu(priv->active_rxon.channel),
8652 .start_time = cpu_to_le64(priv->last_tsf),
8653 .duration = cpu_to_le16(1),
8654 };
8655 u8 type = IWL_MEASURE_BASIC;
8656 u8 buffer[32];
8657 u8 channel;
8658
8659 if (count) {
8660 char *p = buffer;
 8661 		strncpy(buffer, buf, min(sizeof(buffer) - 1, count));
 		buffer[sizeof(buffer) - 1] = '\0'; /* ensure termination for strtoul */
8662 channel = simple_strtoul(p, NULL, 0);
8663 if (channel)
8664 params.channel = channel;
8665
8666 p = buffer;
8667 while (*p && *p != ' ')
8668 p++;
8669 if (*p)
8670 type = simple_strtoul(p + 1, NULL, 0);
8671 }
8672
8673 IWL_DEBUG_INFO("Invoking measurement of type %d on "
8674 "channel %d (for '%s')\n", type, params.channel, buf);
bb8c093b 8675 iwl4965_get_measurement(priv, &params, type);
b481de9c
ZY
8676
8677 return count;
8678}
8679
8680static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR,
8681 show_measurement, store_measurement);
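/*
 * store_measurement() parses the write as "<channel> [<type>]"; with no
 * arguments it measures on the current RXON channel using the default
 * IWL_MEASURE_BASIC type.
 */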
c8b0e6e1 8682#endif /* CONFIG_IWL4965_SPECTRUM_MEASUREMENT */
b481de9c
ZY
8683
8684static ssize_t store_retry_rate(struct device *d,
8685 struct device_attribute *attr,
8686 const char *buf, size_t count)
8687{
bb8c093b 8688 struct iwl4965_priv *priv = dev_get_drvdata(d);
b481de9c
ZY
8689
8690 priv->retry_rate = simple_strtoul(buf, NULL, 0);
8691 if (priv->retry_rate <= 0)
8692 priv->retry_rate = 1;
8693
8694 return count;
8695}
8696
8697static ssize_t show_retry_rate(struct device *d,
8698 struct device_attribute *attr, char *buf)
8699{
bb8c093b 8700 struct iwl4965_priv *priv = dev_get_drvdata(d);
b481de9c
ZY
8701 return sprintf(buf, "%d", priv->retry_rate);
8702}
8703
8704static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, show_retry_rate,
8705 store_retry_rate);
8706
8707static ssize_t store_power_level(struct device *d,
8708 struct device_attribute *attr,
8709 const char *buf, size_t count)
8710{
bb8c093b 8711 struct iwl4965_priv *priv = dev_get_drvdata(d);
b481de9c
ZY
8712 int rc;
8713 int mode;
8714
8715 mode = simple_strtoul(buf, NULL, 0);
8716 mutex_lock(&priv->mutex);
8717
bb8c093b 8718 if (!iwl4965_is_ready(priv)) {
b481de9c
ZY
8719 rc = -EAGAIN;
8720 goto out;
8721 }
8722
8723 if ((mode < 1) || (mode > IWL_POWER_LIMIT) || (mode == IWL_POWER_AC))
8724 mode = IWL_POWER_AC;
8725 else
8726 mode |= IWL_POWER_ENABLED;
8727
8728 if (mode != priv->power_mode) {
bb8c093b 8729 rc = iwl4965_send_power_mode(priv, IWL_POWER_LEVEL(mode));
b481de9c
ZY
8730 if (rc) {
8731 IWL_DEBUG_MAC80211("failed setting power mode.\n");
8732 goto out;
8733 }
8734 priv->power_mode = mode;
8735 }
8736
8737 rc = count;
8738
8739 out:
8740 mutex_unlock(&priv->mutex);
8741 return rc;
8742}
8743
8744#define MAX_WX_STRING 80
8745
 8746 /* Values are in microseconds */
8747static const s32 timeout_duration[] = {
8748 350000,
8749 250000,
8750 75000,
8751 37000,
8752 25000,
8753};
8754static const s32 period_duration[] = {
8755 400000,
8756 700000,
8757 1000000,
8758 1000000,
8759 1000000
8760};
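/*
 * Power-save levels other than AC/CAM and battery index these tables as
 * [level - 1]; show_power_level() below reports both values in ms.
 */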
8761
8762static ssize_t show_power_level(struct device *d,
8763 struct device_attribute *attr, char *buf)
8764{
bb8c093b 8765 struct iwl4965_priv *priv = dev_get_drvdata(d);
b481de9c
ZY
8766 int level = IWL_POWER_LEVEL(priv->power_mode);
8767 char *p = buf;
8768
8769 p += sprintf(p, "%d ", level);
8770 switch (level) {
8771 case IWL_POWER_MODE_CAM:
8772 case IWL_POWER_AC:
8773 p += sprintf(p, "(AC)");
8774 break;
8775 case IWL_POWER_BATTERY:
8776 p += sprintf(p, "(BATTERY)");
8777 break;
8778 default:
8779 p += sprintf(p,
8780 "(Timeout %dms, Period %dms)",
8781 timeout_duration[level - 1] / 1000,
8782 period_duration[level - 1] / 1000);
8783 }
8784
8785 if (!(priv->power_mode & IWL_POWER_ENABLED))
8786 p += sprintf(p, " OFF\n");
8787 else
8788 p += sprintf(p, " \n");
8789
8790 return (p - buf + 1);
8791
8792}
8793
8794static DEVICE_ATTR(power_level, S_IWUSR | S_IRUSR, show_power_level,
8795 store_power_level);
8796
8797static ssize_t show_channels(struct device *d,
8798 struct device_attribute *attr, char *buf)
8799{
bb8c093b 8800 struct iwl4965_priv *priv = dev_get_drvdata(d);
b481de9c
ZY
8801 int len = 0, i;
8802 struct ieee80211_channel *channels = NULL;
8803 const struct ieee80211_hw_mode *hw_mode = NULL;
8804 int count = 0;
8805
bb8c093b 8806 if (!iwl4965_is_ready(priv))
b481de9c
ZY
8807 return -EAGAIN;
8808
bb8c093b 8809 hw_mode = iwl4965_get_hw_mode(priv, MODE_IEEE80211G);
b481de9c 8810 if (!hw_mode)
bb8c093b 8811 hw_mode = iwl4965_get_hw_mode(priv, MODE_IEEE80211B);
b481de9c
ZY
8812 if (hw_mode) {
8813 channels = hw_mode->channels;
8814 count = hw_mode->num_channels;
8815 }
8816
8817 len +=
8818 sprintf(&buf[len],
8819 "Displaying %d channels in 2.4GHz band "
8820 "(802.11bg):\n", count);
8821
8822 for (i = 0; i < count; i++)
8823 len += sprintf(&buf[len], "%d: %ddBm: BSS%s%s, %s.\n",
8824 channels[i].chan,
8825 channels[i].power_level,
8826 channels[i].
8827 flag & IEEE80211_CHAN_W_RADAR_DETECT ?
8828 " (IEEE 802.11h required)" : "",
8829 (!(channels[i].flag & IEEE80211_CHAN_W_IBSS)
8830 || (channels[i].
8831 flag &
8832 IEEE80211_CHAN_W_RADAR_DETECT)) ? "" :
8833 ", IBSS",
8834 channels[i].
8835 flag & IEEE80211_CHAN_W_ACTIVE_SCAN ?
8836 "active/passive" : "passive only");
8837
bb8c093b 8838 hw_mode = iwl4965_get_hw_mode(priv, MODE_IEEE80211A);
b481de9c
ZY
8839 if (hw_mode) {
8840 channels = hw_mode->channels;
8841 count = hw_mode->num_channels;
8842 } else {
8843 channels = NULL;
8844 count = 0;
8845 }
8846
8847 len += sprintf(&buf[len], "Displaying %d channels in 5.2GHz band "
8848 "(802.11a):\n", count);
8849
8850 for (i = 0; i < count; i++)
8851 len += sprintf(&buf[len], "%d: %ddBm: BSS%s%s, %s.\n",
8852 channels[i].chan,
8853 channels[i].power_level,
8854 channels[i].
8855 flag & IEEE80211_CHAN_W_RADAR_DETECT ?
8856 " (IEEE 802.11h required)" : "",
8857 (!(channels[i].flag & IEEE80211_CHAN_W_IBSS)
8858 || (channels[i].
8859 flag &
8860 IEEE80211_CHAN_W_RADAR_DETECT)) ? "" :
8861 ", IBSS",
8862 channels[i].
8863 flag & IEEE80211_CHAN_W_ACTIVE_SCAN ?
8864 "active/passive" : "passive only");
8865
8866 return len;
8867}
8868
8869static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL);
8870
8871static ssize_t show_statistics(struct device *d,
8872 struct device_attribute *attr, char *buf)
8873{
bb8c093b
CH
8874 struct iwl4965_priv *priv = dev_get_drvdata(d);
8875 u32 size = sizeof(struct iwl4965_notif_statistics);
b481de9c
ZY
8876 u32 len = 0, ofs = 0;
 8877 	u8 *data = (u8 *)&priv->statistics;
8878 int rc = 0;
8879
bb8c093b 8880 if (!iwl4965_is_alive(priv))
b481de9c
ZY
8881 return -EAGAIN;
8882
8883 mutex_lock(&priv->mutex);
bb8c093b 8884 rc = iwl4965_send_statistics_request(priv);
b481de9c
ZY
8885 mutex_unlock(&priv->mutex);
8886
8887 if (rc) {
8888 len = sprintf(buf,
8889 "Error sending statistics request: 0x%08X\n", rc);
8890 return len;
8891 }
8892
8893 while (size && (PAGE_SIZE - len)) {
8894 hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
8895 PAGE_SIZE - len, 1);
8896 len = strlen(buf);
8897 if (PAGE_SIZE - len)
8898 buf[len++] = '\n';
8899
8900 ofs += 16;
8901 size -= min(size, 16U);
8902 }
8903
8904 return len;
8905}
8906
8907static DEVICE_ATTR(statistics, S_IRUGO, show_statistics, NULL);
8908
8909static ssize_t show_antenna(struct device *d,
8910 struct device_attribute *attr, char *buf)
8911{
bb8c093b 8912 struct iwl4965_priv *priv = dev_get_drvdata(d);
b481de9c 8913
bb8c093b 8914 if (!iwl4965_is_alive(priv))
b481de9c
ZY
8915 return -EAGAIN;
8916
8917 return sprintf(buf, "%d\n", priv->antenna);
8918}
8919
8920static ssize_t store_antenna(struct device *d,
8921 struct device_attribute *attr,
8922 const char *buf, size_t count)
8923{
8924 int ant;
bb8c093b 8925 struct iwl4965_priv *priv = dev_get_drvdata(d);
b481de9c
ZY
8926
8927 if (count == 0)
8928 return 0;
8929
8930 if (sscanf(buf, "%1i", &ant) != 1) {
8931 IWL_DEBUG_INFO("not in hex or decimal form.\n");
8932 return count;
8933 }
8934
8935 if ((ant >= 0) && (ant <= 2)) {
8936 IWL_DEBUG_INFO("Setting antenna select to %d.\n", ant);
bb8c093b 8937 priv->antenna = (enum iwl4965_antenna)ant;
b481de9c
ZY
8938 } else
8939 IWL_DEBUG_INFO("Bad antenna select value %d.\n", ant);
8940
8941
8942 return count;
8943}
8944
8945static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, show_antenna, store_antenna);
8946
8947static ssize_t show_status(struct device *d,
8948 struct device_attribute *attr, char *buf)
8949{
bb8c093b
CH
8950 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
8951 if (!iwl4965_is_alive(priv))
b481de9c
ZY
8952 return -EAGAIN;
8953 return sprintf(buf, "0x%08x\n", (int)priv->status);
8954}
8955
8956static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
8957
8958static ssize_t dump_error_log(struct device *d,
8959 struct device_attribute *attr,
8960 const char *buf, size_t count)
8961{
8962 char *p = (char *)buf;
8963
8964 if (p[0] == '1')
bb8c093b 8965 iwl4965_dump_nic_error_log((struct iwl4965_priv *)d->driver_data);
b481de9c
ZY
8966
8967 return strnlen(buf, count);
8968}
8969
8970static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, dump_error_log);
8971
8972static ssize_t dump_event_log(struct device *d,
8973 struct device_attribute *attr,
8974 const char *buf, size_t count)
8975{
8976 char *p = (char *)buf;
8977
8978 if (p[0] == '1')
bb8c093b 8979 iwl4965_dump_nic_event_log((struct iwl4965_priv *)d->driver_data);
b481de9c
ZY
8980
8981 return strnlen(buf, count);
8982}
8983
8984static DEVICE_ATTR(dump_events, S_IWUSR, NULL, dump_event_log);
8985
8986/*****************************************************************************
8987 *
8988 * driver setup and teardown
8989 *
8990 *****************************************************************************/
8991
bb8c093b 8992static void iwl4965_setup_deferred_work(struct iwl4965_priv *priv)
b481de9c
ZY
8993{
8994 priv->workqueue = create_workqueue(DRV_NAME);
8995
8996 init_waitqueue_head(&priv->wait_command_queue);
8997
bb8c093b
CH
8998 INIT_WORK(&priv->up, iwl4965_bg_up);
8999 INIT_WORK(&priv->restart, iwl4965_bg_restart);
9000 INIT_WORK(&priv->rx_replenish, iwl4965_bg_rx_replenish);
9001 INIT_WORK(&priv->scan_completed, iwl4965_bg_scan_completed);
9002 INIT_WORK(&priv->request_scan, iwl4965_bg_request_scan);
9003 INIT_WORK(&priv->abort_scan, iwl4965_bg_abort_scan);
9004 INIT_WORK(&priv->rf_kill, iwl4965_bg_rf_kill);
9005 INIT_WORK(&priv->beacon_update, iwl4965_bg_beacon_update);
9006 INIT_DELAYED_WORK(&priv->post_associate, iwl4965_bg_post_associate);
9007 INIT_DELAYED_WORK(&priv->init_alive_start, iwl4965_bg_init_alive_start);
9008 INIT_DELAYED_WORK(&priv->alive_start, iwl4965_bg_alive_start);
9009 INIT_DELAYED_WORK(&priv->scan_check, iwl4965_bg_scan_check);
9010
9011 iwl4965_hw_setup_deferred_work(priv);
b481de9c
ZY
9012
9013 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
bb8c093b 9014 iwl4965_irq_tasklet, (unsigned long)priv);
b481de9c
ZY
9015}
9016
bb8c093b 9017static void iwl4965_cancel_deferred_work(struct iwl4965_priv *priv)
b481de9c 9018{
bb8c093b 9019 iwl4965_hw_cancel_deferred_work(priv);
b481de9c 9020
3ae6a054 9021 cancel_delayed_work_sync(&priv->init_alive_start);
b481de9c
ZY
9022 cancel_delayed_work(&priv->scan_check);
9023 cancel_delayed_work(&priv->alive_start);
9024 cancel_delayed_work(&priv->post_associate);
9025 cancel_work_sync(&priv->beacon_update);
9026}
9027
bb8c093b 9028static struct attribute *iwl4965_sysfs_entries[] = {
b481de9c
ZY
9029 &dev_attr_antenna.attr,
9030 &dev_attr_channels.attr,
9031 &dev_attr_dump_errors.attr,
9032 &dev_attr_dump_events.attr,
9033 &dev_attr_flags.attr,
9034 &dev_attr_filter_flags.attr,
c8b0e6e1 9035#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT
b481de9c
ZY
9036 &dev_attr_measurement.attr,
9037#endif
9038 &dev_attr_power_level.attr,
9039 &dev_attr_retry_rate.attr,
9040 &dev_attr_rf_kill.attr,
9041 &dev_attr_rs_window.attr,
9042 &dev_attr_statistics.attr,
9043 &dev_attr_status.attr,
9044 &dev_attr_temperature.attr,
9045 &dev_attr_tune.attr,
9046 &dev_attr_tx_power.attr,
9047
9048 NULL
9049};
9050
bb8c093b 9051static struct attribute_group iwl4965_attribute_group = {
b481de9c 9052 .name = NULL, /* put in device directory */
bb8c093b 9053 .attrs = iwl4965_sysfs_entries,
b481de9c
ZY
9054};
9055
bb8c093b
CH
9056static struct ieee80211_ops iwl4965_hw_ops = {
9057 .tx = iwl4965_mac_tx,
9058 .start = iwl4965_mac_start,
9059 .stop = iwl4965_mac_stop,
9060 .add_interface = iwl4965_mac_add_interface,
9061 .remove_interface = iwl4965_mac_remove_interface,
9062 .config = iwl4965_mac_config,
9063 .config_interface = iwl4965_mac_config_interface,
9064 .configure_filter = iwl4965_configure_filter,
9065 .set_key = iwl4965_mac_set_key,
9066 .get_stats = iwl4965_mac_get_stats,
9067 .get_tx_stats = iwl4965_mac_get_tx_stats,
9068 .conf_tx = iwl4965_mac_conf_tx,
9069 .get_tsf = iwl4965_mac_get_tsf,
9070 .reset_tsf = iwl4965_mac_reset_tsf,
9071 .beacon_update = iwl4965_mac_beacon_update,
9072 .erp_ie_changed = iwl4965_mac_erp_ie_changed,
c8b0e6e1 9073#ifdef CONFIG_IWL4965_HT
bb8c093b
CH
9074 .conf_ht = iwl4965_mac_conf_ht,
9075 .get_ht_capab = iwl4965_mac_get_ht_capab,
c8b0e6e1 9076#ifdef CONFIG_IWL4965_HT_AGG
bb8c093b
CH
9077 .ht_tx_agg_start = iwl4965_mac_ht_tx_agg_start,
9078 .ht_tx_agg_stop = iwl4965_mac_ht_tx_agg_stop,
9079 .ht_rx_agg_start = iwl4965_mac_ht_rx_agg_start,
9080 .ht_rx_agg_stop = iwl4965_mac_ht_rx_agg_stop,
c8b0e6e1
CH
9081#endif /* CONFIG_IWL4965_HT_AGG */
9082#endif /* CONFIG_IWL4965_HT */
bb8c093b 9083 .hw_scan = iwl4965_mac_hw_scan
b481de9c
ZY
9084};
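/*
 * Note: iwl4965_pci_probe() clears .hw_scan at runtime when the
 * disable_hw_scan module parameter is set, making mac80211 fall back to
 * software scanning.
 */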
9085
bb8c093b 9086static int iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
b481de9c
ZY
9087{
9088 int err = 0;
bb8c093b 9089 struct iwl4965_priv *priv;
b481de9c
ZY
9090 struct ieee80211_hw *hw;
9091 int i;
9092
6440adb5
CB
9093 /* Disabling hardware scan means that mac80211 will perform scans
9094 * "the hard way", rather than using device's scan. */
bb8c093b 9095 if (iwl4965_param_disable_hw_scan) {
b481de9c 9096 IWL_DEBUG_INFO("Disabling hw_scan\n");
bb8c093b 9097 iwl4965_hw_ops.hw_scan = NULL;
b481de9c
ZY
9098 }
9099
bb8c093b
CH
9100 if ((iwl4965_param_queues_num > IWL_MAX_NUM_QUEUES) ||
9101 (iwl4965_param_queues_num < IWL_MIN_NUM_QUEUES)) {
b481de9c
ZY
9102 IWL_ERROR("invalid queues_num, should be between %d and %d\n",
9103 IWL_MIN_NUM_QUEUES, IWL_MAX_NUM_QUEUES);
9104 err = -EINVAL;
9105 goto out;
9106 }
9107
9108 /* mac80211 allocates memory for this device instance, including
9109 * space for this driver's private structure */
bb8c093b 9110 hw = ieee80211_alloc_hw(sizeof(struct iwl4965_priv), &iwl4965_hw_ops);
b481de9c
ZY
9111 if (hw == NULL) {
9112 IWL_ERROR("Can not allocate network device\n");
9113 err = -ENOMEM;
9114 goto out;
9115 }
9116 SET_IEEE80211_DEV(hw, &pdev->dev);
9117
f51359a8
JB
9118 hw->rate_control_algorithm = "iwl-4965-rs";
9119
b481de9c
ZY
9120 IWL_DEBUG_INFO("*** LOAD DRIVER ***\n");
9121 priv = hw->priv;
9122 priv->hw = hw;
9123
9124 priv->pci_dev = pdev;
bb8c093b 9125 priv->antenna = (enum iwl4965_antenna)iwl4965_param_antenna;
c8b0e6e1 9126#ifdef CONFIG_IWL4965_DEBUG
bb8c093b 9127 iwl4965_debug_level = iwl4965_param_debug;
b481de9c
ZY
9128 atomic_set(&priv->restrict_refcnt, 0);
9129#endif
9130 priv->retry_rate = 1;
9131
9132 priv->ibss_beacon = NULL;
9133
9134 /* Tell mac80211 and its clients (e.g. Wireless Extensions)
9135 * the range of signal quality values that we'll provide.
9136 * Negative values for level/noise indicate that we'll provide dBm.
9137 * For WE, at least, non-0 values here *enable* display of values
9138 * in app (iwconfig). */
9139 hw->max_rssi = -20; /* signal level, negative indicates dBm */
9140 hw->max_noise = -20; /* noise level, negative indicates dBm */
9141 hw->max_signal = 100; /* link quality indication (%) */
9142
9143 /* Tell mac80211 our Tx characteristics */
9144 hw->flags = IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE;
9145
6440adb5 9146 /* Default value; 4 EDCA QOS priorities */
b481de9c 9147 hw->queues = 4;
c8b0e6e1
CH
9148#ifdef CONFIG_IWL4965_HT
9149#ifdef CONFIG_IWL4965_HT_AGG
6440adb5 9150 /* Enhanced value; more queues, to support 11n aggregation */
b481de9c 9151 hw->queues = 16;
c8b0e6e1
CH
9152#endif /* CONFIG_IWL4965_HT_AGG */
9153#endif /* CONFIG_IWL4965_HT */
b481de9c
ZY
9154
9155 spin_lock_init(&priv->lock);
9156 spin_lock_init(&priv->power_data.lock);
9157 spin_lock_init(&priv->sta_lock);
9158 spin_lock_init(&priv->hcmd_lock);
9159 spin_lock_init(&priv->lq_mngr.lock);
9160
9161 for (i = 0; i < IWL_IBSS_MAC_HASH_SIZE; i++)
9162 INIT_LIST_HEAD(&priv->ibss_mac_hash[i]);
9163
9164 INIT_LIST_HEAD(&priv->free_frames);
9165
9166 mutex_init(&priv->mutex);
9167 if (pci_enable_device(pdev)) {
9168 err = -ENODEV;
9169 goto out_ieee80211_free_hw;
9170 }
9171
9172 pci_set_master(pdev);
9173
6440adb5 9174 /* Clear the driver's (not device's) station table */
bb8c093b 9175 iwl4965_clear_stations_table(priv);
b481de9c
ZY
9176
9177 priv->data_retry_limit = -1;
9178 priv->ieee_channels = NULL;
9179 priv->ieee_rates = NULL;
9180 priv->phymode = -1;
9181
9182 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
9183 if (!err)
9184 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
9185 if (err) {
9186 printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n");
9187 goto out_pci_disable_device;
9188 }
9189
9190 pci_set_drvdata(pdev, priv);
9191 err = pci_request_regions(pdev, DRV_NAME);
9192 if (err)
9193 goto out_pci_disable_device;
6440adb5 9194
b481de9c
ZY
9195 /* We disable the RETRY_TIMEOUT register (0x41) to keep
9196 * PCI Tx retries from interfering with C3 CPU state */
9197 pci_write_config_byte(pdev, 0x41, 0x00);
6440adb5 9198
b481de9c
ZY
9199 priv->hw_base = pci_iomap(pdev, 0, 0);
9200 if (!priv->hw_base) {
9201 err = -ENODEV;
9202 goto out_pci_release_regions;
9203 }
9204
9205 IWL_DEBUG_INFO("pci_resource_len = 0x%08llx\n",
9206 (unsigned long long) pci_resource_len(pdev, 0));
9207 IWL_DEBUG_INFO("pci_resource_base = %p\n", priv->hw_base);
9208
9209 /* Initialize module parameter values here */
9210
6440adb5 9211 /* Disable radio (SW RF KILL) via parameter when loading driver */
bb8c093b 9212 if (iwl4965_param_disable) {
b481de9c
ZY
9213 set_bit(STATUS_RF_KILL_SW, &priv->status);
9214 IWL_DEBUG_INFO("Radio disabled.\n");
9215 }
9216
9217 priv->iw_mode = IEEE80211_IF_TYPE_STA;
9218
9219 priv->ps_mode = 0;
9220 priv->use_ant_b_for_management_frame = 1; /* start with ant B */
9221 priv->is_ht_enabled = 1;
9222 priv->channel_width = IWL_CHANNEL_WIDTH_40MHZ;
9223 priv->valid_antenna = 0x7; /* assume all 3 connected */
9224 priv->ps_mode = IWL_MIMO_PS_NONE;
b481de9c 9225
6440adb5 9226 /* Choose which receivers/antennas to use */
b481de9c
ZY
9227 iwl4965_set_rxon_chain(priv);
9228
9229 printk(KERN_INFO DRV_NAME
9230 ": Detected Intel Wireless WiFi Link 4965AGN\n");
9231
9232 /* Device-specific setup */
bb8c093b 9233 if (iwl4965_hw_set_hw_setting(priv)) {
b481de9c
ZY
9234 IWL_ERROR("failed to set hw settings\n");
9235 mutex_unlock(&priv->mutex);
9236 goto out_iounmap;
9237 }
9238
c8b0e6e1 9239#ifdef CONFIG_IWL4965_QOS
bb8c093b 9240 if (iwl4965_param_qos_enable)
b481de9c
ZY
9241 priv->qos_data.qos_enable = 1;
9242
bb8c093b 9243 iwl4965_reset_qos(priv);
b481de9c
ZY
9244
9245 priv->qos_data.qos_active = 0;
9246 priv->qos_data.qos_cap.val = 0;
c8b0e6e1 9247#endif /* CONFIG_IWL4965_QOS */
b481de9c 9248
bb8c093b
CH
9249 iwl4965_set_rxon_channel(priv, MODE_IEEE80211G, 6);
9250 iwl4965_setup_deferred_work(priv);
9251 iwl4965_setup_rx_handlers(priv);
b481de9c
ZY
9252
9253 priv->rates_mask = IWL_RATES_MASK;
9254 /* If power management is turned on, default to AC mode */
9255 priv->power_mode = IWL_POWER_AC;
9256 priv->user_txpower_limit = IWL_DEFAULT_TX_POWER;
9257
bb8c093b 9258 iwl4965_disable_interrupts(priv);
49df2b33 9259
b481de9c
ZY
9260 pci_enable_msi(pdev);
9261
bb8c093b 9262 err = request_irq(pdev->irq, iwl4965_isr, IRQF_SHARED, DRV_NAME, priv);
b481de9c
ZY
9263 if (err) {
9264 IWL_ERROR("Error allocating IRQ %d\n", pdev->irq);
9265 goto out_disable_msi;
9266 }
9267
9268 mutex_lock(&priv->mutex);
9269
bb8c093b 9270 err = sysfs_create_group(&pdev->dev.kobj, &iwl4965_attribute_group);
b481de9c
ZY
9271 if (err) {
9272 IWL_ERROR("failed to create sysfs device attributes\n");
9273 mutex_unlock(&priv->mutex);
9274 goto out_release_irq;
9275 }
9276
9277 /* fetch ucode file from disk, alloc and copy to bus-master buffers ...
9278 * ucode filename and max sizes are card-specific. */
bb8c093b 9279 err = iwl4965_read_ucode(priv);
b481de9c
ZY
9280 if (err) {
9281 IWL_ERROR("Could not read microcode: %d\n", err);
9282 mutex_unlock(&priv->mutex);
9283 goto out_pci_alloc;
9284 }
9285
9286 mutex_unlock(&priv->mutex);
9287
01ebd063 9288 IWL_DEBUG_INFO("Queueing UP work.\n");
b481de9c
ZY
9289
9290 queue_work(priv->workqueue, &priv->up);
9291
9292 return 0;
9293
9294 out_pci_alloc:
bb8c093b 9295 iwl4965_dealloc_ucode_pci(priv);
b481de9c 9296
bb8c093b 9297 sysfs_remove_group(&pdev->dev.kobj, &iwl4965_attribute_group);
b481de9c
ZY
9298
9299 out_release_irq:
9300 free_irq(pdev->irq, priv);
9301
9302 out_disable_msi:
9303 pci_disable_msi(pdev);
9304 destroy_workqueue(priv->workqueue);
9305 priv->workqueue = NULL;
bb8c093b 9306 iwl4965_unset_hw_setting(priv);
b481de9c
ZY
9307
9308 out_iounmap:
9309 pci_iounmap(pdev, priv->hw_base);
9310 out_pci_release_regions:
9311 pci_release_regions(pdev);
9312 out_pci_disable_device:
9313 pci_disable_device(pdev);
9314 pci_set_drvdata(pdev, NULL);
9315 out_ieee80211_free_hw:
9316 ieee80211_free_hw(priv->hw);
9317 out:
9318 return err;
9319}
9320
bb8c093b 9321static void iwl4965_pci_remove(struct pci_dev *pdev)
b481de9c 9322{
bb8c093b 9323 struct iwl4965_priv *priv = pci_get_drvdata(pdev);
b481de9c
ZY
9324 struct list_head *p, *q;
9325 int i;
9326
9327 if (!priv)
9328 return;
9329
9330 IWL_DEBUG_INFO("*** UNLOAD DRIVER ***\n");
9331
b481de9c 9332 set_bit(STATUS_EXIT_PENDING, &priv->status);
b24d22b1 9333
bb8c093b 9334 iwl4965_down(priv);
b481de9c
ZY
9335
9336 /* Free MAC hash list for ADHOC */
9337 for (i = 0; i < IWL_IBSS_MAC_HASH_SIZE; i++) {
9338 list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) {
9339 list_del(p);
bb8c093b 9340 kfree(list_entry(p, struct iwl4965_ibss_seq, list));
b481de9c
ZY
9341 }
9342 }
9343
bb8c093b 9344 sysfs_remove_group(&pdev->dev.kobj, &iwl4965_attribute_group);
b481de9c 9345
bb8c093b 9346 iwl4965_dealloc_ucode_pci(priv);
b481de9c
ZY
9347
9348 if (priv->rxq.bd)
bb8c093b
CH
9349 iwl4965_rx_queue_free(priv, &priv->rxq);
9350 iwl4965_hw_txq_ctx_free(priv);
b481de9c 9351
bb8c093b
CH
9352 iwl4965_unset_hw_setting(priv);
9353 iwl4965_clear_stations_table(priv);
b481de9c
ZY
9354
9355 if (priv->mac80211_registered) {
9356 ieee80211_unregister_hw(priv->hw);
bb8c093b 9357 iwl4965_rate_control_unregister(priv->hw);
b481de9c
ZY
9358 }
9359
948c171c
MA
9360 /*netif_stop_queue(dev); */
9361 flush_workqueue(priv->workqueue);
9362
bb8c093b 9363 /* ieee80211_unregister_hw calls iwl4965_mac_stop, which flushes
b481de9c
ZY
9364 * priv->workqueue... so we can't take down the workqueue
9365 * until now... */
9366 destroy_workqueue(priv->workqueue);
9367 priv->workqueue = NULL;
9368
9369 free_irq(pdev->irq, priv);
9370 pci_disable_msi(pdev);
9371 pci_iounmap(pdev, priv->hw_base);
9372 pci_release_regions(pdev);
9373 pci_disable_device(pdev);
9374 pci_set_drvdata(pdev, NULL);
9375
9376 kfree(priv->channel_info);
9377
9378 kfree(priv->ieee_channels);
9379 kfree(priv->ieee_rates);
9380
9381 if (priv->ibss_beacon)
9382 dev_kfree_skb(priv->ibss_beacon);
9383
9384 ieee80211_free_hw(priv->hw);
9385}
9386
9387#ifdef CONFIG_PM
9388
bb8c093b 9389static int iwl4965_pci_suspend(struct pci_dev *pdev, pm_message_t state)
b481de9c 9390{
bb8c093b 9391 struct iwl4965_priv *priv = pci_get_drvdata(pdev);
b481de9c 9392
b481de9c
ZY
9393 set_bit(STATUS_IN_SUSPEND, &priv->status);
9394
9395 /* Take down the device; powers it off, etc. */
bb8c093b 9396 iwl4965_down(priv);
b481de9c
ZY
9397
9398 if (priv->mac80211_registered)
9399 ieee80211_stop_queues(priv->hw);
9400
9401 pci_save_state(pdev);
9402 pci_disable_device(pdev);
9403 pci_set_power_state(pdev, PCI_D3hot);
9404
b481de9c
ZY
9405 return 0;
9406}
9407
bb8c093b 9408static void iwl4965_resume(struct iwl4965_priv *priv)
b481de9c
ZY
9409{
9410 unsigned long flags;
9411
 9412 	/* The following is a temporary workaround due to the
9413 * suspend / resume not fully initializing the NIC correctly.
9414 * Without all of the following, resume will not attempt to take
9415 * down the NIC (it shouldn't really need to) and will just try
9416 * and bring the NIC back up. However that fails during the
bb8c093b
CH
9417 * ucode verification process. This then causes iwl4965_down to be
9418 * called *after* iwl4965_hw_nic_init() has succeeded -- which
b481de9c
ZY
9419 * then lets the next init sequence succeed. So, we've
9420 * replicated all of that NIC init code here... */
9421
bb8c093b 9422 iwl4965_write32(priv, CSR_INT, 0xFFFFFFFF);
b481de9c 9423
bb8c093b 9424 iwl4965_hw_nic_init(priv);
b481de9c 9425
bb8c093b
CH
9426 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
9427 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_CLR,
b481de9c 9428 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
bb8c093b
CH
9429 iwl4965_write32(priv, CSR_INT, 0xFFFFFFFF);
9430 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
9431 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
b481de9c
ZY
9432
9433 /* tell the device to stop sending interrupts */
bb8c093b 9434 iwl4965_disable_interrupts(priv);
b481de9c
ZY
9435
9436 spin_lock_irqsave(&priv->lock, flags);
bb8c093b 9437 iwl4965_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
b481de9c 9438
bb8c093b
CH
9439 if (!iwl4965_grab_nic_access(priv)) {
9440 iwl4965_write_prph(priv, APMG_CLK_DIS_REG,
ac17a947 9441 APMG_CLK_VAL_DMA_CLK_RQT);
bb8c093b 9442 iwl4965_release_nic_access(priv);
b481de9c
ZY
9443 }
9444 spin_unlock_irqrestore(&priv->lock, flags);
9445
9446 udelay(5);
9447
bb8c093b 9448 iwl4965_hw_nic_reset(priv);
b481de9c
ZY
9449
9450 /* Bring the device back up */
9451 clear_bit(STATUS_IN_SUSPEND, &priv->status);
9452 queue_work(priv->workqueue, &priv->up);
9453}
9454
bb8c093b 9455static int iwl4965_pci_resume(struct pci_dev *pdev)
b481de9c 9456{
bb8c093b 9457 struct iwl4965_priv *priv = pci_get_drvdata(pdev);
b481de9c
ZY
9458 int err;
9459
9460 printk(KERN_INFO "Coming out of suspend...\n");
9461
b481de9c
ZY
9462 pci_set_power_state(pdev, PCI_D0);
9463 err = pci_enable_device(pdev);
9464 pci_restore_state(pdev);
9465
9466 /*
9467 * Suspend/Resume resets the PCI configuration space, so we have to
9468 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
9469 * from interfering with C3 CPU state. pci_restore_state won't help
9470 * here since it only restores the first 64 bytes pci config header.
9471 */
9472 pci_write_config_byte(pdev, 0x41, 0x00);
9473
bb8c093b 9474 iwl4965_resume(priv);
b481de9c
ZY
9475
9476 return 0;
9477}
9478
9479#endif /* CONFIG_PM */
9480
9481/*****************************************************************************
9482 *
9483 * driver and module entry point
9484 *
9485 *****************************************************************************/
9486
bb8c093b 9487static struct pci_driver iwl4965_driver = {
b481de9c 9488 .name = DRV_NAME,
bb8c093b
CH
9489 .id_table = iwl4965_hw_card_ids,
9490 .probe = iwl4965_pci_probe,
9491 .remove = __devexit_p(iwl4965_pci_remove),
b481de9c 9492#ifdef CONFIG_PM
bb8c093b
CH
9493 .suspend = iwl4965_pci_suspend,
9494 .resume = iwl4965_pci_resume,
b481de9c
ZY
9495#endif
9496};
9497
bb8c093b 9498static int __init iwl4965_init(void)
b481de9c
ZY
9499{
9500
9501 int ret;
9502 printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
9503 printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");
bb8c093b 9504 ret = pci_register_driver(&iwl4965_driver);
b481de9c
ZY
9505 if (ret) {
9506 IWL_ERROR("Unable to initialize PCI module\n");
9507 return ret;
9508 }
c8b0e6e1 9509#ifdef CONFIG_IWL4965_DEBUG
bb8c093b 9510 ret = driver_create_file(&iwl4965_driver.driver, &driver_attr_debug_level);
b481de9c
ZY
9511 if (ret) {
9512 IWL_ERROR("Unable to create driver sysfs file\n");
bb8c093b 9513 pci_unregister_driver(&iwl4965_driver);
b481de9c
ZY
9514 return ret;
9515 }
9516#endif
9517
9518 return ret;
9519}
9520
bb8c093b 9521static void __exit iwl4965_exit(void)
b481de9c 9522{
c8b0e6e1 9523#ifdef CONFIG_IWL4965_DEBUG
bb8c093b 9524 driver_remove_file(&iwl4965_driver.driver, &driver_attr_debug_level);
b481de9c 9525#endif
bb8c093b 9526 pci_unregister_driver(&iwl4965_driver);
b481de9c
ZY
9527}
9528
bb8c093b 9529module_param_named(antenna, iwl4965_param_antenna, int, 0444);
b481de9c 9530MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
bb8c093b 9531module_param_named(disable, iwl4965_param_disable, int, 0444);
b481de9c 9532MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
bb8c093b 9533module_param_named(hwcrypto, iwl4965_param_hwcrypto, int, 0444);
b481de9c
ZY
9534MODULE_PARM_DESC(hwcrypto,
9535 "using hardware crypto engine (default 0 [software])\n");
bb8c093b 9536module_param_named(debug, iwl4965_param_debug, int, 0444);
b481de9c 9537MODULE_PARM_DESC(debug, "debug output mask");
bb8c093b 9538module_param_named(disable_hw_scan, iwl4965_param_disable_hw_scan, int, 0444);
b481de9c
ZY
9539MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)");
9540
bb8c093b 9541module_param_named(queues_num, iwl4965_param_queues_num, int, 0444);
b481de9c
ZY
9542MODULE_PARM_DESC(queues_num, "number of hw queues.");
9543
9544/* QoS */
bb8c093b 9545module_param_named(qos_enable, iwl4965_param_qos_enable, int, 0444);
b481de9c
ZY
9546MODULE_PARM_DESC(qos_enable, "enable all QoS functionality");
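/*
 * Illustrative module load, assuming the module is named after DRV_NAME
 * ("iwl4965"); the parameter values are examples only:
 *
 *   modprobe iwl4965 disable_hw_scan=1 queues_num=16 qos_enable=1
 */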
9547
bb8c093b
CH
9548module_exit(iwl4965_exit);
9549module_init(iwl4965_init);