iwl3945: add debugging for wrong command queue
drivers/net/wireless/iwlwifi/iwl3945-base.c
/******************************************************************************
 *
 * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/wireless.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>
#include <linux/if_arp.h>

#include <net/ieee80211_radiotap.h>
#include <net/lib80211.h>
#include <net/mac80211.h>

#include <asm/div64.h>

#define DRV_NAME "iwl3945"

#include "iwl-fh.h"
#include "iwl-3945-fh.h"
#include "iwl-commands.h"
#include "iwl-3945.h"
#include "iwl-helpers.h"
#include "iwl-core.h"
#include "iwl-dev.h"

static int iwl3945_tx_queue_update_write_ptr(struct iwl_priv *priv,
					     struct iwl_tx_queue *txq);

/*
 * module name, copyright, version, etc.
 */

#define DRV_DESCRIPTION	\
"Intel(R) PRO/Wireless 3945ABG/BG Network Connection driver for Linux"

#ifdef CONFIG_IWL3945_DEBUG
#define VD "d"
#else
#define VD
#endif

#ifdef CONFIG_IWL3945_SPECTRUM_MEASUREMENT
#define VS "s"
#else
#define VS
#endif

#define IWL39_VERSION "1.2.26k" VD VS
#define DRV_COPYRIGHT	"Copyright(c) 2003-2009 Intel Corporation"
#define DRV_AUTHOR	"<ilw@linux.intel.com>"
#define DRV_VERSION	IWL39_VERSION


MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
MODULE_LICENSE("GPL");

 /* module parameters */
struct iwl_mod_params iwl3945_mod_params = {
	.num_of_queues = IWL39_MAX_NUM_QUEUES,
	.sw_crypto = 1,
	/* the rest are 0 by default */
};

/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill.  Driver and device exchange status of each
 * queue via "read" and "write" pointers.  Driver keeps minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels.  Each queue is mapped to a single DMA channel.
 *
 * For Tx queues, there are low mark and high mark limits.  If, after queuing
 * a packet for Tx, free space becomes < low mark, the Tx queue is stopped.
 * When reclaiming packets (on a 'tx done' IRQ), if free space becomes
 * > high mark, the Tx queue is resumed.
 *
 * The 3945 operates with six queues:  One receive queue, one transmit queue
 * (#4) for sending commands to the device firmware, and four transmit queues
 * (#0-3) for data tx via EDCA.  An additional 2 HCCA queues are unused.
 ***************************************************/

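/*
 * Illustrative sketch (not part of the driver): the wrap arithmetic the
 * circular buffers described above rely on.  It assumes n_bd is a power of
 * two, which iwl3945_queue_init() below enforces; the driver itself uses
 * iwl_queue_inc_wrap()/iwl_queue_dec_wrap() from iwl-helpers.h, and the
 * names here are examples only.
 */
static inline int example_queue_inc_wrap(int index, int n_bd)
{
	/* with a power-of-two ring size the wrap is a simple mask */
	return ++index & (n_bd - 1);
}

static inline int example_queue_dec_wrap(int index, int n_bd)
{
	return --index & (n_bd - 1);
}
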
/**
 * iwl3945_queue_init - Initialize queue's high/low-water and read/write indexes
 */
static int iwl3945_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
			      int count, int slots_num, u32 id)
{
	q->n_bd = count;
	q->n_window = slots_num;
	q->id = id;

	/* count must be power-of-two size, otherwise iwl_queue_inc_wrap
	 * and iwl_queue_dec_wrap are broken. */
	BUG_ON(!is_power_of_2(count));

	/* slots_num must be power-of-two size, otherwise
	 * get_cmd_index is broken. */
	BUG_ON(!is_power_of_2(slots_num));

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = q->read_ptr = 0;

	return 0;
}

/**
 * iwl3945_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
 */
static int iwl3945_tx_queue_alloc(struct iwl_priv *priv,
				  struct iwl_tx_queue *txq, u32 id)
{
	struct pci_dev *dev = priv->pci_dev;

	/* Driver private data, only for Tx (not command) queues,
	 * not shared with device. */
	if (id != IWL_CMD_QUEUE_NUM) {
		txq->txb = kmalloc(sizeof(txq->txb[0]) *
				   TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
		if (!txq->txb) {
			IWL_ERR(priv, "kmalloc for auxiliary BD "
				"structures failed\n");
			goto error;
		}
	} else
		txq->txb = NULL;

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->tfds39 = pci_alloc_consistent(dev,
			sizeof(txq->tfds39[0]) * TFD_QUEUE_SIZE_MAX,
			&txq->q.dma_addr);

	if (!txq->tfds39) {
		IWL_ERR(priv, "pci_alloc_consistent(%zd) failed\n",
			sizeof(txq->tfds39[0]) * TFD_QUEUE_SIZE_MAX);
		goto error;
	}
	txq->q.id = id;

	return 0;

 error:
	kfree(txq->txb);
	txq->txb = NULL;

	return -ENOMEM;
}

/**
 * iwl3945_tx_queue_init - Allocate and initialize one tx/cmd queue
 */
int iwl3945_tx_queue_init(struct iwl_priv *priv,
			  struct iwl_tx_queue *txq, int slots_num, u32 txq_id)
{
	int len, i;
	int rc = 0;

	/*
	 * Alloc buffer array for commands (Tx or other types of commands).
	 * For the command queue (#4), allocate command space + one big
	 * command for scan, since scan command is very huge; the system will
	 * not have two scans at the same time, so only one is needed.
	 * For data Tx queues (all other queues), no super-size command
	 * space is needed.
	 */
	len = sizeof(struct iwl_cmd);
	for (i = 0; i <= slots_num; i++) {
		if (i == slots_num) {
			if (txq_id == IWL_CMD_QUEUE_NUM)
				len += IWL_MAX_SCAN_SIZE;
			else
				continue;
		}

		txq->cmd[i] = kmalloc(len, GFP_KERNEL);
		if (!txq->cmd[i])
			goto err;
	}

	/* Alloc driver data array and TFD circular buffer */
	rc = iwl3945_tx_queue_alloc(priv, txq, txq_id);
	if (rc)
		goto err;

	txq->need_update = 0;

	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

	/* Initialize queue high/low-water, head/tail indexes */
	iwl3945_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);

	/* Tell device where to find queue, enable DMA channel. */
	iwl3945_hw_tx_queue_init(priv, txq);

	return 0;
err:
	for (i = 0; i < slots_num; i++) {
		kfree(txq->cmd[i]);
		txq->cmd[i] = NULL;
	}

	if (txq_id == IWL_CMD_QUEUE_NUM) {
		kfree(txq->cmd[slots_num]);
		txq->cmd[slots_num] = NULL;
	}
	return -ENOMEM;
}

/**
 * iwl3945_tx_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
void iwl3945_tx_queue_free(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
	struct iwl_queue *q = &txq->q;
	struct pci_dev *dev = priv->pci_dev;
	int len, i;

	if (q->n_bd == 0)
		return;

	/* first, empty all BD's */
	for (; q->write_ptr != q->read_ptr;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd))
		iwl3945_hw_txq_free_tfd(priv, txq);

	len = sizeof(struct iwl_cmd) * q->n_window;
	if (q->id == IWL_CMD_QUEUE_NUM)
		len += IWL_MAX_SCAN_SIZE;

	/* De-alloc array of command/tx buffers */
	for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
		kfree(txq->cmd[i]);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd)
		pci_free_consistent(dev, sizeof(struct iwl3945_tfd) *
				    txq->q.n_bd, txq->tfds39, txq->q.dma_addr);

	/* De-alloc array of per-TFD driver data */
	kfree(txq->txb);
	txq->txb = NULL;

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}

/*************** STATION TABLE MANAGEMENT ****
 * mac80211 should be examined to determine if sta_info is duplicating
 * the functionality provided here
 */

/**************************************************************/
#if 0 /* temporary disable till we add real remove station */
/**
 * iwl3945_remove_station - Remove driver's knowledge of station.
 *
 * NOTE:  This does not remove station from device's station table.
 */
static u8 iwl3945_remove_station(struct iwl_priv *priv, const u8 *addr, int is_ap)
{
	int index = IWL_INVALID_STATION;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&priv->sta_lock, flags);

	if (is_ap)
		index = IWL_AP_ID;
	else if (is_broadcast_ether_addr(addr))
		index = priv->hw_params.bcast_sta_id;
	else
		for (i = IWL_STA_ID; i < priv->hw_params.max_stations; i++)
			if (priv->stations_39[i].used &&
			    !compare_ether_addr(priv->stations_39[i].sta.sta.addr,
						addr)) {
				index = i;
				break;
			}

	if (unlikely(index == IWL_INVALID_STATION))
		goto out;

	if (priv->stations_39[index].used) {
		priv->stations_39[index].used = 0;
		priv->num_stations--;
	}

	BUG_ON(priv->num_stations < 0);

out:
	spin_unlock_irqrestore(&priv->sta_lock, flags);
	return 0;
}
#endif

/**
 * iwl3945_clear_stations_table - Clear the driver's station table
 *
 * NOTE:  This does not clear or otherwise alter the device's station table.
 */
static void iwl3945_clear_stations_table(struct iwl_priv *priv)
{
	unsigned long flags;

	spin_lock_irqsave(&priv->sta_lock, flags);

	priv->num_stations = 0;
	memset(priv->stations_39, 0, sizeof(priv->stations_39));

	spin_unlock_irqrestore(&priv->sta_lock, flags);
}

/**
 * iwl3945_add_station - Add station to station tables in driver and device
 */
u8 iwl3945_add_station(struct iwl_priv *priv, const u8 *addr, int is_ap, u8 flags)
{
	int i;
	int index = IWL_INVALID_STATION;
	struct iwl3945_station_entry *station;
	unsigned long flags_spin;
	u8 rate;

	spin_lock_irqsave(&priv->sta_lock, flags_spin);
	if (is_ap)
		index = IWL_AP_ID;
	else if (is_broadcast_ether_addr(addr))
		index = priv->hw_params.bcast_sta_id;
	else
		for (i = IWL_STA_ID; i < priv->hw_params.max_stations; i++) {
			if (!compare_ether_addr(priv->stations_39[i].sta.sta.addr,
						addr)) {
				index = i;
				break;
			}

			if (!priv->stations_39[i].used &&
			    index == IWL_INVALID_STATION)
				index = i;
		}

	/* These two conditions have the same outcome, but keep them separate
	   since they have different meanings */
	if (unlikely(index == IWL_INVALID_STATION)) {
		spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
		return index;
	}

	if (priv->stations_39[index].used &&
	    !compare_ether_addr(priv->stations_39[index].sta.sta.addr, addr)) {
		spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
		return index;
	}

	IWL_DEBUG_ASSOC("Add STA ID %d: %pM\n", index, addr);
	station = &priv->stations_39[index];
	station->used = 1;
	priv->num_stations++;

	/* Set up the REPLY_ADD_STA command to send to device */
	memset(&station->sta, 0, sizeof(struct iwl3945_addsta_cmd));
	memcpy(station->sta.sta.addr, addr, ETH_ALEN);
	station->sta.mode = 0;
	station->sta.sta.sta_id = index;
	station->sta.station_flags = 0;

	if (priv->band == IEEE80211_BAND_5GHZ)
		rate = IWL_RATE_6M_PLCP;
	else
		rate = IWL_RATE_1M_PLCP;

	/* Turn on both antennas for the station... */
	station->sta.rate_n_flags =
			iwl3945_hw_set_rate_n_flags(rate, RATE_MCS_ANT_AB_MSK);

	spin_unlock_irqrestore(&priv->sta_lock, flags_spin);

	/* Add station to device's station table */
	iwl3945_send_add_station(priv, &station->sta, flags);
	return index;

}


/*************** HOST COMMAND QUEUE FUNCTIONS   *****/

#define IWL_CMD(x) case x: return #x
#define HOST_COMPLETE_TIMEOUT (HZ / 2)

/**
 * iwl3945_enqueue_hcmd - enqueue a uCode command
 * @priv: device private data point
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns < 0 values to indicate that the operation
 * failed. On success, it returns the index (> 0) of the command in the
 * command queue.
 */
static int iwl3945_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
	struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
	struct iwl_queue *q = &txq->q;
	struct iwl3945_tfd *tfd;
	struct iwl_cmd *out_cmd;
	u32 idx;
	u16 fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));
	dma_addr_t phys_addr;
	int pad;
	int ret, len;
	unsigned long flags;

	/* If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE, and it is sent as a 'small' command then
	 * we will need to increase the size of the TFD entries */
	BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
	       !(cmd->meta.flags & CMD_SIZE_HUGE));


	if (iwl_is_rfkill(priv)) {
		IWL_DEBUG_INFO("Not sending command - RF KILL");
		return -EIO;
	}

	if (iwl_queue_space(q) < ((cmd->meta.flags & CMD_ASYNC) ? 2 : 1)) {
		IWL_ERR(priv, "No space for Tx\n");
		return -ENOSPC;
	}

	spin_lock_irqsave(&priv->hcmd_lock, flags);

	tfd = &txq->tfds39[q->write_ptr];
	memset(tfd, 0, sizeof(*tfd));

	idx = get_cmd_index(q, q->write_ptr, cmd->meta.flags & CMD_SIZE_HUGE);
	out_cmd = txq->cmd[idx];

	out_cmd->hdr.cmd = cmd->id;
	memcpy(&out_cmd->meta, &cmd->meta, sizeof(cmd->meta));
	memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);

	/* At this point, the out_cmd now has all of the incoming cmd
	 * information */

	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) |
			INDEX_TO_SEQ(q->write_ptr));
	if (out_cmd->meta.flags & CMD_SIZE_HUGE)
		out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;

	len = (idx == TFD_CMD_SLOTS) ?
			IWL_MAX_SCAN_SIZE : sizeof(struct iwl_cmd);

	phys_addr = pci_map_single(priv->pci_dev, out_cmd,
				   len, PCI_DMA_TODEVICE);
	pci_unmap_addr_set(&out_cmd->meta, mapping, phys_addr);
	pci_unmap_len_set(&out_cmd->meta, len, len);
	phys_addr += offsetof(struct iwl_cmd, hdr);

	iwl3945_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, fix_size);

	pad = U32_PAD(cmd->len);
	tfd->control_flags |= cpu_to_le32(TFD_CTL_PAD_SET(pad));

	IWL_DEBUG_HC("Sending command %s (#%x), seq: 0x%04X, "
		     "%d bytes at %d[%d]:%d\n",
		     get_cmd_string(out_cmd->hdr.cmd),
		     out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
		     fix_size, q->write_ptr, idx, IWL_CMD_QUEUE_NUM);

	txq->need_update = 1;

	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	ret = iwl3945_tx_queue_update_write_ptr(priv, txq);

	spin_unlock_irqrestore(&priv->hcmd_lock, flags);
	return ret ? ret : idx;
}

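/*
 * Illustrative sketch (not part of the driver): how the sequence field built
 * in iwl3945_enqueue_hcmd() above ties a host command to its queue and slot,
 * so a completion can later be matched back to the command queue.  The real
 * QUEUE_TO_SEQ/INDEX_TO_SEQ/SEQ_HUGE_FRAME definitions live in
 * iwl-commands.h; the bit layout below is an assumption for illustration.
 */
static inline u16 example_build_hcmd_sequence(u8 txq_id, u8 index, int huge)
{
	u16 seq = ((txq_id & 0x1f) << 8) | index;	/* queue id, slot index */

	if (huge)
		seq |= 0x4000;	/* command lives in the over-size (scan) slot */
	return seq;
}
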
static int iwl3945_send_cmd_async(struct iwl_priv *priv,
				  struct iwl_host_cmd *cmd)
{
	int ret;

	BUG_ON(!(cmd->meta.flags & CMD_ASYNC));

	/* An asynchronous command can not expect an SKB to be set. */
	BUG_ON(cmd->meta.flags & CMD_WANT_SKB);

	/* An asynchronous command MUST have a callback. */
	BUG_ON(!cmd->meta.u.callback);

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return -EBUSY;

	ret = iwl3945_enqueue_hcmd(priv, cmd);
	if (ret < 0) {
		IWL_ERR(priv,
			"Error sending %s: iwl3945_enqueue_hcmd failed: %d\n",
			get_cmd_string(cmd->id), ret);
		return ret;
	}
	return 0;
}

static int iwl3945_send_cmd_sync(struct iwl_priv *priv,
				 struct iwl_host_cmd *cmd)
{
	int cmd_idx;
	int ret;

	BUG_ON(cmd->meta.flags & CMD_ASYNC);

	/* A synchronous command can not have a callback set. */
	BUG_ON(cmd->meta.u.callback != NULL);

	if (test_and_set_bit(STATUS_HCMD_SYNC_ACTIVE, &priv->status)) {
		IWL_ERR(priv,
			"Error sending %s: Already sending a host command\n",
			get_cmd_string(cmd->id));
		ret = -EBUSY;
		goto out;
	}

	set_bit(STATUS_HCMD_ACTIVE, &priv->status);

	if (cmd->meta.flags & CMD_WANT_SKB)
		cmd->meta.source = &cmd->meta;

	cmd_idx = iwl3945_enqueue_hcmd(priv, cmd);
	if (cmd_idx < 0) {
		ret = cmd_idx;
		IWL_ERR(priv,
			"Error sending %s: iwl3945_enqueue_hcmd failed: %d\n",
			get_cmd_string(cmd->id), ret);
		goto out;
	}

	ret = wait_event_interruptible_timeout(priv->wait_command_queue,
			!test_bit(STATUS_HCMD_ACTIVE, &priv->status),
			HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		if (test_bit(STATUS_HCMD_ACTIVE, &priv->status)) {
			IWL_ERR(priv, "Error sending %s: time out after %dms\n",
				get_cmd_string(cmd->id),
				jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

			clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
			ret = -ETIMEDOUT;
			goto cancel;
		}
	}

	if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
		IWL_DEBUG_INFO("Command %s aborted: RF KILL Switch\n",
			       get_cmd_string(cmd->id));
		ret = -ECANCELED;
		goto fail;
	}
	if (test_bit(STATUS_FW_ERROR, &priv->status)) {
		IWL_DEBUG_INFO("Command %s failed: FW Error\n",
			       get_cmd_string(cmd->id));
		ret = -EIO;
		goto fail;
	}
	if ((cmd->meta.flags & CMD_WANT_SKB) && !cmd->meta.u.skb) {
		IWL_ERR(priv, "Error: Response NULL in '%s'\n",
			get_cmd_string(cmd->id));
		ret = -EIO;
		goto cancel;
	}

	ret = 0;
	goto out;

cancel:
	if (cmd->meta.flags & CMD_WANT_SKB) {
		struct iwl_cmd *qcmd;

		/* Cancel the CMD_WANT_SKB flag for the cmd in the
		 * TX cmd queue. Otherwise in case the cmd comes
		 * in later, it will possibly set an invalid
		 * address (cmd->meta.source). */
		qcmd = priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_idx];
		qcmd->meta.flags &= ~CMD_WANT_SKB;
	}
fail:
	if (cmd->meta.u.skb) {
		dev_kfree_skb_any(cmd->meta.u.skb);
		cmd->meta.u.skb = NULL;
	}
out:
	clear_bit(STATUS_HCMD_SYNC_ACTIVE, &priv->status);
	return ret;
}

int iwl3945_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
	if (cmd->meta.flags & CMD_ASYNC)
		return iwl3945_send_cmd_async(priv, cmd);

	return iwl3945_send_cmd_sync(priv, cmd);
}

int iwl3945_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len, const void *data)
{
	struct iwl_host_cmd cmd = {
		.id = id,
		.len = len,
		.data = data,
	};

	return iwl3945_send_cmd_sync(priv, &cmd);
}

static int __must_check iwl3945_send_cmd_u32(struct iwl_priv *priv, u8 id, u32 val)
{
	struct iwl_host_cmd cmd = {
		.id = id,
		.len = sizeof(val),
		.data = &val,
	};

	return iwl3945_send_cmd_sync(priv, &cmd);
}

int iwl3945_send_statistics_request(struct iwl_priv *priv)
{
	return iwl3945_send_cmd_u32(priv, REPLY_STATISTICS_CMD, 0);
}

/**
 * iwl3945_set_rxon_channel - Set the phymode and channel values in staging RXON
 * @band: 2.4 or 5 GHz band
 * @channel: Any channel valid for the requested band
 *
 * In addition to setting the staging RXON, priv->band is also set.
 *
 * NOTE:  Does not commit to the hardware; it sets appropriate bit fields
 * in the staging RXON flag structure based on the band
 */
static int iwl3945_set_rxon_channel(struct iwl_priv *priv,
				    enum ieee80211_band band,
				    u16 channel)
{
	if (!iwl3945_get_channel_info(priv, band, channel)) {
		IWL_DEBUG_INFO("Could not set channel to %d [%d]\n",
			       channel, band);
		return -EINVAL;
	}

	if ((le16_to_cpu(priv->staging39_rxon.channel) == channel) &&
	    (priv->band == band))
		return 0;

	priv->staging39_rxon.channel = cpu_to_le16(channel);
	if (band == IEEE80211_BAND_5GHZ)
		priv->staging39_rxon.flags &= ~RXON_FLG_BAND_24G_MSK;
	else
		priv->staging39_rxon.flags |= RXON_FLG_BAND_24G_MSK;

	priv->band = band;

	IWL_DEBUG_INFO("Staging channel set to %d [%d]\n", channel, band);

	return 0;
}

/**
 * iwl3945_check_rxon_cmd - validate RXON structure is valid
 *
 * NOTE:  This is really only useful during development and can eventually
 * be #ifdef'd out once the driver is stable and folks aren't actively
 * making changes
 */
static int iwl3945_check_rxon_cmd(struct iwl_priv *priv)
{
	int error = 0;
	int counter = 1;
	struct iwl3945_rxon_cmd *rxon = &priv->staging39_rxon;

	if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
		error |= le32_to_cpu(rxon->flags &
				(RXON_FLG_TGJ_NARROW_BAND_MSK |
				 RXON_FLG_RADAR_DETECT_MSK));
		if (error)
			IWL_WARN(priv, "check 24G fields %d | %d\n",
				 counter++, error);
	} else {
		error |= (rxon->flags & RXON_FLG_SHORT_SLOT_MSK) ?
				0 : le32_to_cpu(RXON_FLG_SHORT_SLOT_MSK);
		if (error)
			IWL_WARN(priv, "check 52 fields %d | %d\n",
				 counter++, error);
		error |= le32_to_cpu(rxon->flags & RXON_FLG_CCK_MSK);
		if (error)
			IWL_WARN(priv, "check 52 CCK %d | %d\n",
				 counter++, error);
	}
	error |= (rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1;
	if (error)
		IWL_WARN(priv, "check mac addr %d | %d\n", counter++, error);

	/* make sure basic rates 6Mbps and 1Mbps are supported */
	error |= (((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0) &&
		  ((rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0));
	if (error)
		IWL_WARN(priv, "check basic rate %d | %d\n", counter++, error);

	error |= (le16_to_cpu(rxon->assoc_id) > 2007);
	if (error)
		IWL_WARN(priv, "check assoc id %d | %d\n", counter++, error);

	error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK))
			== (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK));
	if (error)
		IWL_WARN(priv, "check CCK and short slot %d | %d\n",
			 counter++, error);

	error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
			== (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK));
	if (error)
		IWL_WARN(priv, "check CCK & auto detect %d | %d\n",
			 counter++, error);

	error |= ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK |
			RXON_FLG_TGG_PROTECT_MSK)) == RXON_FLG_TGG_PROTECT_MSK);
	if (error)
		IWL_WARN(priv, "check TGG and auto detect %d | %d\n",
			 counter++, error);

	if ((rxon->flags & RXON_FLG_DIS_DIV_MSK))
		error |= ((rxon->flags & (RXON_FLG_ANT_B_MSK |
				RXON_FLG_ANT_A_MSK)) == 0);
	if (error)
		IWL_WARN(priv, "check antenna %d %d\n", counter++, error);

	if (error)
		IWL_WARN(priv, "Tuning to channel %d\n",
			 le16_to_cpu(rxon->channel));

	if (error) {
		IWL_ERR(priv, "Not a valid rxon_assoc_cmd field values\n");
		return -1;
	}
	return 0;
}

/**
 * iwl3945_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
 * @priv: staging_rxon is compared to active_rxon
 *
 * If the RXON structure is changing enough to require a new tune,
 * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
 * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
 */
static int iwl3945_full_rxon_required(struct iwl_priv *priv)
{

	/* These items are only settable from the full RXON command */
	if (!(iwl3945_is_associated(priv)) ||
	    compare_ether_addr(priv->staging39_rxon.bssid_addr,
			       priv->active39_rxon.bssid_addr) ||
	    compare_ether_addr(priv->staging39_rxon.node_addr,
			       priv->active39_rxon.node_addr) ||
	    compare_ether_addr(priv->staging39_rxon.wlap_bssid_addr,
			       priv->active39_rxon.wlap_bssid_addr) ||
	    (priv->staging39_rxon.dev_type != priv->active39_rxon.dev_type) ||
	    (priv->staging39_rxon.channel != priv->active39_rxon.channel) ||
	    (priv->staging39_rxon.air_propagation !=
	     priv->active39_rxon.air_propagation) ||
	    (priv->staging39_rxon.assoc_id != priv->active39_rxon.assoc_id))
		return 1;

	/* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
	 * be updated with the RXON_ASSOC command -- however only some
	 * flag transitions are allowed using RXON_ASSOC */

	/* Check if we are not switching bands */
	if ((priv->staging39_rxon.flags & RXON_FLG_BAND_24G_MSK) !=
	    (priv->active39_rxon.flags & RXON_FLG_BAND_24G_MSK))
		return 1;

	/* Check if we are switching association toggle */
	if ((priv->staging39_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) !=
	    (priv->active39_rxon.filter_flags & RXON_FILTER_ASSOC_MSK))
		return 1;

	return 0;
}

static int iwl3945_send_rxon_assoc(struct iwl_priv *priv)
{
	int rc = 0;
	struct iwl_rx_packet *res = NULL;
	struct iwl3945_rxon_assoc_cmd rxon_assoc;
	struct iwl_host_cmd cmd = {
		.id = REPLY_RXON_ASSOC,
		.len = sizeof(rxon_assoc),
		.meta.flags = CMD_WANT_SKB,
		.data = &rxon_assoc,
	};
	const struct iwl3945_rxon_cmd *rxon1 = &priv->staging39_rxon;
	const struct iwl3945_rxon_cmd *rxon2 = &priv->active39_rxon;

	if ((rxon1->flags == rxon2->flags) &&
	    (rxon1->filter_flags == rxon2->filter_flags) &&
	    (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
	    (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
		IWL_DEBUG_INFO("Using current RXON_ASSOC.  Not resending.\n");
		return 0;
	}

	rxon_assoc.flags = priv->staging39_rxon.flags;
	rxon_assoc.filter_flags = priv->staging39_rxon.filter_flags;
	rxon_assoc.ofdm_basic_rates = priv->staging39_rxon.ofdm_basic_rates;
	rxon_assoc.cck_basic_rates = priv->staging39_rxon.cck_basic_rates;
	rxon_assoc.reserved = 0;

	rc = iwl3945_send_cmd_sync(priv, &cmd);
	if (rc)
		return rc;

	res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
	if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
		IWL_ERR(priv, "Bad return from REPLY_RXON_ASSOC command\n");
		rc = -EIO;
	}

	priv->alloc_rxb_skb--;
	dev_kfree_skb_any(cmd.meta.u.skb);

	return rc;
}

/**
 * iwl3945_commit_rxon - commit staging_rxon to hardware
 *
 * The RXON command in staging_rxon is committed to the hardware and
 * the active_rxon structure is updated with the new data.  This
 * function correctly transitions out of the RXON_ASSOC_MSK state if
 * a HW tune is required based on the RXON structure changes.
 */
static int iwl3945_commit_rxon(struct iwl_priv *priv)
{
	/* cast away the const for active_rxon in this function */
	struct iwl3945_rxon_cmd *active_rxon = (void *)&priv->active39_rxon;
	int rc = 0;

	if (!iwl_is_alive(priv))
		return -1;

	/* always get timestamp with Rx frame */
	priv->staging39_rxon.flags |= RXON_FLG_TSF2HOST_MSK;

	/* select antenna */
	priv->staging39_rxon.flags &=
	    ~(RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_SEL_MSK);
	priv->staging39_rxon.flags |= iwl3945_get_antenna_flags(priv);

	rc = iwl3945_check_rxon_cmd(priv);
	if (rc) {
		IWL_ERR(priv, "Invalid RXON configuration.  Not committing.\n");
		return -EINVAL;
	}

	/* If we don't need to send a full RXON, we can use
	 * iwl3945_rxon_assoc_cmd which is used to reconfigure filter
	 * and other flags for the current radio configuration. */
	if (!iwl3945_full_rxon_required(priv)) {
		rc = iwl3945_send_rxon_assoc(priv);
		if (rc) {
			IWL_ERR(priv, "Error setting RXON_ASSOC "
				"configuration (%d).\n", rc);
			return rc;
		}

		memcpy(active_rxon, &priv->staging39_rxon, sizeof(*active_rxon));

		return 0;
	}

	/* If we are currently associated and the new config requires
	 * an RXON_ASSOC and the new config wants the associated mask enabled,
	 * we must clear the associated from the active configuration
	 * before we apply the new config */
	if (iwl3945_is_associated(priv) &&
	    (priv->staging39_rxon.filter_flags & RXON_FILTER_ASSOC_MSK)) {
		IWL_DEBUG_INFO("Toggling associated bit on current RXON\n");
		active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;

		rc = iwl3945_send_cmd_pdu(priv, REPLY_RXON,
					  sizeof(struct iwl3945_rxon_cmd),
					  &priv->active39_rxon);

		/* If the mask clearing failed then we set
		 * active_rxon back to what it was previously */
		if (rc) {
			active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
			IWL_ERR(priv, "Error clearing ASSOC_MSK on current "
				"configuration (%d).\n", rc);
			return rc;
		}
	}

	IWL_DEBUG_INFO("Sending RXON\n"
		       "* with%s RXON_FILTER_ASSOC_MSK\n"
		       "* channel = %d\n"
		       "* bssid = %pM\n",
		       ((priv->staging39_rxon.filter_flags &
			 RXON_FILTER_ASSOC_MSK) ? "" : "out"),
		       le16_to_cpu(priv->staging39_rxon.channel),
		       priv->staging_rxon.bssid_addr);

	/* Apply the new configuration */
	rc = iwl3945_send_cmd_pdu(priv, REPLY_RXON,
			sizeof(struct iwl3945_rxon_cmd), &priv->staging39_rxon);
	if (rc) {
		IWL_ERR(priv, "Error setting new configuration (%d).\n", rc);
		return rc;
	}

	memcpy(active_rxon, &priv->staging39_rxon, sizeof(*active_rxon));

	iwl3945_clear_stations_table(priv);

	/* If we issue a new RXON command which required a tune then we must
	 * send a new TXPOWER command or we won't be able to Tx any frames */
	rc = iwl3945_hw_reg_send_txpower(priv);
	if (rc) {
		IWL_ERR(priv, "Error setting Tx power (%d).\n", rc);
		return rc;
	}

	/* Add the broadcast address so we can send broadcast frames */
	if (iwl3945_add_station(priv, iwl_bcast_addr, 0, 0) ==
	    IWL_INVALID_STATION) {
		IWL_ERR(priv, "Error adding BROADCAST address for transmit.\n");
		return -EIO;
	}

	/* If we have set the ASSOC_MSK and we are in BSS mode then
	 * add the IWL_AP_ID to the station rate table */
	if (iwl3945_is_associated(priv) &&
	    (priv->iw_mode == NL80211_IFTYPE_STATION))
		if (iwl3945_add_station(priv, priv->active39_rxon.bssid_addr, 1, 0)
		    == IWL_INVALID_STATION) {
			IWL_ERR(priv, "Error adding AP address for transmit\n");
			return -EIO;
		}

	/* Init the hardware's rate fallback order based on the band */
	rc = iwl3945_init_hw_rate_table(priv);
	if (rc) {
		IWL_ERR(priv, "Error setting HW rate table: %02X\n", rc);
		return -EIO;
	}

	return 0;
}

static int iwl3945_send_bt_config(struct iwl_priv *priv)
{
	struct iwl_bt_cmd bt_cmd = {
		.flags = 3,
		.lead_time = 0xAA,
		.max_kill = 1,
		.kill_ack_mask = 0,
		.kill_cts_mask = 0,
	};

	return iwl3945_send_cmd_pdu(priv, REPLY_BT_CONFIG,
				    sizeof(bt_cmd), &bt_cmd);
}

static int iwl3945_send_scan_abort(struct iwl_priv *priv)
{
	int rc = 0;
	struct iwl_rx_packet *res;
	struct iwl_host_cmd cmd = {
		.id = REPLY_SCAN_ABORT_CMD,
		.meta.flags = CMD_WANT_SKB,
	};

	/* If there isn't a scan actively going on in the hardware
	 * then we are in between scan bands and not actually
	 * actively scanning, so don't send the abort command */
	if (!test_bit(STATUS_SCAN_HW, &priv->status)) {
		clear_bit(STATUS_SCAN_ABORTING, &priv->status);
		return 0;
	}

	rc = iwl3945_send_cmd_sync(priv, &cmd);
	if (rc) {
		clear_bit(STATUS_SCAN_ABORTING, &priv->status);
		return rc;
	}

	res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
	if (res->u.status != CAN_ABORT_STATUS) {
		/* The scan abort will return 1 for success or
		 * 2 for "failure".  A failure condition can be
		 * due to simply not being in an active scan which
		 * can occur if we send the scan abort before the
		 * microcode has notified us that a scan is
		 * completed. */
		IWL_DEBUG_INFO("SCAN_ABORT returned %d.\n", res->u.status);
		clear_bit(STATUS_SCAN_ABORTING, &priv->status);
		clear_bit(STATUS_SCAN_HW, &priv->status);
	}

	dev_kfree_skb_any(cmd.meta.u.skb);

	return rc;
}

static int iwl3945_add_sta_sync_callback(struct iwl_priv *priv,
				struct iwl_cmd *cmd, struct sk_buff *skb)
{
	struct iwl_rx_packet *res = NULL;

	if (!skb) {
		IWL_ERR(priv, "Error: Response NULL in REPLY_ADD_STA.\n");
		return 1;
	}

	res = (struct iwl_rx_packet *)skb->data;
	if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
		IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n",
			res->hdr.flags);
		return 1;
	}

	switch (res->u.add_sta.status) {
	case ADD_STA_SUCCESS_MSK:
		break;
	default:
		break;
	}

	/* We didn't cache the SKB; let the caller free it */
	return 1;
}

int iwl3945_send_add_station(struct iwl_priv *priv,
			     struct iwl3945_addsta_cmd *sta, u8 flags)
{
	struct iwl_rx_packet *res = NULL;
	int rc = 0;
	struct iwl_host_cmd cmd = {
		.id = REPLY_ADD_STA,
		.len = sizeof(struct iwl3945_addsta_cmd),
		.meta.flags = flags,
		.data = sta,
	};

	if (flags & CMD_ASYNC)
		cmd.meta.u.callback = iwl3945_add_sta_sync_callback;
	else
		cmd.meta.flags |= CMD_WANT_SKB;

	rc = iwl3945_send_cmd(priv, &cmd);

	if (rc || (flags & CMD_ASYNC))
		return rc;

	res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
	if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
		IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n",
			res->hdr.flags);
		rc = -EIO;
	}

	if (rc == 0) {
		switch (res->u.add_sta.status) {
		case ADD_STA_SUCCESS_MSK:
			IWL_DEBUG_INFO("REPLY_ADD_STA PASSED\n");
			break;
		default:
			rc = -EIO;
			IWL_WARN(priv, "REPLY_ADD_STA failed\n");
			break;
		}
	}

	priv->alloc_rxb_skb--;
	dev_kfree_skb_any(cmd.meta.u.skb);

	return rc;
}

static int iwl3945_update_sta_key_info(struct iwl_priv *priv,
				       struct ieee80211_key_conf *keyconf,
				       u8 sta_id)
{
	unsigned long flags;
	__le16 key_flags = 0;

	switch (keyconf->alg) {
	case ALG_CCMP:
		key_flags |= STA_KEY_FLG_CCMP;
		key_flags |= cpu_to_le16(
				keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
		key_flags &= ~STA_KEY_FLG_INVALID;
		break;
	case ALG_TKIP:
	case ALG_WEP:
	default:
		return -EINVAL;
	}
	spin_lock_irqsave(&priv->sta_lock, flags);
	priv->stations_39[sta_id].keyinfo.alg = keyconf->alg;
	priv->stations_39[sta_id].keyinfo.keylen = keyconf->keylen;
	memcpy(priv->stations_39[sta_id].keyinfo.key, keyconf->key,
	       keyconf->keylen);

	memcpy(priv->stations_39[sta_id].sta.key.key, keyconf->key,
	       keyconf->keylen);
	priv->stations_39[sta_id].sta.key.key_flags = key_flags;
	priv->stations_39[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	priv->stations_39[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;

	spin_unlock_irqrestore(&priv->sta_lock, flags);

	IWL_DEBUG_INFO("hwcrypto: modify ucode station key info\n");
	iwl3945_send_add_station(priv, &priv->stations_39[sta_id].sta, 0);
	return 0;
}

static int iwl3945_clear_sta_key_info(struct iwl_priv *priv, u8 sta_id)
{
	unsigned long flags;

	spin_lock_irqsave(&priv->sta_lock, flags);
	memset(&priv->stations_39[sta_id].keyinfo, 0, sizeof(struct iwl3945_hw_key));
	memset(&priv->stations_39[sta_id].sta.key, 0,
	       sizeof(struct iwl4965_keyinfo));
	priv->stations_39[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC;
	priv->stations_39[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	priv->stations_39[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	IWL_DEBUG_INFO("hwcrypto: clear ucode station key info\n");
	iwl3945_send_add_station(priv, &priv->stations_39[sta_id].sta, 0);
	return 0;
}

static void iwl3945_clear_free_frames(struct iwl_priv *priv)
{
	struct list_head *element;

	IWL_DEBUG_INFO("%d frames on pre-allocated heap on clear.\n",
		       priv->frames_count);

	while (!list_empty(&priv->free_frames)) {
		element = priv->free_frames.next;
		list_del(element);
		kfree(list_entry(element, struct iwl3945_frame, list));
		priv->frames_count--;
	}

	if (priv->frames_count) {
		IWL_WARN(priv, "%d frames still in use.  Did we lose one?\n",
			 priv->frames_count);
		priv->frames_count = 0;
	}
}

static struct iwl3945_frame *iwl3945_get_free_frame(struct iwl_priv *priv)
{
	struct iwl3945_frame *frame;
	struct list_head *element;
	if (list_empty(&priv->free_frames)) {
		frame = kzalloc(sizeof(*frame), GFP_KERNEL);
		if (!frame) {
			IWL_ERR(priv, "Could not allocate frame!\n");
			return NULL;
		}

		priv->frames_count++;
		return frame;
	}

	element = priv->free_frames.next;
	list_del(element);
	return list_entry(element, struct iwl3945_frame, list);
}

static void iwl3945_free_frame(struct iwl_priv *priv, struct iwl3945_frame *frame)
{
	memset(frame, 0, sizeof(*frame));
	list_add(&frame->list, &priv->free_frames);
}

unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
				       struct ieee80211_hdr *hdr,
				       int left)
{

	if (!iwl3945_is_associated(priv) || !priv->ibss_beacon ||
	    ((priv->iw_mode != NL80211_IFTYPE_ADHOC) &&
	     (priv->iw_mode != NL80211_IFTYPE_AP)))
		return 0;

	if (priv->ibss_beacon->len > left)
		return 0;

	memcpy(hdr, priv->ibss_beacon->data, priv->ibss_beacon->len);

	return priv->ibss_beacon->len;
}

static u8 iwl3945_rate_get_lowest_plcp(struct iwl_priv *priv)
{
	u8 i;
	int rate_mask;

	/* Set rate mask*/
	if (priv->staging39_rxon.flags & RXON_FLG_BAND_24G_MSK)
		rate_mask = priv->active_rate_basic & IWL_CCK_RATES_MASK;
	else
		rate_mask = priv->active_rate_basic & IWL_OFDM_RATES_MASK;

	for (i = IWL_RATE_1M_INDEX; i != IWL_RATE_INVALID;
	     i = iwl3945_rates[i].next_ieee) {
		if (rate_mask & (1 << i))
			return iwl3945_rates[i].plcp;
	}

	/* No valid rate was found. Assign the lowest one */
	if (priv->staging39_rxon.flags & RXON_FLG_BAND_24G_MSK)
		return IWL_RATE_1M_PLCP;
	else
		return IWL_RATE_6M_PLCP;
}

static int iwl3945_send_beacon_cmd(struct iwl_priv *priv)
{
	struct iwl3945_frame *frame;
	unsigned int frame_size;
	int rc;
	u8 rate;

	frame = iwl3945_get_free_frame(priv);

	if (!frame) {
		IWL_ERR(priv, "Could not obtain free frame buffer for beacon "
			"command.\n");
		return -ENOMEM;
	}

	rate = iwl3945_rate_get_lowest_plcp(priv);

	frame_size = iwl3945_hw_get_beacon_cmd(priv, frame, rate);

	rc = iwl3945_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
				  &frame->u.cmd[0]);

	iwl3945_free_frame(priv, frame);

	return rc;
}

/******************************************************************************
 *
 * EEPROM related functions
 *
 ******************************************************************************/

static void get_eeprom_mac(struct iwl_priv *priv, u8 *mac)
{
	memcpy(mac, priv->eeprom39.mac_address, 6);
}

/*
 * Clear the OWNER_MSK, to establish driver (instead of uCode running on
 * embedded controller) as EEPROM reader; each read is a series of pulses
 * to/from the EEPROM chip, not a single event, so even reads could conflict
 * if they weren't arbitrated by some ownership mechanism.  Here, the driver
 * simply claims ownership, which should be safe when this function is called
 * (i.e. before loading uCode!).
 */
static inline int iwl3945_eeprom_acquire_semaphore(struct iwl_priv *priv)
{
	_iwl_clear_bit(priv, CSR_EEPROM_GP, CSR_EEPROM_GP_IF_OWNER_MSK);
	return 0;
}

/**
 * iwl3945_eeprom_init - read EEPROM contents
 *
 * Load the EEPROM contents from adapter into priv->eeprom39
 *
 * NOTE:  This routine uses the non-debug IO access functions.
 */
int iwl3945_eeprom_init(struct iwl_priv *priv)
{
	u16 *e = (u16 *)&priv->eeprom39;
	u32 gp = iwl_read32(priv, CSR_EEPROM_GP);
	int sz = sizeof(priv->eeprom39);
	int ret;
	u16 addr;

	/* The EEPROM structure has several padding buffers within it
	 * and when adding new EEPROM maps is subject to programmer errors
	 * which may be very difficult to identify without explicitly
	 * checking the resulting size of the eeprom map. */
	BUILD_BUG_ON(sizeof(priv->eeprom39) != IWL_EEPROM_IMAGE_SIZE);

	if ((gp & CSR_EEPROM_GP_VALID_MSK) == CSR_EEPROM_GP_BAD_SIGNATURE) {
		IWL_ERR(priv, "EEPROM not found, EEPROM_GP=0x%08x\n", gp);
		return -ENOENT;
	}

	/* Make sure driver (instead of uCode) is allowed to read EEPROM */
	ret = iwl3945_eeprom_acquire_semaphore(priv);
	if (ret < 0) {
		IWL_ERR(priv, "Failed to acquire EEPROM semaphore.\n");
		return -ENOENT;
	}

	/* eeprom is an array of 16bit values */
	for (addr = 0; addr < sz; addr += sizeof(u16)) {
		u32 r;

		_iwl_write32(priv, CSR_EEPROM_REG,
			     CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
		_iwl_clear_bit(priv, CSR_EEPROM_REG, CSR_EEPROM_REG_BIT_CMD);
		ret = iwl_poll_direct_bit(priv, CSR_EEPROM_REG,
					  CSR_EEPROM_REG_READ_VALID_MSK,
					  IWL_EEPROM_ACCESS_TIMEOUT);
		if (ret < 0) {
			IWL_ERR(priv, "Time out reading EEPROM[%d]\n", addr);
			return ret;
		}

		r = _iwl_read_direct32(priv, CSR_EEPROM_REG);
		e[addr / 2] = le16_to_cpu((__force __le16)(r >> 16));
	}

	return 0;
}

static void iwl3945_unset_hw_params(struct iwl_priv *priv)
{
	if (priv->shared_virt)
		pci_free_consistent(priv->pci_dev,
				    sizeof(struct iwl3945_shared),
				    priv->shared_virt,
				    priv->shared_phys);
}

/**
 * iwl3945_supported_rate_to_ie - fill in the supported rate in IE field
 *
 * return: the bit set for each supported rate inserted in the IE
 */
static u16 iwl3945_supported_rate_to_ie(u8 *ie, u16 supported_rate,
					u16 basic_rate, int *left)
{
	u16 ret_rates = 0, bit;
	int i;
	u8 *cnt = ie;
	u8 *rates = ie + 1;

	for (bit = 1, i = 0; i < IWL_RATE_COUNT; i++, bit <<= 1) {
		if (bit & supported_rate) {
			ret_rates |= bit;
			rates[*cnt] = iwl3945_rates[i].ieee |
				((bit & basic_rate) ? 0x80 : 0x00);
			(*cnt)++;
			(*left)--;
			if ((*left <= 0) ||
			    (*cnt >= IWL_SUPPORTED_RATES_IE_LEN))
				break;
		}
	}

	return ret_rates;
}

/**
 * iwl3945_fill_probe_req - fill in all required fields and IE for probe request
 */
static u16 iwl3945_fill_probe_req(struct iwl_priv *priv,
				  struct ieee80211_mgmt *frame,
				  int left)
{
	int len = 0;
	u8 *pos = NULL;
	u16 active_rates, ret_rates, cck_rates;

	/* Make sure there is enough space for the probe request,
	 * two mandatory IEs and the data */
	left -= 24;
	if (left < 0)
		return 0;
	len += 24;

	frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
	memcpy(frame->da, iwl_bcast_addr, ETH_ALEN);
	memcpy(frame->sa, priv->mac_addr, ETH_ALEN);
	memcpy(frame->bssid, iwl_bcast_addr, ETH_ALEN);
	frame->seq_ctrl = 0;

	/* fill in our indirect SSID IE */
	/* ...next IE... */

	left -= 2;
	if (left < 0)
		return 0;
	len += 2;
	pos = &(frame->u.probe_req.variable[0]);
	*pos++ = WLAN_EID_SSID;
	*pos++ = 0;

	/* fill in supported rate */
	/* ...next IE... */
	left -= 2;
	if (left < 0)
		return 0;

	/* ... fill it in... */
	*pos++ = WLAN_EID_SUPP_RATES;
	*pos = 0;

	priv->active_rate = priv->rates_mask;
	active_rates = priv->active_rate;
	priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK;

	cck_rates = IWL_CCK_RATES_MASK & active_rates;
	ret_rates = iwl3945_supported_rate_to_ie(pos, cck_rates,
			priv->active_rate_basic, &left);
	active_rates &= ~ret_rates;

	ret_rates = iwl3945_supported_rate_to_ie(pos, active_rates,
			priv->active_rate_basic, &left);
	active_rates &= ~ret_rates;

	len += 2 + *pos;
	pos += (*pos) + 1;
	if (active_rates == 0)
		goto fill_end;

	/* fill in supported extended rate */
	/* ...next IE... */
	left -= 2;
	if (left < 0)
		return 0;
	/* ... fill it in... */
	*pos++ = WLAN_EID_EXT_SUPP_RATES;
	*pos = 0;
	iwl3945_supported_rate_to_ie(pos, active_rates,
				     priv->active_rate_basic, &left);
	if (*pos > 0)
		len += 2 + *pos;

 fill_end:
	return (u16)len;
}

/*
 * QoS support
 */
static int iwl3945_send_qos_params_command(struct iwl_priv *priv,
					   struct iwl_qosparam_cmd *qos)
{

	return iwl3945_send_cmd_pdu(priv, REPLY_QOS_PARAM,
				    sizeof(struct iwl_qosparam_cmd), qos);
}

static void iwl3945_activate_qos(struct iwl_priv *priv, u8 force)
{
	unsigned long flags;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	spin_lock_irqsave(&priv->lock, flags);
	priv->qos_data.def_qos_parm.qos_flags = 0;

	if (priv->qos_data.qos_cap.q_AP.queue_request &&
	    !priv->qos_data.qos_cap.q_AP.txop_request)
		priv->qos_data.def_qos_parm.qos_flags |=
			QOS_PARAM_FLG_TXOP_TYPE_MSK;

	if (priv->qos_data.qos_active)
		priv->qos_data.def_qos_parm.qos_flags |=
			QOS_PARAM_FLG_UPDATE_EDCA_MSK;

	spin_unlock_irqrestore(&priv->lock, flags);

	if (force || iwl3945_is_associated(priv)) {
		IWL_DEBUG_QOS("send QoS cmd with QoS active %d \n",
			      priv->qos_data.qos_active);

		iwl3945_send_qos_params_command(priv,
				&(priv->qos_data.def_qos_parm));
	}
}

/*
 * Power management (not Tx power!) functions
 */
#define MSEC_TO_USEC 1024


#define NOSLP __constant_cpu_to_le16(0), 0, 0
#define SLP IWL_POWER_DRIVER_ALLOW_SLEEP_MSK, 0, 0
#define SLP_TIMEOUT(T) __constant_cpu_to_le32((T) * MSEC_TO_USEC)
#define SLP_VEC(X0, X1, X2, X3, X4) {__constant_cpu_to_le32(X0), \
				     __constant_cpu_to_le32(X1), \
				     __constant_cpu_to_le32(X2), \
				     __constant_cpu_to_le32(X3), \
				     __constant_cpu_to_le32(X4)}

/* default power management (not Tx power) table values */
/* for TIM  0-10 */
static struct iwl_power_vec_entry range_0[IWL39_POWER_AC] = {
	{{NOSLP, SLP_TIMEOUT(0), SLP_TIMEOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
	{{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(500), SLP_VEC(1, 2, 3, 4, 4)}, 0},
	{{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(300), SLP_VEC(2, 4, 6, 7, 7)}, 0},
	{{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(100), SLP_VEC(2, 6, 9, 9, 10)}, 0},
	{{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(25), SLP_VEC(2, 7, 9, 9, 10)}, 1},
	{{SLP, SLP_TIMEOUT(25), SLP_TIMEOUT(25), SLP_VEC(4, 7, 10, 10, 10)}, 1}
};

/* for TIM > 10 */
static struct iwl_power_vec_entry range_1[IWL39_POWER_AC] = {
	{{NOSLP, SLP_TIMEOUT(0), SLP_TIMEOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
	{{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(500),
		 SLP_VEC(1, 2, 3, 4, 0xFF)}, 0},
	{{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(300),
		 SLP_VEC(2, 4, 6, 7, 0xFF)}, 0},
	{{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(100),
		 SLP_VEC(2, 6, 9, 9, 0xFF)}, 0},
	{{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(25), SLP_VEC(2, 7, 9, 9, 0xFF)}, 0},
	{{SLP, SLP_TIMEOUT(25), SLP_TIMEOUT(25),
		 SLP_VEC(4, 7, 10, 10, 0xFF)}, 0}
};

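/*
 * Illustrative sketch (not part of the driver): how the tables above are
 * meant to be consumed.  range_0 covers TIM/DTIM periods up to 10 beacons,
 * range_1 longer ones; within a range the entry is selected by the requested
 * power level.  The real selection, and the clamping of the sleep vector to
 * whole DTIM periods, happen in iwl3945_update_power_cmd() below; the helper
 * name here is hypothetical.
 */
static inline const struct iwl_power_vec_entry *
example_pick_power_entry(u32 dtim_period, u32 power_level)
{
	const struct iwl_power_vec_entry *range =
			(dtim_period <= 10) ? range_0 : range_1;

	return &range[power_level];
}
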
4a8a4322 1599int iwl3945_power_init_handle(struct iwl_priv *priv)
b481de9c
ZY
1600{
1601 int rc = 0, i;
bb8c093b 1602 struct iwl3945_power_mgr *pow_data;
1125eff3 1603 int size = sizeof(struct iwl_power_vec_entry) * IWL39_POWER_AC;
b481de9c
ZY
1604 u16 pci_pm;
1605
1606 IWL_DEBUG_POWER("Initialize power \n");
1607
f2c7e521 1608 pow_data = &(priv->power_data_39);
b481de9c
ZY
1609
1610 memset(pow_data, 0, sizeof(*pow_data));
1611
1612 pow_data->active_index = IWL_POWER_RANGE_0;
1613 pow_data->dtim_val = 0xffff;
1614
1615 memcpy(&pow_data->pwr_range_0[0], &range_0[0], size);
1616 memcpy(&pow_data->pwr_range_1[0], &range_1[0], size);
1617
1618 rc = pci_read_config_word(priv->pci_dev, PCI_LINK_CTRL, &pci_pm);
1619 if (rc != 0)
1620 return 0;
1621 else {
600c0e11 1622 struct iwl_powertable_cmd *cmd;
b481de9c
ZY
1623
1624 IWL_DEBUG_POWER("adjust power command flags\n");
1625
1125eff3 1626 for (i = 0; i < IWL39_POWER_AC; i++) {
b481de9c
ZY
1627 cmd = &pow_data->pwr_range_0[i].cmd;
1628
1629 if (pci_pm & 0x1)
1630 cmd->flags &= ~IWL_POWER_PCI_PM_MSK;
1631 else
1632 cmd->flags |= IWL_POWER_PCI_PM_MSK;
1633 }
1634 }
1635 return rc;
1636}
1637
4a8a4322 1638static int iwl3945_update_power_cmd(struct iwl_priv *priv,
600c0e11 1639 struct iwl_powertable_cmd *cmd, u32 mode)
b481de9c
ZY
1640{
1641 int rc = 0, i;
1642 u8 skip;
1643 u32 max_sleep = 0;
1125eff3 1644 struct iwl_power_vec_entry *range;
b481de9c 1645 u8 period = 0;
bb8c093b 1646 struct iwl3945_power_mgr *pow_data;
b481de9c
ZY
1647
1648 if (mode > IWL_POWER_INDEX_5) {
 1649 		IWL_DEBUG_POWER("Error: invalid power mode\n");
1650 return -1;
1651 }
f2c7e521 1652 pow_data = &(priv->power_data_39);
b481de9c
ZY
1653
1654 if (pow_data->active_index == IWL_POWER_RANGE_0)
1655 range = &pow_data->pwr_range_0[0];
1656 else
1657 range = &pow_data->pwr_range_1[1];
1658
bb8c093b 1659 memcpy(cmd, &range[mode].cmd, sizeof(struct iwl3945_powertable_cmd));
b481de9c
ZY
1660
1661#ifdef IWL_MAC80211_DISABLE
1662 if (priv->assoc_network != NULL) {
1663 unsigned long flags;
1664
1665 period = priv->assoc_network->tim.tim_period;
1666 }
1667#endif /*IWL_MAC80211_DISABLE */
1668 skip = range[mode].no_dtim;
1669
1670 if (period == 0) {
1671 period = 1;
1672 skip = 0;
1673 }
1674
1675 if (skip == 0) {
1676 max_sleep = period;
1677 cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK;
1678 } else {
1679 __le32 slp_itrvl = cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1];
1680 max_sleep = (le32_to_cpu(slp_itrvl) / period) * period;
1681 cmd->flags |= IWL_POWER_SLEEP_OVER_DTIM_MSK;
1682 }
1683
1684 for (i = 0; i < IWL_POWER_VEC_SIZE; i++) {
1685 if (le32_to_cpu(cmd->sleep_interval[i]) > max_sleep)
1686 cmd->sleep_interval[i] = cpu_to_le32(max_sleep);
1687 }
1688
1689 IWL_DEBUG_POWER("Flags value = 0x%08X\n", cmd->flags);
1690 IWL_DEBUG_POWER("Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout));
1691 IWL_DEBUG_POWER("Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout));
1692 IWL_DEBUG_POWER("Sleep interval vector = { %d , %d , %d , %d , %d }\n",
1693 le32_to_cpu(cmd->sleep_interval[0]),
1694 le32_to_cpu(cmd->sleep_interval[1]),
1695 le32_to_cpu(cmd->sleep_interval[2]),
1696 le32_to_cpu(cmd->sleep_interval[3]),
1697 le32_to_cpu(cmd->sleep_interval[4]));
1698
1699 return rc;
1700}
1701
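The DTIM-capping step in iwl3945_update_power_cmd() is easiest to follow with concrete numbers: when sleeping over DTIM is allowed, max_sleep becomes the largest multiple of the DTIM period that does not exceed the last (largest) entry of the sleep-interval vector, and every entry is then clamped to it. The standalone userspace sketch below reproduces only that arithmetic; the vector length of 5 mirrors IWL_POWER_VEC_SIZE and the sample values are invented rather than taken from the tables above.

#include <stdint.h>
#include <stdio.h>

#define VEC_SIZE 5	/* mirrors IWL_POWER_VEC_SIZE, assumed to be 5 here */

/* Clamp each sleep interval to the largest multiple of the DTIM period
 * that does not exceed the last (largest) entry, as the driver does when
 * sleeping over DTIM is enabled. */
static void cap_sleep_vector(uint32_t vec[VEC_SIZE], uint32_t dtim_period)
{
	uint32_t max_sleep = (vec[VEC_SIZE - 1] / dtim_period) * dtim_period;

	for (int i = 0; i < VEC_SIZE; i++)
		if (vec[i] > max_sleep)
			vec[i] = max_sleep;
}

int main(void)
{
	uint32_t vec[VEC_SIZE] = { 2, 6, 9, 9, 10 };	/* intervals in beacons */
	uint32_t dtim_period = 3;

	cap_sleep_vector(vec, dtim_period);	/* max_sleep = (10 / 3) * 3 = 9 */

	for (int i = 0; i < VEC_SIZE; i++)
		printf("%u ", vec[i]);		/* prints: 2 6 9 9 9 */
	printf("\n");
	return 0;
}
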
4a8a4322 1702static int iwl3945_send_power_mode(struct iwl_priv *priv, u32 mode)
b481de9c 1703{
9a62f73b 1704 u32 uninitialized_var(final_mode);
b481de9c 1705 int rc;
600c0e11 1706 struct iwl_powertable_cmd cmd;
b481de9c
ZY
1707
1708 /* If on battery, set to 3,
01ebd063 1709 * if plugged into AC power, set to CAM ("continuously aware mode"),
b481de9c
ZY
1710 * else user level */
1711 switch (mode) {
1125eff3 1712 case IWL39_POWER_BATTERY:
b481de9c
ZY
1713 final_mode = IWL_POWER_INDEX_3;
1714 break;
1125eff3 1715 case IWL39_POWER_AC:
b481de9c
ZY
1716 final_mode = IWL_POWER_MODE_CAM;
1717 break;
1718 default:
1719 final_mode = mode;
1720 break;
1721 }
1722
bb8c093b 1723 iwl3945_update_power_cmd(priv, &cmd, final_mode);
b481de9c 1724
600c0e11
TW
1725 /* FIXME use get_hcmd_size 3945 command is 4 bytes shorter */
1726 rc = iwl3945_send_cmd_pdu(priv, POWER_TABLE_CMD,
1727 sizeof(struct iwl3945_powertable_cmd), &cmd);
b481de9c
ZY
1728
1729 if (final_mode == IWL_POWER_MODE_CAM)
1730 clear_bit(STATUS_POWER_PMI, &priv->status);
1731 else
1732 set_bit(STATUS_POWER_PMI, &priv->status);
1733
1734 return rc;
1735}
1736
b481de9c
ZY
1737#define MAX_UCODE_BEACON_INTERVAL 1024
1738#define INTEL_CONN_LISTEN_INTERVAL __constant_cpu_to_le16(0xA)
1739
bb8c093b 1740static __le16 iwl3945_adjust_beacon_interval(u16 beacon_val)
b481de9c
ZY
1741{
1742 u16 new_val = 0;
1743 u16 beacon_factor = 0;
1744
1745 beacon_factor =
1746 (beacon_val + MAX_UCODE_BEACON_INTERVAL)
1747 / MAX_UCODE_BEACON_INTERVAL;
1748 new_val = beacon_val / beacon_factor;
1749
1750 return cpu_to_le16(new_val);
1751}
1752
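iwl3945_adjust_beacon_interval() keeps the value handed to the uCode at or below MAX_UCODE_BEACON_INTERVAL by dividing by the smallest factor that brings it into range. A minimal userspace sketch of the same arithmetic, with made-up sample intervals (in time units):

#include <stdint.h>
#include <stdio.h>

#define MAX_UCODE_BEACON_INTERVAL 1024

/* Same arithmetic as iwl3945_adjust_beacon_interval(), minus the
 * little-endian conversion: divide by the smallest factor that brings
 * the interval down to MAX_UCODE_BEACON_INTERVAL or below. */
static uint16_t adjust_beacon_interval(uint16_t beacon_val)
{
	uint16_t beacon_factor =
		(beacon_val + MAX_UCODE_BEACON_INTERVAL) / MAX_UCODE_BEACON_INTERVAL;

	return beacon_val / beacon_factor;
}

int main(void)
{
	/* 100 TU is already in range; 3000 TU gets factor 3 -> 1000 TU */
	printf("%u\n", adjust_beacon_interval(100));	/* 100 */
	printf("%u\n", adjust_beacon_interval(3000));	/* 1000 */
	return 0;
}
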
4a8a4322 1753static void iwl3945_setup_rxon_timing(struct iwl_priv *priv)
b481de9c
ZY
1754{
1755 u64 interval_tm_unit;
1756 u64 tsf, result;
1757 unsigned long flags;
1758 struct ieee80211_conf *conf = NULL;
1759 u16 beacon_int = 0;
1760
1761 conf = ieee80211_get_hw_conf(priv->hw);
1762
1763 spin_lock_irqsave(&priv->lock, flags);
28afaf91 1764 priv->rxon_timing.timestamp = cpu_to_le64(priv->timestamp);
b481de9c
ZY
1765 priv->rxon_timing.listen_interval = INTEL_CONN_LISTEN_INTERVAL;
1766
28afaf91 1767 tsf = priv->timestamp;
b481de9c
ZY
1768
1769 beacon_int = priv->beacon_int;
1770 spin_unlock_irqrestore(&priv->lock, flags);
1771
05c914fe 1772 if (priv->iw_mode == NL80211_IFTYPE_STATION) {
b481de9c
ZY
1773 if (beacon_int == 0) {
1774 priv->rxon_timing.beacon_interval = cpu_to_le16(100);
1775 priv->rxon_timing.beacon_init_val = cpu_to_le32(102400);
1776 } else {
1777 priv->rxon_timing.beacon_interval =
1778 cpu_to_le16(beacon_int);
1779 priv->rxon_timing.beacon_interval =
bb8c093b 1780 iwl3945_adjust_beacon_interval(
b481de9c
ZY
1781 le16_to_cpu(priv->rxon_timing.beacon_interval));
1782 }
1783
1784 priv->rxon_timing.atim_window = 0;
1785 } else {
1786 priv->rxon_timing.beacon_interval =
bb8c093b 1787 iwl3945_adjust_beacon_interval(conf->beacon_int);
b481de9c
ZY
1788 /* TODO: we need to get atim_window from upper stack
1789 * for now we set to 0 */
1790 priv->rxon_timing.atim_window = 0;
1791 }
1792
1793 interval_tm_unit =
1794 (le16_to_cpu(priv->rxon_timing.beacon_interval) * 1024);
1795 result = do_div(tsf, interval_tm_unit);
1796 priv->rxon_timing.beacon_init_val =
1797 cpu_to_le32((u32) ((u64) interval_tm_unit - result));
1798
1799 IWL_DEBUG_ASSOC
1800 ("beacon interval %d beacon timer %d beacon tim %d\n",
1801 le16_to_cpu(priv->rxon_timing.beacon_interval),
1802 le32_to_cpu(priv->rxon_timing.beacon_init_val),
1803 le16_to_cpu(priv->rxon_timing.atim_window));
1804}
1805
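The beacon_init_val programmed above is the time from the current TSF to the next beacon boundary: the TSF is reduced modulo beacon_interval * 1024 usec and the remainder is subtracted from the interval. Below is a small userspace model of that calculation, with the kernel's do_div() replaced by the ordinary 64-bit modulo available outside the kernel; the sample TSF and interval are illustrative only.

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

/* Microseconds from 'tsf' until the next beacon boundary, given the
 * beacon interval in time units (1 TU = 1024 usec). Mirrors the
 * do_div()-based calculation in iwl3945_setup_rxon_timing(). */
static uint32_t usec_to_next_beacon(uint64_t tsf, uint16_t beacon_int_tu)
{
	uint64_t interval_usec = (uint64_t)beacon_int_tu * 1024;
	uint64_t rem = tsf % interval_usec;

	return (uint32_t)(interval_usec - rem);
}

int main(void)
{
	/* 100 TU interval = 102400 usec; a TSF of 1000000 usec is
	 * 78400 usec past the last boundary, so 24000 usec remain. */
	printf("%" PRIu32 "\n", usec_to_next_beacon(1000000ULL, 100));
	return 0;
}
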
4a8a4322 1806static int iwl3945_scan_initiate(struct iwl_priv *priv)
b481de9c 1807{
775a6e27 1808 if (!iwl_is_ready_rf(priv)) {
b481de9c
ZY
1809 IWL_DEBUG_SCAN("Aborting scan due to not ready.\n");
1810 return -EIO;
1811 }
1812
1813 if (test_bit(STATUS_SCANNING, &priv->status)) {
1814 IWL_DEBUG_SCAN("Scan already in progress.\n");
1815 return -EAGAIN;
1816 }
1817
1818 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
1819 IWL_DEBUG_SCAN("Scan request while abort pending. "
1820 "Queuing.\n");
1821 return -EAGAIN;
1822 }
1823
1824 IWL_DEBUG_INFO("Starting scan...\n");
66b5004d
RR
1825 if (priv->cfg->sku & IWL_SKU_G)
1826 priv->scan_bands |= BIT(IEEE80211_BAND_2GHZ);
1827 if (priv->cfg->sku & IWL_SKU_A)
1828 priv->scan_bands |= BIT(IEEE80211_BAND_5GHZ);
b481de9c
ZY
1829 set_bit(STATUS_SCANNING, &priv->status);
1830 priv->scan_start = jiffies;
1831 priv->scan_pass_start = priv->scan_start;
1832
1833 queue_work(priv->workqueue, &priv->request_scan);
1834
1835 return 0;
1836}
1837
4a8a4322 1838static int iwl3945_set_rxon_hwcrypto(struct iwl_priv *priv, int hw_decrypt)
b481de9c 1839{
f2c7e521 1840 struct iwl3945_rxon_cmd *rxon = &priv->staging39_rxon;
b481de9c
ZY
1841
1842 if (hw_decrypt)
1843 rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
1844 else
1845 rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
1846
1847 return 0;
1848}
1849
4a8a4322 1850static void iwl3945_set_flags_for_phymode(struct iwl_priv *priv,
8318d78a 1851 enum ieee80211_band band)
b481de9c 1852{
8318d78a 1853 if (band == IEEE80211_BAND_5GHZ) {
f2c7e521 1854 priv->staging39_rxon.flags &=
b481de9c
ZY
1855 ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
1856 | RXON_FLG_CCK_MSK);
f2c7e521 1857 priv->staging39_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
b481de9c 1858 } else {
bb8c093b 1859 /* Copied from iwl3945_bg_post_associate() */
b481de9c 1860 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
f2c7e521 1861 priv->staging39_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
b481de9c 1862 else
f2c7e521 1863 priv->staging39_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
b481de9c 1864
05c914fe 1865 if (priv->iw_mode == NL80211_IFTYPE_ADHOC)
f2c7e521 1866 priv->staging39_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
b481de9c 1867
f2c7e521
AK
1868 priv->staging39_rxon.flags |= RXON_FLG_BAND_24G_MSK;
1869 priv->staging39_rxon.flags |= RXON_FLG_AUTO_DETECT_MSK;
1870 priv->staging39_rxon.flags &= ~RXON_FLG_CCK_MSK;
b481de9c
ZY
1871 }
1872}
1873
1874/*
01ebd063 1875 * Initialize the RXON structure with default values from the EEPROM
b481de9c 1876 */
4a8a4322 1877static void iwl3945_connection_init_rx_config(struct iwl_priv *priv,
60294de3 1878 int mode)
b481de9c 1879{
d20b3c65 1880 const struct iwl_channel_info *ch_info;
b481de9c 1881
f2c7e521 1882 memset(&priv->staging39_rxon, 0, sizeof(priv->staging39_rxon));
b481de9c 1883
60294de3 1884 switch (mode) {
05c914fe 1885 case NL80211_IFTYPE_AP:
f2c7e521 1886 priv->staging39_rxon.dev_type = RXON_DEV_TYPE_AP;
b481de9c
ZY
1887 break;
1888
05c914fe 1889 case NL80211_IFTYPE_STATION:
f2c7e521
AK
1890 priv->staging39_rxon.dev_type = RXON_DEV_TYPE_ESS;
1891 priv->staging39_rxon.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
b481de9c
ZY
1892 break;
1893
05c914fe 1894 case NL80211_IFTYPE_ADHOC:
f2c7e521
AK
1895 priv->staging39_rxon.dev_type = RXON_DEV_TYPE_IBSS;
1896 priv->staging39_rxon.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
1897 priv->staging39_rxon.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
b481de9c
ZY
1898 RXON_FILTER_ACCEPT_GRP_MSK;
1899 break;
1900
05c914fe 1901 case NL80211_IFTYPE_MONITOR:
f2c7e521
AK
1902 priv->staging39_rxon.dev_type = RXON_DEV_TYPE_SNIFFER;
1903 priv->staging39_rxon.filter_flags = RXON_FILTER_PROMISC_MSK |
b481de9c
ZY
1904 RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_ACCEPT_GRP_MSK;
1905 break;
69dc5d9d 1906 default:
15b1687c 1907 IWL_ERR(priv, "Unsupported interface type %d\n", mode);
69dc5d9d 1908 break;
b481de9c
ZY
1909 }
1910
1911#if 0
1912 /* TODO: Figure out when short_preamble would be set and cache from
1913 * that */
1914 if (!hw_to_local(priv->hw)->short_preamble)
f2c7e521 1915 priv->staging39_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
b481de9c 1916 else
f2c7e521 1917 priv->staging39_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
b481de9c
ZY
1918#endif
1919
8318d78a 1920 ch_info = iwl3945_get_channel_info(priv, priv->band,
f2c7e521 1921 le16_to_cpu(priv->active39_rxon.channel));
b481de9c
ZY
1922
1923 if (!ch_info)
1924 ch_info = &priv->channel_info[0];
1925
1926 /*
 1927	 * In some cases the A-band channels are all non-IBSS;
 1928	 * in that case force a B/G channel
1929 */
60294de3 1930 if ((mode == NL80211_IFTYPE_ADHOC) && !(is_channel_ibss(ch_info)))
b481de9c
ZY
1931 ch_info = &priv->channel_info[0];
1932
f2c7e521 1933 priv->staging39_rxon.channel = cpu_to_le16(ch_info->channel);
b481de9c 1934 if (is_channel_a_band(ch_info))
8318d78a 1935 priv->band = IEEE80211_BAND_5GHZ;
b481de9c 1936 else
8318d78a 1937 priv->band = IEEE80211_BAND_2GHZ;
b481de9c 1938
8318d78a 1939 iwl3945_set_flags_for_phymode(priv, priv->band);
b481de9c 1940
f2c7e521 1941 priv->staging39_rxon.ofdm_basic_rates =
b481de9c 1942 (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
f2c7e521 1943 priv->staging39_rxon.cck_basic_rates =
b481de9c
ZY
1944 (IWL_CCK_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
1945}
1946
4a8a4322 1947static int iwl3945_set_mode(struct iwl_priv *priv, int mode)
b481de9c 1948{
05c914fe 1949 if (mode == NL80211_IFTYPE_ADHOC) {
d20b3c65 1950 const struct iwl_channel_info *ch_info;
b481de9c 1951
bb8c093b 1952 ch_info = iwl3945_get_channel_info(priv,
8318d78a 1953 priv->band,
f2c7e521 1954 le16_to_cpu(priv->staging39_rxon.channel));
b481de9c
ZY
1955
1956 if (!ch_info || !is_channel_ibss(ch_info)) {
15b1687c 1957 IWL_ERR(priv, "channel %d not IBSS channel\n",
f2c7e521 1958 le16_to_cpu(priv->staging39_rxon.channel));
b481de9c
ZY
1959 return -EINVAL;
1960 }
1961 }
1962
60294de3 1963 iwl3945_connection_init_rx_config(priv, mode);
f2c7e521 1964 memcpy(priv->staging39_rxon.node_addr, priv->mac_addr, ETH_ALEN);
b481de9c 1965
bb8c093b 1966 iwl3945_clear_stations_table(priv);
b481de9c 1967
a96a27f9 1968 /* don't commit rxon if rf-kill is on*/
775a6e27 1969 if (!iwl_is_ready_rf(priv))
fde3571f
MA
1970 return -EAGAIN;
1971
1972 cancel_delayed_work(&priv->scan_check);
af0053d6 1973 if (iwl_scan_cancel_timeout(priv, 100)) {
39aadf8c 1974 IWL_WARN(priv, "Aborted scan still in progress after 100ms\n");
fde3571f
MA
1975 IWL_DEBUG_MAC80211("leaving - scan abort failed.\n");
1976 return -EAGAIN;
1977 }
1978
bb8c093b 1979 iwl3945_commit_rxon(priv);
b481de9c
ZY
1980
1981 return 0;
1982}
1983
4a8a4322 1984static void iwl3945_build_tx_cmd_hwcrypto(struct iwl_priv *priv,
e039fa4a 1985 struct ieee80211_tx_info *info,
c2d79b48 1986 struct iwl_cmd *cmd,
b481de9c
ZY
1987 struct sk_buff *skb_frag,
1988 int last_frag)
1989{
e52119c5 1990 struct iwl3945_tx_cmd *tx = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
1c014420 1991 struct iwl3945_hw_key *keyinfo =
f2c7e521 1992 &priv->stations_39[info->control.hw_key->hw_key_idx].keyinfo;
b481de9c
ZY
1993
1994 switch (keyinfo->alg) {
1995 case ALG_CCMP:
e52119c5
WT
1996 tx->sec_ctl = TX_CMD_SEC_CCM;
1997 memcpy(tx->key, keyinfo->key, keyinfo->keylen);
a96a27f9 1998 IWL_DEBUG_TX("tx_cmd with AES hwcrypto\n");
b481de9c
ZY
1999 break;
2000
2001 case ALG_TKIP:
2002#if 0
e52119c5 2003 tx->sec_ctl = TX_CMD_SEC_TKIP;
b481de9c
ZY
2004
2005 if (last_frag)
e52119c5 2006 memcpy(tx->tkip_mic.byte, skb_frag->tail - 8,
b481de9c
ZY
2007 8);
2008 else
e52119c5 2009 memset(tx->tkip_mic.byte, 0, 8);
b481de9c
ZY
2010#endif
2011 break;
2012
2013 case ALG_WEP:
e52119c5 2014 tx->sec_ctl = TX_CMD_SEC_WEP |
e039fa4a 2015 (info->control.hw_key->hw_key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT;
b481de9c
ZY
2016
2017 if (keyinfo->keylen == 13)
e52119c5 2018 tx->sec_ctl |= TX_CMD_SEC_KEY128;
b481de9c 2019
e52119c5 2020 memcpy(&tx->key[3], keyinfo->key, keyinfo->keylen);
b481de9c
ZY
2021
2022 IWL_DEBUG_TX("Configuring packet for WEP encryption "
e039fa4a 2023 "with key %d\n", info->control.hw_key->hw_key_idx);
b481de9c
ZY
2024 break;
2025
b481de9c 2026 default:
978785a3 2027 IWL_ERR(priv, "Unknown encode alg %d\n", keyinfo->alg);
b481de9c
ZY
2028 break;
2029 }
2030}
2031
2032/*
 2033 * Build the basic (non-rate, non-crypto) portion of the REPLY_TX host command.
2034 */
4a8a4322 2035static void iwl3945_build_tx_cmd_basic(struct iwl_priv *priv,
c2d79b48 2036 struct iwl_cmd *cmd,
e039fa4a 2037 struct ieee80211_tx_info *info,
e52119c5 2038 struct ieee80211_hdr *hdr, u8 std_id)
b481de9c 2039{
e52119c5
WT
2040 struct iwl3945_tx_cmd *tx = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
2041 __le32 tx_flags = tx->tx_flags;
fd7c8a40 2042 __le16 fc = hdr->frame_control;
e6a9854b 2043 u8 rc_flags = info->control.rates[0].flags;
b481de9c 2044
e52119c5 2045 tx->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
e039fa4a 2046 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
b481de9c 2047 tx_flags |= TX_CMD_FLG_ACK_MSK;
fd7c8a40 2048 if (ieee80211_is_mgmt(fc))
b481de9c 2049 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
fd7c8a40 2050 if (ieee80211_is_probe_resp(fc) &&
b481de9c
ZY
2051 !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
2052 tx_flags |= TX_CMD_FLG_TSF_MSK;
2053 } else {
2054 tx_flags &= (~TX_CMD_FLG_ACK_MSK);
2055 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
2056 }
2057
e52119c5 2058 tx->sta_id = std_id;
8b7b1e05 2059 if (ieee80211_has_morefrags(fc))
b481de9c
ZY
2060 tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
2061
fd7c8a40
HH
2062 if (ieee80211_is_data_qos(fc)) {
2063 u8 *qc = ieee80211_get_qos_ctl(hdr);
e52119c5 2064 tx->tid_tspec = qc[0] & 0xf;
b481de9c 2065 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
54dbb525 2066 } else {
b481de9c 2067 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
54dbb525 2068 }
b481de9c 2069
e6a9854b 2070 if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
b481de9c
ZY
2071 tx_flags |= TX_CMD_FLG_RTS_MSK;
2072 tx_flags &= ~TX_CMD_FLG_CTS_MSK;
e6a9854b 2073 } else if (rc_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
b481de9c
ZY
2074 tx_flags &= ~TX_CMD_FLG_RTS_MSK;
2075 tx_flags |= TX_CMD_FLG_CTS_MSK;
2076 }
2077
2078 if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
2079 tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
2080
2081 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
fd7c8a40
HH
2082 if (ieee80211_is_mgmt(fc)) {
2083 if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
e52119c5 2084 tx->timeout.pm_frame_timeout = cpu_to_le16(3);
b481de9c 2085 else
e52119c5 2086 tx->timeout.pm_frame_timeout = cpu_to_le16(2);
ab53d8af 2087 } else {
e52119c5 2088 tx->timeout.pm_frame_timeout = 0;
ab53d8af
MA
2089#ifdef CONFIG_IWL3945_LEDS
2090 priv->rxtxpackets += le16_to_cpu(cmd->cmd.tx.len);
2091#endif
2092 }
b481de9c 2093
e52119c5
WT
2094 tx->driver_txop = 0;
2095 tx->tx_flags = tx_flags;
2096 tx->next_frame_len = 0;
b481de9c
ZY
2097}
2098
6440adb5
CB
2099/**
2100 * iwl3945_get_sta_id - Find station's index within station table
2101 */
4a8a4322 2102static int iwl3945_get_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
b481de9c
ZY
2103{
2104 int sta_id;
2105 u16 fc = le16_to_cpu(hdr->frame_control);
2106
6440adb5 2107 /* If this frame is broadcast or management, use broadcast station id */
b481de9c
ZY
2108 if (((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA) ||
2109 is_multicast_ether_addr(hdr->addr1))
3832ec9d 2110 return priv->hw_params.bcast_sta_id;
b481de9c
ZY
2111
2112 switch (priv->iw_mode) {
2113
6440adb5
CB
2114 /* If we are a client station in a BSS network, use the special
2115 * AP station entry (that's the only station we communicate with) */
05c914fe 2116 case NL80211_IFTYPE_STATION:
b481de9c
ZY
2117 return IWL_AP_ID;
2118
2119 /* If we are an AP, then find the station, or use BCAST */
05c914fe 2120 case NL80211_IFTYPE_AP:
bb8c093b 2121 sta_id = iwl3945_hw_find_station(priv, hdr->addr1);
b481de9c
ZY
2122 if (sta_id != IWL_INVALID_STATION)
2123 return sta_id;
3832ec9d 2124 return priv->hw_params.bcast_sta_id;
b481de9c 2125
6440adb5
CB
2126 /* If this frame is going out to an IBSS network, find the station,
2127 * or create a new station table entry */
05c914fe 2128 case NL80211_IFTYPE_ADHOC: {
6440adb5 2129 /* Create new station table entry */
bb8c093b 2130 sta_id = iwl3945_hw_find_station(priv, hdr->addr1);
b481de9c
ZY
2131 if (sta_id != IWL_INVALID_STATION)
2132 return sta_id;
2133
bb8c093b 2134 sta_id = iwl3945_add_station(priv, hdr->addr1, 0, CMD_ASYNC);
b481de9c
ZY
2135
2136 if (sta_id != IWL_INVALID_STATION)
2137 return sta_id;
2138
e174961c 2139 IWL_DEBUG_DROP("Station %pM not in station map. "
b481de9c 2140 "Defaulting to broadcast...\n",
e174961c 2141 hdr->addr1);
40b8ec0b 2142 iwl_print_hex_dump(priv, IWL_DL_DROP, (u8 *) hdr, sizeof(*hdr));
3832ec9d 2143 return priv->hw_params.bcast_sta_id;
0795af57 2144 }
914233d6
SG
2145 /* If we are in monitor mode, use BCAST. This is required for
2146 * packet injection. */
05c914fe 2147 case NL80211_IFTYPE_MONITOR:
3832ec9d 2148 return priv->hw_params.bcast_sta_id;
914233d6 2149
b481de9c 2150 default:
39aadf8c
WT
2151 IWL_WARN(priv, "Unknown mode of operation: %d\n",
2152 priv->iw_mode);
3832ec9d 2153 return priv->hw_params.bcast_sta_id;
b481de9c
ZY
2154 }
2155}
2156
2157/*
2158 * start REPLY_TX command process
2159 */
4a8a4322 2160static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
b481de9c
ZY
2161{
2162 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
e039fa4a 2163 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
dbb6654c 2164 struct iwl3945_tfd *tfd;
e52119c5 2165 struct iwl3945_tx_cmd *tx;
188cf6c7 2166 struct iwl_tx_queue *txq = NULL;
d20b3c65 2167 struct iwl_queue *q = NULL;
e52119c5 2168 struct iwl_cmd *out_cmd = NULL;
b481de9c
ZY
2169 dma_addr_t phys_addr;
2170 dma_addr_t txcmd_phys;
e52119c5 2171 int txq_id = skb_get_queue_mapping(skb);
54dbb525
TW
2172 u16 len, idx, len_org, hdr_len;
2173 u8 id;
2174 u8 unicast;
b481de9c 2175 u8 sta_id;
54dbb525 2176 u8 tid = 0;
b481de9c 2177 u16 seq_number = 0;
fd7c8a40 2178 __le16 fc;
b481de9c 2179 u8 wait_write_ptr = 0;
54dbb525 2180 u8 *qc = NULL;
b481de9c
ZY
2181 unsigned long flags;
2182 int rc;
2183
2184 spin_lock_irqsave(&priv->lock, flags);
775a6e27 2185 if (iwl_is_rfkill(priv)) {
b481de9c
ZY
2186 IWL_DEBUG_DROP("Dropping - RF KILL\n");
2187 goto drop_unlock;
2188 }
2189
e039fa4a 2190 if ((ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xFF) == IWL_INVALID_RATE) {
15b1687c 2191 IWL_ERR(priv, "ERROR: No TX rate available.\n");
b481de9c
ZY
2192 goto drop_unlock;
2193 }
2194
2195 unicast = !is_multicast_ether_addr(hdr->addr1);
2196 id = 0;
2197
fd7c8a40 2198 fc = hdr->frame_control;
b481de9c 2199
c8b0e6e1 2200#ifdef CONFIG_IWL3945_DEBUG
b481de9c
ZY
2201 if (ieee80211_is_auth(fc))
2202 IWL_DEBUG_TX("Sending AUTH frame\n");
fd7c8a40 2203 else if (ieee80211_is_assoc_req(fc))
b481de9c 2204 IWL_DEBUG_TX("Sending ASSOC frame\n");
fd7c8a40 2205 else if (ieee80211_is_reassoc_req(fc))
b481de9c
ZY
2206 IWL_DEBUG_TX("Sending REASSOC frame\n");
2207#endif
2208
7878a5a4 2209 /* drop all data frame if we are not associated */
914233d6 2210 if (ieee80211_is_data(fc) &&
05c914fe 2211 (priv->iw_mode != NL80211_IFTYPE_MONITOR) && /* packet injection */
914233d6 2212 (!iwl3945_is_associated(priv) ||
05c914fe 2213 ((priv->iw_mode == NL80211_IFTYPE_STATION) && !priv->assoc_id))) {
bb8c093b 2214 IWL_DEBUG_DROP("Dropping - !iwl3945_is_associated\n");
b481de9c
ZY
2215 goto drop_unlock;
2216 }
2217
2218 spin_unlock_irqrestore(&priv->lock, flags);
2219
7294ec95 2220 hdr_len = ieee80211_hdrlen(fc);
6440adb5
CB
2221
2222 /* Find (or create) index into station table for destination station */
bb8c093b 2223 sta_id = iwl3945_get_sta_id(priv, hdr);
b481de9c 2224 if (sta_id == IWL_INVALID_STATION) {
e174961c
JB
2225 IWL_DEBUG_DROP("Dropping - INVALID STATION: %pM\n",
2226 hdr->addr1);
b481de9c
ZY
2227 goto drop;
2228 }
2229
2230 IWL_DEBUG_RATE("station Id %d\n", sta_id);
2231
fd7c8a40
HH
2232 if (ieee80211_is_data_qos(fc)) {
2233 qc = ieee80211_get_qos_ctl(hdr);
7294ec95 2234 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
f2c7e521 2235 seq_number = priv->stations_39[sta_id].tid[tid].seq_number &
b481de9c
ZY
2236 IEEE80211_SCTL_SEQ;
2237 hdr->seq_ctrl = cpu_to_le16(seq_number) |
2238 (hdr->seq_ctrl &
2239 __constant_cpu_to_le16(IEEE80211_SCTL_FRAG));
2240 seq_number += 0x10;
2241 }
6440adb5
CB
2242
2243 /* Descriptor for chosen Tx queue */
188cf6c7 2244 txq = &priv->txq[txq_id];
b481de9c
ZY
2245 q = &txq->q;
2246
2247 spin_lock_irqsave(&priv->lock, flags);
2248
6440adb5 2249 /* Set up first empty TFD within this queue's circular TFD buffer */
188cf6c7 2250 tfd = &txq->tfds39[q->write_ptr];
b481de9c 2251 memset(tfd, 0, sizeof(*tfd));
fc4b6853 2252 idx = get_cmd_index(q, q->write_ptr, 0);
b481de9c 2253
6440adb5 2254 /* Set up driver data for this TFD */
dbb6654c 2255 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
fc4b6853 2256 txq->txb[q->write_ptr].skb[0] = skb;
6440adb5
CB
2257
2258 /* Init first empty entry in queue's array of Tx/cmd buffers */
188cf6c7 2259 out_cmd = txq->cmd[idx];
e52119c5 2260 tx = (struct iwl3945_tx_cmd *)out_cmd->cmd.payload;
b481de9c 2261 memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
e52119c5 2262 memset(tx, 0, sizeof(*tx));
6440adb5
CB
2263
2264 /*
2265 * Set up the Tx-command (not MAC!) header.
2266 * Store the chosen Tx queue and TFD index within the sequence field;
2267 * after Tx, uCode's Tx response will return this value so driver can
2268 * locate the frame within the tx queue and do post-tx processing.
2269 */
b481de9c
ZY
2270 out_cmd->hdr.cmd = REPLY_TX;
2271 out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
fc4b6853 2272 INDEX_TO_SEQ(q->write_ptr)));
6440adb5
CB
2273
2274 /* Copy MAC header from skb into command buffer */
e52119c5 2275 memcpy(tx->hdr, hdr, hdr_len);
b481de9c 2276
6440adb5
CB
2277 /*
2278 * Use the first empty entry in this queue's command buffer array
2279 * to contain the Tx command and MAC header concatenated together
2280 * (payload data will be in another buffer).
2281 * Size of this varies, due to varying MAC header length.
2282 * If end is not dword aligned, we'll have 2 extra bytes at the end
2283 * of the MAC header (device reads on dword boundaries).
2284 * We'll tell device about this padding later.
2285 */
3832ec9d 2286 len = sizeof(struct iwl3945_tx_cmd) +
4c897253 2287 sizeof(struct iwl_cmd_header) + hdr_len;
b481de9c
ZY
2288
2289 len_org = len;
2290 len = (len + 3) & ~3;
2291
2292 if (len_org != len)
2293 len_org = 1;
2294 else
2295 len_org = 0;
2296
6440adb5
CB
2297 /* Physical address of this Tx command's header (not MAC header!),
2298 * within command buffer array. */
188cf6c7
SO
2299 txcmd_phys = pci_map_single(priv->pci_dev,
2300 out_cmd, sizeof(struct iwl_cmd),
2301 PCI_DMA_TODEVICE);
2302 pci_unmap_addr_set(&out_cmd->meta, mapping, txcmd_phys);
2303 pci_unmap_len_set(&out_cmd->meta, len, sizeof(struct iwl_cmd));
 2304 	/* Point past the command's meta data to the command header itself;
 2305 	 * that is the part actually handed to the device */
2306 txcmd_phys += offsetof(struct iwl_cmd, hdr);
b481de9c 2307
6440adb5
CB
2308 /* Add buffer containing Tx command and MAC(!) header to TFD's
2309 * first entry */
bb8c093b 2310 iwl3945_hw_txq_attach_buf_to_tfd(priv, tfd, txcmd_phys, len);
b481de9c 2311
d0f09804 2312 if (info->control.hw_key)
e039fa4a 2313 iwl3945_build_tx_cmd_hwcrypto(priv, info, out_cmd, skb, 0);
b481de9c 2314
6440adb5
CB
2315 /* Set up TFD's 2nd entry to point directly to remainder of skb,
2316 * if any (802.11 null frames have no payload). */
b481de9c
ZY
2317 len = skb->len - hdr_len;
2318 if (len) {
2319 phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
2320 len, PCI_DMA_TODEVICE);
bb8c093b 2321 iwl3945_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, len);
b481de9c
ZY
2322 }
2323
b481de9c 2324 if (!len)
6440adb5 2325 /* If there is no payload, then we use only one Tx buffer */
dbb6654c 2326 tfd->control_flags = cpu_to_le32(TFD_CTL_COUNT_SET(1));
b481de9c 2327 else
6440adb5
CB
2328 /* Else use 2 buffers.
2329 * Tell 3945 about any padding after MAC header */
dbb6654c
WT
2330 tfd->control_flags = cpu_to_le32(TFD_CTL_COUNT_SET(2) |
2331 TFD_CTL_PAD_SET(U32_PAD(len)));
b481de9c 2332
6440adb5 2333 /* Total # bytes to be transmitted */
b481de9c 2334 len = (u16)skb->len;
e52119c5 2335 tx->len = cpu_to_le16(len);
b481de9c
ZY
2336
2337 /* TODO need this for burst mode later on */
e52119c5 2338 iwl3945_build_tx_cmd_basic(priv, out_cmd, info, hdr, sta_id);
b481de9c
ZY
2339
2340 /* set is_hcca to 0; it probably will never be implemented */
e039fa4a 2341 iwl3945_hw_build_tx_cmd_rate(priv, out_cmd, info, hdr, sta_id, 0);
b481de9c 2342
e52119c5
WT
2343 tx->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
2344 tx->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;
b481de9c 2345
8b7b1e05 2346 if (!ieee80211_has_morefrags(hdr->frame_control)) {
b481de9c 2347 txq->need_update = 1;
3ac7f146 2348 if (qc)
f2c7e521 2349 priv->stations_39[sta_id].tid[tid].seq_number = seq_number;
b481de9c
ZY
2350 } else {
2351 wait_write_ptr = 1;
2352 txq->need_update = 0;
2353 }
2354
e52119c5 2355 iwl_print_hex_dump(priv, IWL_DL_TX, tx, sizeof(*tx));
b481de9c 2356
e52119c5 2357 iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx->hdr,
7294ec95 2358 ieee80211_hdrlen(fc));
b481de9c 2359
6440adb5 2360 /* Tell device the write index *just past* this latest filled TFD */
c54b679d 2361 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
bb8c093b 2362 rc = iwl3945_tx_queue_update_write_ptr(priv, txq);
b481de9c
ZY
2363 spin_unlock_irqrestore(&priv->lock, flags);
2364
2365 if (rc)
2366 return rc;
2367
d20b3c65 2368 if ((iwl_queue_space(q) < q->high_mark)
b481de9c
ZY
2369 && priv->mac80211_registered) {
2370 if (wait_write_ptr) {
2371 spin_lock_irqsave(&priv->lock, flags);
2372 txq->need_update = 1;
bb8c093b 2373 iwl3945_tx_queue_update_write_ptr(priv, txq);
b481de9c
ZY
2374 spin_unlock_irqrestore(&priv->lock, flags);
2375 }
2376
e2530083 2377 ieee80211_stop_queue(priv->hw, skb_get_queue_mapping(skb));
b481de9c
ZY
2378 }
2379
2380 return 0;
2381
2382drop_unlock:
2383 spin_unlock_irqrestore(&priv->lock, flags);
2384drop:
2385 return -1;
2386}
2387
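One detail of iwl3945_tx_skb() worth calling out is the dword alignment of the concatenated Tx command and MAC header: the length is rounded up with (len + 3) & ~3 because the device reads on dword boundaries, and len_org is reduced to a flag recording whether any padding was added. A minimal sketch of just that rounding, independent of the driver structures:

#include <stdint.h>
#include <stdio.h>

/* Round a length up to the next multiple of 4 (dword alignment), as is
 * done for the concatenated Tx command + MAC header. */
static uint16_t dword_align(uint16_t len, int *padded)
{
	uint16_t aligned = (len + 3) & ~3;

	*padded = (aligned != len);	/* mirrors the len_org flag */
	return aligned;
}

int main(void)
{
	int padded;

	/* e.g. a total of 46 bytes rounds up to 48, adding 2 bytes of padding */
	printf("%u padded=%d\n", dword_align(46, &padded), padded);	/* 48 padded=1 */
	printf("%u padded=%d\n", dword_align(44, &padded), padded);	/* 44 padded=0 */
	return 0;
}
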
4a8a4322 2388static void iwl3945_set_rate(struct iwl_priv *priv)
b481de9c 2389{
8318d78a 2390 const struct ieee80211_supported_band *sband = NULL;
b481de9c
ZY
2391 struct ieee80211_rate *rate;
2392 int i;
2393
cbba18c6 2394 sband = iwl_get_hw_mode(priv, priv->band);
8318d78a 2395 if (!sband) {
15b1687c 2396 IWL_ERR(priv, "Failed to set rate: unable to get hw mode\n");
c4ba9621
SA
2397 return;
2398 }
b481de9c
ZY
2399
2400 priv->active_rate = 0;
2401 priv->active_rate_basic = 0;
2402
8318d78a
JB
2403 IWL_DEBUG_RATE("Setting rates for %s GHz\n",
2404 sband->band == IEEE80211_BAND_2GHZ ? "2.4" : "5");
2405
2406 for (i = 0; i < sband->n_bitrates; i++) {
2407 rate = &sband->bitrates[i];
2408 if ((rate->hw_value < IWL_RATE_COUNT) &&
2409 !(rate->flags & IEEE80211_CHAN_DISABLED)) {
2410 IWL_DEBUG_RATE("Adding rate index %d (plcp %d)\n",
2411 rate->hw_value, iwl3945_rates[rate->hw_value].plcp);
2412 priv->active_rate |= (1 << rate->hw_value);
2413 }
b481de9c
ZY
2414 }
2415
2416 IWL_DEBUG_RATE("Set active_rate = %0x, active_rate_basic = %0x\n",
2417 priv->active_rate, priv->active_rate_basic);
2418
2419 /*
2420 * If a basic rate is configured, then use it (adding IWL_RATE_1M_MASK)
2421 * otherwise set it to the default of all CCK rates and 6, 12, 24 for
2422 * OFDM
2423 */
2424 if (priv->active_rate_basic & IWL_CCK_BASIC_RATES_MASK)
f2c7e521 2425 priv->staging39_rxon.cck_basic_rates =
b481de9c
ZY
2426 ((priv->active_rate_basic &
2427 IWL_CCK_RATES_MASK) >> IWL_FIRST_CCK_RATE) & 0xF;
2428 else
f2c7e521 2429 priv->staging39_rxon.cck_basic_rates =
b481de9c
ZY
2430 (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
2431
2432 if (priv->active_rate_basic & IWL_OFDM_BASIC_RATES_MASK)
f2c7e521 2433 priv->staging39_rxon.ofdm_basic_rates =
b481de9c
ZY
2434 ((priv->active_rate_basic &
2435 (IWL_OFDM_BASIC_RATES_MASK | IWL_RATE_6M_MASK)) >>
2436 IWL_FIRST_OFDM_RATE) & 0xFF;
2437 else
f2c7e521 2438 priv->staging39_rxon.ofdm_basic_rates =
b481de9c
ZY
2439 (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
2440}
2441
4a8a4322 2442static void iwl3945_radio_kill_sw(struct iwl_priv *priv, int disable_radio)
b481de9c
ZY
2443{
2444 unsigned long flags;
2445
2446 if (!!disable_radio == test_bit(STATUS_RF_KILL_SW, &priv->status))
2447 return;
2448
2449 IWL_DEBUG_RF_KILL("Manual SW RF KILL set to: RADIO %s\n",
2450 disable_radio ? "OFF" : "ON");
2451
2452 if (disable_radio) {
af0053d6 2453 iwl_scan_cancel(priv);
b481de9c 2454 /* FIXME: This is a workaround for AP */
05c914fe 2455 if (priv->iw_mode != NL80211_IFTYPE_AP) {
b481de9c 2456 spin_lock_irqsave(&priv->lock, flags);
5d49f498 2457 iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
b481de9c
ZY
2458 CSR_UCODE_SW_BIT_RFKILL);
2459 spin_unlock_irqrestore(&priv->lock, flags);
c496294e 2460 iwl_send_card_state(priv, CARD_STATE_CMD_DISABLE, 0);
b481de9c
ZY
2461 set_bit(STATUS_RF_KILL_SW, &priv->status);
2462 }
2463 return;
2464 }
2465
2466 spin_lock_irqsave(&priv->lock, flags);
5d49f498 2467 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
b481de9c
ZY
2468
2469 clear_bit(STATUS_RF_KILL_SW, &priv->status);
2470 spin_unlock_irqrestore(&priv->lock, flags);
2471
2472 /* wake up ucode */
2473 msleep(10);
2474
2475 spin_lock_irqsave(&priv->lock, flags);
5d49f498
AK
2476 iwl_read32(priv, CSR_UCODE_DRV_GP1);
2477 if (!iwl_grab_nic_access(priv))
2478 iwl_release_nic_access(priv);
b481de9c
ZY
2479 spin_unlock_irqrestore(&priv->lock, flags);
2480
2481 if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
2482 IWL_DEBUG_RF_KILL("Can not turn radio back on - "
2483 "disabled by HW switch\n");
2484 return;
2485 }
2486
808e72a0
ZY
2487 if (priv->is_open)
2488 queue_work(priv->workqueue, &priv->restart);
b481de9c
ZY
2489 return;
2490}
2491
4a8a4322 2492void iwl3945_set_decrypted_flag(struct iwl_priv *priv, struct sk_buff *skb,
b481de9c
ZY
2493 u32 decrypt_res, struct ieee80211_rx_status *stats)
2494{
2495 u16 fc =
2496 le16_to_cpu(((struct ieee80211_hdr *)skb->data)->frame_control);
2497
f2c7e521 2498 if (priv->active39_rxon.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK)
b481de9c
ZY
2499 return;
2500
2501 if (!(fc & IEEE80211_FCTL_PROTECTED))
2502 return;
2503
2504 IWL_DEBUG_RX("decrypt_res:0x%x\n", decrypt_res);
2505 switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
2506 case RX_RES_STATUS_SEC_TYPE_TKIP:
2507 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
2508 RX_RES_STATUS_BAD_ICV_MIC)
2509 stats->flag |= RX_FLAG_MMIC_ERROR;
2510 case RX_RES_STATUS_SEC_TYPE_WEP:
2511 case RX_RES_STATUS_SEC_TYPE_CCMP:
2512 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
2513 RX_RES_STATUS_DECRYPT_OK) {
2514 IWL_DEBUG_RX("hw decrypt successfully!!!\n");
2515 stats->flag |= RX_FLAG_DECRYPTED;
2516 }
2517 break;
2518
2519 default:
2520 break;
2521 }
2522}
2523
c8b0e6e1 2524#ifdef CONFIG_IWL3945_SPECTRUM_MEASUREMENT
b481de9c
ZY
2525
2526#include "iwl-spectrum.h"
2527
2528#define BEACON_TIME_MASK_LOW 0x00FFFFFF
2529#define BEACON_TIME_MASK_HIGH 0xFF000000
2530#define TIME_UNIT 1024
2531
2532/*
2533 * extended beacon time format
 2535 * a time in usec is packed into a 32-bit value in 8:24 format:
 2535 * the high byte is the beacon count
 2536 * the low 3 bytes are the time in usec within one beacon interval
2537 */
2538
bb8c093b 2539static u32 iwl3945_usecs_to_beacons(u32 usec, u32 beacon_interval)
b481de9c
ZY
2540{
2541 u32 quot;
2542 u32 rem;
2543 u32 interval = beacon_interval * 1024;
2544
2545 if (!interval || !usec)
2546 return 0;
2547
2548 quot = (usec / interval) & (BEACON_TIME_MASK_HIGH >> 24);
2549 rem = (usec % interval) & BEACON_TIME_MASK_LOW;
2550
2551 return (quot << 24) + rem;
2552}
2553
 2554/* base is usually what we get from the uCode with each received frame;
 2555 * it is the same as the HW timer counter, which counts down
2556 */
2557
bb8c093b 2558static __le32 iwl3945_add_beacon_time(u32 base, u32 addon, u32 beacon_interval)
b481de9c
ZY
2559{
2560 u32 base_low = base & BEACON_TIME_MASK_LOW;
2561 u32 addon_low = addon & BEACON_TIME_MASK_LOW;
2562 u32 interval = beacon_interval * TIME_UNIT;
2563 u32 res = (base & BEACON_TIME_MASK_HIGH) +
2564 (addon & BEACON_TIME_MASK_HIGH);
2565
2566 if (base_low > addon_low)
2567 res += base_low - addon_low;
2568 else if (base_low < addon_low) {
2569 res += interval + base_low - addon_low;
2570 res += (1 << 24);
2571 } else
2572 res += (1 << 24);
2573
2574 return cpu_to_le32(res);
2575}
2576
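Together, the two helpers above implement the 8:24 extended beacon time format: the high byte carries the beacon count and the low 24 bits the microsecond remainder within one beacon interval. The userspace sketch below reproduces the packing step with the same masks; the le32 conversion is dropped and the sample numbers are invented.

#include <stdint.h>
#include <stdio.h>

#define BEACON_TIME_MASK_LOW	0x00FFFFFF
#define BEACON_TIME_MASK_HIGH	0xFF000000

/* Pack usec into 8:24 format: beacon count in the high byte, usec within
 * one beacon interval in the low 24 bits. Mirrors
 * iwl3945_usecs_to_beacons() without the le32 conversion. */
static uint32_t usecs_to_beacons(uint32_t usec, uint32_t beacon_interval_tu)
{
	uint32_t interval = beacon_interval_tu * 1024;
	uint32_t quot, rem;

	if (!interval || !usec)
		return 0;

	quot = (usec / interval) & (BEACON_TIME_MASK_HIGH >> 24);
	rem  = (usec % interval) & BEACON_TIME_MASK_LOW;

	return (quot << 24) + rem;
}

int main(void)
{
	/* 100 TU interval = 102400 usec: 250000 usec is 2 full beacons plus
	 * 45200 usec, so the packed value is (2 << 24) + 45200. */
	uint32_t v = usecs_to_beacons(250000, 100);

	printf("count=%u rem=%u usec\n", v >> 24, v & BEACON_TIME_MASK_LOW);
	return 0;
}
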
4a8a4322 2577static int iwl3945_get_measurement(struct iwl_priv *priv,
b481de9c
ZY
2578 struct ieee80211_measurement_params *params,
2579 u8 type)
2580{
600c0e11 2581 struct iwl_spectrum_cmd spectrum;
3d24a9f7 2582 struct iwl_rx_packet *res;
c2d79b48 2583 struct iwl_host_cmd cmd = {
b481de9c
ZY
2584 .id = REPLY_SPECTRUM_MEASUREMENT_CMD,
2585 .data = (void *)&spectrum,
2586 .meta.flags = CMD_WANT_SKB,
2587 };
2588 u32 add_time = le64_to_cpu(params->start_time);
2589 int rc;
2590 int spectrum_resp_status;
2591 int duration = le16_to_cpu(params->duration);
2592
bb8c093b 2593 if (iwl3945_is_associated(priv))
b481de9c 2594 add_time =
bb8c093b 2595 iwl3945_usecs_to_beacons(
b481de9c
ZY
2596 le64_to_cpu(params->start_time) - priv->last_tsf,
2597 le16_to_cpu(priv->rxon_timing.beacon_interval));
2598
2599 memset(&spectrum, 0, sizeof(spectrum));
2600
2601 spectrum.channel_count = cpu_to_le16(1);
2602 spectrum.flags =
2603 RXON_FLG_TSF2HOST_MSK | RXON_FLG_ANT_A_MSK | RXON_FLG_DIS_DIV_MSK;
2604 spectrum.filter_flags = MEASUREMENT_FILTER_FLAG;
2605 cmd.len = sizeof(spectrum);
2606 spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len));
2607
bb8c093b 2608 if (iwl3945_is_associated(priv))
b481de9c 2609 spectrum.start_time =
bb8c093b 2610 iwl3945_add_beacon_time(priv->last_beacon_time,
b481de9c
ZY
2611 add_time,
2612 le16_to_cpu(priv->rxon_timing.beacon_interval));
2613 else
2614 spectrum.start_time = 0;
2615
2616 spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT);
2617 spectrum.channels[0].channel = params->channel;
2618 spectrum.channels[0].type = type;
f2c7e521 2619 if (priv->active39_rxon.flags & RXON_FLG_BAND_24G_MSK)
b481de9c
ZY
2620 spectrum.flags |= RXON_FLG_BAND_24G_MSK |
2621 RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK;
2622
bb8c093b 2623 rc = iwl3945_send_cmd_sync(priv, &cmd);
b481de9c
ZY
2624 if (rc)
2625 return rc;
2626
3d24a9f7 2627 res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
b481de9c 2628 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
15b1687c 2629 IWL_ERR(priv, "Bad return from REPLY_RX_ON_ASSOC command\n");
b481de9c
ZY
2630 rc = -EIO;
2631 }
2632
2633 spectrum_resp_status = le16_to_cpu(res->u.spectrum.status);
2634 switch (spectrum_resp_status) {
2635 case 0: /* Command will be handled */
2636 if (res->u.spectrum.id != 0xff) {
bc434dd2
IS
2637 IWL_DEBUG_INFO("Replaced existing measurement: %d\n",
2638 res->u.spectrum.id);
b481de9c
ZY
2639 priv->measurement_status &= ~MEASUREMENT_READY;
2640 }
2641 priv->measurement_status |= MEASUREMENT_ACTIVE;
2642 rc = 0;
2643 break;
2644
2645 case 1: /* Command will not be handled */
2646 rc = -EAGAIN;
2647 break;
2648 }
2649
2650 dev_kfree_skb_any(cmd.meta.u.skb);
2651
2652 return rc;
2653}
2654#endif
2655
4a8a4322 2656static void iwl3945_rx_reply_alive(struct iwl_priv *priv,
6100b588 2657 struct iwl_rx_mem_buffer *rxb)
b481de9c 2658{
3d24a9f7
TW
2659 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
2660 struct iwl_alive_resp *palive;
b481de9c
ZY
2661 struct delayed_work *pwork;
2662
2663 palive = &pkt->u.alive_frame;
2664
2665 IWL_DEBUG_INFO("Alive ucode status 0x%08X revision "
2666 "0x%01X 0x%01X\n",
2667 palive->is_valid, palive->ver_type,
2668 palive->ver_subtype);
2669
2670 if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
2671 IWL_DEBUG_INFO("Initialization Alive received.\n");
3d24a9f7
TW
2672 memcpy(&priv->card_alive_init, &pkt->u.alive_frame,
2673 sizeof(struct iwl_alive_resp));
b481de9c
ZY
2674 pwork = &priv->init_alive_start;
2675 } else {
2676 IWL_DEBUG_INFO("Runtime Alive received.\n");
2677 memcpy(&priv->card_alive, &pkt->u.alive_frame,
3d24a9f7 2678 sizeof(struct iwl_alive_resp));
b481de9c 2679 pwork = &priv->alive_start;
bb8c093b 2680 iwl3945_disable_events(priv);
b481de9c
ZY
2681 }
2682
2683 /* We delay the ALIVE response by 5ms to
2684 * give the HW RF Kill time to activate... */
2685 if (palive->is_valid == UCODE_VALID_OK)
2686 queue_delayed_work(priv->workqueue, pwork,
2687 msecs_to_jiffies(5));
2688 else
39aadf8c 2689 IWL_WARN(priv, "uCode did not respond OK.\n");
b481de9c
ZY
2690}
2691
4a8a4322 2692static void iwl3945_rx_reply_add_sta(struct iwl_priv *priv,
6100b588 2693 struct iwl_rx_mem_buffer *rxb)
b481de9c 2694{
c7e035a9 2695#ifdef CONFIG_IWLWIFI_DEBUG
3d24a9f7 2696 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
c7e035a9 2697#endif
b481de9c
ZY
2698
2699 IWL_DEBUG_RX("Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status);
2700 return;
2701}
2702
4a8a4322 2703static void iwl3945_rx_reply_error(struct iwl_priv *priv,
6100b588 2704 struct iwl_rx_mem_buffer *rxb)
b481de9c 2705{
3d24a9f7 2706 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
b481de9c 2707
15b1687c 2708 IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) "
b481de9c
ZY
2709 "seq 0x%04X ser 0x%08X\n",
2710 le32_to_cpu(pkt->u.err_resp.error_type),
2711 get_cmd_string(pkt->u.err_resp.cmd_id),
2712 pkt->u.err_resp.cmd_id,
2713 le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
2714 le32_to_cpu(pkt->u.err_resp.error_info));
2715}
2716
2717#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x
2718
4a8a4322 2719static void iwl3945_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
b481de9c 2720{
3d24a9f7 2721 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
f2c7e521 2722 struct iwl3945_rxon_cmd *rxon = (void *)&priv->active39_rxon;
600c0e11 2723 struct iwl_csa_notification *csa = &(pkt->u.csa_notif);
b481de9c
ZY
2724 IWL_DEBUG_11H("CSA notif: channel %d, status %d\n",
2725 le16_to_cpu(csa->channel), le32_to_cpu(csa->status));
2726 rxon->channel = csa->channel;
f2c7e521 2727 priv->staging39_rxon.channel = csa->channel;
b481de9c
ZY
2728}
2729
4a8a4322 2730static void iwl3945_rx_spectrum_measure_notif(struct iwl_priv *priv,
6100b588 2731 struct iwl_rx_mem_buffer *rxb)
b481de9c 2732{
c8b0e6e1 2733#ifdef CONFIG_IWL3945_SPECTRUM_MEASUREMENT
3d24a9f7 2734 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
600c0e11 2735 struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif);
b481de9c
ZY
2736
2737 if (!report->state) {
2738 IWL_DEBUG(IWL_DL_11H | IWL_DL_INFO,
2739 "Spectrum Measure Notification: Start\n");
2740 return;
2741 }
2742
2743 memcpy(&priv->measure_report, report, sizeof(*report));
2744 priv->measurement_status |= MEASUREMENT_READY;
2745#endif
2746}
2747
4a8a4322 2748static void iwl3945_rx_pm_sleep_notif(struct iwl_priv *priv,
6100b588 2749 struct iwl_rx_mem_buffer *rxb)
b481de9c 2750{
c8b0e6e1 2751#ifdef CONFIG_IWL3945_DEBUG
3d24a9f7 2752 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
600c0e11 2753 struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif);
b481de9c
ZY
2754 IWL_DEBUG_RX("sleep mode: %d, src: %d\n",
2755 sleep->pm_sleep_mode, sleep->pm_wakeup_src);
2756#endif
2757}
2758
4a8a4322 2759static void iwl3945_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
6100b588 2760 struct iwl_rx_mem_buffer *rxb)
b481de9c 2761{
3d24a9f7 2762 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
b481de9c
ZY
2763 IWL_DEBUG_RADIO("Dumping %d bytes of unhandled "
2764 "notification for %s:\n",
2765 le32_to_cpu(pkt->len), get_cmd_string(pkt->hdr.cmd));
40b8ec0b
SO
2766 iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw,
2767 le32_to_cpu(pkt->len));
b481de9c
ZY
2768}
2769
bb8c093b 2770static void iwl3945_bg_beacon_update(struct work_struct *work)
b481de9c 2771{
4a8a4322
AK
2772 struct iwl_priv *priv =
2773 container_of(work, struct iwl_priv, beacon_update);
b481de9c
ZY
2774 struct sk_buff *beacon;
2775
2776 /* Pull updated AP beacon from mac80211. will fail if not in AP mode */
e039fa4a 2777 beacon = ieee80211_beacon_get(priv->hw, priv->vif);
b481de9c
ZY
2778
2779 if (!beacon) {
15b1687c 2780 IWL_ERR(priv, "update beacon failed\n");
b481de9c
ZY
2781 return;
2782 }
2783
2784 mutex_lock(&priv->mutex);
2785 /* new beacon skb is allocated every time; dispose previous.*/
2786 if (priv->ibss_beacon)
2787 dev_kfree_skb(priv->ibss_beacon);
2788
2789 priv->ibss_beacon = beacon;
2790 mutex_unlock(&priv->mutex);
2791
bb8c093b 2792 iwl3945_send_beacon_cmd(priv);
b481de9c
ZY
2793}
2794
4a8a4322 2795static void iwl3945_rx_beacon_notif(struct iwl_priv *priv,
6100b588 2796 struct iwl_rx_mem_buffer *rxb)
b481de9c 2797{
c8b0e6e1 2798#ifdef CONFIG_IWL3945_DEBUG
3d24a9f7 2799 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
bb8c093b 2800 struct iwl3945_beacon_notif *beacon = &(pkt->u.beacon_status);
b481de9c
ZY
2801 u8 rate = beacon->beacon_notify_hdr.rate;
2802
2803 IWL_DEBUG_RX("beacon status %x retries %d iss %d "
2804 "tsf %d %d rate %d\n",
2805 le32_to_cpu(beacon->beacon_notify_hdr.status) & TX_STATUS_MSK,
2806 beacon->beacon_notify_hdr.failure_frame,
2807 le32_to_cpu(beacon->ibss_mgr_status),
2808 le32_to_cpu(beacon->high_tsf),
2809 le32_to_cpu(beacon->low_tsf), rate);
2810#endif
2811
05c914fe 2812 if ((priv->iw_mode == NL80211_IFTYPE_AP) &&
b481de9c
ZY
2813 (!test_bit(STATUS_EXIT_PENDING, &priv->status)))
2814 queue_work(priv->workqueue, &priv->beacon_update);
2815}
2816
2817/* Service response to REPLY_SCAN_CMD (0x80) */
4a8a4322 2818static void iwl3945_rx_reply_scan(struct iwl_priv *priv,
6100b588 2819 struct iwl_rx_mem_buffer *rxb)
b481de9c 2820{
c8b0e6e1 2821#ifdef CONFIG_IWL3945_DEBUG
3d24a9f7 2822 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
4c897253
TW
2823 struct iwl_scanreq_notification *notif =
2824 (struct iwl_scanreq_notification *)pkt->u.raw;
b481de9c
ZY
2825
2826 IWL_DEBUG_RX("Scan request status = 0x%x\n", notif->status);
2827#endif
2828}
2829
2830/* Service SCAN_START_NOTIFICATION (0x82) */
4a8a4322 2831static void iwl3945_rx_scan_start_notif(struct iwl_priv *priv,
6100b588 2832 struct iwl_rx_mem_buffer *rxb)
b481de9c 2833{
3d24a9f7 2834 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
4c897253
TW
2835 struct iwl_scanstart_notification *notif =
2836 (struct iwl_scanstart_notification *)pkt->u.raw;
b481de9c
ZY
2837 priv->scan_start_tsf = le32_to_cpu(notif->tsf_low);
2838 IWL_DEBUG_SCAN("Scan start: "
2839 "%d [802.11%s] "
2840 "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n",
2841 notif->channel,
2842 notif->band ? "bg" : "a",
2843 notif->tsf_high,
2844 notif->tsf_low, notif->status, notif->beacon_timer);
2845}
2846
2847/* Service SCAN_RESULTS_NOTIFICATION (0x83) */
4a8a4322 2848static void iwl3945_rx_scan_results_notif(struct iwl_priv *priv,
6100b588 2849 struct iwl_rx_mem_buffer *rxb)
b481de9c 2850{
c7e035a9 2851#ifdef CONFIG_IWLWIFI_DEBUG
3d24a9f7 2852 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
4c897253
TW
2853 struct iwl_scanresults_notification *notif =
2854 (struct iwl_scanresults_notification *)pkt->u.raw;
c7e035a9 2855#endif
b481de9c
ZY
2856
2857 IWL_DEBUG_SCAN("Scan ch.res: "
2858 "%d [802.11%s] "
2859 "(TSF: 0x%08X:%08X) - %d "
2860 "elapsed=%lu usec (%dms since last)\n",
2861 notif->channel,
2862 notif->band ? "bg" : "a",
2863 le32_to_cpu(notif->tsf_high),
2864 le32_to_cpu(notif->tsf_low),
2865 le32_to_cpu(notif->statistics[0]),
2866 le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf,
2867 jiffies_to_msecs(elapsed_jiffies
2868 (priv->last_scan_jiffies, jiffies)));
2869
2870 priv->last_scan_jiffies = jiffies;
7878a5a4 2871 priv->next_scan_jiffies = 0;
b481de9c
ZY
2872}
2873
2874/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
4a8a4322 2875static void iwl3945_rx_scan_complete_notif(struct iwl_priv *priv,
6100b588 2876 struct iwl_rx_mem_buffer *rxb)
b481de9c 2877{
c7e035a9 2878#ifdef CONFIG_IWLWIFI_DEBUG
3d24a9f7 2879 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
4c897253 2880 struct iwl_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
c7e035a9 2881#endif
b481de9c
ZY
2882
2883 IWL_DEBUG_SCAN("Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
2884 scan_notif->scanned_channels,
2885 scan_notif->tsf_low,
2886 scan_notif->tsf_high, scan_notif->status);
2887
2888 /* The HW is no longer scanning */
2889 clear_bit(STATUS_SCAN_HW, &priv->status);
2890
2891 /* The scan completion notification came in, so kill that timer... */
2892 cancel_delayed_work(&priv->scan_check);
2893
2894 IWL_DEBUG_INFO("Scan pass on %sGHz took %dms\n",
66b5004d
RR
2895 (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ)) ?
2896 "2.4" : "5.2",
b481de9c
ZY
2897 jiffies_to_msecs(elapsed_jiffies
2898 (priv->scan_pass_start, jiffies)));
2899
66b5004d
RR
2900 /* Remove this scanned band from the list of pending
2901 * bands to scan, band G precedes A in order of scanning
2902 * as seen in iwl3945_bg_request_scan */
2903 if (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ))
2904 priv->scan_bands &= ~BIT(IEEE80211_BAND_2GHZ);
2905 else if (priv->scan_bands & BIT(IEEE80211_BAND_5GHZ))
2906 priv->scan_bands &= ~BIT(IEEE80211_BAND_5GHZ);
b481de9c
ZY
2907
2908 /* If a request to abort was given, or the scan did not succeed
2909 * then we reset the scan state machine and terminate,
2910 * re-queuing another scan if one has been requested */
2911 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
2912 IWL_DEBUG_INFO("Aborted scan completed.\n");
2913 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
2914 } else {
2915 /* If there are more bands on this scan pass reschedule */
2916 if (priv->scan_bands > 0)
2917 goto reschedule;
2918 }
2919
2920 priv->last_scan_jiffies = jiffies;
7878a5a4 2921 priv->next_scan_jiffies = 0;
b481de9c
ZY
2922 IWL_DEBUG_INFO("Setting scan to off\n");
2923
2924 clear_bit(STATUS_SCANNING, &priv->status);
2925
2926 IWL_DEBUG_INFO("Scan took %dms\n",
2927 jiffies_to_msecs(elapsed_jiffies(priv->scan_start, jiffies)));
2928
2929 queue_work(priv->workqueue, &priv->scan_completed);
2930
2931 return;
2932
2933reschedule:
2934 priv->scan_pass_start = jiffies;
2935 queue_work(priv->workqueue, &priv->request_scan);
2936}
2937
2938/* Handle notification from uCode that card's power state is changing
2939 * due to software, hardware, or critical temperature RFKILL */
4a8a4322 2940static void iwl3945_rx_card_state_notif(struct iwl_priv *priv,
6100b588 2941 struct iwl_rx_mem_buffer *rxb)
b481de9c 2942{
3d24a9f7 2943 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
b481de9c
ZY
2944 u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
2945 unsigned long status = priv->status;
2946
2947 IWL_DEBUG_RF_KILL("Card state received: HW:%s SW:%s\n",
2948 (flags & HW_CARD_DISABLED) ? "Kill" : "On",
2949 (flags & SW_CARD_DISABLED) ? "Kill" : "On");
2950
5d49f498 2951 iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
b481de9c
ZY
2952 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2953
2954 if (flags & HW_CARD_DISABLED)
2955 set_bit(STATUS_RF_KILL_HW, &priv->status);
2956 else
2957 clear_bit(STATUS_RF_KILL_HW, &priv->status);
2958
2959
2960 if (flags & SW_CARD_DISABLED)
2961 set_bit(STATUS_RF_KILL_SW, &priv->status);
2962 else
2963 clear_bit(STATUS_RF_KILL_SW, &priv->status);
2964
af0053d6 2965 iwl_scan_cancel(priv);
b481de9c
ZY
2966
2967 if ((test_bit(STATUS_RF_KILL_HW, &status) !=
2968 test_bit(STATUS_RF_KILL_HW, &priv->status)) ||
2969 (test_bit(STATUS_RF_KILL_SW, &status) !=
2970 test_bit(STATUS_RF_KILL_SW, &priv->status)))
2971 queue_work(priv->workqueue, &priv->rf_kill);
2972 else
2973 wake_up_interruptible(&priv->wait_command_queue);
2974}
2975
2976/**
bb8c093b 2977 * iwl3945_setup_rx_handlers - Initialize Rx handler callbacks
b481de9c
ZY
2978 *
2979 * Setup the RX handlers for each of the reply types sent from the uCode
2980 * to the host.
2981 *
2982 * This function chains into the hardware specific files for them to setup
2983 * any hardware specific handlers as well.
2984 */
4a8a4322 2985static void iwl3945_setup_rx_handlers(struct iwl_priv *priv)
b481de9c 2986{
bb8c093b
CH
2987 priv->rx_handlers[REPLY_ALIVE] = iwl3945_rx_reply_alive;
2988 priv->rx_handlers[REPLY_ADD_STA] = iwl3945_rx_reply_add_sta;
2989 priv->rx_handlers[REPLY_ERROR] = iwl3945_rx_reply_error;
2990 priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl3945_rx_csa;
b481de9c 2991 priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
bb8c093b
CH
2992 iwl3945_rx_spectrum_measure_notif;
2993 priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl3945_rx_pm_sleep_notif;
b481de9c 2994 priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
bb8c093b
CH
2995 iwl3945_rx_pm_debug_statistics_notif;
2996 priv->rx_handlers[BEACON_NOTIFICATION] = iwl3945_rx_beacon_notif;
b481de9c 2997
9fbab516
BC
2998 /*
2999 * The same handler is used for both the REPLY to a discrete
3000 * statistics request from the host as well as for the periodic
3001 * statistics notifications (after received beacons) from the uCode.
b481de9c 3002 */
bb8c093b
CH
3003 priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl3945_hw_rx_statistics;
3004 priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl3945_hw_rx_statistics;
b481de9c 3005
bb8c093b
CH
3006 priv->rx_handlers[REPLY_SCAN_CMD] = iwl3945_rx_reply_scan;
3007 priv->rx_handlers[SCAN_START_NOTIFICATION] = iwl3945_rx_scan_start_notif;
b481de9c 3008 priv->rx_handlers[SCAN_RESULTS_NOTIFICATION] =
bb8c093b 3009 iwl3945_rx_scan_results_notif;
b481de9c 3010 priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] =
bb8c093b
CH
3011 iwl3945_rx_scan_complete_notif;
3012 priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl3945_rx_card_state_notif;
b481de9c 3013
9fbab516 3014 /* Set up hardware specific Rx handlers */
bb8c093b 3015 iwl3945_hw_rx_handler_setup(priv);
b481de9c
ZY
3016}
3017
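The Rx handler setup above is a plain table of function pointers indexed by the notification id; the Rx path simply looks up rx_handlers[pkt->hdr.cmd] and calls the entry when it is non-NULL. Below is a self-contained model of that dispatch pattern; the packet layout, command ids and handler signature are made up for illustration and do not match the driver's definitions.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical packet and handler types, for illustration only. */
struct pkt {
	uint8_t cmd;		/* notification/command id from the uCode */
	const char *payload;
};

typedef void (*rx_handler_t)(const struct pkt *pkt);

#define CMD_ALIVE	0x01	/* made-up ids, not the driver's values */
#define CMD_SCAN_DONE	0x84
#define MAX_CMD		0x100

static rx_handler_t rx_handlers[MAX_CMD];

static void handle_alive(const struct pkt *pkt)
{
	printf("ALIVE: %s\n", pkt->payload);
}

static void handle_scan_done(const struct pkt *pkt)
{
	printf("SCAN COMPLETE: %s\n", pkt->payload);
}

/* Dispatch: look the handler up by command id, fall back to a log line. */
static void rx_dispatch(const struct pkt *pkt)
{
	if (rx_handlers[pkt->cmd])
		rx_handlers[pkt->cmd](pkt);
	else
		printf("unhandled notification 0x%02x\n", pkt->cmd);
}

int main(void)
{
	rx_handlers[CMD_ALIVE] = handle_alive;
	rx_handlers[CMD_SCAN_DONE] = handle_scan_done;

	rx_dispatch(&(struct pkt){ .cmd = CMD_ALIVE, .payload = "ucode ok" });
	rx_dispatch(&(struct pkt){ .cmd = 0x42, .payload = "?" });
	return 0;
}
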
91c066f2
TW
3018/**
3019 * iwl3945_cmd_queue_reclaim - Reclaim CMD queue entries
3020 * When FW advances 'R' index, all entries between old and new 'R' index
3021 * need to be reclaimed.
3022 */
4a8a4322 3023static void iwl3945_cmd_queue_reclaim(struct iwl_priv *priv,
91c066f2
TW
3024 int txq_id, int index)
3025{
188cf6c7 3026 struct iwl_tx_queue *txq = &priv->txq[txq_id];
d20b3c65 3027 struct iwl_queue *q = &txq->q;
91c066f2
TW
3028 int nfreed = 0;
3029
625a381a 3030 if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
15b1687c 3031 IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
91c066f2
TW
3032 "is out of range [0-%d] %d %d.\n", txq_id,
3033 index, q->n_bd, q->write_ptr, q->read_ptr);
3034 return;
3035 }
3036
3037 for (index = iwl_queue_inc_wrap(index, q->n_bd); q->read_ptr != index;
3038 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
3039 if (nfreed > 1) {
15b1687c 3040 IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", index,
91c066f2
TW
3041 q->write_ptr, q->read_ptr);
3042 queue_work(priv->workqueue, &priv->restart);
3043 break;
3044 }
3045 nfreed++;
3046 }
3047}
3048
3049
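iwl3945_cmd_queue_reclaim() walks a circular index: starting one slot past the index reported by the firmware, the read pointer is advanced with wrap-around until it reaches that slot, and freeing more than one entry on the command queue is flagged as an error. A compact userspace model of the wrap-around walk; the ring size and the sample indexes are arbitrary.

#include <stdio.h>

#define N_BD 8	/* illustrative ring size; the driver's queues are larger */

static int queue_inc_wrap(int index, int n_bd)
{
	return (index + 1) % n_bd;	/* stand-in for iwl_queue_inc_wrap() */
}

/* Advance read_ptr until it has moved one slot past 'index', counting
 * how many entries were reclaimed along the way. */
static int reclaim(int *read_ptr, int index)
{
	int nfreed = 0;

	for (index = queue_inc_wrap(index, N_BD); *read_ptr != index;
	     *read_ptr = queue_inc_wrap(*read_ptr, N_BD))
		nfreed++;

	return nfreed;
}

int main(void)
{
	int read_ptr = 6;

	/* The firmware reports it has consumed up to slot 1 (wrapping past
	 * 7 and 0), so slots 6, 7, 0 and 1 are reclaimed. */
	int n = reclaim(&read_ptr, 1);

	printf("freed %d, read_ptr now %d\n", n, read_ptr);	/* freed 4, read_ptr now 2 */
	return 0;
}
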
b481de9c 3050/**
bb8c093b 3051 * iwl3945_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
b481de9c
ZY
3052 * @rxb: Rx buffer to reclaim
3053 *
3054 * If an Rx buffer has an async callback associated with it the callback
3055 * will be executed. The attached skb (if present) will only be freed
3056 * if the callback returns 1
3057 */
4a8a4322 3058static void iwl3945_tx_cmd_complete(struct iwl_priv *priv,
6100b588 3059 struct iwl_rx_mem_buffer *rxb)
b481de9c 3060{
3d24a9f7 3061 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
b481de9c
ZY
3062 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
3063 int txq_id = SEQ_TO_QUEUE(sequence);
3064 int index = SEQ_TO_INDEX(sequence);
600c0e11 3065 int huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
b481de9c 3066 int cmd_index;
c2d79b48 3067 struct iwl_cmd *cmd;
b481de9c 3068
638d0eb9
CR
3069 if (WARN(txq_id != IWL_CMD_QUEUE_NUM,
3070 "wrong command queue %d, sequence 0x%X readp=%d writep=%d\n",
3071 txq_id, sequence,
3072 priv->txq[IWL_CMD_QUEUE_NUM].q.read_ptr,
3073 priv->txq[IWL_CMD_QUEUE_NUM].q.write_ptr)) {
3074 iwl_print_hex_dump(priv, IWL_DL_INFO , rxb, 32);
3075 return;
3076 }
b481de9c 3077
188cf6c7
SO
3078 cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge);
3079 cmd = priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];
b481de9c
ZY
3080
3081 /* Input error checking is done when commands are added to queue. */
3082 if (cmd->meta.flags & CMD_WANT_SKB) {
3083 cmd->meta.source->u.skb = rxb->skb;
3084 rxb->skb = NULL;
3085 } else if (cmd->meta.u.callback &&
3086 !cmd->meta.u.callback(priv, cmd, rxb->skb))
3087 rxb->skb = NULL;
3088
91c066f2 3089 iwl3945_cmd_queue_reclaim(priv, txq_id, index);
b481de9c
ZY
3090
3091 if (!(cmd->meta.flags & CMD_ASYNC)) {
3092 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
3093 wake_up_interruptible(&priv->wait_command_queue);
3094 }
3095}
3096
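The WARN in iwl3945_tx_cmd_complete() fires when the queue id decoded from the response's sequence number is not the command queue — exactly the situation this debugging aid is meant to expose. The sequence number is a small packed value: the queue id and TFD index are stored with QUEUE_TO_SEQ()/INDEX_TO_SEQ() when the command is queued (see iwl3945_tx_skb() above) and recovered here with SEQ_TO_QUEUE()/SEQ_TO_INDEX(). The sketch below shows such a pack/unpack round trip; the bit layout (index in the low byte, queue id in the next five bits) and the command queue number are assumptions for illustration, not a statement of the driver's exact definitions.

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

/* Illustrative layout only: low 8 bits = TFD index, next 5 bits = queue id. */
#define SEQ_TO_INDEX(s)		((s) & 0xff)
#define INDEX_TO_SEQ(i)		((i) & 0xff)
#define SEQ_TO_QUEUE(s)		(((s) >> 8) & 0x1f)
#define QUEUE_TO_SEQ(q)		(((q) & 0x1f) << 8)

#define CMD_QUEUE_NUM	4	/* assumed stand-in for IWL_CMD_QUEUE_NUM */

int main(void)
{
	uint16_t seq = QUEUE_TO_SEQ(CMD_QUEUE_NUM) | INDEX_TO_SEQ(37);

	/* On completion the driver decodes the same two fields back. */
	assert(SEQ_TO_QUEUE(seq) == CMD_QUEUE_NUM);
	assert(SEQ_TO_INDEX(seq) == 37);

	/* A response decoding to a different queue would trip the WARN above. */
	printf("seq=0x%04x queue=%d index=%d\n",
	       seq, SEQ_TO_QUEUE(seq), SEQ_TO_INDEX(seq));
	return 0;
}
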
/************************** RX-FUNCTIONS ****************************/
/*
 * Rx theory of operation
 *
 * The host allocates 32 DMA target addresses and passes the host address
 * to the firmware at register IWL_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
 * 0 to 31
 *
 * Rx Queue Indexes
 * The host/firmware share two index registers for managing the Rx buffers.
 *
 * The READ index maps to the first position that the firmware may be writing
 * to -- the driver can read up to (but not including) this position and get
 * good data.
 * The READ index is managed by the firmware once the card is enabled.
 *
 * The WRITE index maps to the last position the driver has read from -- the
 * position preceding WRITE is the last slot the firmware can place a packet.
 *
 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
 * WRITE = READ.
 *
 * During initialization, the host sets up the READ queue position to the first
 * INDEX position, and WRITE to the last (READ - 1 wrapped)
 *
 * When the firmware places a packet in a buffer, it will advance the READ index
 * and fire the RX interrupt.  The driver can then query the READ index and
 * process as many packets as possible, moving the WRITE index forward as it
 * resets the Rx queue buffers with new memory.
 *
 * The management in the driver is as follows:
 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free.  When
 *   iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
 *   to replenish the iwl->rxq->rx_free.
 * + In iwl3945_rx_replenish (scheduled) if 'processed' != 'read' then the
 *   iwl->rxq is replenished and the READ INDEX is updated (updating the
 *   'processed' and 'read' driver indexes as well)
 * + A received packet is processed and handed to the kernel network stack,
 *   detached from the iwl->rxq.  The driver 'processed' index is updated.
 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
 *   list.  If there are no allocated buffers in iwl->rxq->rx_free, the READ
 *   INDEX is not incremented and iwl->status(RX_STALLED) is set.  If there
 *   were enough free buffers and RX_STALLED is set it is cleared.
 *
 *
 * Driver sequence:
 *
 * iwl3945_rx_replenish()     Replenishes rx_free list from rx_used, and calls
 *                            iwl3945_rx_queue_restock
 * iwl3945_rx_queue_restock() Moves available buffers from rx_free into Rx
 *                            queue, updates firmware pointers, and updates
 *                            the WRITE index.  If insufficient rx_free buffers
 *                            are available, schedules iwl3945_rx_replenish
 *
 * -- enable interrupts --
 * ISR - iwl3945_rx()         Detach iwl_rx_mem_buffers from pool up to the
 *                            READ INDEX, detaching the SKB from the pool.
 *                            Moves the packet buffer from queue to rx_used.
 *                            Calls iwl3945_rx_queue_restock to refill any
 *                            empty slots.
 * ...
 *
 */

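/*
 * Illustration only (not driver code): a stand-alone user-space sketch of
 * the read/write index arithmetic described above, assuming a power-of-two
 * ring like the RX_QUEUE_MASK-based queue used here.  All names below
 * (demo_*) are hypothetical, and the small "-2" guard against a full ring
 * looking identical to an empty one is an assumption of this sketch.
 */
#include <stdio.h>

#define DEMO_RX_QUEUE_SIZE 256

struct demo_rxq {
	int read;	/* first slot the firmware may still be writing */
	int write;	/* last slot the driver has handed back */
};

/* Slots the driver could still hand to the firmware. */
static int demo_rx_queue_space(const struct demo_rxq *q)
{
	int s = q->read - q->write;

	if (s <= 0)
		s += DEMO_RX_QUEUE_SIZE;
	s -= 2;		/* guard so full and empty are never confused */
	if (s < 0)
		s = 0;
	return s;
}

int main(void)
{
	struct demo_rxq q = { .read = 10, .write = 200 };

	printf("space = %d\n", demo_rx_queue_space(&q));
	return 0;
}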
/**
 * iwl3945_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
 */
static inline __le32 iwl3945_dma_addr2rbd_ptr(struct iwl_priv *priv,
					      dma_addr_t dma_addr)
{
	return cpu_to_le32((u32)dma_addr);
}

/**
 * iwl3945_rx_queue_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' index forward to catch up with 'processed', and
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
static int iwl3945_rx_queue_restock(struct iwl_priv *priv)
{
	struct iwl_rx_queue *rxq = &priv->rxq;
	struct list_head *element;
	struct iwl_rx_mem_buffer *rxb;
	unsigned long flags;
	int write, rc;

	spin_lock_irqsave(&rxq->lock, flags);
	write = rxq->write & ~0x7;
	while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
		/* Get next free Rx buffer, remove from free list */
		element = rxq->rx_free.next;
		rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
		list_del(element);

		/* Point to Rx buffer via next RBD in circular buffer */
		rxq->bd[rxq->write] = iwl3945_dma_addr2rbd_ptr(priv, rxb->real_dma_addr);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock_irqrestore(&rxq->lock, flags);

	/* If the pre-allocated buffer pool is dropping low, schedule to
	 * refill it */
	if (rxq->free_count <= RX_LOW_WATERMARK)
		queue_work(priv->workqueue, &priv->rx_replenish);

	/* If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8. */
	if ((write != (rxq->write & ~0x7))
	    || (abs(rxq->write - rxq->read) > 7)) {
		spin_lock_irqsave(&rxq->lock, flags);
		rxq->need_update = 1;
		spin_unlock_irqrestore(&rxq->lock, flags);
		rc = iwl_rx_queue_update_write_ptr(priv, rxq);
		if (rc)
			return rc;
	}

	return 0;
}

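/*
 * Illustration only (not driver code): why restock compares 'write & ~0x7'.
 * The device is told about new Rx buffers in batches of 8, so an update is
 * flagged only when the write index crosses a multiple-of-8 boundary or
 * drifts far from 'read'.  demo_need_device_update() is a hypothetical
 * stand-alone version of that test.
 */
#include <stdbool.h>
#include <stdlib.h>

static bool demo_need_device_update(int old_write, int new_write, int read)
{
	/* round both write values down to a multiple of 8, as '& ~0x7' does */
	return ((old_write & ~0x7) != (new_write & ~0x7)) ||
	       (abs(new_write - read) > 7);
}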
/**
 * iwl3945_rx_replenish - Move all used packets from rx_used to rx_free
 *
 * When moving to rx_free an SKB is allocated for the slot.
 *
 * Also restock the Rx queue via iwl3945_rx_queue_restock.
 * This is called as a scheduled work item (except during initialization).
 */
static void iwl3945_rx_allocate(struct iwl_priv *priv)
{
	struct iwl_rx_queue *rxq = &priv->rxq;
	struct list_head *element;
	struct iwl_rx_mem_buffer *rxb;
	unsigned long flags;

	spin_lock_irqsave(&rxq->lock, flags);
	while (!list_empty(&rxq->rx_used)) {
		element = rxq->rx_used.next;
		rxb = list_entry(element, struct iwl_rx_mem_buffer, list);

		/* Alloc a new receive buffer */
		rxb->skb =
		    alloc_skb(priv->hw_params.rx_buf_size,
				__GFP_NOWARN | GFP_ATOMIC);
		if (!rxb->skb) {
			if (net_ratelimit())
				IWL_CRIT(priv, "Cannot allocate SKB buffers\n");
			/* We don't reschedule replenish work here -- we will
			 * call the restock method and if it still needs
			 * more buffers it will schedule replenish */
			break;
		}

		/* If a radiotap header is required, reserve some headroom here.
		 * The physical header length varies with rx_stats->phy_count;
		 * together with those bytes, the 4 bytes reserved here give
		 * enough headroom for the radiotap header that iwl3945
		 * supports.  See iwl3945_rt. */
		skb_reserve(rxb->skb, 4);

		priv->alloc_rxb_skb++;
		list_del(element);

		/* Get physical address of RB/SKB */
		rxb->real_dma_addr = pci_map_single(priv->pci_dev,
						rxb->skb->data,
						priv->hw_params.rx_buf_size,
						PCI_DMA_FROMDEVICE);
		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;
	}
	spin_unlock_irqrestore(&rxq->lock, flags);
}
3278
3279/*
3280 * this should be called while priv->lock is locked
3281 */
4fd1f841 3282static void __iwl3945_rx_replenish(void *data)
5c0eef96 3283{
4a8a4322 3284 struct iwl_priv *priv = data;
3285
3286 iwl3945_rx_allocate(priv);
3287 iwl3945_rx_queue_restock(priv);
3288}
3289
3290
3291void iwl3945_rx_replenish(void *data)
3292{
4a8a4322 3293 struct iwl_priv *priv = data;
3294 unsigned long flags;
3295
3296 iwl3945_rx_allocate(priv);
3297
3298 spin_lock_irqsave(&priv->lock, flags);
bb8c093b 3299 iwl3945_rx_queue_restock(priv);
3300 spin_unlock_irqrestore(&priv->lock, flags);
3301}
3302
3303/* Convert linear signal-to-noise ratio into dB */
3304static u8 ratio2dB[100] = {
3305/* 0 1 2 3 4 5 6 7 8 9 */
3306 0, 0, 6, 10, 12, 14, 16, 17, 18, 19, /* 00 - 09 */
3307 20, 21, 22, 22, 23, 23, 24, 25, 26, 26, /* 10 - 19 */
3308 26, 26, 26, 27, 27, 28, 28, 28, 29, 29, /* 20 - 29 */
3309 29, 30, 30, 30, 31, 31, 31, 31, 32, 32, /* 30 - 39 */
3310 32, 32, 32, 33, 33, 33, 33, 33, 34, 34, /* 40 - 49 */
3311 34, 34, 34, 34, 35, 35, 35, 35, 35, 35, /* 50 - 59 */
3312 36, 36, 36, 36, 36, 36, 36, 37, 37, 37, /* 60 - 69 */
3313 37, 37, 37, 37, 37, 38, 38, 38, 38, 38, /* 70 - 79 */
3314 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, /* 80 - 89 */
3315 39, 39, 39, 39, 39, 40, 40, 40, 40, 40 /* 90 - 99 */
3316};
3317
3318/* Calculates a relative dB value from a ratio of linear
3319 * (i.e. not dB) signal levels.
3320 * Conversion assumes that levels are voltages (20*log), not powers (10*log). */
bb8c093b 3321int iwl3945_calc_db_from_ratio(int sig_ratio)
b481de9c 3322{
3323 /* 1000:1 or higher just report as 60 dB */
3324 if (sig_ratio >= 1000)
3325 return 60;
3326
221c80cf 3327 /* 100:1 or higher, divide by 10 and use table,
b481de9c 3328 * add 20 dB to make up for divide by 10 */
221c80cf 3329 if (sig_ratio >= 100)
3ac7f146 3330 return 20 + (int)ratio2dB[sig_ratio/10];
3331
3332 /* We shouldn't see this */
3333 if (sig_ratio < 1)
3334 return 0;
3335
3336 /* Use table for ratios 1:1 - 99:1 */
3337 return (int)ratio2dB[sig_ratio];
3338}
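/*
 * Illustration only (not driver code): the table above stores rounded
 * values of 20*log10(ratio) for voltage ratios 1..99, which is why the
 * function adds 20 dB after dividing a 100:1..999:1 ratio by 10.  A
 * stand-alone check against libm (link with -lm):
 */
#include <math.h>
#include <stdio.h>

int main(void)
{
	int ratio;

	for (ratio = 2; ratio < 100; ratio += 7)
		printf("ratio %2d:1  ->  %5.2f dB\n",
		       ratio, 20.0 * log10(ratio));
	return 0;
}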
3339
3340#define PERFECT_RSSI (-20) /* dBm */
3341#define WORST_RSSI (-95) /* dBm */
3342#define RSSI_RANGE (PERFECT_RSSI - WORST_RSSI)
3343
3344/* Calculate an indication of rx signal quality (a percentage, not dBm!).
3345 * See http://www.ces.clemson.edu/linux/signal_quality.shtml for info
3346 * about formulas used below. */
bb8c093b 3347int iwl3945_calc_sig_qual(int rssi_dbm, int noise_dbm)
3348{
3349 int sig_qual;
3350 int degradation = PERFECT_RSSI - rssi_dbm;
3351
3352 /* If we get a noise measurement, use signal-to-noise ratio (SNR)
3353 * as indicator; formula is (signal dbm - noise dbm).
3354 * SNR at or above 40 is a great signal (100%).
3355 * Below that, scale to fit SNR of 0 - 40 dB within 0 - 100% indicator.
3356 * Weakest usable signal is usually 10 - 15 dB SNR. */
3357 if (noise_dbm) {
3358 if (rssi_dbm - noise_dbm >= 40)
3359 return 100;
3360 else if (rssi_dbm < noise_dbm)
3361 return 0;
3362 sig_qual = ((rssi_dbm - noise_dbm) * 5) / 2;
3363
3364 /* Else use just the signal level.
3365 * This formula is a least squares fit of data points collected and
3366 * compared with a reference system that had a percentage (%) display
3367 * for signal quality. */
3368 } else
3369 sig_qual = (100 * (RSSI_RANGE * RSSI_RANGE) - degradation *
3370 (15 * RSSI_RANGE + 62 * degradation)) /
3371 (RSSI_RANGE * RSSI_RANGE);
3372
3373 if (sig_qual > 100)
3374 sig_qual = 100;
3375 else if (sig_qual < 1)
3376 sig_qual = 0;
3377
3378 return sig_qual;
3379}
3380
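/*
 * Illustration only (not driver code): the signal-quality mapping above as
 * a stand-alone function, so the two branches (SNR-based vs. the RSSI-only
 * least-squares fit) are easy to experiment with.  The constants mirror the
 * #defines above; everything named demo_* is hypothetical.
 */
#include <stdio.h>

#define DEMO_PERFECT_RSSI (-20)
#define DEMO_WORST_RSSI   (-95)
#define DEMO_RSSI_RANGE   (DEMO_PERFECT_RSSI - DEMO_WORST_RSSI)

static int demo_sig_qual(int rssi_dbm, int noise_dbm)
{
	int degradation = DEMO_PERFECT_RSSI - rssi_dbm;
	int sig_qual;

	if (noise_dbm) {
		if (rssi_dbm - noise_dbm >= 40)
			return 100;
		if (rssi_dbm < noise_dbm)
			return 0;
		sig_qual = ((rssi_dbm - noise_dbm) * 5) / 2;
	} else {
		sig_qual = (100 * (DEMO_RSSI_RANGE * DEMO_RSSI_RANGE) -
			    degradation * (15 * DEMO_RSSI_RANGE +
					   62 * degradation)) /
			   (DEMO_RSSI_RANGE * DEMO_RSSI_RANGE);
	}

	if (sig_qual > 100)
		sig_qual = 100;
	else if (sig_qual < 1)
		sig_qual = 0;
	return sig_qual;
}

int main(void)
{
	printf("-60/-90 dBm -> %d%%, -70 dBm (no noise) -> %d%%\n",
	       demo_sig_qual(-60, -90), demo_sig_qual(-70, 0));
	return 0;
}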
3381/**
9fbab516 3382 * iwl3945_rx_handle - Main entry function for receiving responses from uCode
3383 *
3384 * Uses the priv->rx_handlers callback function array to invoke
3385 * the appropriate handlers, including command responses,
3386 * frame-received notifications, and other notifications.
3387 */
4a8a4322 3388static void iwl3945_rx_handle(struct iwl_priv *priv)
b481de9c 3389{
6100b588 3390 struct iwl_rx_mem_buffer *rxb;
3d24a9f7 3391 struct iwl_rx_packet *pkt;
cc2f362c 3392 struct iwl_rx_queue *rxq = &priv->rxq;
3393 u32 r, i;
3394 int reclaim;
3395 unsigned long flags;
5c0eef96 3396 u8 fill_rx = 0;
d68ab680 3397 u32 count = 8;
b481de9c 3398
3399 /* uCode's read index (stored in shared DRAM) indicates the last Rx
3400 * buffer that the driver may process (last buffer filled by ucode). */
8cd812bc 3401 r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
3402 i = rxq->read;
3403
37d68317 3404 if (iwl_rx_queue_space(rxq) > (RX_QUEUE_SIZE / 2))
5c0eef96 3405 fill_rx = 1;
3406 /* Rx interrupt, but nothing sent from uCode */
3407 if (i == r)
3408 IWL_DEBUG(IWL_DL_RX | IWL_DL_ISR, "r = %d, i = %d\n", r, i);
3409
3410 while (i != r) {
3411 rxb = rxq->queue[i];
3412
9fbab516 3413 /* If an RXB doesn't have a Rx queue slot associated with it,
3414 * then a bug has been introduced in the queue refilling
3415 * routines -- catch it here */
3416 BUG_ON(rxb == NULL);
3417
3418 rxq->queue[i] = NULL;
3419
6100b588 3420 pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->real_dma_addr,
1e33dc64 3421 priv->hw_params.rx_buf_size,
b481de9c 3422 PCI_DMA_FROMDEVICE);
3d24a9f7 3423 pkt = (struct iwl_rx_packet *)rxb->skb->data;
3424
3425 /* Reclaim a command buffer only if this packet is a response
3426 * to a (driver-originated) command.
3427 * If the packet (e.g. Rx frame) originated from uCode,
3428 * there is no command buffer to reclaim.
3429 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
3430 * but apparently a few don't get set; catch them here. */
3431 reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
3432 (pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
3433 (pkt->hdr.cmd != REPLY_TX);
3434
3435 /* Based on type of command response or notification,
3436 * handle those that need handling via function in
bb8c093b 3437 * rx_handlers table. See iwl3945_setup_rx_handlers() */
b481de9c 3438 if (priv->rx_handlers[pkt->hdr.cmd]) {
40b8ec0b 3439 IWL_DEBUG(IWL_DL_HCMD | IWL_DL_RX | IWL_DL_ISR,
3440 "r = %d, i = %d, %s, 0x%02x\n", r, i,
3441 get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
3442 priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
3443 } else {
3444 /* No handling needed */
40b8ec0b 3445 IWL_DEBUG(IWL_DL_HCMD | IWL_DL_RX | IWL_DL_ISR,
3446 "r %d i %d No handler needed for %s, 0x%02x\n",
3447 r, i, get_cmd_string(pkt->hdr.cmd),
3448 pkt->hdr.cmd);
3449 }
3450
3451 if (reclaim) {
3452 /* Invoke any callbacks, transfer the skb to caller, and
3453 * fire off the (possibly) blocking iwl3945_send_cmd()
3454 * as we reclaim the driver command queue */
3455 if (rxb && rxb->skb)
bb8c093b 3456 iwl3945_tx_cmd_complete(priv, rxb);
b481de9c 3457 else
39aadf8c 3458 IWL_WARN(priv, "Claim null rxb?\n");
3459 }
3460
3461 /* For now we just don't re-use anything. We can tweak this
3462 * later to try and re-use notification packets and SKBs that
3463 * fail to Rx correctly */
3464 if (rxb->skb != NULL) {
3465 priv->alloc_rxb_skb--;
3466 dev_kfree_skb_any(rxb->skb);
3467 rxb->skb = NULL;
3468 }
3469
6100b588 3470 pci_unmap_single(priv->pci_dev, rxb->real_dma_addr,
3471 priv->hw_params.rx_buf_size,
3472 PCI_DMA_FROMDEVICE);
3473 spin_lock_irqsave(&rxq->lock, flags);
3474 list_add_tail(&rxb->list, &priv->rxq.rx_used);
3475 spin_unlock_irqrestore(&rxq->lock, flags);
3476 i = (i + 1) & RX_QUEUE_MASK;
3477 /* If there are a lot of unused frames,
3478 * restock the Rx queue so ucode won't assert. */
3479 if (fill_rx) {
3480 count++;
3481 if (count >= 8) {
3482 priv->rxq.read = i;
3483 __iwl3945_rx_replenish(priv);
3484 count = 0;
3485 }
3486 }
3487 }
3488
3489 /* Backtrack one entry */
3490 priv->rxq.read = i;
bb8c093b 3491 iwl3945_rx_queue_restock(priv);
3492}
3493
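/*
 * Illustration only (not driver code): the "reclaim" decision above written
 * out as a tiny predicate -- reclaim a command buffer only for packets that
 * answer a driver-originated command.  The numeric values below are
 * placeholders (the real SEQ_RX_FRAME, STATISTICS_NOTIFICATION and REPLY_TX
 * definitions live in iwl-commands.h).
 */
#include <stdbool.h>
#include <stdint.h>

#define DEMO_SEQ_RX_FRAME	0x8000	/* placeholder "uCode-originated" bit */
#define DEMO_STATISTICS_NOTIF	0x9d	/* placeholder opcode */
#define DEMO_REPLY_TX		0x1c	/* placeholder opcode */

static bool demo_should_reclaim(uint16_t sequence, uint8_t cmd)
{
	return !(sequence & DEMO_SEQ_RX_FRAME) &&
	       cmd != DEMO_STATISTICS_NOTIF &&
	       cmd != DEMO_REPLY_TX;
}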
3494/**
3495 * iwl3945_tx_queue_update_write_ptr - Send new write index to hardware
3496 */
4a8a4322 3497static int iwl3945_tx_queue_update_write_ptr(struct iwl_priv *priv,
188cf6c7 3498 struct iwl_tx_queue *txq)
3499{
3500 u32 reg = 0;
3501 int rc = 0;
3502 int txq_id = txq->q.id;
3503
3504 if (txq->need_update == 0)
3505 return rc;
3506
3507 /* if we're trying to save power */
3508 if (test_bit(STATUS_POWER_PMI, &priv->status)) {
3509 /* wake up nic if it's powered down ...
3510 * uCode will wake up, and interrupt us again, so next
3511 * time we'll skip this part. */
5d49f498 3512 reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
3513
3514 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
3515 IWL_DEBUG_INFO("Requesting wakeup, GP1 = 0x%x\n", reg);
5d49f498 3516 iwl_set_bit(priv, CSR_GP_CNTRL,
3517 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
3518 return rc;
3519 }
3520
3521 /* restore this queue's parameters in nic hardware. */
5d49f498 3522 rc = iwl_grab_nic_access(priv);
3523 if (rc)
3524 return rc;
5d49f498 3525 iwl_write_direct32(priv, HBUS_TARG_WRPTR,
fc4b6853 3526 txq->q.write_ptr | (txq_id << 8));
5d49f498 3527 iwl_release_nic_access(priv);
3528
3529 /* else not in power-save mode, uCode will never sleep when we're
3530 * trying to tx (during RFKILL, we're not trying to tx). */
3531 } else
5d49f498 3532 iwl_write32(priv, HBUS_TARG_WRPTR,
fc4b6853 3533 txq->q.write_ptr | (txq_id << 8));
3534
3535 txq->need_update = 0;
3536
3537 return rc;
3538}
3539
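/*
 * Illustration only (not driver code): the HBUS_TARG_WRPTR value written
 * above packs the Tx queue's write index with the queue id shifted up by 8
 * bits.  The unpacking below assumes the index fits in the low byte, which
 * holds for the small per-queue ring sizes used by this driver; names are
 * hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t demo_pack_wrptr(uint32_t write_ptr, uint32_t txq_id)
{
	return write_ptr | (txq_id << 8);
}

int main(void)
{
	uint32_t v = demo_pack_wrptr(42, 4);

	printf("reg=0x%04x -> index %u, queue %u\n", v, v & 0xff, v >> 8);
	return 0;
}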
c8b0e6e1 3540#ifdef CONFIG_IWL3945_DEBUG
4a8a4322 3541static void iwl3945_print_rx_config_cmd(struct iwl_priv *priv,
40b8ec0b 3542 struct iwl3945_rxon_cmd *rxon)
b481de9c
ZY
3543{
3544 IWL_DEBUG_RADIO("RX CONFIG:\n");
40b8ec0b 3545 iwl_print_hex_dump(priv, IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
b481de9c
ZY
3546 IWL_DEBUG_RADIO("u16 channel: 0x%x\n", le16_to_cpu(rxon->channel));
3547 IWL_DEBUG_RADIO("u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
3548 IWL_DEBUG_RADIO("u32 filter_flags: 0x%08x\n",
3549 le32_to_cpu(rxon->filter_flags));
3550 IWL_DEBUG_RADIO("u8 dev_type: 0x%x\n", rxon->dev_type);
3551 IWL_DEBUG_RADIO("u8 ofdm_basic_rates: 0x%02x\n",
3552 rxon->ofdm_basic_rates);
3553 IWL_DEBUG_RADIO("u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates);
e174961c
JB
3554 IWL_DEBUG_RADIO("u8[6] node_addr: %pM\n", rxon->node_addr);
3555 IWL_DEBUG_RADIO("u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
b481de9c
ZY
3556 IWL_DEBUG_RADIO("u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id));
3557}
3558#endif
3559
4a8a4322 3560static void iwl3945_enable_interrupts(struct iwl_priv *priv)
b481de9c
ZY
3561{
3562 IWL_DEBUG_ISR("Enabling interrupts\n");
3563 set_bit(STATUS_INT_ENABLED, &priv->status);
5d49f498 3564 iwl_write32(priv, CSR_INT_MASK, CSR_INI_SET_MASK);
b481de9c
ZY
3565}
3566
0359facc
MA
3567
3568/* call this function to flush any scheduled tasklet */
4a8a4322 3569static inline void iwl_synchronize_irq(struct iwl_priv *priv)
0359facc 3570{
a96a27f9 3571 /* wait to make sure we flush pending tasklet*/
0359facc
MA
3572 synchronize_irq(priv->pci_dev->irq);
3573 tasklet_kill(&priv->irq_tasklet);
3574}
3575
3576
4a8a4322 3577static inline void iwl3945_disable_interrupts(struct iwl_priv *priv)
b481de9c
ZY
3578{
3579 clear_bit(STATUS_INT_ENABLED, &priv->status);
3580
3581 /* disable interrupts from uCode/NIC to host */
5d49f498 3582 iwl_write32(priv, CSR_INT_MASK, 0x00000000);
b481de9c
ZY
3583
3584 /* acknowledge/clear/reset any interrupts still pending
3585 * from uCode or flow handler (Rx/Tx DMA) */
5d49f498
AK
3586 iwl_write32(priv, CSR_INT, 0xffffffff);
3587 iwl_write32(priv, CSR_FH_INT_STATUS, 0xffffffff);
b481de9c
ZY
3588 IWL_DEBUG_ISR("Disabled interrupts\n");
3589}
3590
3591static const char *desc_lookup(int i)
3592{
3593 switch (i) {
3594 case 1:
3595 return "FAIL";
3596 case 2:
3597 return "BAD_PARAM";
3598 case 3:
3599 return "BAD_CHECKSUM";
3600 case 4:
3601 return "NMI_INTERRUPT";
3602 case 5:
3603 return "SYSASSERT";
3604 case 6:
3605 return "FATAL_ERROR";
3606 }
3607
3608 return "UNKNOWN";
3609}
3610
3611#define ERROR_START_OFFSET (1 * sizeof(u32))
3612#define ERROR_ELEM_SIZE (7 * sizeof(u32))
3613
4a8a4322 3614static void iwl3945_dump_nic_error_log(struct iwl_priv *priv)
b481de9c
ZY
3615{
3616 u32 i;
3617 u32 desc, time, count, base, data1;
3618 u32 blink1, blink2, ilink1, ilink2;
3619 int rc;
3620
3621 base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
3622
bb8c093b 3623 if (!iwl3945_hw_valid_rtc_data_addr(base)) {
15b1687c 3624 IWL_ERR(priv, "Not valid error log pointer 0x%08X\n", base);
b481de9c
ZY
3625 return;
3626 }
3627
5d49f498 3628 rc = iwl_grab_nic_access(priv);
b481de9c 3629 if (rc) {
39aadf8c 3630 IWL_WARN(priv, "Can not read from adapter at this time.\n");
b481de9c
ZY
3631 return;
3632 }
3633
5d49f498 3634 count = iwl_read_targ_mem(priv, base);
b481de9c
ZY
3635
3636 if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
15b1687c
WT
3637 IWL_ERR(priv, "Start IWL Error Log Dump:\n");
3638 IWL_ERR(priv, "Status: 0x%08lX, count: %d\n",
3639 priv->status, count);
b481de9c
ZY
3640 }
3641
15b1687c 3642 IWL_ERR(priv, "Desc Time asrtPC blink2 "
3643 "ilink1 nmiPC Line\n");
3644 for (i = ERROR_START_OFFSET;
3645 i < (count * ERROR_ELEM_SIZE) + ERROR_START_OFFSET;
3646 i += ERROR_ELEM_SIZE) {
5d49f498 3647 desc = iwl_read_targ_mem(priv, base + i);
b481de9c 3648 time =
5d49f498 3649 iwl_read_targ_mem(priv, base + i + 1 * sizeof(u32));
b481de9c 3650 blink1 =
5d49f498 3651 iwl_read_targ_mem(priv, base + i + 2 * sizeof(u32));
b481de9c 3652 blink2 =
5d49f498 3653 iwl_read_targ_mem(priv, base + i + 3 * sizeof(u32));
b481de9c 3654 ilink1 =
5d49f498 3655 iwl_read_targ_mem(priv, base + i + 4 * sizeof(u32));
b481de9c 3656 ilink2 =
5d49f498 3657 iwl_read_targ_mem(priv, base + i + 5 * sizeof(u32));
b481de9c 3658 data1 =
5d49f498 3659 iwl_read_targ_mem(priv, base + i + 6 * sizeof(u32));
b481de9c 3660
15b1687c
WT
3661 IWL_ERR(priv,
3662 "%-13s (#%d) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n",
3663 desc_lookup(desc), desc, time, blink1, blink2,
3664 ilink1, ilink2, data1);
b481de9c
ZY
3665 }
3666
5d49f498 3667 iwl_release_nic_access(priv);
b481de9c
ZY
3668
3669}
3670
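/*
 * Illustration only (not driver code): the SRAM error log read above is a
 * count word followed by 7-word records (desc, time, blink1, blink2,
 * ilink1, ilink2, data1), starting one word past the base.  A hypothetical
 * parser over a plain in-memory array of 32-bit words:
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_ERROR_START_WORDS	1	/* offset of first record, in words */
#define DEMO_ERROR_ELEM_WORDS	7	/* words per record */

static void demo_dump_error_log(const uint32_t *log)
{
	uint32_t count = log[0];
	uint32_t i;

	for (i = 0; i < count; i++) {
		const uint32_t *e = &log[DEMO_ERROR_START_WORDS +
					 i * DEMO_ERROR_ELEM_WORDS];

		printf("%u: desc=%u time=%u data1=0x%08x\n",
		       (unsigned)i, (unsigned)e[0], (unsigned)e[1],
		       (unsigned)e[6]);
	}
}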
f58177b9 3671#define EVENT_START_OFFSET (6 * sizeof(u32))
b481de9c
ZY
3672
3673/**
bb8c093b 3674 * iwl3945_print_event_log - Dump error event log to syslog
b481de9c 3675 *
5d49f498 3676 * NOTE: Must be called with iwl_grab_nic_access() already obtained!
b481de9c 3677 */
4a8a4322 3678static void iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx,
3679 u32 num_events, u32 mode)
3680{
3681 u32 i;
3682 u32 base; /* SRAM byte address of event log header */
3683 u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
3684 u32 ptr; /* SRAM byte address of log data */
3685 u32 ev, time, data; /* event log data */
3686
3687 if (num_events == 0)
3688 return;
3689
3690 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
3691
3692 if (mode == 0)
3693 event_size = 2 * sizeof(u32);
3694 else
3695 event_size = 3 * sizeof(u32);
3696
3697 ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
3698
3699 /* "time" is actually "data" for mode 0 (no timestamp).
3700 * place event id # at far right for easier visual parsing. */
3701 for (i = 0; i < num_events; i++) {
5d49f498 3702 ev = iwl_read_targ_mem(priv, ptr);
b481de9c 3703 ptr += sizeof(u32);
5d49f498 3704 time = iwl_read_targ_mem(priv, ptr);
b481de9c 3705 ptr += sizeof(u32);
15b1687c
WT
3706 if (mode == 0) {
3707 /* data, ev */
3708 IWL_ERR(priv, "0x%08x\t%04u\n", time, ev);
3709 } else {
5d49f498 3710 data = iwl_read_targ_mem(priv, ptr);
b481de9c 3711 ptr += sizeof(u32);
15b1687c 3712 IWL_ERR(priv, "%010u\t0x%08x\t%04u\n", time, data, ev);
b481de9c
ZY
3713 }
3714 }
3715}
3716
4a8a4322 3717static void iwl3945_dump_nic_event_log(struct iwl_priv *priv)
3718{
3719 int rc;
3720 u32 base; /* SRAM byte address of event log header */
3721 u32 capacity; /* event log capacity in # entries */
3722 u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */
3723 u32 num_wraps; /* # times uCode wrapped to top of log */
3724 u32 next_entry; /* index of next entry to be written by uCode */
3725 u32 size; /* # entries that we'll print */
3726
3727 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
bb8c093b 3728 if (!iwl3945_hw_valid_rtc_data_addr(base)) {
15b1687c 3729 IWL_ERR(priv, "Invalid event log pointer 0x%08X\n", base);
b481de9c
ZY
3730 return;
3731 }
3732
5d49f498 3733 rc = iwl_grab_nic_access(priv);
b481de9c 3734 if (rc) {
39aadf8c 3735 IWL_WARN(priv, "Can not read from adapter at this time.\n");
b481de9c
ZY
3736 return;
3737 }
3738
3739 /* event log header */
3740 capacity = iwl_read_targ_mem(priv, base);
3741 mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32)));
3742 num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
3743 next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
b481de9c
ZY
3744
3745 size = num_wraps ? capacity : next_entry;
3746
3747 /* bail out if nothing in log */
3748 if (size == 0) {
15b1687c 3749 IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n");
5d49f498 3750 iwl_release_nic_access(priv);
b481de9c
ZY
3751 return;
3752 }
3753
15b1687c 3754 IWL_ERR(priv, "Start IWL Event Log Dump: display count %d, wraps %d\n",
b481de9c
ZY
3755 size, num_wraps);
3756
3757 /* if uCode has wrapped back to top of log, start at the oldest entry,
3758 * i.e the next one that uCode would fill. */
3759 if (num_wraps)
bb8c093b 3760 iwl3945_print_event_log(priv, next_entry,
b481de9c
ZY
3761 capacity - next_entry, mode);
3762
3763 /* (then/else) start at top of log */
bb8c093b 3764 iwl3945_print_event_log(priv, 0, next_entry, mode);
b481de9c 3765
5d49f498 3766 iwl_release_nic_access(priv);
b481de9c
ZY
3767}
3768
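/*
 * Illustration only (not driver code): the event log header parsed above is
 * four u32s (capacity, mode, num_wraps, next_entry); entries are 2 words
 * without a timestamp (mode 0) or 3 words with one.  A hypothetical helper
 * showing the "start at the oldest entry after a wrap" decision:
 */
#include <stdint.h>
#include <stdio.h>

struct demo_event_log_hdr {
	uint32_t capacity;
	uint32_t mode;		/* 0: no timestamp, 1: timestamp recorded */
	uint32_t num_wraps;
	uint32_t next_entry;
};

static void demo_plan_dump(const struct demo_event_log_hdr *h)
{
	if (h->num_wraps)
		/* oldest entries first: from next_entry to end, then wrap */
		printf("dump %u entries from %u, then %u entries from 0\n",
		       (unsigned)(h->capacity - h->next_entry),
		       (unsigned)h->next_entry, (unsigned)h->next_entry);
	else
		printf("dump %u entries from 0\n", (unsigned)h->next_entry);
}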
3769/**
bb8c093b 3770 * iwl3945_irq_handle_error - called for HW or SW error interrupt from card
b481de9c 3771 */
4a8a4322 3772static void iwl3945_irq_handle_error(struct iwl_priv *priv)
b481de9c 3773{
bb8c093b 3774 /* Set the FW error flag -- cleared on iwl3945_down */
b481de9c
ZY
3775 set_bit(STATUS_FW_ERROR, &priv->status);
3776
3777 /* Cancel currently queued command. */
3778 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
3779
c8b0e6e1 3780#ifdef CONFIG_IWL3945_DEBUG
40b8ec0b 3781 if (priv->debug_level & IWL_DL_FW_ERRORS) {
bb8c093b
CH
3782 iwl3945_dump_nic_error_log(priv);
3783 iwl3945_dump_nic_event_log(priv);
f2c7e521 3784 iwl3945_print_rx_config_cmd(priv, &priv->staging39_rxon);
b481de9c
ZY
3785 }
3786#endif
3787
3788 wake_up_interruptible(&priv->wait_command_queue);
3789
3790 /* Keep the restart process from trying to send host
3791 * commands by clearing the INIT status bit */
3792 clear_bit(STATUS_READY, &priv->status);
3793
3794 if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
3795 IWL_DEBUG(IWL_DL_INFO | IWL_DL_FW_ERRORS,
3796 "Restarting adapter due to uCode error.\n");
3797
bb8c093b 3798 if (iwl3945_is_associated(priv)) {
f2c7e521
AK
3799 memcpy(&priv->recovery39_rxon, &priv->active39_rxon,
3800 sizeof(priv->recovery39_rxon));
b481de9c
ZY
3801 priv->error_recovering = 1;
3802 }
3803 queue_work(priv->workqueue, &priv->restart);
3804 }
3805}
3806
4a8a4322 3807static void iwl3945_error_recovery(struct iwl_priv *priv)
b481de9c
ZY
3808{
3809 unsigned long flags;
3810
f2c7e521
AK
3811 memcpy(&priv->staging39_rxon, &priv->recovery39_rxon,
3812 sizeof(priv->staging39_rxon));
3813 priv->staging39_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
bb8c093b 3814 iwl3945_commit_rxon(priv);
b481de9c 3815
bb8c093b 3816 iwl3945_add_station(priv, priv->bssid, 1, 0);
b481de9c
ZY
3817
3818 spin_lock_irqsave(&priv->lock, flags);
f2c7e521 3819 priv->assoc_id = le16_to_cpu(priv->staging39_rxon.assoc_id);
b481de9c
ZY
3820 priv->error_recovering = 0;
3821 spin_unlock_irqrestore(&priv->lock, flags);
3822}
3823
4a8a4322 3824static void iwl3945_irq_tasklet(struct iwl_priv *priv)
b481de9c
ZY
3825{
3826 u32 inta, handled = 0;
3827 u32 inta_fh;
3828 unsigned long flags;
c8b0e6e1 3829#ifdef CONFIG_IWL3945_DEBUG
b481de9c
ZY
3830 u32 inta_mask;
3831#endif
3832
3833 spin_lock_irqsave(&priv->lock, flags);
3834
3835 /* Ack/clear/reset pending uCode interrupts.
3836 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
3837 * and will clear only when CSR_FH_INT_STATUS gets cleared. */
5d49f498
AK
3838 inta = iwl_read32(priv, CSR_INT);
3839 iwl_write32(priv, CSR_INT, inta);
b481de9c
ZY
3840
3841 /* Ack/clear/reset pending flow-handler (DMA) interrupts.
3842 * Any new interrupts that happen after this, either while we're
3843 * in this tasklet, or later, will show up in next ISR/tasklet. */
5d49f498
AK
3844 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
3845 iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh);
b481de9c 3846
c8b0e6e1 3847#ifdef CONFIG_IWL3945_DEBUG
40b8ec0b 3848 if (priv->debug_level & IWL_DL_ISR) {
9fbab516 3849 /* just for debug */
5d49f498 3850 inta_mask = iwl_read32(priv, CSR_INT_MASK);
b481de9c
ZY
3851 IWL_DEBUG_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
3852 inta, inta_mask, inta_fh);
3853 }
3854#endif
3855
3856 /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
3857 * atomic, make sure that inta covers all the interrupts that
3858 * we've discovered, even if FH interrupt came in just after
3859 * reading CSR_INT. */
6f83eaa1 3860 if (inta_fh & CSR39_FH_INT_RX_MASK)
b481de9c 3861 inta |= CSR_INT_BIT_FH_RX;
6f83eaa1 3862 if (inta_fh & CSR39_FH_INT_TX_MASK)
3863 inta |= CSR_INT_BIT_FH_TX;
3864
3865 /* Now service all interrupt bits discovered above. */
3866 if (inta & CSR_INT_BIT_HW_ERR) {
15b1687c 3867 IWL_ERR(priv, "Microcode HW error detected. Restarting.\n");
b481de9c
ZY
3868
3869 /* Tell the device to stop sending interrupts */
bb8c093b 3870 iwl3945_disable_interrupts(priv);
b481de9c 3871
bb8c093b 3872 iwl3945_irq_handle_error(priv);
b481de9c
ZY
3873
3874 handled |= CSR_INT_BIT_HW_ERR;
3875
3876 spin_unlock_irqrestore(&priv->lock, flags);
3877
3878 return;
3879 }
3880
c8b0e6e1 3881#ifdef CONFIG_IWL3945_DEBUG
40b8ec0b 3882 if (priv->debug_level & (IWL_DL_ISR)) {
b481de9c 3883 /* NIC fires this, but we don't use it, redundant with WAKEUP */
25c03d8e
JP
3884 if (inta & CSR_INT_BIT_SCD)
3885 IWL_DEBUG_ISR("Scheduler finished to transmit "
3886 "the frame/frames.\n");
b481de9c
ZY
3887
3888 /* Alive notification via Rx interrupt will do the real work */
3889 if (inta & CSR_INT_BIT_ALIVE)
3890 IWL_DEBUG_ISR("Alive interrupt\n");
3891 }
3892#endif
3893 /* Safely ignore these bits for debug checks below */
25c03d8e 3894 inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
b481de9c 3895
b481de9c
ZY
3896 /* Error detected by uCode */
3897 if (inta & CSR_INT_BIT_SW_ERR) {
3898 IWL_ERR(priv, "Microcode SW error detected. "
3899 "Restarting 0x%X.\n", inta);
bb8c093b 3900 iwl3945_irq_handle_error(priv);
b481de9c
ZY
3901 handled |= CSR_INT_BIT_SW_ERR;
3902 }
3903
3904 /* uCode wakes up after power-down sleep */
3905 if (inta & CSR_INT_BIT_WAKEUP) {
3906 IWL_DEBUG_ISR("Wakeup interrupt\n");
141c43a3 3907 iwl_rx_queue_update_write_ptr(priv, &priv->rxq);
3908 iwl3945_tx_queue_update_write_ptr(priv, &priv->txq[0]);
3909 iwl3945_tx_queue_update_write_ptr(priv, &priv->txq[1]);
3910 iwl3945_tx_queue_update_write_ptr(priv, &priv->txq[2]);
3911 iwl3945_tx_queue_update_write_ptr(priv, &priv->txq[3]);
3912 iwl3945_tx_queue_update_write_ptr(priv, &priv->txq[4]);
3913 iwl3945_tx_queue_update_write_ptr(priv, &priv->txq[5]);
b481de9c
ZY
3914
3915 handled |= CSR_INT_BIT_WAKEUP;
3916 }
3917
3918 /* All uCode command responses, including Tx command responses,
3919 * Rx "responses" (frame-received notification), and other
3920 * notifications from uCode come through here*/
3921 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
bb8c093b 3922 iwl3945_rx_handle(priv);
b481de9c
ZY
3923 handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
3924 }
3925
3926 if (inta & CSR_INT_BIT_FH_TX) {
3927 IWL_DEBUG_ISR("Tx interrupt\n");
3928
5d49f498
AK
3929 iwl_write32(priv, CSR_FH_INT_STATUS, (1 << 6));
3930 if (!iwl_grab_nic_access(priv)) {
3931 iwl_write_direct32(priv, FH39_TCSR_CREDIT
bddadf86 3932 (FH39_SRVC_CHNL), 0x0);
5d49f498 3933 iwl_release_nic_access(priv);
b481de9c
ZY
3934 }
3935 handled |= CSR_INT_BIT_FH_TX;
3936 }
3937
3938 if (inta & ~handled)
15b1687c 3939 IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
b481de9c
ZY
3940
3941 if (inta & ~CSR_INI_SET_MASK) {
39aadf8c 3942 IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n",
b481de9c 3943 inta & ~CSR_INI_SET_MASK);
39aadf8c 3944 IWL_WARN(priv, " with FH_INT = 0x%08x\n", inta_fh);
b481de9c
ZY
3945 }
3946
3947 /* Re-enable all interrupts */
0359facc
MA
3948 /* only Re-enable if disabled by irq */
3949 if (test_bit(STATUS_INT_ENABLED, &priv->status))
3950 iwl3945_enable_interrupts(priv);
b481de9c 3951
c8b0e6e1 3952#ifdef CONFIG_IWL3945_DEBUG
40b8ec0b 3953 if (priv->debug_level & (IWL_DL_ISR)) {
5d49f498
AK
3954 inta = iwl_read32(priv, CSR_INT);
3955 inta_mask = iwl_read32(priv, CSR_INT_MASK);
3956 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
b481de9c
ZY
3957 IWL_DEBUG_ISR("End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
3958 "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
3959 }
3960#endif
3961 spin_unlock_irqrestore(&priv->lock, flags);
3962}
3963
bb8c093b 3964static irqreturn_t iwl3945_isr(int irq, void *data)
b481de9c 3965{
4a8a4322 3966 struct iwl_priv *priv = data;
b481de9c
ZY
3967 u32 inta, inta_mask;
3968 u32 inta_fh;
3969 if (!priv)
3970 return IRQ_NONE;
3971
3972 spin_lock(&priv->lock);
3973
3974 /* Disable (but don't clear!) interrupts here to avoid
3975 * back-to-back ISRs and sporadic interrupts from our NIC.
3976 * If we have something to service, the tasklet will re-enable ints.
3977 * If we *don't* have something, we'll re-enable before leaving here. */
3978 inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
3979 iwl_write32(priv, CSR_INT_MASK, 0x00000000);
b481de9c
ZY
3980
3981 /* Discover which interrupts are active/pending */
3982 inta = iwl_read32(priv, CSR_INT);
3983 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
b481de9c
ZY
3984
3985 /* Ignore interrupt if there's nothing in NIC to service.
3986 * This may be due to IRQ shared with another device,
3987 * or due to sporadic interrupts thrown from our NIC. */
3988 if (!inta && !inta_fh) {
3989 IWL_DEBUG_ISR("Ignore interrupt, inta == 0, inta_fh == 0\n");
3990 goto none;
3991 }
3992
3993 if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
3994 /* Hardware disappeared */
39aadf8c 3995 IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
cb4da1a3 3996 goto unplugged;
b481de9c
ZY
3997 }
3998
3999 IWL_DEBUG_ISR("ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
4000 inta, inta_mask, inta_fh);
4001
4002 inta &= ~CSR_INT_BIT_SCD;
4003
bb8c093b 4004 /* iwl3945_irq_tasklet() will service interrupts and re-enable them */
25c03d8e
JP
4005 if (likely(inta || inta_fh))
4006 tasklet_schedule(&priv->irq_tasklet);
cb4da1a3 4007unplugged:
b481de9c
ZY
4008 spin_unlock(&priv->lock);
4009
4010 return IRQ_HANDLED;
4011
4012 none:
4013 /* re-enable interrupts here since we don't have anything to service. */
4014 /* only Re-enable if disabled by irq */
4015 if (test_bit(STATUS_INT_ENABLED, &priv->status))
4016 iwl3945_enable_interrupts(priv);
b481de9c
ZY
4017 spin_unlock(&priv->lock);
4018 return IRQ_NONE;
4019}
4020
4021/************************** EEPROM BANDS ****************************
4022 *
bb8c093b 4023 * The iwl3945_eeprom_band definitions below provide the mapping from the
b481de9c
ZY
4024 * EEPROM contents to the specific channel number supported for each
4025 * band.
4026 *
f2c7e521 4027 * For example, iwl3945_priv->eeprom39.band_3_channels[4] from the band_3
b481de9c
ZY
4028 * definition below maps to physical channel 42 in the 5.2GHz spectrum.
4029 * The specific geography and calibration information for that channel
4030 * is contained in the eeprom map itself.
4031 *
4032 * During init, we copy the eeprom information and channel map
4033 * information into priv->channel_info_24/52 and priv->channel_map_24/52
4034 *
4035 * channel_map_24/52 provides the index in the channel_info array for a
4036 * given channel. We have to have two separate maps as there is channel
4037 * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and
4038 * band_2
4039 *
4040 * A value of 0xff stored in the channel_map indicates that the channel
4041 * is not supported by the hardware at all.
4042 *
4043 * A value of 0xfe in the channel_map indicates that the channel is not
4044 * valid for Tx with the current hardware. This means that
4045 * while the system can tune and receive on a given channel, it may not
4046 * be able to associate or transmit any frames on that
4047 * channel. There is no corresponding channel information for that
4048 * entry.
4049 *
4050 *********************************************************************/
4051
4052/* 2.4 GHz */
bb8c093b 4053static const u8 iwl3945_eeprom_band_1[14] = {
b481de9c
ZY
4054 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
4055};
4056
4057/* 5.2 GHz bands */
9fbab516 4058static const u8 iwl3945_eeprom_band_2[] = { /* 4915-5080MHz */
b481de9c
ZY
4059 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
4060};
4061
9fbab516 4062static const u8 iwl3945_eeprom_band_3[] = { /* 5170-5320MHz */
b481de9c
ZY
4063 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
4064};
4065
bb8c093b 4066static const u8 iwl3945_eeprom_band_4[] = { /* 5500-5700MHz */
b481de9c
ZY
4067 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
4068};
4069
bb8c093b 4070static const u8 iwl3945_eeprom_band_5[] = { /* 5725-5825MHz */
b481de9c
ZY
4071 145, 149, 153, 157, 161, 165
4072};
4073
4a8a4322 4074static void iwl3945_init_band_reference(const struct iwl_priv *priv, int band,
b481de9c 4075 int *eeprom_ch_count,
0f741d99 4076 const struct iwl_eeprom_channel
b481de9c
ZY
4077 **eeprom_ch_info,
4078 const u8 **eeprom_ch_index)
4079{
4080 switch (band) {
4081 case 1: /* 2.4GHz band */
bb8c093b 4082 *eeprom_ch_count = ARRAY_SIZE(iwl3945_eeprom_band_1);
f2c7e521 4083 *eeprom_ch_info = priv->eeprom39.band_1_channels;
bb8c093b 4084 *eeprom_ch_index = iwl3945_eeprom_band_1;
b481de9c 4085 break;
9fbab516 4086 case 2: /* 4.9GHz band */
bb8c093b 4087 *eeprom_ch_count = ARRAY_SIZE(iwl3945_eeprom_band_2);
f2c7e521 4088 *eeprom_ch_info = priv->eeprom39.band_2_channels;
bb8c093b 4089 *eeprom_ch_index = iwl3945_eeprom_band_2;
4090 break;
4091 case 3: /* 5.2GHz band */
bb8c093b 4092 *eeprom_ch_count = ARRAY_SIZE(iwl3945_eeprom_band_3);
f2c7e521 4093 *eeprom_ch_info = priv->eeprom39.band_3_channels;
bb8c093b 4094 *eeprom_ch_index = iwl3945_eeprom_band_3;
b481de9c 4095 break;
9fbab516 4096 case 4: /* 5.5GHz band */
bb8c093b 4097 *eeprom_ch_count = ARRAY_SIZE(iwl3945_eeprom_band_4);
f2c7e521 4098 *eeprom_ch_info = priv->eeprom39.band_4_channels;
bb8c093b 4099 *eeprom_ch_index = iwl3945_eeprom_band_4;
b481de9c 4100 break;
9fbab516 4101 case 5: /* 5.7GHz band */
bb8c093b 4102 *eeprom_ch_count = ARRAY_SIZE(iwl3945_eeprom_band_5);
f2c7e521 4103 *eeprom_ch_info = priv->eeprom39.band_5_channels;
bb8c093b 4104 *eeprom_ch_index = iwl3945_eeprom_band_5;
4105 break;
4106 default:
4107 BUG();
4108 return;
4109 }
4110}
4111
6440adb5
CB
4112/**
4113 * iwl3945_get_channel_info - Find driver's private channel info
4114 *
4115 * Based on band and channel number.
4116 */
d20b3c65 4117const struct iwl_channel_info *
4a8a4322 4118iwl3945_get_channel_info(const struct iwl_priv *priv,
d20b3c65 4119 enum ieee80211_band band, u16 channel)
b481de9c
ZY
4120{
4121 int i;
4122
8318d78a
JB
4123 switch (band) {
4124 case IEEE80211_BAND_5GHZ:
b481de9c
ZY
4125 for (i = 14; i < priv->channel_count; i++) {
4126 if (priv->channel_info[i].channel == channel)
4127 return &priv->channel_info[i];
4128 }
4129 break;
4130
8318d78a 4131 case IEEE80211_BAND_2GHZ:
b481de9c
ZY
4132 if (channel >= 1 && channel <= 14)
4133 return &priv->channel_info[channel - 1];
4134 break;
4135 case IEEE80211_NUM_BANDS:
4136 WARN_ON(1);
4137 }
4138
4139 return NULL;
4140}
4141
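/*
 * Illustration only (not driver code): the channel lookup above in
 * miniature.  The 2.4 GHz channels 1..14 sit at the front of the flat
 * channel_info array, so they map directly to index (channel - 1); 5 GHz
 * channels are searched linearly from index 14 onward.  Types and names
 * below are hypothetical.
 */
#include <stddef.h>
#include <stdint.h>

struct demo_channel_info {
	uint16_t channel;
	/* ... regulatory flags, tx power limits, etc. ... */
};

static const struct demo_channel_info *
demo_get_channel_info(const struct demo_channel_info *tab, int count,
		      int is_5ghz, uint16_t channel)
{
	int i;

	if (!is_5ghz)
		return (channel >= 1 && channel <= 14) ? &tab[channel - 1]
						       : NULL;

	for (i = 14; i < count; i++)
		if (tab[i].channel == channel)
			return &tab[i];
	return NULL;
}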
4142#define CHECK_AND_PRINT(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \
4143 ? # x " " : "")
4144
6440adb5
CB
4145/**
4146 * iwl3945_init_channel_map - Set up driver's info for all possible channels
4147 */
4a8a4322 4148static int iwl3945_init_channel_map(struct iwl_priv *priv)
b481de9c
ZY
4149{
4150 int eeprom_ch_count = 0;
4151 const u8 *eeprom_ch_index = NULL;
0f741d99 4152 const struct iwl_eeprom_channel *eeprom_ch_info = NULL;
b481de9c 4153 int band, ch;
d20b3c65 4154 struct iwl_channel_info *ch_info;
b481de9c
ZY
4155
4156 if (priv->channel_count) {
4157 IWL_DEBUG_INFO("Channel map already initialized.\n");
4158 return 0;
4159 }
4160
f2c7e521 4161 if (priv->eeprom39.version < 0x2f) {
39aadf8c 4162 IWL_WARN(priv, "Unsupported EEPROM version: 0x%04X\n",
f2c7e521 4163 priv->eeprom39.version);
b481de9c
ZY
4164 return -EINVAL;
4165 }
4166
4167 IWL_DEBUG_INFO("Initializing regulatory info from EEPROM\n");
4168
4169 priv->channel_count =
bb8c093b
CH
4170 ARRAY_SIZE(iwl3945_eeprom_band_1) +
4171 ARRAY_SIZE(iwl3945_eeprom_band_2) +
4172 ARRAY_SIZE(iwl3945_eeprom_band_3) +
4173 ARRAY_SIZE(iwl3945_eeprom_band_4) +
4174 ARRAY_SIZE(iwl3945_eeprom_band_5);
b481de9c
ZY
4175
4176 IWL_DEBUG_INFO("Parsing data for %d channels.\n", priv->channel_count);
4177
d20b3c65 4178 priv->channel_info = kzalloc(sizeof(struct iwl_channel_info) *
b481de9c
ZY
4179 priv->channel_count, GFP_KERNEL);
4180 if (!priv->channel_info) {
15b1687c 4181 IWL_ERR(priv, "Could not allocate channel_info\n");
b481de9c
ZY
4182 priv->channel_count = 0;
4183 return -ENOMEM;
4184 }
4185
4186 ch_info = priv->channel_info;
4187
	/* Loop through the 5 EEPROM bands, adding them in order to the
	 * channel map we maintain (which contains additional information
	 * beyond what is in the EEPROM itself) */
4191 for (band = 1; band <= 5; band++) {
4192
bb8c093b 4193 iwl3945_init_band_reference(priv, band, &eeprom_ch_count,
b481de9c
ZY
4194 &eeprom_ch_info, &eeprom_ch_index);
4195
4196 /* Loop through each band adding each of the channels */
4197 for (ch = 0; ch < eeprom_ch_count; ch++) {
4198 ch_info->channel = eeprom_ch_index[ch];
8318d78a
JB
4199 ch_info->band = (band == 1) ? IEEE80211_BAND_2GHZ :
4200 IEEE80211_BAND_5GHZ;
b481de9c
ZY
4201
4202 /* permanently store EEPROM's channel regulatory flags
4203 * and max power in channel info database. */
4204 ch_info->eeprom = eeprom_ch_info[ch];
4205
4206 /* Copy the run-time flags so they are there even on
4207 * invalid channels */
4208 ch_info->flags = eeprom_ch_info[ch].flags;
4209
4210 if (!(is_channel_valid(ch_info))) {
4211 IWL_DEBUG_INFO("Ch. %d Flags %x [%sGHz] - "
4212 "No traffic\n",
4213 ch_info->channel,
4214 ch_info->flags,
4215 is_channel_a_band(ch_info) ?
4216 "5.2" : "2.4");
4217 ch_info++;
4218 continue;
4219 }
4220
4221 /* Initialize regulatory-based run-time data */
4222 ch_info->max_power_avg = ch_info->curr_txpow =
4223 eeprom_ch_info[ch].max_power_avg;
4224 ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
4225 ch_info->min_power = 0;
4226
fe7c4040 4227 IWL_DEBUG_INFO("Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x"
b481de9c
ZY
4228 " %ddBm): Ad-Hoc %ssupported\n",
4229 ch_info->channel,
4230 is_channel_a_band(ch_info) ?
4231 "5.2" : "2.4",
8211ef78 4232 CHECK_AND_PRINT(VALID),
b481de9c
ZY
4233 CHECK_AND_PRINT(IBSS),
4234 CHECK_AND_PRINT(ACTIVE),
4235 CHECK_AND_PRINT(RADAR),
4236 CHECK_AND_PRINT(WIDE),
b481de9c
ZY
4237 CHECK_AND_PRINT(DFS),
4238 eeprom_ch_info[ch].flags,
4239 eeprom_ch_info[ch].max_power_avg,
4240 ((eeprom_ch_info[ch].
4241 flags & EEPROM_CHANNEL_IBSS)
4242 && !(eeprom_ch_info[ch].
4243 flags & EEPROM_CHANNEL_RADAR))
4244 ? "" : "not ");
4245
4246 /* Set the user_txpower_limit to the highest power
4247 * supported by any channel */
4248 if (eeprom_ch_info[ch].max_power_avg >
4249 priv->user_txpower_limit)
4250 priv->user_txpower_limit =
4251 eeprom_ch_info[ch].max_power_avg;
4252
4253 ch_info++;
4254 }
4255 }
4256
6440adb5 4257 /* Set up txpower settings in driver for all channels */
b481de9c
ZY
4258 if (iwl3945_txpower_set_from_eeprom(priv))
4259 return -EIO;
4260
4261 return 0;
4262}
4263
849e0dce
RC
4264/*
4265 * iwl3945_free_channel_map - undo allocations in iwl3945_init_channel_map
4266 */
4a8a4322 4267static void iwl3945_free_channel_map(struct iwl_priv *priv)
849e0dce
RC
4268{
4269 kfree(priv->channel_info);
4270 priv->channel_count = 0;
4271}
4272
b481de9c
ZY
4273/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after
4274 * sending probe req. This should be set long enough to hear probe responses
4275 * from more than one AP. */
f9340520
AK
4276#define IWL_ACTIVE_DWELL_TIME_24 (30) /* all times in msec */
4277#define IWL_ACTIVE_DWELL_TIME_52 (20)
4278
4279#define IWL_ACTIVE_DWELL_FACTOR_24GHZ (3)
4280#define IWL_ACTIVE_DWELL_FACTOR_52GHZ (2)
b481de9c
ZY
4281
4282/* For faster active scanning, scan will move to the next channel if fewer than
4283 * PLCP_QUIET_THRESH packets are heard on this channel within
4284 * ACTIVE_QUIET_TIME after sending probe request. This shortens the dwell
4285 * time if it's a quiet channel (nothing responded to our probe, and there's
4286 * no other traffic).
4287 * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */
4288#define IWL_PLCP_QUIET_THRESH __constant_cpu_to_le16(1) /* packets */
f9340520 4289#define IWL_ACTIVE_QUIET_TIME __constant_cpu_to_le16(10) /* msec */
b481de9c
ZY
4290
4291/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel.
4292 * Must be set longer than active dwell time.
4293 * For the most reliable scan, set > AP beacon interval (typically 100msec). */
4294#define IWL_PASSIVE_DWELL_TIME_24 (20) /* all times in msec */
4295#define IWL_PASSIVE_DWELL_TIME_52 (10)
4296#define IWL_PASSIVE_DWELL_BASE (100)
4297#define IWL_CHANNEL_TUNE_TIME 5
4298
e720ce9d 4299#define IWL_SCAN_PROBE_MASK(n) (BIT(n) | (BIT(n) - BIT(1)))
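/*
 * Illustration only (not driver code): IWL_SCAN_PROBE_MASK(n) sets bits
 * 1..n, i.e. one bit per direct-probe SSID, leaving bit 0 clear.
 * BIT(n) - BIT(1) yields bits 1..n-1, and OR-ing BIT(n) back in completes
 * the range.  A stand-alone check with a hypothetical DEMO_ prefix:
 */
#include <stdio.h>

#define DEMO_BIT(x)		(1u << (x))
#define DEMO_SCAN_PROBE_MASK(n)	(DEMO_BIT(n) | (DEMO_BIT(n) - DEMO_BIT(1)))

int main(void)
{
	unsigned int n;

	for (n = 1; n <= 4; n++)
		printf("n=%u -> mask 0x%02x\n", n, DEMO_SCAN_PROBE_MASK(n));
	/* prints 0x02, 0x06, 0x0e, 0x1e */
	return 0;
}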
f9340520 4300
4a8a4322 4301static inline u16 iwl3945_get_active_dwell_time(struct iwl_priv *priv,
f9340520
AK
4302 enum ieee80211_band band,
4303 u8 n_probes)
b481de9c 4304{
8318d78a 4305 if (band == IEEE80211_BAND_5GHZ)
f9340520
AK
4306 return IWL_ACTIVE_DWELL_TIME_52 +
4307 IWL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1);
b481de9c 4308 else
f9340520
AK
4309 return IWL_ACTIVE_DWELL_TIME_24 +
4310 IWL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1);
b481de9c
ZY
4311}
4312
4a8a4322 4313static u16 iwl3945_get_passive_dwell_time(struct iwl_priv *priv,
8318d78a 4314 enum ieee80211_band band)
b481de9c 4315{
8318d78a 4316 u16 passive = (band == IEEE80211_BAND_2GHZ) ?
b481de9c
ZY
4317 IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 :
4318 IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52;
4319
bb8c093b 4320 if (iwl3945_is_associated(priv)) {
b481de9c
ZY
4321 /* If we're associated, we clamp the maximum passive
4322 * dwell time to be 98% of the beacon interval (minus
4323 * 2 * channel tune time) */
4324 passive = priv->beacon_int;
4325 if ((passive > IWL_PASSIVE_DWELL_BASE) || !passive)
4326 passive = IWL_PASSIVE_DWELL_BASE;
4327 passive = (passive * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
4328 }
4329
b481de9c
ZY
4330 return passive;
4331}
4332
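/*
 * Illustration only (not driver code): the 2.4 GHz dwell-time arithmetic
 * above with the same constants, so the effect of n_probes and of being
 * associated is easy to see.  beacon_int below is a hypothetical example
 * value.
 */
#include <stdio.h>

#define DEMO_ACTIVE_DWELL_TIME_24	30	/* msec */
#define DEMO_ACTIVE_DWELL_FACTOR_24GHZ	3
#define DEMO_PASSIVE_DWELL_TIME_24	20
#define DEMO_PASSIVE_DWELL_BASE		100
#define DEMO_CHANNEL_TUNE_TIME		5

int main(void)
{
	int n_probes = 2;
	int beacon_int = 100;	/* hypothetical beacon interval, msec */
	int active, passive;

	active = DEMO_ACTIVE_DWELL_TIME_24 +
		 DEMO_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1);

	/* not associated: base plus per-band passive time */
	passive = DEMO_PASSIVE_DWELL_BASE + DEMO_PASSIVE_DWELL_TIME_24;
	printf("active %d ms, passive %d ms\n", active, passive);

	/* associated: clamp to roughly 98 percent of the beacon interval */
	passive = beacon_int;
	if (passive > DEMO_PASSIVE_DWELL_BASE || !passive)
		passive = DEMO_PASSIVE_DWELL_BASE;
	passive = (passive * 98) / 100 - DEMO_CHANNEL_TUNE_TIME * 2;
	printf("passive while associated: %d ms\n", passive);
	return 0;
}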
4a8a4322 4333static int iwl3945_get_channels_for_scan(struct iwl_priv *priv,
8318d78a 4334 enum ieee80211_band band,
f9340520 4335 u8 is_active, u8 n_probes,
bb8c093b 4336 struct iwl3945_scan_channel *scan_ch)
b481de9c
ZY
4337{
4338 const struct ieee80211_channel *channels = NULL;
8318d78a 4339 const struct ieee80211_supported_band *sband;
d20b3c65 4340 const struct iwl_channel_info *ch_info;
b481de9c
ZY
4341 u16 passive_dwell = 0;
4342 u16 active_dwell = 0;
4343 int added, i;
4344
cbba18c6 4345 sband = iwl_get_hw_mode(priv, band);
8318d78a 4346 if (!sband)
b481de9c
ZY
4347 return 0;
4348
8318d78a 4349 channels = sband->channels;
b481de9c 4350
f9340520 4351 active_dwell = iwl3945_get_active_dwell_time(priv, band, n_probes);
8318d78a 4352 passive_dwell = iwl3945_get_passive_dwell_time(priv, band);
b481de9c 4353
4354 if (passive_dwell <= active_dwell)
4355 passive_dwell = active_dwell + 1;
4356
8318d78a 4357 for (i = 0, added = 0; i < sband->n_channels; i++) {
4358 if (channels[i].flags & IEEE80211_CHAN_DISABLED)
4359 continue;
4360
8318d78a 4361 scan_ch->channel = channels[i].hw_value;
b481de9c 4362
8318d78a 4363 ch_info = iwl3945_get_channel_info(priv, band, scan_ch->channel);
b481de9c 4364 if (!is_channel_valid(ch_info)) {
66b5004d 4365 IWL_DEBUG_SCAN("Channel %d is INVALID for this band.\n",
b481de9c
ZY
4366 scan_ch->channel);
4367 continue;
4368 }
4369
4370 scan_ch->active_dwell = cpu_to_le16(active_dwell);
4371 scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
		/* If passive, set up for auto-switch
		 * and use long active_dwell time.
		 */
b481de9c 4375 if (!is_active || is_channel_passive(ch_info) ||
011a0330 4376 (channels[i].flags & IEEE80211_CHAN_PASSIVE_SCAN)) {
b481de9c 4377 scan_ch->type = 0; /* passive */
4378 if (IWL_UCODE_API(priv->ucode_ver) == 1)
4379 scan_ch->active_dwell = cpu_to_le16(passive_dwell - 1);
4380 } else {
b481de9c 4381 scan_ch->type = 1; /* active */
011a0330 4382 }
b481de9c 4383
		/* Set direct probe bits. These may be used both for active
		 * scan channels (probes get sent right away),
		 * or for passive channels (probes get sent only after
		 * hearing a clear Rx packet). */
4388 if (IWL_UCODE_API(priv->ucode_ver) >= 2) {
4389 if (n_probes)
4390 scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes);
4391 } else {
4392 /* uCode v1 does not allow setting direct probe bits on
4393 * passive channel. */
4394 if ((scan_ch->type & 1) && n_probes)
4395 scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes);
4396 }
b481de9c 4397
9fbab516 4398 /* Set txpower levels to defaults */
b481de9c
ZY
4399 scan_ch->tpc.dsp_atten = 110;
4400 /* scan_pwr_info->tpc.dsp_atten; */
4401
4402 /*scan_pwr_info->tpc.tx_gain; */
8318d78a 4403 if (band == IEEE80211_BAND_5GHZ)
b481de9c
ZY
4404 scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3;
4405 else {
4406 scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3));
4407 /* NOTE: if we were doing 6Mb OFDM for scans we'd use
9fbab516 4408 * power level:
8a1b0245 4409 * scan_ch->tpc.tx_gain = ((1 << 5) | (2 << 3)) | 3;
b481de9c
ZY
4410 */
4411 }
4412
4413 IWL_DEBUG_SCAN("Scanning %d [%s %d]\n",
4414 scan_ch->channel,
4415 (scan_ch->type & 1) ? "ACTIVE" : "PASSIVE",
4416 (scan_ch->type & 1) ?
4417 active_dwell : passive_dwell);
4418
4419 scan_ch++;
4420 added++;
4421 }
4422
4423 IWL_DEBUG_SCAN("total channels to scan %d \n", added);
4424 return added;
4425}
4426
4a8a4322 4427static void iwl3945_init_hw_rates(struct iwl_priv *priv,
b481de9c
ZY
4428 struct ieee80211_rate *rates)
4429{
4430 int i;
4431
4432 for (i = 0; i < IWL_RATE_COUNT; i++) {
8318d78a
JB
4433 rates[i].bitrate = iwl3945_rates[i].ieee * 5;
4434 rates[i].hw_value = i; /* Rate scaling will work on indexes */
4435 rates[i].hw_value_short = i;
4436 rates[i].flags = 0;
d9829a67 4437 if ((i > IWL39_LAST_OFDM_RATE) || (i < IWL_FIRST_OFDM_RATE)) {
b481de9c 4438 /*
8318d78a 4439 * If CCK != 1M then set short preamble rate flag.
b481de9c 4440 */
bb8c093b 4441 rates[i].flags |= (iwl3945_rates[i].plcp == 10) ?
8318d78a 4442 0 : IEEE80211_RATE_SHORT_PREAMBLE;
b481de9c 4443 }
4444 }
4445}
4446
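/*
 * Illustration only (not driver code): mac80211 expects bitrates in units
 * of 100 kbps, while the rate table's .ieee field is the 802.11 rate value
 * in 500 kbps units, hence the "* 5" above (e.g. 11 -> 55 -> 5.5 Mbps).
 */
#include <stdio.h>

int main(void)
{
	int ieee_vals[] = { 2, 4, 11, 22, 12, 108 };	/* 1 ... 54 Mbps */
	int i;

	for (i = 0; i < 6; i++)
		printf("ieee %3d -> %3d (x100 kbps) = %.1f Mbps\n",
		       ieee_vals[i], ieee_vals[i] * 5,
		       ieee_vals[i] * 5 / 10.0);
	return 0;
}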
4447/**
bb8c093b 4448 * iwl3945_init_geos - Initialize mac80211's geo/channel info based from eeprom
b481de9c 4449 */
4a8a4322 4450static int iwl3945_init_geos(struct iwl_priv *priv)
b481de9c 4451{
d20b3c65 4452 struct iwl_channel_info *ch;
8211ef78 4453 struct ieee80211_supported_band *sband;
b481de9c
ZY
4454 struct ieee80211_channel *channels;
4455 struct ieee80211_channel *geo_ch;
4456 struct ieee80211_rate *rates;
4457 int i = 0;
b481de9c 4458
8318d78a
JB
4459 if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
4460 priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
b481de9c
ZY
4461 IWL_DEBUG_INFO("Geography modes already initialized.\n");
4462 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
4463 return 0;
4464 }
4465
b481de9c
ZY
4466 channels = kzalloc(sizeof(struct ieee80211_channel) *
4467 priv->channel_count, GFP_KERNEL);
8318d78a 4468 if (!channels)
b481de9c 4469 return -ENOMEM;
b481de9c 4470
8211ef78 4471 rates = kzalloc((sizeof(struct ieee80211_rate) * (IWL_RATE_COUNT + 1)),
b481de9c
ZY
4472 GFP_KERNEL);
4473 if (!rates) {
b481de9c
ZY
4474 kfree(channels);
4475 return -ENOMEM;
4476 }
4477
b481de9c 4478 /* 5.2GHz channels start after the 2.4GHz channels */
8211ef78
TW
4479 sband = &priv->bands[IEEE80211_BAND_5GHZ];
4480 sband->channels = &channels[ARRAY_SIZE(iwl3945_eeprom_band_1)];
4481 /* just OFDM */
4482 sband->bitrates = &rates[IWL_FIRST_OFDM_RATE];
4483 sband->n_bitrates = IWL_RATE_COUNT - IWL_FIRST_OFDM_RATE;
4484
4485 sband = &priv->bands[IEEE80211_BAND_2GHZ];
4486 sband->channels = channels;
4487 /* OFDM & CCK */
4488 sband->bitrates = rates;
4489 sband->n_bitrates = IWL_RATE_COUNT;
b481de9c
ZY
4490
4491 priv->ieee_channels = channels;
4492 priv->ieee_rates = rates;
4493
bb8c093b 4494 iwl3945_init_hw_rates(priv, rates);
b481de9c 4495
8211ef78 4496 for (i = 0; i < priv->channel_count; i++) {
b481de9c
ZY
4497 ch = &priv->channel_info[i];
4498
8211ef78
TW
4499 /* FIXME: might be removed if scan is OK*/
4500 if (!is_channel_valid(ch))
b481de9c 4501 continue;
b481de9c
ZY
4502
4503 if (is_channel_a_band(ch))
8211ef78 4504 sband = &priv->bands[IEEE80211_BAND_5GHZ];
8318d78a 4505 else
8211ef78 4506 sband = &priv->bands[IEEE80211_BAND_2GHZ];
b481de9c 4507
8211ef78
TW
4508 geo_ch = &sband->channels[sband->n_channels++];
4509
4510 geo_ch->center_freq = ieee80211_channel_to_frequency(ch->channel);
8318d78a
JB
4511 geo_ch->max_power = ch->max_power_avg;
4512 geo_ch->max_antenna_gain = 0xff;
7b72304d 4513 geo_ch->hw_value = ch->channel;
b481de9c
ZY
4514
4515 if (is_channel_valid(ch)) {
8318d78a
JB
4516 if (!(ch->flags & EEPROM_CHANNEL_IBSS))
4517 geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;
b481de9c 4518
8318d78a
JB
4519 if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
4520 geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
b481de9c
ZY
4521
4522 if (ch->flags & EEPROM_CHANNEL_RADAR)
8318d78a 4523 geo_ch->flags |= IEEE80211_CHAN_RADAR;
b481de9c
ZY
4524
4525 if (ch->max_power_avg > priv->max_channel_txpower_limit)
4526 priv->max_channel_txpower_limit =
4527 ch->max_power_avg;
8211ef78 4528 } else {
8318d78a 4529 geo_ch->flags |= IEEE80211_CHAN_DISABLED;
8211ef78
TW
4530 }
4531
4532 /* Save flags for reg domain usage */
4533 geo_ch->orig_flags = geo_ch->flags;
4534
4535 IWL_DEBUG_INFO("Channel %d Freq=%d[%sGHz] %s flag=0%X\n",
4536 ch->channel, geo_ch->center_freq,
4537 is_channel_a_band(ch) ? "5.2" : "2.4",
4538 geo_ch->flags & IEEE80211_CHAN_DISABLED ?
4539 "restricted" : "valid",
4540 geo_ch->flags);
b481de9c
ZY
4541 }
4542
82b9a121
TW
4543 if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
4544 priv->cfg->sku & IWL_SKU_A) {
978785a3
TW
4545 IWL_INFO(priv, "Incorrectly detected BG card as ABG. "
4546 "Please send your PCI ID 0x%04X:0x%04X to maintainer.\n",
4547 priv->pci_dev->device, priv->pci_dev->subsystem_device);
82b9a121 4548 priv->cfg->sku &= ~IWL_SKU_A;
b481de9c
ZY
4549 }
4550
978785a3 4551 IWL_INFO(priv, "Tunable channels: %d 802.11bg, %d 802.11a channels\n",
8318d78a
JB
4552 priv->bands[IEEE80211_BAND_2GHZ].n_channels,
4553 priv->bands[IEEE80211_BAND_5GHZ].n_channels);
b481de9c 4554
e0e0a67e
JL
4555 if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
4556 priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
4557 &priv->bands[IEEE80211_BAND_2GHZ];
4558 if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
4559 priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
4560 &priv->bands[IEEE80211_BAND_5GHZ];
b481de9c 4561
b481de9c
ZY
4562 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
4563
4564 return 0;
4565}
4566
849e0dce
RC
4567/*
4568 * iwl3945_free_geos - undo allocations in iwl3945_init_geos
4569 */
4a8a4322 4570static void iwl3945_free_geos(struct iwl_priv *priv)
849e0dce 4571{
849e0dce
RC
4572 kfree(priv->ieee_channels);
4573 kfree(priv->ieee_rates);
4574 clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
4575}
4576
b481de9c
ZY
4577/******************************************************************************
4578 *
4579 * uCode download functions
4580 *
4581 ******************************************************************************/
4582
4a8a4322 4583static void iwl3945_dealloc_ucode_pci(struct iwl_priv *priv)
b481de9c 4584{
98c92211
TW
4585 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_code);
4586 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_data);
4587 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
4588 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_init);
4589 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_init_data);
4590 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_boot);
b481de9c
ZY
4591}
4592
4593/**
bb8c093b 4594 * iwl3945_verify_inst_full - verify runtime uCode image in card vs. host,
b481de9c
ZY
4595 * looking at all data.
4596 */
4a8a4322 4597static int iwl3945_verify_inst_full(struct iwl_priv *priv, __le32 *image, u32 len)
b481de9c
ZY
4598{
4599 u32 val;
4600 u32 save_len = len;
4601 int rc = 0;
4602 u32 errcnt;
4603
4604 IWL_DEBUG_INFO("ucode inst image size is %u\n", len);
4605
5d49f498 4606 rc = iwl_grab_nic_access(priv);
b481de9c
ZY
4607 if (rc)
4608 return rc;
4609
5d49f498 4610 iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
250bdd21 4611 IWL39_RTC_INST_LOWER_BOUND);
b481de9c
ZY
4612
4613 errcnt = 0;
4614 for (; len > 0; len -= sizeof(u32), image++) {
4615 /* read data comes through single port, auto-incr addr */
4616 /* NOTE: Use the debugless read so we don't flood kernel log
4617 * if IWL_DL_IO is set */
5d49f498 4618 val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
b481de9c 4619 if (val != le32_to_cpu(*image)) {
15b1687c 4620 IWL_ERR(priv, "uCode INST section is invalid at "
b481de9c
ZY
4621 "offset 0x%x, is 0x%x, s/b 0x%x\n",
4622 save_len - len, val, le32_to_cpu(*image));
4623 rc = -EIO;
4624 errcnt++;
4625 if (errcnt >= 20)
4626 break;
4627 }
4628 }
4629
5d49f498 4630 iwl_release_nic_access(priv);
b481de9c
ZY
4631
4632 if (!errcnt)
bc434dd2 4633 IWL_DEBUG_INFO("ucode image in INSTRUCTION memory is good\n");
b481de9c
ZY
4634
4635 return rc;
4636}
4637
4638
4639/**
bb8c093b 4640 * iwl3945_verify_inst_sparse - verify runtime uCode image in card vs. host,
b481de9c
ZY
4641 * using sample data 100 bytes apart. If these sample points are good,
4642 * it's a pretty good bet that everything between them is good, too.
4643 */
4a8a4322 4644static int iwl3945_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
b481de9c
ZY
4645{
4646 u32 val;
4647 int rc = 0;
4648 u32 errcnt = 0;
4649 u32 i;
4650
4651 IWL_DEBUG_INFO("ucode inst image size is %u\n", len);
4652
5d49f498 4653 rc = iwl_grab_nic_access(priv);
b481de9c
ZY
4654 if (rc)
4655 return rc;
4656
4657 for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
4658 /* read data comes through single port, auto-incr addr */
4659 /* NOTE: Use the debugless read so we don't flood kernel log
4660 * if IWL_DL_IO is set */
5d49f498 4661 iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
250bdd21 4662 i + IWL39_RTC_INST_LOWER_BOUND);
5d49f498 4663 val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
4664 if (val != le32_to_cpu(*image)) {
4665#if 0 /* Enable this if you want to see details */
15b1687c 4666 IWL_ERR(priv, "uCode INST section is invalid at "
b481de9c
ZY
4667 "offset 0x%x, is 0x%x, s/b 0x%x\n",
4668 i, val, *image);
4669#endif
4670 rc = -EIO;
4671 errcnt++;
4672 if (errcnt >= 3)
4673 break;
4674 }
4675 }
4676
5d49f498 4677 iwl_release_nic_access(priv);
4678
4679 return rc;
4680}
4681
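/*
 * Illustration only (not driver code): the sparse verify above checks one
 * 32-bit word every 100 bytes instead of the whole image, bailing out after
 * a few mismatches.  A hypothetical version over two in-memory buffers
 * shows the same stride logic:
 */
#include <stdint.h>
#include <string.h>

static int demo_verify_sparse(const uint8_t *sram, const uint8_t *image,
			      uint32_t len)
{
	uint32_t i, errcnt = 0;

	for (i = 0; i + sizeof(uint32_t) <= len; i += 100)
		if (memcmp(sram + i, image + i, sizeof(uint32_t)) &&
		    ++errcnt >= 3)
			return -1;	/* too many sample mismatches */
	return 0;
}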
4682
4683/**
bb8c093b 4684 * iwl3945_verify_ucode - determine which instruction image is in SRAM,
b481de9c
ZY
4685 * and verify its contents
4686 */
4a8a4322 4687static int iwl3945_verify_ucode(struct iwl_priv *priv)
b481de9c
ZY
4688{
4689 __le32 *image;
4690 u32 len;
4691 int rc = 0;
4692
4693 /* Try bootstrap */
4694 image = (__le32 *)priv->ucode_boot.v_addr;
4695 len = priv->ucode_boot.len;
bb8c093b 4696 rc = iwl3945_verify_inst_sparse(priv, image, len);
b481de9c
ZY
4697 if (rc == 0) {
4698 IWL_DEBUG_INFO("Bootstrap uCode is good in inst SRAM\n");
4699 return 0;
4700 }
4701
4702 /* Try initialize */
4703 image = (__le32 *)priv->ucode_init.v_addr;
4704 len = priv->ucode_init.len;
bb8c093b 4705 rc = iwl3945_verify_inst_sparse(priv, image, len);
b481de9c
ZY
4706 if (rc == 0) {
4707 IWL_DEBUG_INFO("Initialize uCode is good in inst SRAM\n");
4708 return 0;
4709 }
4710
4711 /* Try runtime/protocol */
4712 image = (__le32 *)priv->ucode_code.v_addr;
4713 len = priv->ucode_code.len;
bb8c093b 4714 rc = iwl3945_verify_inst_sparse(priv, image, len);
b481de9c
ZY
4715 if (rc == 0) {
4716 IWL_DEBUG_INFO("Runtime uCode is good in inst SRAM\n");
4717 return 0;
4718 }
4719
15b1687c 4720 IWL_ERR(priv, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
b481de9c 4721
9fbab516
BC
4722 /* Since nothing seems to match, show first several data entries in
4723 * instruction SRAM, so maybe visual inspection will give a clue.
4724 * Selection of bootstrap image (vs. other images) is arbitrary. */
b481de9c
ZY
4725 image = (__le32 *)priv->ucode_boot.v_addr;
4726 len = priv->ucode_boot.len;
bb8c093b 4727 rc = iwl3945_verify_inst_full(priv, image, len);
b481de9c
ZY
4728
4729 return rc;
4730}
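/*
 * Note on the ordering above: the bootstrap, initialize and runtime
 * images are tried in turn because any one of them may be the image
 * currently resident in instruction SRAM, depending on how far the
 * load sequence got.  Only when none of the sparse checks matches is
 * the expensive full dump of the bootstrap image done for diagnostics.
 */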
4731
4a8a4322 4732static void iwl3945_nic_start(struct iwl_priv *priv)
b481de9c
ZY
4733{
4734 /* Remove all resets to allow NIC to operate */
5d49f498 4735 iwl_write32(priv, CSR_RESET, 0);
b481de9c
ZY
4736}
4737
4738/**
bb8c093b 4739 * iwl3945_read_ucode - Read uCode images from disk file.
b481de9c
ZY
4740 *
4741 * Copy into buffers for card to fetch via bus-mastering
4742 */
4a8a4322 4743static int iwl3945_read_ucode(struct iwl_priv *priv)
b481de9c 4744{
a78fe754 4745 struct iwl_ucode *ucode;
a0987a8d 4746 int ret = -EINVAL, index;
b481de9c
ZY
4747 const struct firmware *ucode_raw;
4748 /* firmware file name contains uCode/driver compatibility version */
a0987a8d
RC
4749 const char *name_pre = priv->cfg->fw_name_pre;
4750 const unsigned int api_max = priv->cfg->ucode_api_max;
4751 const unsigned int api_min = priv->cfg->ucode_api_min;
4752 char buf[25];
b481de9c
ZY
4753 u8 *src;
4754 size_t len;
a0987a8d 4755 u32 api_ver, inst_size, data_size, init_size, init_data_size, boot_size;
b481de9c
ZY
4756
4757 /* Ask kernel firmware_class module to get the boot firmware off disk.
4758 * request_firmware() is synchronous, file is in memory on return. */
a0987a8d
RC
4759 for (index = api_max; index >= api_min; index--) {
4760 sprintf(buf, "%s%u%s", name_pre, index, ".ucode");
4761 ret = request_firmware(&ucode_raw, buf, &priv->pci_dev->dev);
4762 if (ret < 0) {
15b1687c 4763 IWL_ERR(priv, "%s firmware file req failed: %d\n",
a0987a8d
RC
4764 buf, ret);
4765 if (ret == -ENOENT)
4766 continue;
4767 else
4768 goto error;
4769 } else {
4770 if (index < api_max)
15b1687c
WT
4771 IWL_ERR(priv, "Loaded firmware %s, "
4772 "which is deprecated. "
 4773 				"Please use API v%u instead.\n",
a0987a8d
RC
4774 buf, api_max);
4775 IWL_DEBUG_INFO("Got firmware '%s' file (%zd bytes) from disk\n",
4776 buf, ucode_raw->size);
4777 break;
4778 }
b481de9c
ZY
4779 }
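/*
 * Example of the retry loop above (the exact names come from the cfg
 * struct, so treat these as illustrative): with fw_name_pre
 * "iwlwifi-3945-", api_max = 2 and api_min = 1, the loop first asks
 * firmware_class for "iwlwifi-3945-2.ucode" and only falls back to
 * "iwlwifi-3945-1.ucode" if the newer file is missing (-ENOENT);
 * any other request_firmware() error aborts immediately.
 */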
4780
a0987a8d
RC
4781 if (ret < 0)
4782 goto error;
b481de9c
ZY
4783
4784 /* Make sure that we got at least our header! */
4785 if (ucode_raw->size < sizeof(*ucode)) {
15b1687c 4786 IWL_ERR(priv, "File size way too small!\n");
90e759d1 4787 ret = -EINVAL;
b481de9c
ZY
4788 goto err_release;
4789 }
4790
4791 /* Data from ucode file: header followed by uCode images */
4792 ucode = (void *)ucode_raw->data;
4793
c02b3acd 4794 priv->ucode_ver = le32_to_cpu(ucode->ver);
a0987a8d 4795 api_ver = IWL_UCODE_API(priv->ucode_ver);
b481de9c
ZY
4796 inst_size = le32_to_cpu(ucode->inst_size);
4797 data_size = le32_to_cpu(ucode->data_size);
4798 init_size = le32_to_cpu(ucode->init_size);
4799 init_data_size = le32_to_cpu(ucode->init_data_size);
4800 boot_size = le32_to_cpu(ucode->boot_size);
4801
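/*
 * Resulting picture of the .ucode file, as implied by the header
 * fields above and the copy offsets used further down:
 *
 *	[ struct iwl_ucode header                        ]
 *	[ runtime instructions  : inst_size      bytes   ]
 *	[ runtime data          : data_size      bytes   ]
 *	[ init instructions     : init_size      bytes   ]
 *	[ init data             : init_data_size bytes   ]
 *	[ bootstrap instructions: boot_size      bytes   ]
 */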
a0987a8d
RC
4802 /* api_ver should match the api version forming part of the
4803 * firmware filename ... but we don't check for that and only rely
 4804 	 * on the API version read from the firmware header from here on */
4805
4806 if (api_ver < api_min || api_ver > api_max) {
15b1687c 4807 IWL_ERR(priv, "Driver unable to support your firmware API. "
a0987a8d
RC
4808 "Driver supports v%u, firmware is v%u.\n",
4809 api_max, api_ver);
4810 priv->ucode_ver = 0;
4811 ret = -EINVAL;
4812 goto err_release;
4813 }
4814 if (api_ver != api_max)
15b1687c 4815 IWL_ERR(priv, "Firmware has old API version. Expected %u, "
a0987a8d
RC
4816 "got %u. New firmware can be obtained "
4817 "from http://www.intellinuxwireless.org.\n",
4818 api_max, api_ver);
4819
978785a3
TW
4820 IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u\n",
4821 IWL_UCODE_MAJOR(priv->ucode_ver),
4822 IWL_UCODE_MINOR(priv->ucode_ver),
4823 IWL_UCODE_API(priv->ucode_ver),
4824 IWL_UCODE_SERIAL(priv->ucode_ver));
4825
a0987a8d
RC
4826 IWL_DEBUG_INFO("f/w package hdr ucode version raw = 0x%x\n",
4827 priv->ucode_ver);
bc434dd2
IS
4828 IWL_DEBUG_INFO("f/w package hdr runtime inst size = %u\n", inst_size);
4829 IWL_DEBUG_INFO("f/w package hdr runtime data size = %u\n", data_size);
4830 IWL_DEBUG_INFO("f/w package hdr init inst size = %u\n", init_size);
4831 IWL_DEBUG_INFO("f/w package hdr init data size = %u\n", init_data_size);
4832 IWL_DEBUG_INFO("f/w package hdr boot inst size = %u\n", boot_size);
b481de9c 4833
a0987a8d 4834
b481de9c
ZY
4835 /* Verify size of file vs. image size info in file's header */
4836 if (ucode_raw->size < sizeof(*ucode) +
4837 inst_size + data_size + init_size +
4838 init_data_size + boot_size) {
4839
4840 IWL_DEBUG_INFO("uCode file size %d too small\n",
4841 (int)ucode_raw->size);
90e759d1 4842 ret = -EINVAL;
b481de9c
ZY
4843 goto err_release;
4844 }
4845
4846 /* Verify that uCode images will fit in card's SRAM */
250bdd21 4847 if (inst_size > IWL39_MAX_INST_SIZE) {
90e759d1
TW
4848 IWL_DEBUG_INFO("uCode instr len %d too large to fit in\n",
4849 inst_size);
4850 ret = -EINVAL;
b481de9c
ZY
4851 goto err_release;
4852 }
4853
250bdd21 4854 if (data_size > IWL39_MAX_DATA_SIZE) {
90e759d1
TW
4855 IWL_DEBUG_INFO("uCode data len %d too large to fit in\n",
4856 data_size);
4857 ret = -EINVAL;
b481de9c
ZY
4858 goto err_release;
4859 }
250bdd21 4860 if (init_size > IWL39_MAX_INST_SIZE) {
90e759d1
TW
4861 IWL_DEBUG_INFO("uCode init instr len %d too large to fit in\n",
4862 init_size);
4863 ret = -EINVAL;
b481de9c
ZY
4864 goto err_release;
4865 }
250bdd21 4866 if (init_data_size > IWL39_MAX_DATA_SIZE) {
90e759d1
TW
4867 IWL_DEBUG_INFO("uCode init data len %d too large to fit in\n",
4868 init_data_size);
4869 ret = -EINVAL;
b481de9c
ZY
4870 goto err_release;
4871 }
250bdd21 4872 if (boot_size > IWL39_MAX_BSM_SIZE) {
90e759d1
TW
4873 IWL_DEBUG_INFO("uCode boot instr len %d too large to fit in\n",
4874 boot_size);
4875 ret = -EINVAL;
b481de9c
ZY
4876 goto err_release;
4877 }
4878
4879 /* Allocate ucode buffers for card's bus-master loading ... */
4880
4881 /* Runtime instructions and 2 copies of data:
4882 * 1) unmodified from disk
4883 * 2) backup cache for save/restore during power-downs */
4884 priv->ucode_code.len = inst_size;
98c92211 4885 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_code);
b481de9c
ZY
4886
4887 priv->ucode_data.len = data_size;
98c92211 4888 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_data);
b481de9c
ZY
4889
4890 priv->ucode_data_backup.len = data_size;
98c92211 4891 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
b481de9c 4892
90e759d1
TW
4893 if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr ||
4894 !priv->ucode_data_backup.v_addr)
4895 goto err_pci_alloc;
b481de9c
ZY
4896
4897 /* Initialization instructions and data */
90e759d1
TW
4898 if (init_size && init_data_size) {
4899 priv->ucode_init.len = init_size;
98c92211 4900 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_init);
90e759d1
TW
4901
4902 priv->ucode_init_data.len = init_data_size;
98c92211 4903 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data);
90e759d1
TW
4904
4905 if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr)
4906 goto err_pci_alloc;
4907 }
b481de9c
ZY
4908
4909 /* Bootstrap (instructions only, no data) */
90e759d1
TW
4910 if (boot_size) {
4911 priv->ucode_boot.len = boot_size;
98c92211 4912 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot);
b481de9c 4913
90e759d1
TW
4914 if (!priv->ucode_boot.v_addr)
4915 goto err_pci_alloc;
4916 }
b481de9c
ZY
4917
4918 /* Copy images into buffers for card's bus-master reads ... */
4919
4920 /* Runtime instructions (first block of data in file) */
4921 src = &ucode->data[0];
4922 len = priv->ucode_code.len;
90e759d1 4923 IWL_DEBUG_INFO("Copying (but not loading) uCode instr len %Zd\n", len);
b481de9c
ZY
4924 memcpy(priv->ucode_code.v_addr, src, len);
4925 IWL_DEBUG_INFO("uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
4926 priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr);
4927
4928 /* Runtime data (2nd block)
bb8c093b 4929 * NOTE: Copy into backup buffer will be done in iwl3945_up() */
b481de9c
ZY
4930 src = &ucode->data[inst_size];
4931 len = priv->ucode_data.len;
90e759d1 4932 IWL_DEBUG_INFO("Copying (but not loading) uCode data len %Zd\n", len);
b481de9c
ZY
4933 memcpy(priv->ucode_data.v_addr, src, len);
4934 memcpy(priv->ucode_data_backup.v_addr, src, len);
4935
4936 /* Initialization instructions (3rd block) */
4937 if (init_size) {
4938 src = &ucode->data[inst_size + data_size];
4939 len = priv->ucode_init.len;
90e759d1
TW
4940 IWL_DEBUG_INFO("Copying (but not loading) init instr len %Zd\n",
4941 len);
b481de9c
ZY
4942 memcpy(priv->ucode_init.v_addr, src, len);
4943 }
4944
4945 /* Initialization data (4th block) */
4946 if (init_data_size) {
4947 src = &ucode->data[inst_size + data_size + init_size];
4948 len = priv->ucode_init_data.len;
4949 IWL_DEBUG_INFO("Copying (but not loading) init data len %d\n",
4950 (int)len);
4951 memcpy(priv->ucode_init_data.v_addr, src, len);
4952 }
4953
4954 /* Bootstrap instructions (5th block) */
4955 src = &ucode->data[inst_size + data_size + init_size + init_data_size];
4956 len = priv->ucode_boot.len;
4957 IWL_DEBUG_INFO("Copying (but not loading) boot instr len %d\n",
4958 (int)len);
4959 memcpy(priv->ucode_boot.v_addr, src, len);
4960
 4961 	/* We have our copies now, allow the OS to release its copy */
4962 release_firmware(ucode_raw);
4963 return 0;
4964
4965 err_pci_alloc:
15b1687c 4966 IWL_ERR(priv, "failed to allocate pci memory\n");
90e759d1 4967 ret = -ENOMEM;
bb8c093b 4968 iwl3945_dealloc_ucode_pci(priv);
b481de9c
ZY
4969
4970 err_release:
4971 release_firmware(ucode_raw);
4972
4973 error:
90e759d1 4974 return ret;
b481de9c
ZY
4975}
4976
4977
4978/**
bb8c093b 4979 * iwl3945_set_ucode_ptrs - Set uCode address location
b481de9c
ZY
4980 *
4981 * Tell initialization uCode where to find runtime uCode.
4982 *
4983 * BSM registers initially contain pointers to initialization uCode.
4984 * We need to replace them to load runtime uCode inst and data,
4985 * and to save runtime data when powering down.
4986 */
4a8a4322 4987static int iwl3945_set_ucode_ptrs(struct iwl_priv *priv)
b481de9c
ZY
4988{
4989 dma_addr_t pinst;
4990 dma_addr_t pdata;
4991 int rc = 0;
4992 unsigned long flags;
4993
4994 /* bits 31:0 for 3945 */
4995 pinst = priv->ucode_code.p_addr;
4996 pdata = priv->ucode_data_backup.p_addr;
4997
4998 spin_lock_irqsave(&priv->lock, flags);
5d49f498 4999 rc = iwl_grab_nic_access(priv);
b481de9c
ZY
5000 if (rc) {
5001 spin_unlock_irqrestore(&priv->lock, flags);
5002 return rc;
5003 }
5004
5005 /* Tell bootstrap uCode where to find image to load */
5d49f498
AK
5006 iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
5007 iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
5008 iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
b481de9c
ZY
5009 priv->ucode_data.len);
5010
a96a27f9 5011 	/* Inst byte count must be set up last; bit 31 signals uCode
b481de9c 5012 * that all new ptr/size info is in place */
5d49f498 5013 iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
b481de9c
ZY
5014 priv->ucode_code.len | BSM_DRAM_INST_LOAD);
5015
5d49f498 5016 iwl_release_nic_access(priv);
b481de9c
ZY
5017
5018 spin_unlock_irqrestore(&priv->lock, flags);
5019
5020 IWL_DEBUG_INFO("Runtime uCode pointers are set.\n");
5021
5022 return rc;
5023}
5024
5025/**
bb8c093b 5026 * iwl3945_init_alive_start - Called after REPLY_ALIVE notification received
b481de9c
ZY
5027 *
5028 * Called after REPLY_ALIVE notification received from "initialize" uCode.
5029 *
b481de9c 5030 * Tell "initialize" uCode to go ahead and load the runtime uCode.
9fbab516 5031 */
4a8a4322 5032static void iwl3945_init_alive_start(struct iwl_priv *priv)
b481de9c
ZY
5033{
5034 /* Check alive response for "valid" sign from uCode */
5035 if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
5036 /* We had an error bringing up the hardware, so take it
5037 * all the way back down so we can try again */
5038 IWL_DEBUG_INFO("Initialize Alive failed.\n");
5039 goto restart;
5040 }
5041
5042 /* Bootstrap uCode has loaded initialize uCode ... verify inst image.
5043 * This is a paranoid check, because we would not have gotten the
5044 * "initialize" alive if code weren't properly loaded. */
bb8c093b 5045 if (iwl3945_verify_ucode(priv)) {
b481de9c
ZY
5046 /* Runtime instruction load was bad;
5047 * take it all the way back down so we can try again */
5048 IWL_DEBUG_INFO("Bad \"initialize\" uCode load.\n");
5049 goto restart;
5050 }
5051
5052 /* Send pointers to protocol/runtime uCode image ... init code will
5053 * load and launch runtime uCode, which will send us another "Alive"
5054 * notification. */
5055 IWL_DEBUG_INFO("Initialization Alive received.\n");
bb8c093b 5056 if (iwl3945_set_ucode_ptrs(priv)) {
b481de9c
ZY
5057 /* Runtime instruction load won't happen;
5058 * take it all the way back down so we can try again */
5059 IWL_DEBUG_INFO("Couldn't set up uCode pointers.\n");
5060 goto restart;
5061 }
5062 return;
5063
5064 restart:
5065 queue_work(priv->workqueue, &priv->restart);
5066}
5067
5068
9bdf5eca
MA
5069/* temporary */
5070static int iwl3945_mac_beacon_update(struct ieee80211_hw *hw,
5071 struct sk_buff *skb);
5072
b481de9c 5073/**
bb8c093b 5074 * iwl3945_alive_start - called after REPLY_ALIVE notification received
b481de9c 5075 * from protocol/runtime uCode (initialization uCode's
bb8c093b 5076 * Alive gets handled by iwl3945_init_alive_start()).
b481de9c 5077 */
4a8a4322 5078static void iwl3945_alive_start(struct iwl_priv *priv)
b481de9c
ZY
5079{
5080 int rc = 0;
5081 int thermal_spin = 0;
5082 u32 rfkill;
5083
5084 IWL_DEBUG_INFO("Runtime Alive received.\n");
5085
5086 if (priv->card_alive.is_valid != UCODE_VALID_OK) {
5087 /* We had an error bringing up the hardware, so take it
5088 * all the way back down so we can try again */
5089 IWL_DEBUG_INFO("Alive failed.\n");
5090 goto restart;
5091 }
5092
5093 /* Initialize uCode has loaded Runtime uCode ... verify inst image.
5094 * This is a paranoid check, because we would not have gotten the
5095 * "runtime" alive if code weren't properly loaded. */
bb8c093b 5096 if (iwl3945_verify_ucode(priv)) {
b481de9c
ZY
5097 /* Runtime instruction load was bad;
5098 * take it all the way back down so we can try again */
5099 IWL_DEBUG_INFO("Bad runtime uCode load.\n");
5100 goto restart;
5101 }
5102
bb8c093b 5103 iwl3945_clear_stations_table(priv);
b481de9c 5104
5d49f498 5105 rc = iwl_grab_nic_access(priv);
b481de9c 5106 if (rc) {
39aadf8c 5107 IWL_WARN(priv, "Can not read RFKILL status from adapter\n");
b481de9c
ZY
5108 return;
5109 }
5110
5d49f498 5111 rfkill = iwl_read_prph(priv, APMG_RFKILL_REG);
b481de9c 5112 IWL_DEBUG_INFO("RFKILL status: 0x%x\n", rfkill);
5d49f498 5113 iwl_release_nic_access(priv);
b481de9c
ZY
5114
5115 if (rfkill & 0x1) {
5116 clear_bit(STATUS_RF_KILL_HW, &priv->status);
a96a27f9 5117 /* if RFKILL is not on, then wait for thermal
b481de9c 5118 * sensor in adapter to kick in */
bb8c093b 5119 while (iwl3945_hw_get_temperature(priv) == 0) {
b481de9c
ZY
5120 thermal_spin++;
5121 udelay(10);
5122 }
5123
5124 if (thermal_spin)
5125 IWL_DEBUG_INFO("Thermal calibration took %dus\n",
5126 thermal_spin * 10);
5127 } else
5128 set_bit(STATUS_RF_KILL_HW, &priv->status);
5129
9fbab516 5130 /* After the ALIVE response, we can send commands to 3945 uCode */
b481de9c
ZY
5131 set_bit(STATUS_ALIVE, &priv->status);
5132
5133 /* Clear out the uCode error bit if it is set */
5134 clear_bit(STATUS_FW_ERROR, &priv->status);
5135
775a6e27 5136 if (iwl_is_rfkill(priv))
b481de9c
ZY
5137 return;
5138
36d6825b 5139 ieee80211_wake_queues(priv->hw);
b481de9c
ZY
5140
5141 priv->active_rate = priv->rates_mask;
5142 priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK;
5143
bb8c093b 5144 iwl3945_send_power_mode(priv, IWL_POWER_LEVEL(priv->power_mode));
b481de9c 5145
bb8c093b
CH
5146 if (iwl3945_is_associated(priv)) {
5147 struct iwl3945_rxon_cmd *active_rxon =
f2c7e521 5148 (struct iwl3945_rxon_cmd *)(&priv->active39_rxon);
b481de9c 5149
f2c7e521
AK
5150 memcpy(&priv->staging39_rxon, &priv->active39_rxon,
5151 sizeof(priv->staging39_rxon));
b481de9c
ZY
5152 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
5153 } else {
5154 /* Initialize our rx_config data */
60294de3 5155 iwl3945_connection_init_rx_config(priv, priv->iw_mode);
f2c7e521 5156 memcpy(priv->staging39_rxon.node_addr, priv->mac_addr, ETH_ALEN);
b481de9c
ZY
5157 }
5158
9fbab516 5159 /* Configure Bluetooth device coexistence support */
bb8c093b 5160 iwl3945_send_bt_config(priv);
b481de9c
ZY
5161
5162 /* Configure the adapter for unassociated operation */
bb8c093b 5163 iwl3945_commit_rxon(priv);
b481de9c 5164
b481de9c
ZY
5165 iwl3945_reg_txpower_periodic(priv);
5166
fe00b5a5
RC
5167 iwl3945_led_register(priv);
5168
b481de9c 5169 IWL_DEBUG_INFO("ALIVE processing complete.\n");
a9f46786 5170 set_bit(STATUS_READY, &priv->status);
5a66926a 5171 wake_up_interruptible(&priv->wait_command_queue);
b481de9c
ZY
5172
5173 if (priv->error_recovering)
bb8c093b 5174 iwl3945_error_recovery(priv);
b481de9c 5175
9bdf5eca
MA
5176 /* reassociate for ADHOC mode */
5177 if (priv->vif && (priv->iw_mode == NL80211_IFTYPE_ADHOC)) {
5178 struct sk_buff *beacon = ieee80211_beacon_get(priv->hw,
5179 priv->vif);
5180 if (beacon)
5181 iwl3945_mac_beacon_update(priv->hw, beacon);
5182 }
5183
b481de9c
ZY
5184 return;
5185
5186 restart:
5187 queue_work(priv->workqueue, &priv->restart);
5188}
5189
4a8a4322 5190static void iwl3945_cancel_deferred_work(struct iwl_priv *priv);
b481de9c 5191
4a8a4322 5192static void __iwl3945_down(struct iwl_priv *priv)
b481de9c
ZY
5193{
5194 unsigned long flags;
5195 int exit_pending = test_bit(STATUS_EXIT_PENDING, &priv->status);
5196 struct ieee80211_conf *conf = NULL;
5197
5198 IWL_DEBUG_INFO(DRV_NAME " is going down\n");
5199
5200 conf = ieee80211_get_hw_conf(priv->hw);
5201
5202 if (!exit_pending)
5203 set_bit(STATUS_EXIT_PENDING, &priv->status);
5204
ab53d8af 5205 iwl3945_led_unregister(priv);
bb8c093b 5206 iwl3945_clear_stations_table(priv);
b481de9c
ZY
5207
5208 /* Unblock any waiting calls */
5209 wake_up_interruptible_all(&priv->wait_command_queue);
5210
b481de9c
ZY
5211 /* Wipe out the EXIT_PENDING status bit if we are not actually
5212 * exiting the module */
5213 if (!exit_pending)
5214 clear_bit(STATUS_EXIT_PENDING, &priv->status);
5215
5216 /* stop and reset the on-board processor */
5d49f498 5217 iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
b481de9c
ZY
5218
5219 /* tell the device to stop sending interrupts */
0359facc 5220 spin_lock_irqsave(&priv->lock, flags);
bb8c093b 5221 iwl3945_disable_interrupts(priv);
0359facc
MA
5222 spin_unlock_irqrestore(&priv->lock, flags);
5223 iwl_synchronize_irq(priv);
b481de9c
ZY
5224
5225 if (priv->mac80211_registered)
5226 ieee80211_stop_queues(priv->hw);
5227
bb8c093b 5228 /* If we have not previously called iwl3945_init() then
b481de9c 5229 * clear all bits but the RF Kill and SUSPEND bits and return */
775a6e27 5230 if (!iwl_is_init(priv)) {
b481de9c
ZY
5231 priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) <<
5232 STATUS_RF_KILL_HW |
5233 test_bit(STATUS_RF_KILL_SW, &priv->status) <<
5234 STATUS_RF_KILL_SW |
9788864e
RC
5235 test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
5236 STATUS_GEO_CONFIGURED |
b481de9c 5237 test_bit(STATUS_IN_SUSPEND, &priv->status) <<
ebef2008
AK
5238 STATUS_IN_SUSPEND |
5239 test_bit(STATUS_EXIT_PENDING, &priv->status) <<
5240 STATUS_EXIT_PENDING;
b481de9c
ZY
5241 goto exit;
5242 }
5243
5244 /* ...otherwise clear out all the status bits but the RF Kill and
5245 * SUSPEND bits and continue taking the NIC down. */
5246 priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
5247 STATUS_RF_KILL_HW |
5248 test_bit(STATUS_RF_KILL_SW, &priv->status) <<
5249 STATUS_RF_KILL_SW |
9788864e
RC
5250 test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
5251 STATUS_GEO_CONFIGURED |
b481de9c
ZY
5252 test_bit(STATUS_IN_SUSPEND, &priv->status) <<
5253 STATUS_IN_SUSPEND |
5254 test_bit(STATUS_FW_ERROR, &priv->status) <<
ebef2008
AK
5255 STATUS_FW_ERROR |
5256 test_bit(STATUS_EXIT_PENDING, &priv->status) <<
5257 STATUS_EXIT_PENDING;
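/*
 * The status-masking expressions in this function rely on test_bit()
 * evaluating to 0 or 1, so "test_bit(B, &priv->status) << B"
 * reproduces bit B only if it was already set; OR-ing the selected
 * terms together rebuilds priv->status with every other bit cleared.
 * For example, if only STATUS_FW_ERROR was set, the result has only
 * the STATUS_FW_ERROR bit set.
 */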
b481de9c
ZY
5258
5259 spin_lock_irqsave(&priv->lock, flags);
5d49f498 5260 iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
b481de9c
ZY
5261 spin_unlock_irqrestore(&priv->lock, flags);
5262
bb8c093b
CH
5263 iwl3945_hw_txq_ctx_stop(priv);
5264 iwl3945_hw_rxq_stop(priv);
b481de9c
ZY
5265
5266 spin_lock_irqsave(&priv->lock, flags);
5d49f498
AK
5267 if (!iwl_grab_nic_access(priv)) {
5268 iwl_write_prph(priv, APMG_CLK_DIS_REG,
b481de9c 5269 APMG_CLK_VAL_DMA_CLK_RQT);
5d49f498 5270 iwl_release_nic_access(priv);
b481de9c
ZY
5271 }
5272 spin_unlock_irqrestore(&priv->lock, flags);
5273
5274 udelay(5);
5275
01ec616d 5276 priv->cfg->ops->lib->apm_ops.reset(priv);
b481de9c 5277 exit:
3d24a9f7 5278 memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp));
b481de9c
ZY
5279
5280 if (priv->ibss_beacon)
5281 dev_kfree_skb(priv->ibss_beacon);
5282 priv->ibss_beacon = NULL;
5283
5284 /* clear out any free frames */
bb8c093b 5285 iwl3945_clear_free_frames(priv);
b481de9c
ZY
5286}
5287
4a8a4322 5288static void iwl3945_down(struct iwl_priv *priv)
b481de9c
ZY
5289{
5290 mutex_lock(&priv->mutex);
bb8c093b 5291 __iwl3945_down(priv);
b481de9c 5292 mutex_unlock(&priv->mutex);
b24d22b1 5293
bb8c093b 5294 iwl3945_cancel_deferred_work(priv);
b481de9c
ZY
5295}
5296
5297#define MAX_HW_RESTARTS 5
5298
4a8a4322 5299static int __iwl3945_up(struct iwl_priv *priv)
b481de9c
ZY
5300{
5301 int rc, i;
5302
5303 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
39aadf8c 5304 IWL_WARN(priv, "Exit pending; will not bring the NIC up\n");
b481de9c
ZY
5305 return -EIO;
5306 }
5307
5308 if (test_bit(STATUS_RF_KILL_SW, &priv->status)) {
39aadf8c 5309 IWL_WARN(priv, "Radio disabled by SW RF kill (module "
b481de9c 5310 "parameter)\n");
e655b9f0
ZY
5311 return -ENODEV;
5312 }
5313
e903fbd4 5314 if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) {
15b1687c 5315 IWL_ERR(priv, "ucode not available for device bring up\n");
e903fbd4
RC
5316 return -EIO;
5317 }
5318
e655b9f0 5319 /* If platform's RF_KILL switch is NOT set to KILL */
5d49f498 5320 if (iwl_read32(priv, CSR_GP_CNTRL) &
e655b9f0
ZY
5321 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
5322 clear_bit(STATUS_RF_KILL_HW, &priv->status);
5323 else {
5324 set_bit(STATUS_RF_KILL_HW, &priv->status);
5325 if (!test_bit(STATUS_IN_SUSPEND, &priv->status)) {
39aadf8c 5326 IWL_WARN(priv, "Radio disabled by HW RF Kill switch\n");
e655b9f0
ZY
5327 return -ENODEV;
5328 }
b481de9c 5329 }
80fcc9e2 5330
5d49f498 5331 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
b481de9c 5332
bb8c093b 5333 rc = iwl3945_hw_nic_init(priv);
b481de9c 5334 if (rc) {
15b1687c 5335 		IWL_ERR(priv, "Unable to init nic\n");
b481de9c
ZY
5336 return rc;
5337 }
5338
5339 /* make sure rfkill handshake bits are cleared */
5d49f498
AK
5340 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
5341 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
b481de9c
ZY
5342 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
5343
5344 /* clear (again), then enable host interrupts */
5d49f498 5345 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
bb8c093b 5346 iwl3945_enable_interrupts(priv);
b481de9c
ZY
5347
5348 /* really make sure rfkill handshake bits are cleared */
5d49f498
AK
5349 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
5350 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
b481de9c
ZY
5351
5352 /* Copy original ucode data image from disk into backup cache.
5353 * This will be used to initialize the on-board processor's
5354 * data SRAM for a clean start when the runtime program first loads. */
5355 memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr,
5a66926a 5356 priv->ucode_data.len);
b481de9c 5357
e655b9f0
ZY
5358 /* We return success when we resume from suspend and rf_kill is on. */
5359 if (test_bit(STATUS_RF_KILL_HW, &priv->status))
5360 return 0;
5361
b481de9c
ZY
5362 for (i = 0; i < MAX_HW_RESTARTS; i++) {
5363
bb8c093b 5364 iwl3945_clear_stations_table(priv);
b481de9c
ZY
5365
5366 /* load bootstrap state machine,
5367 * load bootstrap program into processor's memory,
5368 * prepare to load the "initialize" uCode */
0164b9b4 5369 		rc = priv->cfg->ops->lib->load_ucode(priv);
b481de9c
ZY
5370
5371 if (rc) {
15b1687c
WT
5372 IWL_ERR(priv,
5373 "Unable to set up bootstrap uCode: %d\n", rc);
b481de9c
ZY
5374 continue;
5375 }
5376
5377 /* start card; "initialize" will load runtime ucode */
bb8c093b 5378 iwl3945_nic_start(priv);
b481de9c 5379
b481de9c
ZY
5380 IWL_DEBUG_INFO(DRV_NAME " is coming up\n");
5381
5382 return 0;
5383 }
5384
5385 set_bit(STATUS_EXIT_PENDING, &priv->status);
bb8c093b 5386 __iwl3945_down(priv);
ebef2008 5387 clear_bit(STATUS_EXIT_PENDING, &priv->status);
b481de9c
ZY
5388
5389 /* tried to restart and config the device for as long as our
5390 * patience could withstand */
15b1687c 5391 IWL_ERR(priv, "Unable to initialize device after %d attempts.\n", i);
b481de9c
ZY
5392 return -EIO;
5393}
5394
5395
5396/*****************************************************************************
5397 *
5398 * Workqueue callbacks
5399 *
5400 *****************************************************************************/
5401
bb8c093b 5402static void iwl3945_bg_init_alive_start(struct work_struct *data)
b481de9c 5403{
4a8a4322
AK
5404 struct iwl_priv *priv =
5405 container_of(data, struct iwl_priv, init_alive_start.work);
b481de9c
ZY
5406
5407 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
5408 return;
5409
5410 mutex_lock(&priv->mutex);
bb8c093b 5411 iwl3945_init_alive_start(priv);
b481de9c
ZY
5412 mutex_unlock(&priv->mutex);
5413}
5414
bb8c093b 5415static void iwl3945_bg_alive_start(struct work_struct *data)
b481de9c 5416{
4a8a4322
AK
5417 struct iwl_priv *priv =
5418 container_of(data, struct iwl_priv, alive_start.work);
b481de9c
ZY
5419
5420 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
5421 return;
5422
5423 mutex_lock(&priv->mutex);
bb8c093b 5424 iwl3945_alive_start(priv);
b481de9c
ZY
5425 mutex_unlock(&priv->mutex);
5426}
5427
bb8c093b 5428static void iwl3945_bg_rf_kill(struct work_struct *work)
b481de9c 5429{
4a8a4322 5430 struct iwl_priv *priv = container_of(work, struct iwl_priv, rf_kill);
b481de9c
ZY
5431
5432 wake_up_interruptible(&priv->wait_command_queue);
5433
5434 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
5435 return;
5436
5437 mutex_lock(&priv->mutex);
5438
775a6e27 5439 if (!iwl_is_rfkill(priv)) {
b481de9c
ZY
5440 IWL_DEBUG(IWL_DL_INFO | IWL_DL_RF_KILL,
5441 "HW and/or SW RF Kill no longer active, restarting "
5442 "device\n");
2663516d
HS
5443 if (!test_bit(STATUS_EXIT_PENDING, &priv->status) &&
5444 test_bit(STATUS_ALIVE, &priv->status))
b481de9c
ZY
5445 queue_work(priv->workqueue, &priv->restart);
5446 } else {
5447
5448 if (!test_bit(STATUS_RF_KILL_HW, &priv->status))
5449 IWL_DEBUG_RF_KILL("Can not turn radio back on - "
5450 "disabled by SW switch\n");
5451 else
39aadf8c 5452 IWL_WARN(priv, "Radio Frequency Kill Switch is On:\n"
b481de9c
ZY
5453 "Kill switch must be turned off for "
5454 "wireless networking to work.\n");
5455 }
ebef2008 5456
b481de9c 5457 mutex_unlock(&priv->mutex);
80fcc9e2 5458 iwl3945_rfkill_set_hw_state(priv);
b481de9c
ZY
5459}
5460
2663516d
HS
5461static void iwl3945_rfkill_poll(struct work_struct *data)
5462{
5463 struct iwl_priv *priv =
5464 container_of(data, struct iwl_priv, rfkill_poll.work);
5465 unsigned long status = priv->status;
5466
5467 if (iwl_read32(priv, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
5468 clear_bit(STATUS_RF_KILL_HW, &priv->status);
5469 else
5470 set_bit(STATUS_RF_KILL_HW, &priv->status);
5471
5472 if (test_bit(STATUS_RF_KILL_HW, &status) != test_bit(STATUS_RF_KILL_HW, &priv->status))
5473 queue_work(priv->workqueue, &priv->rf_kill);
5474
5475 queue_delayed_work(priv->workqueue, &priv->rfkill_poll,
5476 round_jiffies_relative(2 * HZ));
5477
5478}
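/*
 * The rfkill poll above is the standard self-rearming delayed-work
 * pattern.  A minimal, hypothetical sketch of that pattern follows
 * (my_dev, my_poll and the 2*HZ period are illustrative, not part of
 * this driver; the real code queues onto priv->workqueue, while the
 * sketch uses the shared system workqueue for brevity).  It is wrapped
 * in #if 0 because it is an illustration only:
 */
#if 0
#include <linux/workqueue.h>
#include <linux/timer.h>	/* round_jiffies_relative() */

struct my_dev {
	struct delayed_work poll;
};

static void my_poll(struct work_struct *work)
{
	/* delayed_work embeds a work_struct, hence the .poll.work member */
	struct my_dev *dev = container_of(work, struct my_dev, poll.work);

	/* ... sample the hardware state here, as the function above
	 * samples CSR_GP_CNTRL ... */

	/* re-arm; round_jiffies_relative() lets the kernel batch wakeups */
	schedule_delayed_work(&dev->poll, round_jiffies_relative(2 * HZ));
}

/*
 * Setup:    INIT_DELAYED_WORK(&dev->poll, my_poll);
 *           schedule_delayed_work(&dev->poll, 0);
 * Teardown: cancel_delayed_work_sync(&dev->poll);
 */
#endif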
5479
b481de9c
ZY
5480#define IWL_SCAN_CHECK_WATCHDOG (7 * HZ)
5481
bb8c093b 5482static void iwl3945_bg_scan_check(struct work_struct *data)
b481de9c 5483{
4a8a4322
AK
5484 struct iwl_priv *priv =
5485 container_of(data, struct iwl_priv, scan_check.work);
b481de9c
ZY
5486
5487 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
5488 return;
5489
5490 mutex_lock(&priv->mutex);
5491 if (test_bit(STATUS_SCANNING, &priv->status) ||
5492 test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
5493 IWL_DEBUG(IWL_DL_INFO | IWL_DL_SCAN,
5494 "Scan completion watchdog resetting adapter (%dms)\n",
5495 jiffies_to_msecs(IWL_SCAN_CHECK_WATCHDOG));
15e869d8 5496
b481de9c 5497 if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
bb8c093b 5498 iwl3945_send_scan_abort(priv);
b481de9c
ZY
5499 }
5500 mutex_unlock(&priv->mutex);
5501}
5502
bb8c093b 5503static void iwl3945_bg_request_scan(struct work_struct *data)
b481de9c 5504{
4a8a4322
AK
5505 struct iwl_priv *priv =
5506 container_of(data, struct iwl_priv, request_scan);
c2d79b48 5507 struct iwl_host_cmd cmd = {
b481de9c 5508 .id = REPLY_SCAN_CMD,
bb8c093b 5509 .len = sizeof(struct iwl3945_scan_cmd),
b481de9c
ZY
5510 .meta.flags = CMD_SIZE_HUGE,
5511 };
5512 int rc = 0;
bb8c093b 5513 struct iwl3945_scan_cmd *scan;
b481de9c 5514 struct ieee80211_conf *conf = NULL;
f9340520 5515 u8 n_probes = 2;
8318d78a 5516 enum ieee80211_band band;
9387b7ca 5517 DECLARE_SSID_BUF(ssid);
b481de9c
ZY
5518
5519 conf = ieee80211_get_hw_conf(priv->hw);
5520
5521 mutex_lock(&priv->mutex);
5522
775a6e27 5523 if (!iwl_is_ready(priv)) {
39aadf8c 5524 IWL_WARN(priv, "request scan called when driver not ready.\n");
b481de9c
ZY
5525 goto done;
5526 }
5527
a96a27f9 5528 /* Make sure the scan wasn't canceled before this queued work
b481de9c
ZY
5529 * was given the chance to run... */
5530 if (!test_bit(STATUS_SCANNING, &priv->status))
5531 goto done;
5532
5533 /* This should never be called or scheduled if there is currently
5534 * a scan active in the hardware. */
5535 if (test_bit(STATUS_SCAN_HW, &priv->status)) {
 5536 		IWL_DEBUG_INFO("Multiple concurrent scan requests. "
5537 "Ignoring second request.\n");
5538 rc = -EIO;
5539 goto done;
5540 }
5541
5542 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
5543 IWL_DEBUG_SCAN("Aborting scan due to device shutdown\n");
5544 goto done;
5545 }
5546
5547 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
5548 IWL_DEBUG_HC("Scan request while abort pending. Queuing.\n");
5549 goto done;
5550 }
5551
775a6e27 5552 if (iwl_is_rfkill(priv)) {
b481de9c
ZY
5553 IWL_DEBUG_HC("Aborting scan due to RF Kill activation\n");
5554 goto done;
5555 }
5556
5557 if (!test_bit(STATUS_READY, &priv->status)) {
5558 IWL_DEBUG_HC("Scan request while uninitialized. Queuing.\n");
5559 goto done;
5560 }
5561
5562 if (!priv->scan_bands) {
5563 IWL_DEBUG_HC("Aborting scan due to no requested bands\n");
5564 goto done;
5565 }
5566
f2c7e521
AK
5567 if (!priv->scan39) {
5568 priv->scan39 = kmalloc(sizeof(struct iwl3945_scan_cmd) +
b481de9c 5569 IWL_MAX_SCAN_SIZE, GFP_KERNEL);
f2c7e521 5570 if (!priv->scan39) {
b481de9c
ZY
5571 rc = -ENOMEM;
5572 goto done;
5573 }
5574 }
f2c7e521 5575 scan = priv->scan39;
bb8c093b 5576 memset(scan, 0, sizeof(struct iwl3945_scan_cmd) + IWL_MAX_SCAN_SIZE);
b481de9c
ZY
5577
5578 scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
5579 scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
5580
bb8c093b 5581 if (iwl3945_is_associated(priv)) {
b481de9c
ZY
5582 u16 interval = 0;
5583 u32 extra;
5584 u32 suspend_time = 100;
5585 u32 scan_suspend_time = 100;
5586 unsigned long flags;
5587
5588 IWL_DEBUG_INFO("Scanning while associated...\n");
5589
5590 spin_lock_irqsave(&priv->lock, flags);
5591 interval = priv->beacon_int;
5592 spin_unlock_irqrestore(&priv->lock, flags);
5593
5594 scan->suspend_time = 0;
15e869d8 5595 scan->max_out_time = cpu_to_le32(200 * 1024);
b481de9c
ZY
5596 if (!interval)
5597 interval = suspend_time;
5598 /*
5599 * suspend time format:
5600 * 0-19: beacon interval in usec (time before exec.)
5601 * 20-23: 0
5602 * 24-31: number of beacons (suspend between channels)
5603 */
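/* Worked example (illustrative numbers): with suspend_time = 100 and
 * a beacon interval of 100, extra = (100 / 100) << 24 = 0x01000000 and
 * the remainder term is (100 % 100) * 1024 = 0, so scan_suspend_time
 * becomes 0xFF0FFFFF & 0x01000000 = 0x01000000, i.e. "suspend for one
 * beacon" with nothing in the low bits. */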
5604
5605 extra = (suspend_time / interval) << 24;
5606 scan_suspend_time = 0xFF0FFFFF &
5607 (extra | ((suspend_time % interval) * 1024));
5608
5609 scan->suspend_time = cpu_to_le32(scan_suspend_time);
5610 IWL_DEBUG_SCAN("suspend_time 0x%X beacon interval %d\n",
5611 scan_suspend_time, interval);
5612 }
5613
5614 /* We should add the ability for user to lock to PASSIVE ONLY */
5615 if (priv->one_direct_scan) {
5616 IWL_DEBUG_SCAN
5617 ("Kicking off one direct scan for '%s'\n",
9387b7ca
JL
5618 print_ssid(ssid, priv->direct_ssid,
5619 priv->direct_ssid_len));
b481de9c
ZY
5620 scan->direct_scan[0].id = WLAN_EID_SSID;
5621 scan->direct_scan[0].len = priv->direct_ssid_len;
5622 memcpy(scan->direct_scan[0].ssid,
5623 priv->direct_ssid, priv->direct_ssid_len);
f9340520 5624 n_probes++;
f9340520 5625 } else
786b4557 5626 IWL_DEBUG_SCAN("Kicking off one indirect scan.\n");
b481de9c
ZY
5627
5628 /* We don't build a direct scan probe request; the uCode will do
5629 * that based on the direct_mask added to each channel entry */
5630 scan->tx_cmd.len = cpu_to_le16(
bb8c093b 5631 iwl3945_fill_probe_req(priv, (struct ieee80211_mgmt *)scan->data,
430cfe95 5632 IWL_MAX_SCAN_SIZE - sizeof(*scan)));
b481de9c 5633 scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
3832ec9d 5634 scan->tx_cmd.sta_id = priv->hw_params.bcast_sta_id;
b481de9c
ZY
5635 scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
5636
5637 /* flags + rate selection */
5638
66b5004d 5639 if (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ)) {
b481de9c
ZY
5640 scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
5641 scan->tx_cmd.rate = IWL_RATE_1M_PLCP;
5642 scan->good_CRC_th = 0;
8318d78a 5643 band = IEEE80211_BAND_2GHZ;
66b5004d 5644 } else if (priv->scan_bands & BIT(IEEE80211_BAND_5GHZ)) {
b481de9c
ZY
5645 scan->tx_cmd.rate = IWL_RATE_6M_PLCP;
5646 scan->good_CRC_th = IWL_GOOD_CRC_TH;
8318d78a 5647 band = IEEE80211_BAND_5GHZ;
66b5004d 5648 } else {
39aadf8c 5649 IWL_WARN(priv, "Invalid scan band count\n");
b481de9c
ZY
5650 goto done;
5651 }
5652
5653 /* select Rx antennas */
5654 scan->flags |= iwl3945_get_antenna_flags(priv);
5655
05c914fe 5656 if (priv->iw_mode == NL80211_IFTYPE_MONITOR)
b481de9c
ZY
5657 scan->filter_flags = RXON_FILTER_PROMISC_MSK;
5658
f9340520
AK
5659 scan->channel_count =
5660 iwl3945_get_channels_for_scan(priv, band, 1, /* active */
5661 n_probes,
5662 (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]);
b481de9c 5663
14b54336
RC
5664 if (scan->channel_count == 0) {
5665 IWL_DEBUG_SCAN("channel count %d\n", scan->channel_count);
5666 goto done;
5667 }
5668
b481de9c 5669 cmd.len += le16_to_cpu(scan->tx_cmd.len) +
bb8c093b 5670 scan->channel_count * sizeof(struct iwl3945_scan_channel);
b481de9c
ZY
5671 cmd.data = scan;
5672 scan->len = cpu_to_le16(cmd.len);
5673
5674 set_bit(STATUS_SCAN_HW, &priv->status);
bb8c093b 5675 rc = iwl3945_send_cmd_sync(priv, &cmd);
b481de9c
ZY
5676 if (rc)
5677 goto done;
5678
5679 queue_delayed_work(priv->workqueue, &priv->scan_check,
5680 IWL_SCAN_CHECK_WATCHDOG);
5681
5682 mutex_unlock(&priv->mutex);
5683 return;
5684
5685 done:
2420ebc1
MA
 5686 	/* Cannot perform the scan; make sure we clear the scanning
 5687 	 * bits from status so the next scan request can be performed.
 5688 	 * If we don't clear the scanning status bits here, every
 5689 	 * subsequent scan will fail.
 5690 	*/
5691 clear_bit(STATUS_SCAN_HW, &priv->status);
5692 clear_bit(STATUS_SCANNING, &priv->status);
5693
01ebd063 5694 /* inform mac80211 scan aborted */
b481de9c
ZY
5695 queue_work(priv->workqueue, &priv->scan_completed);
5696 mutex_unlock(&priv->mutex);
5697}
5698
bb8c093b 5699static void iwl3945_bg_up(struct work_struct *data)
b481de9c 5700{
4a8a4322 5701 struct iwl_priv *priv = container_of(data, struct iwl_priv, up);
b481de9c
ZY
5702
5703 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
5704 return;
5705
5706 mutex_lock(&priv->mutex);
bb8c093b 5707 __iwl3945_up(priv);
b481de9c 5708 mutex_unlock(&priv->mutex);
80fcc9e2 5709 iwl3945_rfkill_set_hw_state(priv);
b481de9c
ZY
5710}
5711
bb8c093b 5712static void iwl3945_bg_restart(struct work_struct *data)
b481de9c 5713{
4a8a4322 5714 struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);
b481de9c
ZY
5715
5716 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
5717 return;
5718
bb8c093b 5719 iwl3945_down(priv);
b481de9c
ZY
5720 queue_work(priv->workqueue, &priv->up);
5721}
5722
bb8c093b 5723static void iwl3945_bg_rx_replenish(struct work_struct *data)
b481de9c 5724{
4a8a4322
AK
5725 struct iwl_priv *priv =
5726 container_of(data, struct iwl_priv, rx_replenish);
b481de9c
ZY
5727
5728 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
5729 return;
5730
5731 mutex_lock(&priv->mutex);
bb8c093b 5732 iwl3945_rx_replenish(priv);
b481de9c
ZY
5733 mutex_unlock(&priv->mutex);
5734}
5735
7878a5a4
MA
5736#define IWL_DELAY_NEXT_SCAN (HZ*2)
5737
4a8a4322 5738static void iwl3945_post_associate(struct iwl_priv *priv)
b481de9c 5739{
b481de9c
ZY
5740 int rc = 0;
5741 struct ieee80211_conf *conf = NULL;
5742
05c914fe 5743 if (priv->iw_mode == NL80211_IFTYPE_AP) {
15b1687c 5744 IWL_ERR(priv, "%s Should not be called in AP mode\n", __func__);
b481de9c
ZY
5745 return;
5746 }
5747
5748
e174961c 5749 IWL_DEBUG_ASSOC("Associated as %d to: %pM\n",
f2c7e521 5750 priv->assoc_id, priv->active39_rxon.bssid_addr);
b481de9c
ZY
5751
5752 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
5753 return;
5754
322a9811 5755 if (!priv->vif || !priv->is_open)
6ef89d0a 5756 return;
322a9811 5757
af0053d6 5758 iwl_scan_cancel_timeout(priv, 200);
15e869d8 5759
b481de9c
ZY
5760 conf = ieee80211_get_hw_conf(priv->hw);
5761
f2c7e521 5762 priv->staging39_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
bb8c093b 5763 iwl3945_commit_rxon(priv);
b481de9c 5764
28afaf91 5765 memset(&priv->rxon_timing, 0, sizeof(struct iwl_rxon_time_cmd));
bb8c093b
CH
5766 iwl3945_setup_rxon_timing(priv);
5767 rc = iwl3945_send_cmd_pdu(priv, REPLY_RXON_TIMING,
b481de9c
ZY
5768 sizeof(priv->rxon_timing), &priv->rxon_timing);
5769 if (rc)
39aadf8c 5770 IWL_WARN(priv, "REPLY_RXON_TIMING failed - "
b481de9c
ZY
5771 "Attempting to continue.\n");
5772
f2c7e521 5773 priv->staging39_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
b481de9c 5774
f2c7e521 5775 priv->staging39_rxon.assoc_id = cpu_to_le16(priv->assoc_id);
b481de9c
ZY
5776
5777 IWL_DEBUG_ASSOC("assoc id %d beacon interval %d\n",
5778 priv->assoc_id, priv->beacon_int);
5779
5780 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
f2c7e521 5781 priv->staging39_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
b481de9c 5782 else
f2c7e521 5783 priv->staging39_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
b481de9c 5784
f2c7e521 5785 if (priv->staging39_rxon.flags & RXON_FLG_BAND_24G_MSK) {
b481de9c 5786 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
f2c7e521 5787 priv->staging39_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
b481de9c 5788 else
f2c7e521 5789 priv->staging39_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
b481de9c 5790
05c914fe 5791 if (priv->iw_mode == NL80211_IFTYPE_ADHOC)
f2c7e521 5792 priv->staging39_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
b481de9c
ZY
5793
5794 }
5795
bb8c093b 5796 iwl3945_commit_rxon(priv);
b481de9c
ZY
5797
5798 switch (priv->iw_mode) {
05c914fe 5799 case NL80211_IFTYPE_STATION:
bb8c093b 5800 iwl3945_rate_scale_init(priv->hw, IWL_AP_ID);
b481de9c
ZY
5801 break;
5802
05c914fe 5803 case NL80211_IFTYPE_ADHOC:
b481de9c 5804
ce546fd2 5805 priv->assoc_id = 1;
bb8c093b 5806 iwl3945_add_station(priv, priv->bssid, 0, 0);
b481de9c 5807 iwl3945_sync_sta(priv, IWL_STA_ID,
8318d78a 5808 (priv->band == IEEE80211_BAND_5GHZ) ?
b481de9c
ZY
5809 IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP,
5810 CMD_ASYNC);
bb8c093b
CH
5811 iwl3945_rate_scale_init(priv->hw, IWL_STA_ID);
5812 iwl3945_send_beacon_cmd(priv);
b481de9c
ZY
5813
5814 break;
5815
5816 default:
15b1687c 5817 IWL_ERR(priv, "%s Should not be called in %d mode\n",
3ac7f146 5818 __func__, priv->iw_mode);
b481de9c
ZY
5819 break;
5820 }
5821
bb8c093b 5822 iwl3945_activate_qos(priv, 0);
292ae174 5823
7878a5a4
MA
5824 /* we have just associated, don't start scan too early */
5825 priv->next_scan_jiffies = jiffies + IWL_DELAY_NEXT_SCAN;
cd56d331
AK
5826}
5827
bb8c093b 5828static void iwl3945_bg_abort_scan(struct work_struct *work)
b481de9c 5829{
4a8a4322 5830 struct iwl_priv *priv = container_of(work, struct iwl_priv, abort_scan);
b481de9c 5831
775a6e27 5832 if (!iwl_is_ready(priv))
b481de9c
ZY
5833 return;
5834
5835 mutex_lock(&priv->mutex);
5836
5837 set_bit(STATUS_SCAN_ABORTING, &priv->status);
bb8c093b 5838 iwl3945_send_scan_abort(priv);
b481de9c
ZY
5839
5840 mutex_unlock(&priv->mutex);
5841}
5842
e8975581 5843static int iwl3945_mac_config(struct ieee80211_hw *hw, u32 changed);
76bb77e0 5844
bb8c093b 5845static void iwl3945_bg_scan_completed(struct work_struct *work)
b481de9c 5846{
4a8a4322
AK
5847 struct iwl_priv *priv =
5848 container_of(work, struct iwl_priv, scan_completed);
b481de9c
ZY
5849
5850 IWL_DEBUG(IWL_DL_INFO | IWL_DL_SCAN, "SCAN complete scan\n");
5851
5852 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
5853 return;
5854
a0646470 5855 if (test_bit(STATUS_CONF_PENDING, &priv->status))
e8975581 5856 iwl3945_mac_config(priv->hw, 0);
76bb77e0 5857
b481de9c
ZY
5858 ieee80211_scan_completed(priv->hw);
5859
5860 /* Since setting the TXPOWER may have been deferred while
5861 * performing the scan, fire one off */
5862 mutex_lock(&priv->mutex);
bb8c093b 5863 iwl3945_hw_reg_send_txpower(priv);
b481de9c
ZY
5864 mutex_unlock(&priv->mutex);
5865}
5866
5867/*****************************************************************************
5868 *
5869 * mac80211 entry point functions
5870 *
5871 *****************************************************************************/
5872
5a66926a
ZY
5873#define UCODE_READY_TIMEOUT (2 * HZ)
5874
bb8c093b 5875static int iwl3945_mac_start(struct ieee80211_hw *hw)
b481de9c 5876{
4a8a4322 5877 struct iwl_priv *priv = hw->priv;
5a66926a 5878 int ret;
b481de9c
ZY
5879
5880 IWL_DEBUG_MAC80211("enter\n");
5881
5882 /* we should be verifying the device is ready to be opened */
5883 mutex_lock(&priv->mutex);
5884
f2c7e521 5885 memset(&priv->staging39_rxon, 0, sizeof(struct iwl3945_rxon_cmd));
5a66926a
ZY
5886 /* fetch ucode file from disk, alloc and copy to bus-master buffers ...
5887 * ucode filename and max sizes are card-specific. */
5888
5889 if (!priv->ucode_code.len) {
5890 ret = iwl3945_read_ucode(priv);
5891 if (ret) {
15b1687c 5892 IWL_ERR(priv, "Could not read microcode: %d\n", ret);
5a66926a
ZY
5893 mutex_unlock(&priv->mutex);
5894 goto out_release_irq;
5895 }
5896 }
b481de9c 5897
e655b9f0 5898 ret = __iwl3945_up(priv);
b481de9c
ZY
5899
5900 mutex_unlock(&priv->mutex);
5a66926a 5901
80fcc9e2
AG
5902 iwl3945_rfkill_set_hw_state(priv);
5903
e655b9f0
ZY
5904 if (ret)
5905 goto out_release_irq;
5906
5907 IWL_DEBUG_INFO("Start UP work.\n");
5908
5909 if (test_bit(STATUS_IN_SUSPEND, &priv->status))
5910 return 0;
5911
5a66926a
ZY
5912 /* Wait for START_ALIVE from ucode. Otherwise callbacks from
5913 * mac80211 will not be run successfully. */
5914 ret = wait_event_interruptible_timeout(priv->wait_command_queue,
5915 test_bit(STATUS_READY, &priv->status),
5916 UCODE_READY_TIMEOUT);
5917 if (!ret) {
5918 if (!test_bit(STATUS_READY, &priv->status)) {
15b1687c
WT
5919 IWL_ERR(priv,
5920 "Wait for START_ALIVE timeout after %dms.\n",
5921 jiffies_to_msecs(UCODE_READY_TIMEOUT));
5a66926a
ZY
5922 ret = -ETIMEDOUT;
5923 goto out_release_irq;
5924 }
5925 }
5926
2663516d
HS
5927 /* ucode is running and will send rfkill notifications,
5928 * no need to poll the killswitch state anymore */
5929 cancel_delayed_work(&priv->rfkill_poll);
5930
e655b9f0 5931 priv->is_open = 1;
b481de9c
ZY
5932 IWL_DEBUG_MAC80211("leave\n");
5933 return 0;
5a66926a
ZY
5934
5935out_release_irq:
e655b9f0
ZY
5936 priv->is_open = 0;
5937 IWL_DEBUG_MAC80211("leave - failed\n");
5a66926a 5938 return ret;
b481de9c
ZY
5939}
5940
bb8c093b 5941static void iwl3945_mac_stop(struct ieee80211_hw *hw)
b481de9c 5942{
4a8a4322 5943 struct iwl_priv *priv = hw->priv;
b481de9c
ZY
5944
5945 IWL_DEBUG_MAC80211("enter\n");
6ef89d0a 5946
e655b9f0
ZY
5947 if (!priv->is_open) {
5948 IWL_DEBUG_MAC80211("leave - skip\n");
5949 return;
5950 }
5951
b481de9c 5952 priv->is_open = 0;
5a66926a 5953
775a6e27 5954 if (iwl_is_ready_rf(priv)) {
e655b9f0
ZY
5955 /* stop mac, cancel any scan request and clear
5956 * RXON_FILTER_ASSOC_MSK BIT
5957 */
5a66926a 5958 mutex_lock(&priv->mutex);
af0053d6 5959 iwl_scan_cancel_timeout(priv, 100);
fde3571f 5960 mutex_unlock(&priv->mutex);
fde3571f
MA
5961 }
5962
5a66926a
ZY
5963 iwl3945_down(priv);
5964
5965 flush_workqueue(priv->workqueue);
2663516d
HS
5966
5967 /* start polling the killswitch state again */
5968 queue_delayed_work(priv->workqueue, &priv->rfkill_poll,
5969 round_jiffies_relative(2 * HZ));
6ef89d0a 5970
b481de9c 5971 IWL_DEBUG_MAC80211("leave\n");
b481de9c
ZY
5972}
5973
e039fa4a 5974static int iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
b481de9c 5975{
4a8a4322 5976 struct iwl_priv *priv = hw->priv;
b481de9c
ZY
5977
5978 IWL_DEBUG_MAC80211("enter\n");
5979
b481de9c 5980 IWL_DEBUG_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
e039fa4a 5981 ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
b481de9c 5982
e039fa4a 5983 if (iwl3945_tx_skb(priv, skb))
b481de9c
ZY
5984 dev_kfree_skb_any(skb);
5985
5986 IWL_DEBUG_MAC80211("leave\n");
637f8837 5987 return NETDEV_TX_OK;
b481de9c
ZY
5988}
5989
bb8c093b 5990static int iwl3945_mac_add_interface(struct ieee80211_hw *hw,
b481de9c
ZY
5991 struct ieee80211_if_init_conf *conf)
5992{
4a8a4322 5993 struct iwl_priv *priv = hw->priv;
b481de9c
ZY
5994 unsigned long flags;
5995
32bfd35d 5996 IWL_DEBUG_MAC80211("enter: type %d\n", conf->type);
b481de9c 5997
32bfd35d
JB
5998 if (priv->vif) {
5999 IWL_DEBUG_MAC80211("leave - vif != NULL\n");
864792e3 6000 return -EOPNOTSUPP;
b481de9c
ZY
6001 }
6002
6003 spin_lock_irqsave(&priv->lock, flags);
32bfd35d 6004 priv->vif = conf->vif;
60294de3 6005 priv->iw_mode = conf->type;
b481de9c
ZY
6006
6007 spin_unlock_irqrestore(&priv->lock, flags);
6008
6009 mutex_lock(&priv->mutex);
864792e3
TW
6010
6011 if (conf->mac_addr) {
e174961c 6012 IWL_DEBUG_MAC80211("Set: %pM\n", conf->mac_addr);
864792e3
TW
6013 memcpy(priv->mac_addr, conf->mac_addr, ETH_ALEN);
6014 }
6015
775a6e27 6016 if (iwl_is_ready(priv))
5a66926a 6017 iwl3945_set_mode(priv, conf->type);
b481de9c 6018
b481de9c
ZY
6019 mutex_unlock(&priv->mutex);
6020
5a66926a 6021 IWL_DEBUG_MAC80211("leave\n");
b481de9c
ZY
6022 return 0;
6023}
6024
6025/**
bb8c093b 6026 * iwl3945_mac_config - mac80211 config callback
b481de9c
ZY
6027 *
6028 * We ignore conf->flags & IEEE80211_CONF_SHORT_SLOT_TIME since it seems to
6029 * be set inappropriately and the driver currently sets the hardware up to
6030 * use it whenever needed.
6031 */
e8975581 6032static int iwl3945_mac_config(struct ieee80211_hw *hw, u32 changed)
b481de9c 6033{
4a8a4322 6034 struct iwl_priv *priv = hw->priv;
d20b3c65 6035 const struct iwl_channel_info *ch_info;
e8975581 6036 struct ieee80211_conf *conf = &hw->conf;
b481de9c 6037 unsigned long flags;
76bb77e0 6038 int ret = 0;
b481de9c
ZY
6039
6040 mutex_lock(&priv->mutex);
8318d78a 6041 IWL_DEBUG_MAC80211("enter to channel %d\n", conf->channel->hw_value);
b481de9c 6042
775a6e27 6043 if (!iwl_is_ready(priv)) {
b481de9c 6044 IWL_DEBUG_MAC80211("leave - not ready\n");
76bb77e0
ZY
6045 ret = -EIO;
6046 goto out;
b481de9c
ZY
6047 }
6048
df878d8f 6049 if (unlikely(!iwl3945_mod_params.disable_hw_scan &&
b481de9c 6050 test_bit(STATUS_SCANNING, &priv->status))) {
a0646470
ZY
6051 IWL_DEBUG_MAC80211("leave - scanning\n");
6052 set_bit(STATUS_CONF_PENDING, &priv->status);
b481de9c 6053 mutex_unlock(&priv->mutex);
a0646470 6054 return 0;
b481de9c
ZY
6055 }
6056
6057 spin_lock_irqsave(&priv->lock, flags);
6058
8318d78a
JB
6059 ch_info = iwl3945_get_channel_info(priv, conf->channel->band,
6060 conf->channel->hw_value);
b481de9c 6061 if (!is_channel_valid(ch_info)) {
66b5004d 6062 IWL_DEBUG_SCAN("Channel %d [%d] is INVALID for this band.\n",
8318d78a 6063 conf->channel->hw_value, conf->channel->band);
b481de9c
ZY
6064 IWL_DEBUG_MAC80211("leave - invalid channel\n");
6065 spin_unlock_irqrestore(&priv->lock, flags);
76bb77e0
ZY
6066 ret = -EINVAL;
6067 goto out;
b481de9c
ZY
6068 }
6069
8318d78a 6070 iwl3945_set_rxon_channel(priv, conf->channel->band, conf->channel->hw_value);
b481de9c 6071
8318d78a 6072 iwl3945_set_flags_for_phymode(priv, conf->channel->band);
b481de9c
ZY
6073
6074 /* The list of supported rates and rate mask can be different
6075 * for each phymode; since the phymode may have changed, reset
6076 * the rate mask to what mac80211 lists */
bb8c093b 6077 iwl3945_set_rate(priv);
b481de9c
ZY
6078
6079 spin_unlock_irqrestore(&priv->lock, flags);
6080
6081#ifdef IEEE80211_CONF_CHANNEL_SWITCH
6082 if (conf->flags & IEEE80211_CONF_CHANNEL_SWITCH) {
bb8c093b 6083 iwl3945_hw_channel_switch(priv, conf->channel);
76bb77e0 6084 goto out;
b481de9c
ZY
6085 }
6086#endif
6087
bb8c093b 6088 iwl3945_radio_kill_sw(priv, !conf->radio_enabled);
b481de9c
ZY
6089
6090 if (!conf->radio_enabled) {
6091 IWL_DEBUG_MAC80211("leave - radio disabled\n");
76bb77e0 6092 goto out;
b481de9c
ZY
6093 }
6094
775a6e27 6095 if (iwl_is_rfkill(priv)) {
b481de9c 6096 IWL_DEBUG_MAC80211("leave - RF kill\n");
76bb77e0
ZY
6097 ret = -EIO;
6098 goto out;
b481de9c
ZY
6099 }
6100
bb8c093b 6101 iwl3945_set_rate(priv);
b481de9c 6102
f2c7e521
AK
6103 if (memcmp(&priv->active39_rxon,
6104 &priv->staging39_rxon, sizeof(priv->staging39_rxon)))
bb8c093b 6105 iwl3945_commit_rxon(priv);
b481de9c
ZY
6106 else
6107 IWL_DEBUG_INFO("No re-sending same RXON configuration.\n");
6108
6109 IWL_DEBUG_MAC80211("leave\n");
6110
76bb77e0 6111out:
a0646470 6112 clear_bit(STATUS_CONF_PENDING, &priv->status);
b481de9c 6113 mutex_unlock(&priv->mutex);
76bb77e0 6114 return ret;
b481de9c
ZY
6115}
6116
4a8a4322 6117static void iwl3945_config_ap(struct iwl_priv *priv)
b481de9c
ZY
6118{
6119 int rc = 0;
6120
d986bcd1 6121 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
b481de9c
ZY
6122 return;
6123
6124 /* The following should be done only at AP bring up */
5d1e2325 6125 if (!(iwl3945_is_associated(priv))) {
b481de9c
ZY
6126
6127 /* RXON - unassoc (to set timing command) */
f2c7e521 6128 priv->staging39_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
bb8c093b 6129 iwl3945_commit_rxon(priv);
b481de9c
ZY
6130
6131 /* RXON Timing */
28afaf91 6132 memset(&priv->rxon_timing, 0, sizeof(struct iwl_rxon_time_cmd));
bb8c093b
CH
6133 iwl3945_setup_rxon_timing(priv);
6134 rc = iwl3945_send_cmd_pdu(priv, REPLY_RXON_TIMING,
b481de9c
ZY
6135 sizeof(priv->rxon_timing), &priv->rxon_timing);
6136 if (rc)
39aadf8c 6137 IWL_WARN(priv, "REPLY_RXON_TIMING failed - "
b481de9c
ZY
6138 "Attempting to continue.\n");
6139
6140 /* FIXME: what should be the assoc_id for AP? */
f2c7e521 6141 priv->staging39_rxon.assoc_id = cpu_to_le16(priv->assoc_id);
b481de9c 6142 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
f2c7e521 6143 priv->staging39_rxon.flags |=
b481de9c
ZY
6144 RXON_FLG_SHORT_PREAMBLE_MSK;
6145 else
f2c7e521 6146 priv->staging39_rxon.flags &=
b481de9c
ZY
6147 ~RXON_FLG_SHORT_PREAMBLE_MSK;
6148
f2c7e521 6149 if (priv->staging39_rxon.flags & RXON_FLG_BAND_24G_MSK) {
b481de9c
ZY
6150 if (priv->assoc_capability &
6151 WLAN_CAPABILITY_SHORT_SLOT_TIME)
f2c7e521 6152 priv->staging39_rxon.flags |=
b481de9c
ZY
6153 RXON_FLG_SHORT_SLOT_MSK;
6154 else
f2c7e521 6155 priv->staging39_rxon.flags &=
b481de9c
ZY
6156 ~RXON_FLG_SHORT_SLOT_MSK;
6157
05c914fe 6158 if (priv->iw_mode == NL80211_IFTYPE_ADHOC)
f2c7e521 6159 priv->staging39_rxon.flags &=
b481de9c
ZY
6160 ~RXON_FLG_SHORT_SLOT_MSK;
6161 }
6162 /* restore RXON assoc */
f2c7e521 6163 priv->staging39_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
bb8c093b 6164 iwl3945_commit_rxon(priv);
b5323d36 6165 iwl3945_add_station(priv, iwl_bcast_addr, 0, 0);
556f8db7 6166 }
bb8c093b 6167 iwl3945_send_beacon_cmd(priv);
b481de9c
ZY
6168
6169 /* FIXME - we need to add code here to detect a totally new
6170 * configuration, reset the AP, unassoc, rxon timing, assoc,
6171 * clear sta table, add BCAST sta... */
6172}
6173
32bfd35d
JB
6174static int iwl3945_mac_config_interface(struct ieee80211_hw *hw,
6175 struct ieee80211_vif *vif,
4a8a4322 6176 struct ieee80211_if_conf *conf)
b481de9c 6177{
4a8a4322 6178 struct iwl_priv *priv = hw->priv;
b481de9c
ZY
6179 int rc;
6180
6181 if (conf == NULL)
6182 return -EIO;
6183
b716bb91
EG
6184 if (priv->vif != vif) {
6185 IWL_DEBUG_MAC80211("leave - priv->vif != vif\n");
b716bb91
EG
6186 return 0;
6187 }
6188
9d139c81 6189 /* handle this temporarily here */
05c914fe 6190 if (priv->iw_mode == NL80211_IFTYPE_ADHOC &&
9d139c81
JB
6191 conf->changed & IEEE80211_IFCC_BEACON) {
6192 struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
6193 if (!beacon)
6194 return -ENOMEM;
9bdf5eca 6195 mutex_lock(&priv->mutex);
9d139c81 6196 rc = iwl3945_mac_beacon_update(hw, beacon);
9bdf5eca 6197 mutex_unlock(&priv->mutex);
9d139c81
JB
6198 if (rc)
6199 return rc;
6200 }
6201
775a6e27 6202 if (!iwl_is_alive(priv))
5a66926a
ZY
6203 return -EAGAIN;
6204
b481de9c
ZY
6205 mutex_lock(&priv->mutex);
6206
b481de9c 6207 if (conf->bssid)
e174961c 6208 IWL_DEBUG_MAC80211("bssid: %pM\n", conf->bssid);
b481de9c 6209
4150c572
JB
6210/*
6211 * very dubious code was here; the probe filtering flag is never set:
6212 *
b481de9c
ZY
6213 if (unlikely(test_bit(STATUS_SCANNING, &priv->status)) &&
6214 !(priv->hw->flags & IEEE80211_HW_NO_PROBE_FILTERING)) {
4150c572 6215 */
b481de9c 6216
05c914fe 6217 if (priv->iw_mode == NL80211_IFTYPE_AP) {
b481de9c
ZY
6218 if (!conf->bssid) {
6219 conf->bssid = priv->mac_addr;
6220 memcpy(priv->bssid, priv->mac_addr, ETH_ALEN);
e174961c
JB
6221 IWL_DEBUG_MAC80211("bssid was set to: %pM\n",
6222 conf->bssid);
b481de9c
ZY
6223 }
6224 if (priv->ibss_beacon)
6225 dev_kfree_skb(priv->ibss_beacon);
6226
9d139c81 6227 priv->ibss_beacon = ieee80211_beacon_get(hw, vif);
b481de9c
ZY
6228 }
6229
775a6e27 6230 if (iwl_is_rfkill(priv))
fde3571f
MA
6231 goto done;
6232
b481de9c
ZY
6233 if (conf->bssid && !is_zero_ether_addr(conf->bssid) &&
6234 !is_multicast_ether_addr(conf->bssid)) {
6235 /* If there is currently a HW scan going on in the background
6236 * then we need to cancel it else the RXON below will fail. */
af0053d6 6237 if (iwl_scan_cancel_timeout(priv, 100)) {
39aadf8c 6238 IWL_WARN(priv, "Aborted scan still in progress "
b481de9c
ZY
6239 "after 100ms\n");
6240 IWL_DEBUG_MAC80211("leaving - scan abort failed.\n");
6241 mutex_unlock(&priv->mutex);
6242 return -EAGAIN;
6243 }
f2c7e521 6244 memcpy(priv->staging39_rxon.bssid_addr, conf->bssid, ETH_ALEN);
b481de9c
ZY
6245
6246 /* TODO: Audit driver for usage of these members and see
6247 * if mac80211 deprecates them (priv->bssid looks like it
6248 * shouldn't be there, but I haven't scanned the IBSS code
6249 * to verify) - jpk */
6250 memcpy(priv->bssid, conf->bssid, ETH_ALEN);
6251
05c914fe 6252 if (priv->iw_mode == NL80211_IFTYPE_AP)
bb8c093b 6253 iwl3945_config_ap(priv);
b481de9c 6254 else {
bb8c093b 6255 rc = iwl3945_commit_rxon(priv);
05c914fe 6256 if ((priv->iw_mode == NL80211_IFTYPE_STATION) && rc)
bb8c093b 6257 iwl3945_add_station(priv,
f2c7e521 6258 priv->active39_rxon.bssid_addr, 1, 0);
b481de9c
ZY
6259 }
6260
6261 } else {
af0053d6 6262 iwl_scan_cancel_timeout(priv, 100);
f2c7e521 6263 priv->staging39_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
bb8c093b 6264 iwl3945_commit_rxon(priv);
b481de9c
ZY
6265 }
6266
fde3571f 6267 done:
b481de9c
ZY
6268 IWL_DEBUG_MAC80211("leave\n");
6269 mutex_unlock(&priv->mutex);
6270
6271 return 0;
6272}
6273
bb8c093b 6274static void iwl3945_configure_filter(struct ieee80211_hw *hw,
4150c572
JB
6275 unsigned int changed_flags,
6276 unsigned int *total_flags,
6277 int mc_count, struct dev_addr_list *mc_list)
6278{
4a8a4322 6279 struct iwl_priv *priv = hw->priv;
f2c7e521 6280 __le32 *filter_flags = &priv->staging39_rxon.filter_flags;
25b3f57c 6281
352bc8de
ZY
6282 IWL_DEBUG_MAC80211("Enter: changed: 0x%x, total: 0x%x\n",
6283 changed_flags, *total_flags);
6284
6285 if (changed_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS)) {
6286 if (*total_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS))
6287 *filter_flags |= RXON_FILTER_PROMISC_MSK;
6288 else
6289 *filter_flags &= ~RXON_FILTER_PROMISC_MSK;
6290 }
6291 if (changed_flags & FIF_ALLMULTI) {
6292 if (*total_flags & FIF_ALLMULTI)
6293 *filter_flags |= RXON_FILTER_ACCEPT_GRP_MSK;
6294 else
6295 *filter_flags &= ~RXON_FILTER_ACCEPT_GRP_MSK;
6296 }
6297 if (changed_flags & FIF_CONTROL) {
6298 if (*total_flags & FIF_CONTROL)
6299 *filter_flags |= RXON_FILTER_CTL2HOST_MSK;
6300 else
6301 *filter_flags &= ~RXON_FILTER_CTL2HOST_MSK;
5ec03976 6302 }
352bc8de
ZY
6303 if (changed_flags & FIF_BCN_PRBRESP_PROMISC) {
6304 if (*total_flags & FIF_BCN_PRBRESP_PROMISC)
6305 *filter_flags |= RXON_FILTER_BCON_AWARE_MSK;
6306 else
6307 *filter_flags &= ~RXON_FILTER_BCON_AWARE_MSK;
6308 }
6309
6310 /* We avoid iwl_commit_rxon here to commit the new filter flags
6311 * since mac80211 will call ieee80211_hw_config immediately.
6312 * (mc_list is not supported at this time). Otherwise, we need to
6313 * queue a background iwl_commit_rxon work.
6314 */
6315
6316 *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
25b3f57c 6317 FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
4150c572
JB
6318}
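/*
 * The mapping above only edits the staging RXON filter bits; nothing is
 * sent to the device at this point.  The next iwl3945_commit_rxon(),
 * triggered by the follow-up mac80211 config call mentioned in the comment,
 * is what actually pushes the accepted FIF_* bits down to the hardware as
 * RXON_FILTER_* flags.
 */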
6319
bb8c093b 6320static void iwl3945_mac_remove_interface(struct ieee80211_hw *hw,
b481de9c
ZY
6321 struct ieee80211_if_init_conf *conf)
6322{
4a8a4322 6323 struct iwl_priv *priv = hw->priv;
b481de9c
ZY
6324
6325 IWL_DEBUG_MAC80211("enter\n");
6326
6327 mutex_lock(&priv->mutex);
6ef89d0a 6328
775a6e27 6329 if (iwl_is_ready_rf(priv)) {
af0053d6 6330 iwl_scan_cancel_timeout(priv, 100);
f2c7e521 6331 priv->staging39_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
fde3571f
MA
6332 iwl3945_commit_rxon(priv);
6333 }
32bfd35d
JB
6334 if (priv->vif == conf->vif) {
6335 priv->vif = NULL;
b481de9c 6336 memset(priv->bssid, 0, ETH_ALEN);
b481de9c
ZY
6337 }
6338 mutex_unlock(&priv->mutex);
6339
6340 IWL_DEBUG_MAC80211("leave\n");
b481de9c
ZY
6341}
6342
cd56d331
AK
6343#define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)
6344
6345static void iwl3945_bss_info_changed(struct ieee80211_hw *hw,
6346 struct ieee80211_vif *vif,
6347 struct ieee80211_bss_conf *bss_conf,
6348 u32 changes)
6349{
4a8a4322 6350 struct iwl_priv *priv = hw->priv;
cd56d331
AK
6351
6352 IWL_DEBUG_MAC80211("changes = 0x%X\n", changes);
6353
6354 if (changes & BSS_CHANGED_ERP_PREAMBLE) {
6355 IWL_DEBUG_MAC80211("ERP_PREAMBLE %d\n",
6356 bss_conf->use_short_preamble);
6357 if (bss_conf->use_short_preamble)
f2c7e521 6358 priv->staging39_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
cd56d331 6359 else
f2c7e521 6360 priv->staging39_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
cd56d331
AK
6361 }
6362
6363 if (changes & BSS_CHANGED_ERP_CTS_PROT) {
6364 IWL_DEBUG_MAC80211("ERP_CTS %d\n", bss_conf->use_cts_prot);
6365 if (bss_conf->use_cts_prot && (priv->band != IEEE80211_BAND_5GHZ))
f2c7e521 6366 priv->staging39_rxon.flags |= RXON_FLG_TGG_PROTECT_MSK;
cd56d331 6367 else
f2c7e521 6368 priv->staging39_rxon.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
cd56d331
AK
6369 }
6370
6371 if (changes & BSS_CHANGED_ASSOC) {
6372 IWL_DEBUG_MAC80211("ASSOC %d\n", bss_conf->assoc);
6373 /* This should never happen as this function should
6374 * never be called from interrupt context. */
6375 if (WARN_ON_ONCE(in_interrupt()))
6376 return;
6377 if (bss_conf->assoc) {
6378 priv->assoc_id = bss_conf->aid;
6379 priv->beacon_int = bss_conf->beacon_int;
28afaf91 6380 priv->timestamp = bss_conf->timestamp;
cd56d331
AK
6381 priv->assoc_capability = bss_conf->assoc_capability;
6382 priv->next_scan_jiffies = jiffies +
6383 IWL_DELAY_NEXT_SCAN_AFTER_ASSOC;
6384 mutex_lock(&priv->mutex);
6385 iwl3945_post_associate(priv);
6386 mutex_unlock(&priv->mutex);
6387 } else {
6388 priv->assoc_id = 0;
6389 IWL_DEBUG_MAC80211("DISASSOC %d\n", bss_conf->assoc);
6390 }
6391 } else if (changes && iwl3945_is_associated(priv) && priv->assoc_id) {
6392 IWL_DEBUG_MAC80211("Associated Changes %d\n", changes);
6393 iwl3945_send_rxon_assoc(priv);
6394 }
6395
6396}
6397
bb8c093b 6398static int iwl3945_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t len)
b481de9c
ZY
6399{
6400 int rc = 0;
6401 unsigned long flags;
4a8a4322 6402 struct iwl_priv *priv = hw->priv;
9387b7ca 6403 DECLARE_SSID_BUF(ssid_buf);
b481de9c
ZY
6404
6405 IWL_DEBUG_MAC80211("enter\n");
6406
15e869d8 6407 mutex_lock(&priv->mutex);
b481de9c
ZY
6408 spin_lock_irqsave(&priv->lock, flags);
6409
775a6e27 6410 if (!iwl_is_ready_rf(priv)) {
b481de9c
ZY
6411 rc = -EIO;
6412 IWL_DEBUG_MAC80211("leave - not ready or exit pending\n");
6413 goto out_unlock;
6414 }
6415
7878a5a4
MA
6416 /* we don't schedule scan within next_scan_jiffies period */
6417 if (priv->next_scan_jiffies &&
6418 time_after(priv->next_scan_jiffies, jiffies)) {
6419 rc = -EAGAIN;
6420 goto out_unlock;
6421 }
15dbf1b7
BM
6422 /* if we just finished scan ask for delay for a broadcast scan */
6423 if ((len == 0) && priv->last_scan_jiffies &&
6424 time_after(priv->last_scan_jiffies + IWL_DELAY_NEXT_SCAN,
6425 jiffies)) {
b481de9c
ZY
6426 rc = -EAGAIN;
6427 goto out_unlock;
6428 }
6429 if (len) {
7878a5a4 6430 IWL_DEBUG_SCAN("direct scan for %s [%d]\n ",
9387b7ca 6431 print_ssid(ssid_buf, ssid, len), (int)len);
b481de9c
ZY
6432
6433 priv->one_direct_scan = 1;
6434 priv->direct_ssid_len = (u8)
6435 min((u8) len, (u8) IW_ESSID_MAX_SIZE);
6436 memcpy(priv->direct_ssid, ssid, priv->direct_ssid_len);
6ef89d0a
MA
6437 } else
6438 priv->one_direct_scan = 0;
b481de9c 6439
bb8c093b 6440 rc = iwl3945_scan_initiate(priv);
b481de9c
ZY
6441
6442 IWL_DEBUG_MAC80211("leave\n");
6443
6444out_unlock:
6445 spin_unlock_irqrestore(&priv->lock, flags);
15e869d8 6446 mutex_unlock(&priv->mutex);
b481de9c
ZY
6447
6448 return rc;
6449}
6450
bb8c093b 6451static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
dc822b5d
JB
6452 struct ieee80211_vif *vif,
6453 struct ieee80211_sta *sta,
6454 struct ieee80211_key_conf *key)
b481de9c 6455{
4a8a4322 6456 struct iwl_priv *priv = hw->priv;
dc822b5d 6457 const u8 *addr;
42986796 6458 int ret;
b481de9c
ZY
6459 u8 sta_id;
6460
6461 IWL_DEBUG_MAC80211("enter\n");
6462
df878d8f 6463 if (iwl3945_mod_params.sw_crypto) {
b481de9c
ZY
6464 IWL_DEBUG_MAC80211("leave - hwcrypto disabled\n");
6465 return -EOPNOTSUPP;
6466 }
6467
42986796 6468 addr = sta ? sta->addr : iwl_bcast_addr;
bb8c093b 6469 sta_id = iwl3945_hw_find_station(priv, addr);
b481de9c 6470 if (sta_id == IWL_INVALID_STATION) {
e174961c
JB
6471 IWL_DEBUG_MAC80211("leave - %pM not in station map.\n",
6472 addr);
b481de9c
ZY
6473 return -EINVAL;
6474 }
6475
6476 mutex_lock(&priv->mutex);
6477
af0053d6 6478 iwl_scan_cancel_timeout(priv, 100);
15e869d8 6479
b481de9c
ZY
6480 switch (cmd) {
6481 case SET_KEY:
42986796
WT
6482 ret = iwl3945_update_sta_key_info(priv, key, sta_id);
6483 if (!ret) {
bb8c093b
CH
6484 iwl3945_set_rxon_hwcrypto(priv, 1);
6485 iwl3945_commit_rxon(priv);
b481de9c
ZY
6486 key->hw_key_idx = sta_id;
6487 IWL_DEBUG_MAC80211("set_key success, using hwcrypto\n");
6488 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
6489 }
6490 break;
6491 case DISABLE_KEY:
42986796
WT
6492 ret = iwl3945_clear_sta_key_info(priv, sta_id);
6493 if (!ret) {
bb8c093b
CH
6494 iwl3945_set_rxon_hwcrypto(priv, 0);
6495 iwl3945_commit_rxon(priv);
b481de9c
ZY
6496 IWL_DEBUG_MAC80211("disable hwcrypto key\n");
6497 }
6498 break;
6499 default:
42986796 6500 ret = -EINVAL;
b481de9c
ZY
6501 }
6502
6503 IWL_DEBUG_MAC80211("leave\n");
6504 mutex_unlock(&priv->mutex);
6505
42986796 6506 return ret;
b481de9c
ZY
6507}
6508
e100bb64 6509static int iwl3945_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
b481de9c
ZY
6510 const struct ieee80211_tx_queue_params *params)
6511{
4a8a4322 6512 struct iwl_priv *priv = hw->priv;
b481de9c
ZY
6513 unsigned long flags;
6514 int q;
b481de9c
ZY
6515
6516 IWL_DEBUG_MAC80211("enter\n");
6517
775a6e27 6518 if (!iwl_is_ready_rf(priv)) {
b481de9c
ZY
6519 IWL_DEBUG_MAC80211("leave - RF not ready\n");
6520 return -EIO;
6521 }
6522
6523 if (queue >= AC_NUM) {
6524 IWL_DEBUG_MAC80211("leave - queue >= AC_NUM %d\n", queue);
6525 return 0;
6526 }
6527
b481de9c
ZY
6528 q = AC_NUM - 1 - queue;
6529
6530 spin_lock_irqsave(&priv->lock, flags);
6531
6532 priv->qos_data.def_qos_parm.ac[q].cw_min = cpu_to_le16(params->cw_min);
6533 priv->qos_data.def_qos_parm.ac[q].cw_max = cpu_to_le16(params->cw_max);
6534 priv->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
6535 priv->qos_data.def_qos_parm.ac[q].edca_txop =
3330d7be 6536 cpu_to_le16((params->txop * 32));
b481de9c
ZY
6537
6538 priv->qos_data.def_qos_parm.ac[q].reserved1 = 0;
6539 priv->qos_data.qos_active = 1;
6540
6541 spin_unlock_irqrestore(&priv->lock, flags);
6542
6543 mutex_lock(&priv->mutex);
05c914fe 6544 if (priv->iw_mode == NL80211_IFTYPE_AP)
bb8c093b
CH
6545 iwl3945_activate_qos(priv, 1);
6546 else if (priv->assoc_id && iwl3945_is_associated(priv))
6547 iwl3945_activate_qos(priv, 0);
b481de9c
ZY
6548
6549 mutex_unlock(&priv->mutex);
6550
b481de9c
ZY
6551 IWL_DEBUG_MAC80211("leave\n");
6552 return 0;
6553}
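/*
 * Worked example for the conversion above, assuming (as in this mac80211
 * generation) that params->txop arrives in units of 32 usec and that the
 * firmware's edca_txop field is programmed in plain microseconds:
 *
 *   txop = 94  ->  94 * 32 = 3008 usec, i.e. a ~3.008 ms TXOP limit
 *
 * The q = AC_NUM - 1 - queue line reverses the index order, which suggests
 * the def_qos_parm.ac[] table runs in the opposite priority order from
 * mac80211's queue numbering (queue 0 being highest priority in mac80211).
 */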
6554
bb8c093b 6555static int iwl3945_mac_get_tx_stats(struct ieee80211_hw *hw,
b481de9c
ZY
6556 struct ieee80211_tx_queue_stats *stats)
6557{
4a8a4322 6558 struct iwl_priv *priv = hw->priv;
b481de9c 6559 int i, avail;
188cf6c7 6560 struct iwl_tx_queue *txq;
d20b3c65 6561 struct iwl_queue *q;
b481de9c
ZY
6562 unsigned long flags;
6563
6564 IWL_DEBUG_MAC80211("enter\n");
6565
775a6e27 6566 if (!iwl_is_ready_rf(priv)) {
b481de9c
ZY
6567 IWL_DEBUG_MAC80211("leave - RF not ready\n");
6568 return -EIO;
6569 }
6570
6571 spin_lock_irqsave(&priv->lock, flags);
6572
6573 for (i = 0; i < AC_NUM; i++) {
188cf6c7 6574 txq = &priv->txq[i];
b481de9c 6575 q = &txq->q;
d20b3c65 6576 avail = iwl_queue_space(q);
b481de9c 6577
57ffc589
JB
6578 stats[i].len = q->n_window - avail;
6579 stats[i].limit = q->n_window - q->high_mark;
6580 stats[i].count = q->n_window;
b481de9c
ZY
6581
6582 }
6583 spin_unlock_irqrestore(&priv->lock, flags);
6584
6585 IWL_DEBUG_MAC80211("leave\n");
6586
6587 return 0;
6588}
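/*
 * Illustrative (hypothetical) numbers for the per-queue stats above: with
 * q->n_window = 64, q->high_mark = 8 and iwl_queue_space() returning 54,
 * mac80211 would be told
 *   len   = 64 - 54 = 10   frames currently sitting in the ring
 *   limit = 64 -  8 = 56   fill level before the driver throttles
 *   count = 64             total entries in the ring window
 */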
6589
bb8c093b 6590static void iwl3945_mac_reset_tsf(struct ieee80211_hw *hw)
b481de9c 6591{
4a8a4322 6592 struct iwl_priv *priv = hw->priv;
b481de9c
ZY
6593 unsigned long flags;
6594
6595 mutex_lock(&priv->mutex);
6596 IWL_DEBUG_MAC80211("enter\n");
6597
775a6e27 6598 iwl_reset_qos(priv);
292ae174 6599
b481de9c
ZY
6600 spin_lock_irqsave(&priv->lock, flags);
6601 priv->assoc_id = 0;
6602 priv->assoc_capability = 0;
6603 priv->call_post_assoc_from_beacon = 0;
6604
6605 /* new association get rid of ibss beacon skb */
6606 if (priv->ibss_beacon)
6607 dev_kfree_skb(priv->ibss_beacon);
6608
6609 priv->ibss_beacon = NULL;
6610
6611 priv->beacon_int = priv->hw->conf.beacon_int;
28afaf91 6612 priv->timestamp = 0;
05c914fe 6613 if ((priv->iw_mode == NL80211_IFTYPE_STATION))
b481de9c
ZY
6614 priv->beacon_int = 0;
6615
6616 spin_unlock_irqrestore(&priv->lock, flags);
6617
775a6e27 6618 if (!iwl_is_ready_rf(priv)) {
fde3571f
MA
6619 IWL_DEBUG_MAC80211("leave - not ready\n");
6620 mutex_unlock(&priv->mutex);
6621 return;
6622 }
6623
15e869d8
MA
6624 /* we are restarting association process
6625 * clear RXON_FILTER_ASSOC_MSK bit
6626 */
05c914fe 6627 if (priv->iw_mode != NL80211_IFTYPE_AP) {
af0053d6 6628 iwl_scan_cancel_timeout(priv, 100);
f2c7e521 6629 priv->staging39_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
bb8c093b 6630 iwl3945_commit_rxon(priv);
15e869d8
MA
6631 }
6632
b481de9c 6633 /* Per mac80211.h: This is only used in IBSS mode... */
05c914fe 6634 if (priv->iw_mode != NL80211_IFTYPE_ADHOC) {
15e869d8 6635
b481de9c
ZY
6636 IWL_DEBUG_MAC80211("leave - not in IBSS\n");
6637 mutex_unlock(&priv->mutex);
6638 return;
b481de9c
ZY
6639 }
6640
bb8c093b 6641 iwl3945_set_rate(priv);
b481de9c
ZY
6642
6643 mutex_unlock(&priv->mutex);
6644
6645 IWL_DEBUG_MAC80211("leave\n");
6646
6647}
6648
e039fa4a 6649static int iwl3945_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
b481de9c 6650{
4a8a4322 6651 struct iwl_priv *priv = hw->priv;
b481de9c
ZY
6652 unsigned long flags;
6653
b481de9c
ZY
6654 IWL_DEBUG_MAC80211("enter\n");
6655
775a6e27 6656 if (!iwl_is_ready_rf(priv)) {
b481de9c 6657 IWL_DEBUG_MAC80211("leave - RF not ready\n");
b481de9c
ZY
6658 return -EIO;
6659 }
6660
05c914fe 6661 if (priv->iw_mode != NL80211_IFTYPE_ADHOC) {
b481de9c 6662 IWL_DEBUG_MAC80211("leave - not IBSS\n");
b481de9c
ZY
6663 return -EIO;
6664 }
6665
6666 spin_lock_irqsave(&priv->lock, flags);
6667
6668 if (priv->ibss_beacon)
6669 dev_kfree_skb(priv->ibss_beacon);
6670
6671 priv->ibss_beacon = skb;
6672
6673 priv->assoc_id = 0;
6674
6675 IWL_DEBUG_MAC80211("leave\n");
6676 spin_unlock_irqrestore(&priv->lock, flags);
6677
775a6e27 6678 iwl_reset_qos(priv);
b481de9c 6679
dc4b1e7d 6680 iwl3945_post_associate(priv);
b481de9c 6681
b481de9c
ZY
6682
6683 return 0;
6684}
6685
6686/*****************************************************************************
6687 *
6688 * sysfs attributes
6689 *
6690 *****************************************************************************/
6691
c8b0e6e1 6692#ifdef CONFIG_IWL3945_DEBUG
b481de9c
ZY
6693
6694/*
6695 * The following adds a new attribute to the sysfs representation
6696 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/iwl/)
6697 * used for controlling the debug level.
6698 *
6699 * See the level definitions in iwl for details.
6700 */
40b8ec0b
SO
6701static ssize_t show_debug_level(struct device *d,
6702 struct device_attribute *attr, char *buf)
b481de9c 6703{
4a8a4322 6704 struct iwl_priv *priv = d->driver_data;
40b8ec0b
SO
6705
6706 return sprintf(buf, "0x%08X\n", priv->debug_level);
b481de9c 6707}
40b8ec0b
SO
6708static ssize_t store_debug_level(struct device *d,
6709 struct device_attribute *attr,
b481de9c
ZY
6710 const char *buf, size_t count)
6711{
4a8a4322 6712 struct iwl_priv *priv = d->driver_data;
40b8ec0b
SO
6713 unsigned long val;
6714 int ret;
b481de9c 6715
40b8ec0b
SO
6716 ret = strict_strtoul(buf, 0, &val);
6717 if (ret)
978785a3 6718 IWL_INFO(priv, "%s is not in hex or decimal form.\n", buf);
b481de9c 6719 else
40b8ec0b 6720 priv->debug_level = val;
b481de9c
ZY
6721
6722 return strnlen(buf, count);
6723}
6724
40b8ec0b
SO
6725static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
6726 show_debug_level, store_debug_level);
b481de9c 6727
c8b0e6e1 6728#endif /* CONFIG_IWL3945_DEBUG */
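/*
 * Minimal userspace sketch for exercising the debug_level attribute created
 * above.  The path and the 0x43fff mask are illustrative only; the real
 * file sits in the PCI device's sysfs directory, and the mask bits come
 * from the iwl debug-level definitions.  Userspace code, not part of this
 * file:
 */
#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/bus/pci/drivers/iwl3945/0000:03:00.0/debug_level";
	unsigned int level;
	FILE *f = fopen(path, "r+");	/* needs root */

	if (!f)
		return 1;
	if (fscanf(f, "%x", &level) == 1)	/* show prints "0x%08X\n" */
		printf("current debug mask: 0x%08X\n", level);
	rewind(f);
	fprintf(f, "0x43fff\n");		/* pick a wider mask */
	fclose(f);
	return 0;
}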
b481de9c 6729
b481de9c
ZY
6730static ssize_t show_temperature(struct device *d,
6731 struct device_attribute *attr, char *buf)
6732{
4a8a4322 6733 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
b481de9c 6734
775a6e27 6735 if (!iwl_is_alive(priv))
b481de9c
ZY
6736 return -EAGAIN;
6737
bb8c093b 6738 return sprintf(buf, "%d\n", iwl3945_hw_get_temperature(priv));
b481de9c
ZY
6739}
6740
6741static DEVICE_ATTR(temperature, S_IRUGO, show_temperature, NULL);
6742
b481de9c
ZY
6743static ssize_t show_tx_power(struct device *d,
6744 struct device_attribute *attr, char *buf)
6745{
4a8a4322 6746 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
b481de9c
ZY
6747 return sprintf(buf, "%d\n", priv->user_txpower_limit);
6748}
6749
6750static ssize_t store_tx_power(struct device *d,
6751 struct device_attribute *attr,
6752 const char *buf, size_t count)
6753{
4a8a4322 6754 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
b481de9c
ZY
6755 char *p = (char *)buf;
6756 u32 val;
6757
6758 val = simple_strtoul(p, &p, 10);
6759 if (p == buf)
978785a3 6760 IWL_INFO(priv, ": %s is not in decimal form.\n", buf);
b481de9c 6761 else
bb8c093b 6762 iwl3945_hw_reg_set_txpower(priv, val);
b481de9c
ZY
6763
6764 return count;
6765}
6766
6767static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, show_tx_power, store_tx_power);
6768
6769static ssize_t show_flags(struct device *d,
6770 struct device_attribute *attr, char *buf)
6771{
4a8a4322 6772 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
b481de9c 6773
f2c7e521 6774 return sprintf(buf, "0x%04X\n", priv->active39_rxon.flags);
b481de9c
ZY
6775}
6776
6777static ssize_t store_flags(struct device *d,
6778 struct device_attribute *attr,
6779 const char *buf, size_t count)
6780{
4a8a4322 6781 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
b481de9c
ZY
6782 u32 flags = simple_strtoul(buf, NULL, 0);
6783
6784 mutex_lock(&priv->mutex);
f2c7e521 6785 if (le32_to_cpu(priv->staging39_rxon.flags) != flags) {
b481de9c 6786 /* Cancel any currently running scans... */
af0053d6 6787 if (iwl_scan_cancel_timeout(priv, 100))
39aadf8c 6788 IWL_WARN(priv, "Could not cancel scan.\n");
b481de9c
ZY
6789 else {
6790 IWL_DEBUG_INFO("Committing rxon.flags = 0x%04X\n",
6791 flags);
f2c7e521 6792 priv->staging39_rxon.flags = cpu_to_le32(flags);
bb8c093b 6793 iwl3945_commit_rxon(priv);
b481de9c
ZY
6794 }
6795 }
6796 mutex_unlock(&priv->mutex);
6797
6798 return count;
6799}
6800
6801static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, show_flags, store_flags);
6802
6803static ssize_t show_filter_flags(struct device *d,
6804 struct device_attribute *attr, char *buf)
6805{
4a8a4322 6806 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
b481de9c
ZY
6807
6808 return sprintf(buf, "0x%04X\n",
f2c7e521 6809 le32_to_cpu(priv->active39_rxon.filter_flags));
b481de9c
ZY
6810}
6811
6812static ssize_t store_filter_flags(struct device *d,
6813 struct device_attribute *attr,
6814 const char *buf, size_t count)
6815{
4a8a4322 6816 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
b481de9c
ZY
6817 u32 filter_flags = simple_strtoul(buf, NULL, 0);
6818
6819 mutex_lock(&priv->mutex);
f2c7e521 6820 if (le32_to_cpu(priv->staging39_rxon.filter_flags) != filter_flags) {
b481de9c 6821 /* Cancel any currently running scans... */
af0053d6 6822 if (iwl_scan_cancel_timeout(priv, 100))
39aadf8c 6823 IWL_WARN(priv, "Could not cancel scan.\n");
b481de9c
ZY
6824 else {
6825 IWL_DEBUG_INFO("Committing rxon.filter_flags = "
6826 "0x%04X\n", filter_flags);
f2c7e521 6827 priv->staging39_rxon.filter_flags =
b481de9c 6828 cpu_to_le32(filter_flags);
bb8c093b 6829 iwl3945_commit_rxon(priv);
b481de9c
ZY
6830 }
6831 }
6832 mutex_unlock(&priv->mutex);
6833
6834 return count;
6835}
6836
6837static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, show_filter_flags,
6838 store_filter_flags);
6839
c8b0e6e1 6840#ifdef CONFIG_IWL3945_SPECTRUM_MEASUREMENT
b481de9c
ZY
6841
6842static ssize_t show_measurement(struct device *d,
6843 struct device_attribute *attr, char *buf)
6844{
4a8a4322 6845 struct iwl_priv *priv = dev_get_drvdata(d);
600c0e11 6846 struct iwl_spectrum_notification measure_report;
b481de9c 6847 u32 size = sizeof(measure_report), len = 0, ofs = 0;
3ac7f146 6848 u8 *data = (u8 *)&measure_report;
b481de9c
ZY
6849 unsigned long flags;
6850
6851 spin_lock_irqsave(&priv->lock, flags);
6852 if (!(priv->measurement_status & MEASUREMENT_READY)) {
6853 spin_unlock_irqrestore(&priv->lock, flags);
6854 return 0;
6855 }
6856 memcpy(&measure_report, &priv->measure_report, size);
6857 priv->measurement_status = 0;
6858 spin_unlock_irqrestore(&priv->lock, flags);
6859
6860 while (size && (PAGE_SIZE - len)) {
6861 hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
6862 PAGE_SIZE - len, 1);
6863 len = strlen(buf);
6864 if (PAGE_SIZE - len)
6865 buf[len++] = '\n';
6866
6867 ofs += 16;
6868 size -= min(size, 16U);
6869 }
6870
6871 return len;
6872}
6873
6874static ssize_t store_measurement(struct device *d,
6875 struct device_attribute *attr,
6876 const char *buf, size_t count)
6877{
4a8a4322 6878 struct iwl_priv *priv = dev_get_drvdata(d);
b481de9c 6879 struct ieee80211_measurement_params params = {
f2c7e521 6880 .channel = le16_to_cpu(priv->active39_rxon.channel),
b481de9c
ZY
6881 .start_time = cpu_to_le64(priv->last_tsf),
6882 .duration = cpu_to_le16(1),
6883 };
6884 u8 type = IWL_MEASURE_BASIC;
6885 u8 buffer[32];
6886 u8 channel;
6887
6888 if (count) {
6889 char *p = buffer;
6890 strncpy(buffer, buf, min(sizeof(buffer), count));
6891 channel = simple_strtoul(p, NULL, 0);
6892 if (channel)
6893 params.channel = channel;
6894
6895 p = buffer;
6896 while (*p && *p != ' ')
6897 p++;
6898 if (*p)
6899 type = simple_strtoul(p + 1, NULL, 0);
6900 }
6901
6902 IWL_DEBUG_INFO("Invoking measurement of type %d on "
6903 "channel %d (for '%s')\n", type, params.channel, buf);
bb8c093b 6904 iwl3945_get_measurement(priv, &params, type);
b481de9c
ZY
6905
6906 return count;
6907}
6908
6909static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR,
6910 show_measurement, store_measurement);
c8b0e6e1 6911#endif /* CONFIG_IWL3945_SPECTRUM_MEASUREMENT */
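/*
 * Usage sketch for the measurement attribute above (illustrative values):
 * writing "6" asks for a basic measurement on channel 6, and an optional
 * second number after a space selects the measurement type; a later read
 * returns the raw iwl_spectrum_notification, hex-dumped 16 bytes per line.
 * Note that the 32-byte local buffer in store_measurement() is not
 * explicitly NUL-terminated when count >= sizeof(buffer), so short writes
 * are the safe way to drive it.
 */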
b481de9c 6912
b481de9c
ZY
6913static ssize_t store_retry_rate(struct device *d,
6914 struct device_attribute *attr,
6915 const char *buf, size_t count)
6916{
4a8a4322 6917 struct iwl_priv *priv = dev_get_drvdata(d);
b481de9c
ZY
6918
6919 priv->retry_rate = simple_strtoul(buf, NULL, 0);
6920 if (priv->retry_rate <= 0)
6921 priv->retry_rate = 1;
6922
6923 return count;
6924}
6925
6926static ssize_t show_retry_rate(struct device *d,
6927 struct device_attribute *attr, char *buf)
6928{
4a8a4322 6929 struct iwl_priv *priv = dev_get_drvdata(d);
b481de9c
ZY
6930 return sprintf(buf, "%d", priv->retry_rate);
6931}
6932
6933static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, show_retry_rate,
6934 store_retry_rate);
6935
6936static ssize_t store_power_level(struct device *d,
6937 struct device_attribute *attr,
6938 const char *buf, size_t count)
6939{
4a8a4322 6940 struct iwl_priv *priv = dev_get_drvdata(d);
b481de9c
ZY
6941 int rc;
6942 int mode;
6943
6944 mode = simple_strtoul(buf, NULL, 0);
6945 mutex_lock(&priv->mutex);
6946
775a6e27 6947 if (!iwl_is_ready(priv)) {
b481de9c
ZY
6948 rc = -EAGAIN;
6949 goto out;
6950 }
6951
1125eff3
SO
6952 if ((mode < 1) || (mode > IWL39_POWER_LIMIT) ||
6953 (mode == IWL39_POWER_AC))
6954 mode = IWL39_POWER_AC;
b481de9c
ZY
6955 else
6956 mode |= IWL_POWER_ENABLED;
6957
6958 if (mode != priv->power_mode) {
bb8c093b 6959 rc = iwl3945_send_power_mode(priv, IWL_POWER_LEVEL(mode));
b481de9c
ZY
6960 if (rc) {
6961 IWL_DEBUG_MAC80211("failed setting power mode.\n");
6962 goto out;
6963 }
6964 priv->power_mode = mode;
6965 }
6966
6967 rc = count;
6968
6969 out:
6970 mutex_unlock(&priv->mutex);
6971 return rc;
6972}
6973
6974#define MAX_WX_STRING 80
6975
6976/* Values are in microsecond */
6977static const s32 timeout_duration[] = {
6978 350000,
6979 250000,
6980 75000,
6981 37000,
6982 25000,
6983};
6984static const s32 period_duration[] = {
6985 400000,
6986 700000,
6987 1000000,
6988 1000000,
6989 1000000
6990};
6991
6992static ssize_t show_power_level(struct device *d,
6993 struct device_attribute *attr, char *buf)
6994{
4a8a4322 6995 struct iwl_priv *priv = dev_get_drvdata(d);
b481de9c
ZY
6996 int level = IWL_POWER_LEVEL(priv->power_mode);
6997 char *p = buf;
6998
6999 p += sprintf(p, "%d ", level);
7000 switch (level) {
7001 case IWL_POWER_MODE_CAM:
1125eff3 7002 case IWL39_POWER_AC:
b481de9c
ZY
7003 p += sprintf(p, "(AC)");
7004 break;
1125eff3 7005 case IWL39_POWER_BATTERY:
b481de9c
ZY
7006 p += sprintf(p, "(BATTERY)");
7007 break;
7008 default:
7009 p += sprintf(p,
7010 "(Timeout %dms, Period %dms)",
7011 timeout_duration[level - 1] / 1000,
7012 period_duration[level - 1] / 1000);
7013 }
7014
7015 if (!(priv->power_mode & IWL_POWER_ENABLED))
7016 p += sprintf(p, " OFF\n");
7017 else
7018 p += sprintf(p, " \n");
7019
3ac7f146 7020 return p - buf + 1;
b481de9c
ZY
7021
7022}
7023
7024static DEVICE_ATTR(power_level, S_IWUSR | S_IRUSR, show_power_level,
7025 store_power_level);
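/*
 * Example of how the duration tables above show up in the power_level
 * output (array values are microseconds, printed as milliseconds): for a
 * level that falls through to the default case, e.g. level 3, a read
 * returns "3 (Timeout 75ms, Period 1000ms)", while the AC and battery
 * levels print "(AC)" and "(BATTERY)" instead.
 */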
7026
7027static ssize_t show_channels(struct device *d,
7028 struct device_attribute *attr, char *buf)
7029{
8318d78a
JB
7030 /* all this shit doesn't belong into sysfs anyway */
7031 return 0;
b481de9c
ZY
7032}
7033
7034static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL);
7035
7036static ssize_t show_statistics(struct device *d,
7037 struct device_attribute *attr, char *buf)
7038{
4a8a4322 7039 struct iwl_priv *priv = dev_get_drvdata(d);
bb8c093b 7040 u32 size = sizeof(struct iwl3945_notif_statistics);
b481de9c 7041 u32 len = 0, ofs = 0;
f2c7e521 7042 u8 *data = (u8 *)&priv->statistics_39;
b481de9c
ZY
7043 int rc = 0;
7044
775a6e27 7045 if (!iwl_is_alive(priv))
b481de9c
ZY
7046 return -EAGAIN;
7047
7048 mutex_lock(&priv->mutex);
bb8c093b 7049 rc = iwl3945_send_statistics_request(priv);
b481de9c
ZY
7050 mutex_unlock(&priv->mutex);
7051
7052 if (rc) {
7053 len = sprintf(buf,
7054 "Error sending statistics request: 0x%08X\n", rc);
7055 return len;
7056 }
7057
7058 while (size && (PAGE_SIZE - len)) {
7059 hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
7060 PAGE_SIZE - len, 1);
7061 len = strlen(buf);
7062 if (PAGE_SIZE - len)
7063 buf[len++] = '\n';
7064
7065 ofs += 16;
7066 size -= min(size, 16U);
7067 }
7068
7069 return len;
7070}
7071
7072static DEVICE_ATTR(statistics, S_IRUGO, show_statistics, NULL);
7073
7074static ssize_t show_antenna(struct device *d,
7075 struct device_attribute *attr, char *buf)
7076{
4a8a4322 7077 struct iwl_priv *priv = dev_get_drvdata(d);
b481de9c 7078
775a6e27 7079 if (!iwl_is_alive(priv))
b481de9c
ZY
7080 return -EAGAIN;
7081
7082 return sprintf(buf, "%d\n", priv->antenna);
7083}
7084
7085static ssize_t store_antenna(struct device *d,
7086 struct device_attribute *attr,
7087 const char *buf, size_t count)
7088{
7089 int ant;
4a8a4322 7090 struct iwl_priv *priv = dev_get_drvdata(d);
b481de9c
ZY
7091
7092 if (count == 0)
7093 return 0;
7094
7095 if (sscanf(buf, "%1i", &ant) != 1) {
7096 IWL_DEBUG_INFO("not in hex or decimal form.\n");
7097 return count;
7098 }
7099
7100 if ((ant >= 0) && (ant <= 2)) {
7101 IWL_DEBUG_INFO("Setting antenna select to %d.\n", ant);
bb8c093b 7102 priv->antenna = (enum iwl3945_antenna)ant;
b481de9c
ZY
7103 } else
7104 IWL_DEBUG_INFO("Bad antenna select value %d.\n", ant);
7105
7106
7107 return count;
7108}
7109
7110static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, show_antenna, store_antenna);
7111
7112static ssize_t show_status(struct device *d,
7113 struct device_attribute *attr, char *buf)
7114{
4a8a4322 7115 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
775a6e27 7116 if (!iwl_is_alive(priv))
b481de9c
ZY
7117 return -EAGAIN;
7118 return sprintf(buf, "0x%08x\n", (int)priv->status);
7119}
7120
7121static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
7122
7123static ssize_t dump_error_log(struct device *d,
7124 struct device_attribute *attr,
7125 const char *buf, size_t count)
7126{
7127 char *p = (char *)buf;
7128
7129 if (p[0] == '1')
4a8a4322 7130 iwl3945_dump_nic_error_log((struct iwl_priv *)d->driver_data);
b481de9c
ZY
7131
7132 return strnlen(buf, count);
7133}
7134
7135static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, dump_error_log);
7136
7137static ssize_t dump_event_log(struct device *d,
7138 struct device_attribute *attr,
7139 const char *buf, size_t count)
7140{
7141 char *p = (char *)buf;
7142
7143 if (p[0] == '1')
4a8a4322 7144 iwl3945_dump_nic_event_log((struct iwl_priv *)d->driver_data);
b481de9c
ZY
7145
7146 return strnlen(buf, count);
7147}
7148
7149static DEVICE_ATTR(dump_events, S_IWUSR, NULL, dump_event_log);
7150
7151/*****************************************************************************
7152 *
a96a27f9 7153 * driver setup and tear down
b481de9c
ZY
7154 *
7155 *****************************************************************************/
7156
4a8a4322 7157static void iwl3945_setup_deferred_work(struct iwl_priv *priv)
b481de9c
ZY
7158{
7159 priv->workqueue = create_workqueue(DRV_NAME);
7160
7161 init_waitqueue_head(&priv->wait_command_queue);
7162
bb8c093b
CH
7163 INIT_WORK(&priv->up, iwl3945_bg_up);
7164 INIT_WORK(&priv->restart, iwl3945_bg_restart);
7165 INIT_WORK(&priv->rx_replenish, iwl3945_bg_rx_replenish);
7166 INIT_WORK(&priv->scan_completed, iwl3945_bg_scan_completed);
7167 INIT_WORK(&priv->request_scan, iwl3945_bg_request_scan);
7168 INIT_WORK(&priv->abort_scan, iwl3945_bg_abort_scan);
7169 INIT_WORK(&priv->rf_kill, iwl3945_bg_rf_kill);
7170 INIT_WORK(&priv->beacon_update, iwl3945_bg_beacon_update);
bb8c093b
CH
7171 INIT_DELAYED_WORK(&priv->init_alive_start, iwl3945_bg_init_alive_start);
7172 INIT_DELAYED_WORK(&priv->alive_start, iwl3945_bg_alive_start);
7173 INIT_DELAYED_WORK(&priv->scan_check, iwl3945_bg_scan_check);
2663516d 7174 INIT_DELAYED_WORK(&priv->rfkill_poll, iwl3945_rfkill_poll);
bb8c093b
CH
7175
7176 iwl3945_hw_setup_deferred_work(priv);
b481de9c
ZY
7177
7178 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
bb8c093b 7179 iwl3945_irq_tasklet, (unsigned long)priv);
b481de9c
ZY
7180}
7181
4a8a4322 7182static void iwl3945_cancel_deferred_work(struct iwl_priv *priv)
b481de9c 7183{
bb8c093b 7184 iwl3945_hw_cancel_deferred_work(priv);
b481de9c 7185
e47eb6ad 7186 cancel_delayed_work_sync(&priv->init_alive_start);
b481de9c
ZY
7187 cancel_delayed_work(&priv->scan_check);
7188 cancel_delayed_work(&priv->alive_start);
b481de9c
ZY
7189 cancel_work_sync(&priv->beacon_update);
7190}
7191
bb8c093b 7192static struct attribute *iwl3945_sysfs_entries[] = {
b481de9c
ZY
7193 &dev_attr_antenna.attr,
7194 &dev_attr_channels.attr,
7195 &dev_attr_dump_errors.attr,
7196 &dev_attr_dump_events.attr,
7197 &dev_attr_flags.attr,
7198 &dev_attr_filter_flags.attr,
c8b0e6e1 7199#ifdef CONFIG_IWL3945_SPECTRUM_MEASUREMENT
b481de9c
ZY
7200 &dev_attr_measurement.attr,
7201#endif
7202 &dev_attr_power_level.attr,
b481de9c 7203 &dev_attr_retry_rate.attr,
b481de9c
ZY
7204 &dev_attr_statistics.attr,
7205 &dev_attr_status.attr,
7206 &dev_attr_temperature.attr,
b481de9c 7207 &dev_attr_tx_power.attr,
40b8ec0b
SO
7208#ifdef CONFIG_IWL3945_DEBUG
7209 &dev_attr_debug_level.attr,
7210#endif
b481de9c
ZY
7211 NULL
7212};
7213
bb8c093b 7214static struct attribute_group iwl3945_attribute_group = {
b481de9c 7215 .name = NULL, /* put in device directory */
bb8c093b 7216 .attrs = iwl3945_sysfs_entries,
b481de9c
ZY
7217};
7218
bb8c093b
CH
7219static struct ieee80211_ops iwl3945_hw_ops = {
7220 .tx = iwl3945_mac_tx,
7221 .start = iwl3945_mac_start,
7222 .stop = iwl3945_mac_stop,
7223 .add_interface = iwl3945_mac_add_interface,
7224 .remove_interface = iwl3945_mac_remove_interface,
7225 .config = iwl3945_mac_config,
7226 .config_interface = iwl3945_mac_config_interface,
7227 .configure_filter = iwl3945_configure_filter,
7228 .set_key = iwl3945_mac_set_key,
bb8c093b
CH
7229 .get_tx_stats = iwl3945_mac_get_tx_stats,
7230 .conf_tx = iwl3945_mac_conf_tx,
bb8c093b 7231 .reset_tsf = iwl3945_mac_reset_tsf,
cd56d331 7232 .bss_info_changed = iwl3945_bss_info_changed,
bb8c093b 7233 .hw_scan = iwl3945_mac_hw_scan
b481de9c
ZY
7234};
7235
e52119c5 7236static int iwl3945_init_drv(struct iwl_priv *priv)
90a30a02
KA
7237{
7238 int ret;
7239
7240 priv->retry_rate = 1;
7241 priv->ibss_beacon = NULL;
7242
7243 spin_lock_init(&priv->lock);
c7a7c8ec 7244 spin_lock_init(&priv->power_data_39.lock);
90a30a02
KA
7245 spin_lock_init(&priv->sta_lock);
7246 spin_lock_init(&priv->hcmd_lock);
7247
7248 INIT_LIST_HEAD(&priv->free_frames);
7249
7250 mutex_init(&priv->mutex);
7251
7252 /* Clear the driver's (not device's) station table */
7253 iwl3945_clear_stations_table(priv);
7254
7255 priv->data_retry_limit = -1;
7256 priv->ieee_channels = NULL;
7257 priv->ieee_rates = NULL;
7258 priv->band = IEEE80211_BAND_2GHZ;
7259
7260 priv->iw_mode = NL80211_IFTYPE_STATION;
7261
7262 iwl_reset_qos(priv);
7263
7264 priv->qos_data.qos_active = 0;
7265 priv->qos_data.qos_cap.val = 0;
7266
7267 priv->rates_mask = IWL_RATES_MASK;
7268 /* If power management is turned on, default to AC mode */
c7a7c8ec 7269 priv->power_mode = IWL39_POWER_AC;
90a30a02
KA
7270 priv->user_txpower_limit = IWL_DEFAULT_TX_POWER;
7271
7272 ret = iwl3945_init_channel_map(priv);
7273 if (ret) {
7274 IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
7275 goto err;
7276 }
7277
7278 ret = iwl3945_init_geos(priv);
7279 if (ret) {
7280 IWL_ERR(priv, "initializing geos failed: %d\n", ret);
7281 goto err_free_channel_map;
7282 }
7283
7284 return 0;
7285
7286err_free_channel_map:
7287 iwl3945_free_channel_map(priv);
7288err:
7289 return ret;
7290}
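/*
 * The error labels above unwind in last-in/first-out order: a geos failure
 * frees only the channel map that was initialized just before it.  The
 * probe routine below follows the same pattern on a larger scale, with one
 * goto target per setup stage.
 */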
7291
bb8c093b 7292static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
b481de9c
ZY
7293{
7294 int err = 0;
4a8a4322 7295 struct iwl_priv *priv;
b481de9c 7296 struct ieee80211_hw *hw;
c0f20d91 7297 struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
0359facc 7298 unsigned long flags;
b481de9c 7299
cee53ddb
KA
7300 /***********************
7301 * 1. Allocating HW data
7302 * ********************/
7303
b481de9c
ZY
7304 /* mac80211 allocates memory for this device instance, including
7305 * space for this driver's private structure */
90a30a02 7306 hw = iwl_alloc_all(cfg, &iwl3945_hw_ops);
b481de9c 7307 if (hw == NULL) {
a3139c59 7308 printk(KERN_ERR DRV_NAME ": Can not allocate network device\n");
b481de9c
ZY
7309 err = -ENOMEM;
7310 goto out;
7311 }
b481de9c 7312 priv = hw->priv;
90a30a02 7313 SET_IEEE80211_DEV(hw, &pdev->dev);
6440adb5 7314
df878d8f
KA
7315 if ((iwl3945_mod_params.num_of_queues > IWL39_MAX_NUM_QUEUES) ||
7316 (iwl3945_mod_params.num_of_queues < IWL_MIN_NUM_QUEUES)) {
15b1687c
WT
7317 IWL_ERR(priv,
7318 "invalid queues_num, should be between %d and %d\n",
7319 IWL_MIN_NUM_QUEUES, IWL39_MAX_NUM_QUEUES);
a3139c59
SO
7320 err = -EINVAL;
7321 goto out;
7322 }
7323
90a30a02
KA
7324 /*
7325 * Disabling hardware scan means that mac80211 will perform scans
7326 * "the hard way", rather than using device's scan.
7327 */
df878d8f 7328 if (iwl3945_mod_params.disable_hw_scan) {
40b8ec0b
SO
7329 IWL_DEBUG_INFO("Disabling hw_scan\n");
7330 iwl3945_hw_ops.hw_scan = NULL;
7331 }
7332
90a30a02 7333
cee53ddb 7334 IWL_DEBUG_INFO("*** LOAD DRIVER ***\n");
90a30a02
KA
7335 priv->cfg = cfg;
7336 priv->pci_dev = pdev;
cee53ddb 7337
c8b0e6e1 7338#ifdef CONFIG_IWL3945_DEBUG
df878d8f 7339 priv->debug_level = iwl3945_mod_params.debug;
b481de9c
ZY
7340 atomic_set(&priv->restrict_refcnt, 0);
7341#endif
90a30a02
KA
7342 hw->rate_control_algorithm = "iwl-3945-rs";
7343 hw->sta_data_size = sizeof(struct iwl3945_sta_priv);
7344
7345 /* Select antenna (may be helpful if only one antenna is connected) */
7346 priv->antenna = (enum iwl3945_antenna)iwl3945_mod_params.antenna;
b481de9c 7347
566bfe5a 7348 /* Tell mac80211 our characteristics */
605a0bd6 7349 hw->flags = IEEE80211_HW_SIGNAL_DBM |
566bfe5a 7350 IEEE80211_HW_NOISE_DBM;
b481de9c 7351
f59ac048 7352 hw->wiphy->interface_modes =
f59ac048
LR
7353 BIT(NL80211_IFTYPE_STATION) |
7354 BIT(NL80211_IFTYPE_ADHOC);
7355
ea4a82dc
LR
7356 hw->wiphy->fw_handles_regulatory = true;
7357
6440adb5 7358 /* 4 EDCA QOS priorities */
b481de9c
ZY
7359 hw->queues = 4;
7360
cee53ddb
KA
7361 /***************************
7362 * 2. Initializing PCI bus
7363 * *************************/
b481de9c
ZY
7364 if (pci_enable_device(pdev)) {
7365 err = -ENODEV;
7366 goto out_ieee80211_free_hw;
7367 }
7368
7369 pci_set_master(pdev);
7370
b481de9c
ZY
7371 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
7372 if (!err)
7373 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
7374 if (err) {
978785a3 7375 IWL_WARN(priv, "No suitable DMA available.\n");
b481de9c
ZY
7376 goto out_pci_disable_device;
7377 }
7378
7379 pci_set_drvdata(pdev, priv);
7380 err = pci_request_regions(pdev, DRV_NAME);
7381 if (err)
7382 goto out_pci_disable_device;
6440adb5 7383
cee53ddb
KA
7384 /***********************
7385 * 3. Read REV Register
7386 * ********************/
b481de9c
ZY
7387 priv->hw_base = pci_iomap(pdev, 0, 0);
7388 if (!priv->hw_base) {
7389 err = -ENODEV;
7390 goto out_pci_release_regions;
7391 }
7392
7393 IWL_DEBUG_INFO("pci_resource_len = 0x%08llx\n",
7394 (unsigned long long) pci_resource_len(pdev, 0));
7395 IWL_DEBUG_INFO("pci_resource_base = %p\n", priv->hw_base);
7396
cee53ddb
KA
7397 /* We disable the RETRY_TIMEOUT register (0x41) to keep
7398 * PCI Tx retries from interfering with C3 CPU state */
7399 pci_write_config_byte(pdev, 0x41, 0x00);
b481de9c 7400
90a30a02
KA
7401 /* amp init */
7402 err = priv->cfg->ops->lib->apm_ops.init(priv);
cee53ddb 7403 if (err < 0) {
90a30a02
KA
7404 IWL_DEBUG_INFO("Failed to init APMG\n");
7405 goto out_iounmap;
cee53ddb 7406 }
b481de9c 7407
cee53ddb
KA
7408 /***********************
7409 * 4. Read EEPROM
7410 * ********************/
90a30a02 7411
cee53ddb
KA
7412 /* Read the EEPROM */
7413 err = iwl3945_eeprom_init(priv);
7414 if (err) {
15b1687c 7415 IWL_ERR(priv, "Unable to init EEPROM\n");
cee53ddb
KA
7416 goto out_remove_sysfs;
7417 }
7418 /* MAC Address location in EEPROM same for 3945/4965 */
7419 get_eeprom_mac(priv, priv->mac_addr);
7420 IWL_DEBUG_INFO("MAC address: %pM\n", priv->mac_addr);
7421 SET_IEEE80211_PERM_ADDR(priv->hw, priv->mac_addr);
b481de9c 7422
cee53ddb
KA
7423 /***********************
7424 * 5. Setup HW Constants
7425 * ********************/
b481de9c 7426 /* Device-specific setup */
3832ec9d 7427 if (iwl3945_hw_set_hw_params(priv)) {
15b1687c 7428 IWL_ERR(priv, "failed to set hw settings\n");
b481de9c
ZY
7429 goto out_iounmap;
7430 }
7431
cee53ddb
KA
7432 /***********************
7433 * 6. Setup priv
7434 * ********************/
cee53ddb 7435
90a30a02 7436 err = iwl3945_init_drv(priv);
b481de9c 7437 if (err) {
90a30a02
KA
7438 IWL_ERR(priv, "initializing driver failed\n");
7439 goto out_free_geos;
b481de9c
ZY
7440 }
7441
978785a3
TW
7442 IWL_INFO(priv, "Detected Intel Wireless WiFi Link %s\n",
7443 priv->cfg->name);
cee53ddb
KA
7444
7445 /***********************************
7446 * 7. Initialize Module Parameters
7447 * **********************************/
7448
7449 /* Initialize module parameter values here */
7450 /* Disable radio (SW RF KILL) via parameter when loading driver */
df878d8f 7451 if (iwl3945_mod_params.disable) {
cee53ddb
KA
7452 set_bit(STATUS_RF_KILL_SW, &priv->status);
7453 IWL_DEBUG_INFO("Radio disabled.\n");
849e0dce
RC
7454 }
7455
cee53ddb
KA
7456
7457 /***********************
7458 * 8. Setup Services
7459 * ********************/
7460
7461 spin_lock_irqsave(&priv->lock, flags);
7462 iwl3945_disable_interrupts(priv);
7463 spin_unlock_irqrestore(&priv->lock, flags);
7464
2663516d
HS
7465 pci_enable_msi(priv->pci_dev);
7466
7467 err = request_irq(priv->pci_dev->irq, iwl3945_isr, IRQF_SHARED,
7468 DRV_NAME, priv);
7469 if (err) {
7470 IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq);
7471 goto out_disable_msi;
7472 }
7473
cee53ddb 7474 err = sysfs_create_group(&pdev->dev.kobj, &iwl3945_attribute_group);
849e0dce 7475 if (err) {
15b1687c 7476 IWL_ERR(priv, "failed to create sysfs device attributes\n");
90a30a02 7477 goto out_release_irq;
849e0dce 7478 }
849e0dce 7479
cee53ddb
KA
7480 iwl3945_set_rxon_channel(priv, IEEE80211_BAND_2GHZ, 6);
7481 iwl3945_setup_deferred_work(priv);
7482 iwl3945_setup_rx_handlers(priv);
7483
cee53ddb 7484 /*********************************
2663516d 7485 * 9. Setup and Register mac80211
cee53ddb
KA
7486 * *******************************/
7487
5a66926a
ZY
7488 err = ieee80211_register_hw(priv->hw);
7489 if (err) {
15b1687c 7490 IWL_ERR(priv, "Failed to register network device: %d\n", err);
cee53ddb 7491 goto out_remove_sysfs;
5a66926a 7492 }
b481de9c 7493
5a66926a
ZY
7494 priv->hw->conf.beacon_int = 100;
7495 priv->mac80211_registered = 1;
cee53ddb 7496
ebef2008
AK
7497 err = iwl3945_rfkill_init(priv);
7498 if (err)
15b1687c 7499 IWL_ERR(priv, "Unable to initialize RFKILL system. "
ebef2008
AK
7500 "Ignoring error: %d\n", err);
7501
2663516d
HS
7502 /* Start monitoring the killswitch */
7503 queue_delayed_work(priv->workqueue, &priv->rfkill_poll,
7504 2 * HZ);
7505
b481de9c
ZY
7506 return 0;
7507
cee53ddb
KA
7508 out_remove_sysfs:
7509 sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group);
849e0dce
RC
7510 out_free_geos:
7511 iwl3945_free_geos(priv);
b481de9c
ZY
7512
7513 out_release_irq:
2663516d 7514 free_irq(priv->pci_dev->irq, priv);
b481de9c
ZY
7515 destroy_workqueue(priv->workqueue);
7516 priv->workqueue = NULL;
3832ec9d 7517 iwl3945_unset_hw_params(priv);
2663516d
HS
7518 out_disable_msi:
7519 pci_disable_msi(priv->pci_dev);
b481de9c
ZY
7520 out_iounmap:
7521 pci_iounmap(pdev, priv->hw_base);
7522 out_pci_release_regions:
7523 pci_release_regions(pdev);
7524 out_pci_disable_device:
7525 pci_disable_device(pdev);
7526 pci_set_drvdata(pdev, NULL);
7527 out_ieee80211_free_hw:
7528 ieee80211_free_hw(priv->hw);
7529 out:
7530 return err;
7531}
7532
c83dbf68 7533static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
b481de9c 7534{
4a8a4322 7535 struct iwl_priv *priv = pci_get_drvdata(pdev);
0359facc 7536 unsigned long flags;
b481de9c
ZY
7537
7538 if (!priv)
7539 return;
7540
7541 IWL_DEBUG_INFO("*** UNLOAD DRIVER ***\n");
7542
b481de9c 7543 set_bit(STATUS_EXIT_PENDING, &priv->status);
b24d22b1 7544
d552bfb6
KA
7545 if (priv->mac80211_registered) {
7546 ieee80211_unregister_hw(priv->hw);
7547 priv->mac80211_registered = 0;
7548 } else {
7549 iwl3945_down(priv);
7550 }
b481de9c 7551
0359facc
MA
7552 /* make sure we flush any pending irq or
7553 * tasklet for the driver
7554 */
7555 spin_lock_irqsave(&priv->lock, flags);
7556 iwl3945_disable_interrupts(priv);
7557 spin_unlock_irqrestore(&priv->lock, flags);
7558
7559 iwl_synchronize_irq(priv);
7560
bb8c093b 7561 sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group);
b481de9c 7562
ebef2008 7563 iwl3945_rfkill_unregister(priv);
2663516d
HS
7564 cancel_delayed_work(&priv->rfkill_poll);
7565
bb8c093b 7566 iwl3945_dealloc_ucode_pci(priv);
b481de9c
ZY
7567
7568 if (priv->rxq.bd)
51af3d3f 7569 iwl_rx_queue_free(priv, &priv->rxq);
bb8c093b 7570 iwl3945_hw_txq_ctx_free(priv);
b481de9c 7571
3832ec9d 7572 iwl3945_unset_hw_params(priv);
bb8c093b 7573 iwl3945_clear_stations_table(priv);
b481de9c 7574
6ef89d0a
MA
7575 /*netif_stop_queue(dev); */
7576 flush_workqueue(priv->workqueue);
7577
bb8c093b 7578 /* ieee80211_unregister_hw calls iwl3945_mac_stop, which flushes
b481de9c
ZY
7579 * priv->workqueue... so we can't take down the workqueue
7580 * until now... */
7581 destroy_workqueue(priv->workqueue);
7582 priv->workqueue = NULL;
7583
2663516d
HS
7584 free_irq(pdev->irq, priv);
7585 pci_disable_msi(pdev);
7586
b481de9c
ZY
7587 pci_iounmap(pdev, priv->hw_base);
7588 pci_release_regions(pdev);
7589 pci_disable_device(pdev);
7590 pci_set_drvdata(pdev, NULL);
7591
849e0dce
RC
7592 iwl3945_free_channel_map(priv);
7593 iwl3945_free_geos(priv);
f2c7e521 7594 kfree(priv->scan39);
b481de9c
ZY
7595 if (priv->ibss_beacon)
7596 dev_kfree_skb(priv->ibss_beacon);
7597
7598 ieee80211_free_hw(priv->hw);
7599}
7600
7601#ifdef CONFIG_PM
7602
bb8c093b 7603static int iwl3945_pci_suspend(struct pci_dev *pdev, pm_message_t state)
b481de9c 7604{
4a8a4322 7605 struct iwl_priv *priv = pci_get_drvdata(pdev);
b481de9c 7606
e655b9f0
ZY
7607 if (priv->is_open) {
7608 set_bit(STATUS_IN_SUSPEND, &priv->status);
7609 iwl3945_mac_stop(priv->hw);
7610 priv->is_open = 1;
7611 }
2663516d
HS
7612 pci_save_state(pdev);
7613 pci_disable_device(pdev);
b481de9c
ZY
7614 pci_set_power_state(pdev, PCI_D3hot);
7615
b481de9c
ZY
7616 return 0;
7617}
7618
bb8c093b 7619static int iwl3945_pci_resume(struct pci_dev *pdev)
b481de9c 7620{
4a8a4322 7621 struct iwl_priv *priv = pci_get_drvdata(pdev);
b481de9c 7622
b481de9c 7623 pci_set_power_state(pdev, PCI_D0);
2663516d
HS
7624 pci_enable_device(pdev);
7625 pci_restore_state(pdev);
b481de9c 7626
e655b9f0
ZY
7627 if (priv->is_open)
7628 iwl3945_mac_start(priv->hw);
b481de9c 7629
e655b9f0 7630 clear_bit(STATUS_IN_SUSPEND, &priv->status);
b481de9c
ZY
7631 return 0;
7632}
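/*
 * Note on the is_open handling in the suspend path above: this assumes
 * iwl3945_mac_stop() clears priv->is_open, so the flag is set back to 1
 * right after the call purely to remember that the interface was up;
 * iwl3945_pci_resume() then uses it to decide whether to restart the
 * device via iwl3945_mac_start().
 */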
7633
7634#endif /* CONFIG_PM */
7635
ebef2008 7636/*************** RFKILL FUNCTIONS **********/
80fcc9e2 7637#ifdef CONFIG_IWL3945_RFKILL
ebef2008
AK
7638/* software rf-kill from user */
7639static int iwl3945_rfkill_soft_rf_kill(void *data, enum rfkill_state state)
7640{
4a8a4322 7641 struct iwl_priv *priv = data;
ebef2008
AK
7642 int err = 0;
7643
80fcc9e2 7644 if (!priv->rfkill)
ebef2008
AK
7645 return 0;
7646
7647 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
7648 return 0;
7649
a96a27f9 7650 IWL_DEBUG_RF_KILL("we received soft RFKILL set to state %d\n", state);
ebef2008
AK
7651 mutex_lock(&priv->mutex);
7652
7653 switch (state) {
acdfe9b4 7654 case RFKILL_STATE_UNBLOCKED:
775a6e27 7655 if (iwl_is_rfkill_hw(priv)) {
ebef2008 7656 err = -EBUSY;
80fcc9e2
AG
7657 goto out_unlock;
7658 }
7659 iwl3945_radio_kill_sw(priv, 0);
ebef2008 7660 break;
acdfe9b4 7661 case RFKILL_STATE_SOFT_BLOCKED:
ebef2008 7662 iwl3945_radio_kill_sw(priv, 1);
ebef2008 7663 break;
acdfe9b4 7664 default:
39aadf8c 7665 IWL_WARN(priv, "received unexpected RFKILL state %d\n", state);
acdfe9b4 7666 break;
ebef2008 7667 }
80fcc9e2 7668out_unlock:
ebef2008
AK
7669 mutex_unlock(&priv->mutex);
7670
7671 return err;
7672}
7673
4a8a4322 7674int iwl3945_rfkill_init(struct iwl_priv *priv)
ebef2008
AK
7675{
7676 struct device *device = wiphy_dev(priv->hw->wiphy);
7677 int ret = 0;
7678
7679 BUG_ON(device == NULL);
7680
7681 IWL_DEBUG_RF_KILL("Initializing RFKILL.\n");
80fcc9e2
AG
7682 priv->rfkill = rfkill_allocate(device, RFKILL_TYPE_WLAN);
7683 if (!priv->rfkill) {
15b1687c 7684 IWL_ERR(priv, "Unable to allocate rfkill device.\n");
ebef2008
AK
7685 ret = -ENOMEM;
7686 goto error;
7687 }
7688
80fcc9e2
AG
7689 priv->rfkill->name = priv->cfg->name;
7690 priv->rfkill->data = priv;
7691 priv->rfkill->state = RFKILL_STATE_UNBLOCKED;
7692 priv->rfkill->toggle_radio = iwl3945_rfkill_soft_rf_kill;
7693 priv->rfkill->user_claim_unsupported = 1;
ebef2008 7694
80fcc9e2
AG
7695 priv->rfkill->dev.class->suspend = NULL;
7696 priv->rfkill->dev.class->resume = NULL;
ebef2008 7697
80fcc9e2 7698 ret = rfkill_register(priv->rfkill);
ebef2008 7699 if (ret) {
15b1687c 7700 IWL_ERR(priv, "Unable to register rfkill: %d\n", ret);
80fcc9e2 7701 goto freed_rfkill;
ebef2008
AK
7702 }
7703
7704 IWL_DEBUG_RF_KILL("RFKILL initialization complete.\n");
7705 return ret;
7706
ebef2008 7707freed_rfkill:
80fcc9e2
AG
7708 if (priv->rfkill != NULL)
7709 rfkill_free(priv->rfkill);
7710 priv->rfkill = NULL;
ebef2008
AK
7711
7712error:
7713 IWL_DEBUG_RF_KILL("RFKILL initialization complete.\n");
7714 return ret;
7715}
7716
4a8a4322 7717void iwl3945_rfkill_unregister(struct iwl_priv *priv)
ebef2008 7718{
80fcc9e2
AG
7719 if (priv->rfkill)
7720 rfkill_unregister(priv->rfkill);
ebef2008 7721
80fcc9e2 7722 priv->rfkill = NULL;
ebef2008
AK
7723}
7724
7725/* set rf-kill to the right state. */
4a8a4322 7726void iwl3945_rfkill_set_hw_state(struct iwl_priv *priv)
ebef2008
AK
7727{
7728
80fcc9e2
AG
7729 if (!priv->rfkill)
7730 return;
7731
775a6e27 7732 if (iwl_is_rfkill_hw(priv)) {
80fcc9e2 7733 rfkill_force_state(priv->rfkill, RFKILL_STATE_HARD_BLOCKED);
ebef2008 7734 return;
80fcc9e2 7735 }
ebef2008 7736
775a6e27 7737 if (!iwl_is_rfkill_sw(priv))
80fcc9e2 7738 rfkill_force_state(priv->rfkill, RFKILL_STATE_UNBLOCKED);
ebef2008 7739 else
80fcc9e2 7740 rfkill_force_state(priv->rfkill, RFKILL_STATE_SOFT_BLOCKED);
ebef2008
AK
7741}
7742#endif
7743
b481de9c
ZY
7744/*****************************************************************************
7745 *
7746 * driver and module entry point
7747 *
7748 *****************************************************************************/
7749
bb8c093b 7750static struct pci_driver iwl3945_driver = {
b481de9c 7751 .name = DRV_NAME,
bb8c093b
CH
7752 .id_table = iwl3945_hw_card_ids,
7753 .probe = iwl3945_pci_probe,
7754 .remove = __devexit_p(iwl3945_pci_remove),
b481de9c 7755#ifdef CONFIG_PM
bb8c093b
CH
7756 .suspend = iwl3945_pci_suspend,
7757 .resume = iwl3945_pci_resume,
b481de9c
ZY
7758#endif
7759};
7760
bb8c093b 7761static int __init iwl3945_init(void)
b481de9c
ZY
7762{
7763
7764 int ret;
7765 printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
7766 printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");
897e1cf2
RC
7767
7768 ret = iwl3945_rate_control_register();
7769 if (ret) {
a3139c59
SO
7770 printk(KERN_ERR DRV_NAME
7771 "Unable to register rate control algorithm: %d\n", ret);
897e1cf2
RC
7772 return ret;
7773 }
7774
bb8c093b 7775 ret = pci_register_driver(&iwl3945_driver);
b481de9c 7776 if (ret) {
a3139c59 7777 printk(KERN_ERR DRV_NAME ": Unable to initialize PCI module\n");
897e1cf2 7778 goto error_register;
b481de9c 7779 }
b481de9c
ZY
7780
7781 return ret;
897e1cf2 7782
897e1cf2
RC
7783error_register:
7784 iwl3945_rate_control_unregister();
7785 return ret;
b481de9c
ZY
7786}
7787
bb8c093b 7788static void __exit iwl3945_exit(void)
b481de9c 7789{
bb8c093b 7790 pci_unregister_driver(&iwl3945_driver);
897e1cf2 7791 iwl3945_rate_control_unregister();
b481de9c
ZY
7792}
7793
a0987a8d 7794MODULE_FIRMWARE(IWL3945_MODULE_FIRMWARE(IWL3945_UCODE_API_MAX));
25cb6cad 7795
df878d8f 7796module_param_named(antenna, iwl3945_mod_params.antenna, int, 0444);
b481de9c 7797MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
df878d8f 7798module_param_named(disable, iwl3945_mod_params.disable, int, 0444);
b481de9c 7799MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
9c74d9fb
SO
7800module_param_named(swcrypto, iwl3945_mod_params.sw_crypto, int, 0444);
7801MODULE_PARM_DESC(swcrypto,
7802 "using software crypto (default 1 [software])\n");
df878d8f 7803module_param_named(debug, iwl3945_mod_params.debug, uint, 0444);
b481de9c 7804MODULE_PARM_DESC(debug, "debug output mask");
df878d8f 7805module_param_named(disable_hw_scan, iwl3945_mod_params.disable_hw_scan, int, 0444);
b481de9c
ZY
7806MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)");
7807
df878d8f 7808module_param_named(queues_num, iwl3945_mod_params.num_of_queues, int, 0444);
b481de9c
ZY
7809MODULE_PARM_DESC(queues_num, "number of hw queues.");
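/*
 * Typical load-time use of the parameters above (illustrative values):
 *
 *   modprobe iwl3945 debug=0x43fff swcrypto=0 disable_hw_scan=1
 *
 * Since they are registered with 0444 permissions, the current values can
 * also be read back from /sys/module/iwl3945/parameters/ after loading.
 */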
7810
bb8c093b
CH
7811module_exit(iwl3945_exit);
7812module_init(iwl3945_init);