[NET_SCHED]: explicit hold dev tx lock
[deliverable/linux.git] / drivers / net / wireless / iwlwifi / iwl3945-base.c
CommitLineData
b481de9c
ZY
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2007 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30/*
31 * NOTE: This file (iwl-base.c) is used to build to multiple hardware targets
32 * by defining IWL to either 3945 or 4965. The Makefile used when building
33 * the base targets will create base-3945.o and base-4965.o
34 *
35 * The eventual goal is to move as many of the #if IWL / #endif blocks out of
36 * this file and into the hardware specific implementation files (iwl-XXXX.c)
37 * and leave only the common (non #ifdef sprinkled) code in this file
38 */
39
40#include <linux/kernel.h>
41#include <linux/module.h>
42#include <linux/version.h>
43#include <linux/init.h>
44#include <linux/pci.h>
45#include <linux/dma-mapping.h>
46#include <linux/delay.h>
47#include <linux/skbuff.h>
48#include <linux/netdevice.h>
49#include <linux/wireless.h>
50#include <linux/firmware.h>
51#include <linux/skbuff.h>
52#include <linux/netdevice.h>
53#include <linux/etherdevice.h>
54#include <linux/if_arp.h>
55
56#include <net/ieee80211_radiotap.h>
57#include <net/mac80211.h>
58
59#include <asm/div64.h>
60
61#include "iwlwifi.h"
62#include "iwl-3945.h"
63#include "iwl-helpers.h"
64
#ifdef CONFIG_IWLWIFI_DEBUG
/* Runtime debug bitmask; a debug message is emitted only when its level
 * bit is set here (checked in iwl_print_hex_dump() below and presumably
 * by the IWL_DEBUG_* macros — confirm in iwl-debug.h). */
u32 iwl_debug_level;
#endif
68
69/******************************************************************************
70 *
71 * module boiler plate
72 *
73 ******************************************************************************/
74
/* module parameters (file-scope globals; unset ones default to zero) */
int iwl_param_disable_hw_scan;	/* def: 0 -- NOTE(review): name suggests
				 * non-zero disables HW-assisted scan;
				 * confirm at the module_param site */
int iwl_param_debug;		/* def: 0 -- initial iwl_debug_level value */
int iwl_param_disable;		/* def: enable radio */
int iwl_param_antenna;		/* def: 0 = both antennas (use diversity) */
int iwl_param_hwcrypto;		/* def: using software encryption */
int iwl_param_qos_enable = 1;	/* def: QoS enabled */
int iwl_param_queues_num = IWL_MAX_NUM_QUEUES;	/* def: use all Tx queues */
83
84/*
85 * module name, copyright, version, etc.
86 * NOTE: DRV_NAME is defined in iwlwifi.h for use by iwl-debug.h and printk
87 */
88
89#define DRV_DESCRIPTION \
90"Intel(R) PRO/Wireless 3945ABG/BG Network Connection driver for Linux"
91
92#ifdef CONFIG_IWLWIFI_DEBUG
93#define VD "d"
94#else
95#define VD
96#endif
97
98#ifdef CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT
99#define VS "s"
100#else
101#define VS
102#endif
103
104#define IWLWIFI_VERSION "0.1.15k" VD VS
105#define DRV_COPYRIGHT "Copyright(c) 2003-2007 Intel Corporation"
106#define DRV_VERSION IWLWIFI_VERSION
107
108/* Change firmware file name, using "-" and incrementing number,
109 * *only* when uCode interface or architecture changes so that it
110 * is not compatible with earlier drivers.
111 * This number will also appear in << 8 position of 1st dword of uCode file */
112#define IWL3945_UCODE_API "-1"
113
114MODULE_DESCRIPTION(DRV_DESCRIPTION);
115MODULE_VERSION(DRV_VERSION);
116MODULE_AUTHOR(DRV_COPYRIGHT);
117MODULE_LICENSE("GPL");
118
119__le16 *ieee80211_get_qos_ctrl(struct ieee80211_hdr *hdr)
120{
121 u16 fc = le16_to_cpu(hdr->frame_control);
122 int hdr_len = ieee80211_get_hdrlen(fc);
123
124 if ((fc & 0x00cc) == (IEEE80211_STYPE_QOS_DATA | IEEE80211_FTYPE_DATA))
125 return (__le16 *) ((u8 *) hdr + hdr_len - QOS_CONTROL_LEN);
126 return NULL;
127}
128
129static const struct ieee80211_hw_mode *iwl_get_hw_mode(
130 struct iwl_priv *priv, int mode)
131{
132 int i;
133
134 for (i = 0; i < 3; i++)
135 if (priv->modes[i].mode == mode)
136 return &priv->modes[i];
137
138 return NULL;
139}
140
/* Returns 1 when the ESSID should be treated as hidden/empty, else 0 */
static int iwl_is_empty_essid(const char *essid, int essid_len)
{
	int i;

	/* Single white space is for Linksys APs */
	if (essid_len == 1 && essid[0] == ' ')
		return 1;

	/* Otherwise, if the entire essid is 0, we assume it is hidden */
	for (i = 0; i < essid_len; i++) {
		if (essid[i] != '\0')
			return 0;
	}

	return 1;
}
156
157static const char *iwl_escape_essid(const char *essid, u8 essid_len)
158{
159 static char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
160 const char *s = essid;
161 char *d = escaped;
162
163 if (iwl_is_empty_essid(essid, essid_len)) {
164 memcpy(escaped, "<hidden>", sizeof("<hidden>"));
165 return escaped;
166 }
167
168 essid_len = min(essid_len, (u8) IW_ESSID_MAX_SIZE);
169 while (essid_len--) {
170 if (*s == '\0') {
171 *d++ = '\\';
172 *d++ = '0';
173 s++;
174 } else
175 *d++ = *s++;
176 }
177 *d = '\0';
178 return escaped;
179}
180
181static void iwl_print_hex_dump(int level, void *p, u32 len)
182{
183#ifdef CONFIG_IWLWIFI_DEBUG
184 if (!(iwl_debug_level & level))
185 return;
186
187 print_hex_dump(KERN_DEBUG, "iwl data: ", DUMP_PREFIX_OFFSET, 16, 1,
188 p, len, 1);
189#endif
190}
191
192/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
193 * DMA services
194 *
195 * Theory of operation
196 *
 * A queue is a circular buffer with 'Read' and 'Write' pointers.
 * 2 empty entries are always kept in the buffer to protect against overflow.
 *
 * For the Tx queue, there are low mark and high mark limits. If, after queuing
 * a packet for Tx, free space becomes < low mark, the Tx queue is stopped. When
 * reclaiming packets (on the 'tx done' IRQ), if free space becomes > high mark,
 * the Tx queue is resumed.
204 *
205 * The IPW operates with six queues, one receive queue in the device's
206 * sram, one transmit queue for sending commands to the device firmware,
207 * and four transmit queues for data.
208 ***************************************************/
209
210static int iwl_queue_space(const struct iwl_queue *q)
211{
212 int s = q->last_used - q->first_empty;
213
214 if (q->last_used > q->first_empty)
215 s -= q->n_bd;
216
217 if (s <= 0)
218 s += q->n_window;
219 /* keep some reserve to not confuse empty and full situations */
220 s -= 2;
221 if (s < 0)
222 s = 0;
223 return s;
224}
225
/* Advance @index by one, wrapping at @n_bd (n_bd must be a power of two) */
static inline int iwl_queue_inc_wrap(int index, int n_bd)
{
	return (index + 1) & (n_bd - 1);
}
231
/* Step @index back by one, wrapping at @n_bd (n_bd must be a power of two) */
static inline int iwl_queue_dec_wrap(int index, int n_bd)
{
	return (index - 1) & (n_bd - 1);
}
237
238static inline int x2_queue_used(const struct iwl_queue *q, int i)
239{
240 return q->first_empty > q->last_used ?
241 (i >= q->last_used && i < q->first_empty) :
242 !(i < q->last_used && i >= q->first_empty);
243}
244
245static inline u8 get_cmd_index(struct iwl_queue *q, u32 index, int is_huge)
246{
247 if (is_huge)
248 return q->n_window;
249
250 return index & (q->n_window - 1);
251}
252
253static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
254 int count, int slots_num, u32 id)
255{
256 q->n_bd = count;
257 q->n_window = slots_num;
258 q->id = id;
259
260 /* count must be power-of-two size, otherwise iwl_queue_inc_wrap
261 * and iwl_queue_dec_wrap are broken. */
262 BUG_ON(!is_power_of_2(count));
263
264 /* slots_num must be power-of-two size, otherwise
265 * get_cmd_index is broken. */
266 BUG_ON(!is_power_of_2(slots_num));
267
268 q->low_mark = q->n_window / 4;
269 if (q->low_mark < 4)
270 q->low_mark = 4;
271
272 q->high_mark = q->n_window / 8;
273 if (q->high_mark < 2)
274 q->high_mark = 2;
275
276 q->first_empty = q->last_used = 0;
277
278 return 0;
279}
280
281static int iwl_tx_queue_alloc(struct iwl_priv *priv,
282 struct iwl_tx_queue *txq, u32 id)
283{
284 struct pci_dev *dev = priv->pci_dev;
285
286 if (id != IWL_CMD_QUEUE_NUM) {
287 txq->txb = kmalloc(sizeof(txq->txb[0]) *
288 TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
289 if (!txq->txb) {
290 IWL_ERROR("kmalloc for auxilary BD "
291 "structures failed\n");
292 goto error;
293 }
294 } else
295 txq->txb = NULL;
296
297 txq->bd = pci_alloc_consistent(dev,
298 sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX,
299 &txq->q.dma_addr);
300
301 if (!txq->bd) {
302 IWL_ERROR("pci_alloc_consistent(%zd) failed\n",
303 sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX);
304 goto error;
305 }
306 txq->q.id = id;
307
308 return 0;
309
310 error:
311 if (txq->txb) {
312 kfree(txq->txb);
313 txq->txb = NULL;
314 }
315
316 return -ENOMEM;
317}
318
/**
 * iwl_tx_queue_init - Allocate and initialize one Tx/command queue
 * @priv: driver private data
 * @txq: queue structure to set up
 * @slots_num: number of command slots for this queue
 * @txq_id: hardware queue number
 *
 * Allocates the DMA-coherent command buffer and the TFD ring, initializes
 * the ring indexes, then hands the queue to the hardware layer.
 * Returns 0 on success or -ENOMEM.
 */
int iwl_tx_queue_init(struct iwl_priv *priv,
		      struct iwl_tx_queue *txq, int slots_num, u32 txq_id)
{
	struct pci_dev *dev = priv->pci_dev;
	int len;
	int rc = 0;

	/* Allocate command space plus one big command for scan, since the
	 * scan command is very large and the system will never have two
	 * scans in flight at the same time. */
	len = sizeof(struct iwl_cmd) * slots_num;
	if (txq_id == IWL_CMD_QUEUE_NUM)
		len += IWL_MAX_SCAN_SIZE;
	txq->cmd = pci_alloc_consistent(dev, len, &txq->dma_addr_cmd);
	if (!txq->cmd)
		return -ENOMEM;

	rc = iwl_tx_queue_alloc(priv, txq, txq_id);
	if (rc) {
		/* undo the command-buffer allocation on failure */
		pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd);

		return -ENOMEM;
	}
	txq->need_update = 0;

	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
	iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);

	/* tell the hardware where the ring lives */
	iwl_hw_tx_queue_init(priv, txq);

	return 0;
}
353
/**
 * iwl_tx_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers. txq itself is not freed.
 *
 */
void iwl_tx_queue_free(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
	struct iwl_queue *q = &txq->q;
	struct pci_dev *dev = priv->pci_dev;
	int len;

	/* queue was never allocated -- nothing to free */
	if (q->n_bd == 0)
		return;

	/* first, empty all BD's */
	for (; q->first_empty != q->last_used;
	     q->last_used = iwl_queue_inc_wrap(q->last_used, q->n_bd))
		iwl_hw_txq_free_tfd(priv, txq);

	/* command buffer: the command queue carries an extra scan-sized
	 * slot, mirroring the allocation in iwl_tx_queue_init() */
	len = sizeof(struct iwl_cmd) * q->n_window;
	if (q->id == IWL_CMD_QUEUE_NUM)
		len += IWL_MAX_SCAN_SIZE;

	pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd);

	/* free buffers belonging to queue itself */
	if (txq->q.n_bd)
		pci_free_consistent(dev, sizeof(struct iwl_tfd_frame) *
				    txq->q.n_bd, txq->bd, txq->q.dma_addr);

	if (txq->txb) {
		kfree(txq->txb);
		txq->txb = NULL;
	}

	/* 0 fill whole structure */
	memset(txq, 0, sizeof(*txq));
}
395
/* All-ones Ethernet broadcast address */
const u8 BROADCAST_ADDR[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
397
398/*************** STATION TABLE MANAGEMENT ****
399 *
400 * NOTE: This needs to be overhauled to better synchronize between
401 * how the iwl-4965.c is using iwl_hw_find_station vs. iwl-3945.c
402 *
403 * mac80211 should also be examined to determine if sta_info is duplicating
404 * the functionality provided here
405 */
406
407/**************************************************************/
/**
 * iwl_remove_station - mark a station table entry as unused
 * @priv: driver private data
 * @addr: MAC address of the station to remove
 * @is_ap: non-zero when removing the (fixed-slot) AP station
 *
 * Only updates the driver's software table under sta_lock; no command is
 * sent to the uCode here.  Always returns 0, even when @addr is not found.
 */
static u8 iwl_remove_station(struct iwl_priv *priv, const u8 *addr, int is_ap)
{
	int index = IWL_INVALID_STATION;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&priv->sta_lock, flags);

	/* AP and broadcast entries occupy fixed slots; anything else is
	 * located by scanning the table for a matching, in-use address. */
	if (is_ap)
		index = IWL_AP_ID;
	else if (is_broadcast_ether_addr(addr))
		index = priv->hw_setting.bcast_sta_id;
	else
		for (i = IWL_STA_ID; i < priv->hw_setting.max_stations; i++)
			if (priv->stations[i].used &&
			    !compare_ether_addr(priv->stations[i].sta.sta.addr,
						addr)) {
				index = i;
				break;
			}

	if (unlikely(index == IWL_INVALID_STATION))
		goto out;

	if (priv->stations[index].used) {
		priv->stations[index].used = 0;
		priv->num_stations--;
	}

	/* the counter must never underflow */
	BUG_ON(priv->num_stations < 0);

out:
	spin_unlock_irqrestore(&priv->sta_lock, flags);
	return 0;
}
443
444static void iwl_clear_stations_table(struct iwl_priv *priv)
445{
446 unsigned long flags;
447
448 spin_lock_irqsave(&priv->sta_lock, flags);
449
450 priv->num_stations = 0;
451 memset(priv->stations, 0, sizeof(priv->stations));
452
453 spin_unlock_irqrestore(&priv->sta_lock, flags);
454}
455
456
457u8 iwl_add_station(struct iwl_priv *priv, const u8 *addr, int is_ap, u8 flags)
458{
459 int i;
460 int index = IWL_INVALID_STATION;
461 struct iwl_station_entry *station;
462 unsigned long flags_spin;
0795af57 463 DECLARE_MAC_BUF(mac);
b481de9c
ZY
464
465 spin_lock_irqsave(&priv->sta_lock, flags_spin);
466 if (is_ap)
467 index = IWL_AP_ID;
468 else if (is_broadcast_ether_addr(addr))
469 index = priv->hw_setting.bcast_sta_id;
470 else
471 for (i = IWL_STA_ID; i < priv->hw_setting.max_stations; i++) {
472 if (!compare_ether_addr(priv->stations[i].sta.sta.addr,
473 addr)) {
474 index = i;
475 break;
476 }
477
478 if (!priv->stations[i].used &&
479 index == IWL_INVALID_STATION)
480 index = i;
481 }
482
483 /* These twh conditions has the same outcome but keep them separate
484 since they have different meaning */
485 if (unlikely(index == IWL_INVALID_STATION)) {
486 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
487 return index;
488 }
489
490 if (priv->stations[index].used &&
491 !compare_ether_addr(priv->stations[index].sta.sta.addr, addr)) {
492 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
493 return index;
494 }
495
0795af57 496 IWL_DEBUG_ASSOC("Add STA ID %d: %s\n", index, print_mac(mac, addr));
b481de9c
ZY
497 station = &priv->stations[index];
498 station->used = 1;
499 priv->num_stations++;
500
501 memset(&station->sta, 0, sizeof(struct iwl_addsta_cmd));
502 memcpy(station->sta.sta.addr, addr, ETH_ALEN);
503 station->sta.mode = 0;
504 station->sta.sta.sta_id = index;
505 station->sta.station_flags = 0;
506
507 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
508 iwl_send_add_station(priv, &station->sta, flags);
509 return index;
510
511}
512
513/*************** DRIVER STATUS FUNCTIONS *****/
514
515static inline int iwl_is_ready(struct iwl_priv *priv)
516{
517 /* The adapter is 'ready' if READY and GEO_CONFIGURED bits are
518 * set but EXIT_PENDING is not */
519 return test_bit(STATUS_READY, &priv->status) &&
520 test_bit(STATUS_GEO_CONFIGURED, &priv->status) &&
521 !test_bit(STATUS_EXIT_PENDING, &priv->status);
522}
523
/* Non-zero once the uCode has reported ALIVE */
static inline int iwl_is_alive(struct iwl_priv *priv)
{
	return test_bit(STATUS_ALIVE, &priv->status);
}
528
/* Non-zero once the INIT status bit has been set */
static inline int iwl_is_init(struct iwl_priv *priv)
{
	return test_bit(STATUS_INIT, &priv->status);
}
533
534static inline int iwl_is_rfkill(struct iwl_priv *priv)
535{
536 return test_bit(STATUS_RF_KILL_HW, &priv->status) ||
537 test_bit(STATUS_RF_KILL_SW, &priv->status);
538}
539
/* Like iwl_is_ready(), but an rfkilled radio is never ready */
static inline int iwl_is_ready_rf(struct iwl_priv *priv)
{
	return iwl_is_rfkill(priv) ? 0 : iwl_is_ready(priv);
}
548
549/*************** HOST COMMAND QUEUE FUNCTIONS *****/
550
/* Expands to a "case x: return "x";" arm for get_cmd_string() below */
#define IWL_CMD(x) case x : return #x

/**
 * get_cmd_string - map a host-command id to its symbolic name
 * @cmd: command id byte
 *
 * Used for debug and error messages only; unknown ids yield "UNKNOWN".
 */
static const char *get_cmd_string(u8 cmd)
{
	switch (cmd) {
		IWL_CMD(REPLY_ALIVE);
		IWL_CMD(REPLY_ERROR);
		IWL_CMD(REPLY_RXON);
		IWL_CMD(REPLY_RXON_ASSOC);
		IWL_CMD(REPLY_QOS_PARAM);
		IWL_CMD(REPLY_RXON_TIMING);
		IWL_CMD(REPLY_ADD_STA);
		IWL_CMD(REPLY_REMOVE_STA);
		IWL_CMD(REPLY_REMOVE_ALL_STA);
		IWL_CMD(REPLY_3945_RX);
		IWL_CMD(REPLY_TX);
		IWL_CMD(REPLY_RATE_SCALE);
		IWL_CMD(REPLY_LEDS_CMD);
		IWL_CMD(REPLY_TX_LINK_QUALITY_CMD);
		IWL_CMD(RADAR_NOTIFICATION);
		IWL_CMD(REPLY_QUIET_CMD);
		IWL_CMD(REPLY_CHANNEL_SWITCH);
		IWL_CMD(CHANNEL_SWITCH_NOTIFICATION);
		IWL_CMD(REPLY_SPECTRUM_MEASUREMENT_CMD);
		IWL_CMD(SPECTRUM_MEASURE_NOTIFICATION);
		IWL_CMD(POWER_TABLE_CMD);
		IWL_CMD(PM_SLEEP_NOTIFICATION);
		IWL_CMD(PM_DEBUG_STATISTIC_NOTIFIC);
		IWL_CMD(REPLY_SCAN_CMD);
		IWL_CMD(REPLY_SCAN_ABORT_CMD);
		IWL_CMD(SCAN_START_NOTIFICATION);
		IWL_CMD(SCAN_RESULTS_NOTIFICATION);
		IWL_CMD(SCAN_COMPLETE_NOTIFICATION);
		IWL_CMD(BEACON_NOTIFICATION);
		IWL_CMD(REPLY_TX_BEACON);
		IWL_CMD(WHO_IS_AWAKE_NOTIFICATION);
		IWL_CMD(QUIET_NOTIFICATION);
		IWL_CMD(REPLY_TX_PWR_TABLE_CMD);
		IWL_CMD(MEASURE_ABORT_NOTIFICATION);
		IWL_CMD(REPLY_BT_CONFIG);
		IWL_CMD(REPLY_STATISTICS_CMD);
		IWL_CMD(STATISTICS_NOTIFICATION);
		IWL_CMD(REPLY_CARD_STATE_CMD);
		IWL_CMD(CARD_STATE_NOTIFICATION);
		IWL_CMD(MISSED_BEACONS_NOTIFICATION);
	default:
		return "UNKNOWN";

	}
}
601
/* Max time (in jiffies) to wait for a synchronous host command to finish */
#define HOST_COMPLETE_TIMEOUT (HZ / 2)
603
/**
 * iwl_enqueue_hcmd - enqueue a uCode command
 * @priv: device private data point
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns < 0 values to indicate the operation is
 * failed. On success, it returns the index (> 0) of command in the
 * command queue.
 */
static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
	struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
	struct iwl_queue *q = &txq->q;
	struct iwl_tfd_frame *tfd;
	u32 *control_flags;
	struct iwl_cmd *out_cmd;
	u32 idx;
	u16 fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));
	dma_addr_t phys_addr;
	int pad;
	u16 count;
	int ret;
	unsigned long flags;

	/* If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE, and it sent as a 'small' command then
	 * we will need to increase the size of the TFD entries */
	BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
	       !(cmd->meta.flags & CMD_SIZE_HUGE));

	/* async callers need an extra free slot for the response */
	if (iwl_queue_space(q) < ((cmd->meta.flags & CMD_ASYNC) ? 2 : 1)) {
		IWL_ERROR("No space for Tx\n");
		return -ENOSPC;
	}

	spin_lock_irqsave(&priv->hcmd_lock, flags);

	/* claim the next TFD in the ring and start from a clean slate */
	tfd = &txq->bd[q->first_empty];
	memset(tfd, 0, sizeof(*tfd));

	control_flags = (u32 *) tfd;

	idx = get_cmd_index(q, q->first_empty, cmd->meta.flags & CMD_SIZE_HUGE);
	out_cmd = &txq->cmd[idx];

	out_cmd->hdr.cmd = cmd->id;
	memcpy(&out_cmd->meta, &cmd->meta, sizeof(cmd->meta));
	memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);

	/* At this point, the out_cmd now has all of the incoming cmd
	 * information */

	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) |
			INDEX_TO_SEQ(q->first_empty));
	if (out_cmd->meta.flags & CMD_SIZE_HUGE)
		out_cmd->hdr.sequence |= cpu_to_le16(SEQ_HUGE_FRAME);

	/* point the TFD at the command's DMA location (header included) */
	phys_addr = txq->dma_addr_cmd + sizeof(txq->cmd[0]) * idx +
			offsetof(struct iwl_cmd, hdr);
	iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, fix_size);

	pad = U32_PAD(cmd->len);
	count = TFD_CTL_COUNT_GET(*control_flags);
	*control_flags = TFD_CTL_COUNT_SET(count) | TFD_CTL_PAD_SET(pad);

	IWL_DEBUG_HC("Sending command %s (#%x), seq: 0x%04X, "
		     "%d bytes at %d[%d]:%d\n",
		     get_cmd_string(out_cmd->hdr.cmd),
		     out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
		     fix_size, q->first_empty, idx, IWL_CMD_QUEUE_NUM);

	/* advance the write pointer and kick the hardware */
	txq->need_update = 1;
	q->first_empty = iwl_queue_inc_wrap(q->first_empty, q->n_bd);
	ret = iwl_tx_queue_update_write_ptr(priv, txq);

	spin_unlock_irqrestore(&priv->hcmd_lock, flags);
	return ret ? ret : idx;
}
683
684int iwl_send_cmd_async(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
685{
686 int ret;
687
688 BUG_ON(!(cmd->meta.flags & CMD_ASYNC));
689
690 /* An asynchronous command can not expect an SKB to be set. */
691 BUG_ON(cmd->meta.flags & CMD_WANT_SKB);
692
693 /* An asynchronous command MUST have a callback. */
694 BUG_ON(!cmd->meta.u.callback);
695
696 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
697 return -EBUSY;
698
699 ret = iwl_enqueue_hcmd(priv, cmd);
700 if (ret < 0) {
701 IWL_ERROR("Error sending %s: iwl_enqueue_hcmd failed: %d\n",
702 get_cmd_string(cmd->id), ret);
703 return ret;
704 }
705 return 0;
706}
707
/**
 * iwl_send_cmd_sync - send a host command and sleep until it completes
 * @priv: driver private data
 * @cmd: command to send; CMD_ASYNC must not be set
 *
 * Waits up to HOST_COMPLETE_TIMEOUT for the ISR to clear
 * STATUS_HCMD_ACTIVE.  Returns 0 on success or a negative errno
 * (-EBUSY, -ETIMEDOUT, -ECANCELED on rfkill, -EIO).  Not reentrant:
 * only one synchronous command may be in flight at a time.
 */
int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
	int cmd_idx;
	int ret;
	static atomic_t entry = ATOMIC_INIT(0); /* reentrance protection */

	BUG_ON(cmd->meta.flags & CMD_ASYNC);

	/* A synchronous command can not have a callback set. */
	BUG_ON(cmd->meta.u.callback != NULL);

	/* reject a second concurrent synchronous command */
	if (atomic_xchg(&entry, 1)) {
		IWL_ERROR("Error sending %s: Already sending a host command\n",
			  get_cmd_string(cmd->id));
		return -EBUSY;
	}

	set_bit(STATUS_HCMD_ACTIVE, &priv->status);

	/* let the response path know where to deliver the SKB */
	if (cmd->meta.flags & CMD_WANT_SKB)
		cmd->meta.source = &cmd->meta;

	cmd_idx = iwl_enqueue_hcmd(priv, cmd);
	if (cmd_idx < 0) {
		ret = cmd_idx;
		IWL_ERROR("Error sending %s: iwl_enqueue_hcmd failed: %d\n",
			  get_cmd_string(cmd->id), ret);
		goto out;
	}

	/* sleep until the completion path clears STATUS_HCMD_ACTIVE */
	ret = wait_event_interruptible_timeout(priv->wait_command_queue,
			!test_bit(STATUS_HCMD_ACTIVE, &priv->status),
			HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		if (test_bit(STATUS_HCMD_ACTIVE, &priv->status)) {
			IWL_ERROR("Error sending %s: time out after %dms.\n",
				  get_cmd_string(cmd->id),
				  jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

			clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
			ret = -ETIMEDOUT;
			goto cancel;
		}
	}

	if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
		IWL_DEBUG_INFO("Command %s aborted: RF KILL Switch\n",
			       get_cmd_string(cmd->id));
		ret = -ECANCELED;
		goto fail;
	}
	if (test_bit(STATUS_FW_ERROR, &priv->status)) {
		IWL_DEBUG_INFO("Command %s failed: FW Error\n",
			       get_cmd_string(cmd->id));
		ret = -EIO;
		goto fail;
	}
	if ((cmd->meta.flags & CMD_WANT_SKB) && !cmd->meta.u.skb) {
		IWL_ERROR("Error: Response NULL in '%s'\n",
			  get_cmd_string(cmd->id));
		ret = -EIO;
		goto out;
	}

	ret = 0;
	goto out;

cancel:
	if (cmd->meta.flags & CMD_WANT_SKB) {
		struct iwl_cmd *qcmd;

		/* Cancel the CMD_WANT_SKB flag for the cmd in the
		 * TX cmd queue. Otherwise in case the cmd comes
		 * in later, it will possibly set an invalid
		 * address (cmd->meta.source). */
		qcmd = &priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_idx];
		qcmd->meta.flags &= ~CMD_WANT_SKB;
	}
fail:
	/* drop any response SKB the caller will no longer consume */
	if (cmd->meta.u.skb) {
		dev_kfree_skb_any(cmd->meta.u.skb);
		cmd->meta.u.skb = NULL;
	}
out:
	atomic_set(&entry, 0);
	return ret;
}
795
796int iwl_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
797{
798 /* A command can not be asynchronous AND expect an SKB to be set. */
799 BUG_ON((cmd->meta.flags & CMD_ASYNC) &&
800 (cmd->meta.flags & CMD_WANT_SKB));
801
802 if (cmd->meta.flags & CMD_ASYNC)
803 return iwl_send_cmd_async(priv, cmd);
804
805 return iwl_send_cmd_sync(priv, cmd);
806}
807
808int iwl_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len, const void *data)
809{
810 struct iwl_host_cmd cmd = {
811 .id = id,
812 .len = len,
813 .data = data,
814 };
815
816 return iwl_send_cmd_sync(priv, &cmd);
817}
818
819static int __must_check iwl_send_cmd_u32(struct iwl_priv *priv, u8 id, u32 val)
820{
821 struct iwl_host_cmd cmd = {
822 .id = id,
823 .len = sizeof(val),
824 .data = &val,
825 };
826
827 return iwl_send_cmd_sync(priv, &cmd);
828}
829
/* Synchronously request a statistics notification from the uCode
 * (u32 argument is 0; its meaning is defined by the command -- see
 * REPLY_STATISTICS_CMD in the command headers). */
int iwl_send_statistics_request(struct iwl_priv *priv)
{
	return iwl_send_cmd_u32(priv, REPLY_STATISTICS_CMD, 0);
}
834
835/**
836 * iwl_rxon_add_station - add station into station table.
837 *
838 * there is only one AP station with id= IWL_AP_ID
839 * NOTE: mutex must be held before calling the this fnction
840*/
841static int iwl_rxon_add_station(struct iwl_priv *priv,
842 const u8 *addr, int is_ap)
843{
844 u8 rc;
845
846 /* Remove this station if it happens to already exist */
847 iwl_remove_station(priv, addr, is_ap);
848
849 rc = iwl_add_station(priv, addr, is_ap, 0);
850
851 return rc;
852}
853
854/**
855 * iwl_set_rxon_channel - Set the phymode and channel values in staging RXON
856 * @phymode: MODE_IEEE80211A sets to 5.2GHz; all else set to 2.4GHz
857 * @channel: Any channel valid for the requested phymode
858
859 * In addition to setting the staging RXON, priv->phymode is also set.
860 *
861 * NOTE: Does not commit to the hardware; it sets appropriate bit fields
862 * in the staging RXON flag structure based on the phymode
863 */
864static int iwl_set_rxon_channel(struct iwl_priv *priv, u8 phymode, u16 channel)
865{
866 if (!iwl_get_channel_info(priv, phymode, channel)) {
867 IWL_DEBUG_INFO("Could not set channel to %d [%d]\n",
868 channel, phymode);
869 return -EINVAL;
870 }
871
872 if ((le16_to_cpu(priv->staging_rxon.channel) == channel) &&
873 (priv->phymode == phymode))
874 return 0;
875
876 priv->staging_rxon.channel = cpu_to_le16(channel);
877 if (phymode == MODE_IEEE80211A)
878 priv->staging_rxon.flags &= ~RXON_FLG_BAND_24G_MSK;
879 else
880 priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK;
881
882 priv->phymode = phymode;
883
884 IWL_DEBUG_INFO("Staging channel set to %d [%d]\n", channel, phymode);
885
886 return 0;
887}
888
/**
 * iwl_check_rxon_cmd - validate RXON structure is valid
 *
 * NOTE: This is really only useful during development and can eventually
 * be #ifdef'd out once the driver is stable and folks aren't actively
 * making changes
 *
 * Returns 0 when the RXON is consistent, -1 otherwise.  Each check below
 * ORs its failure into 'error' and logs with a running counter so the
 * first failing check can be identified in the log.
 */
static int iwl_check_rxon_cmd(struct iwl_rxon_cmd *rxon)
{
	int error = 0;
	int counter = 1;

	if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
		/* flags that are invalid on the 2.4 GHz band */
		error |= le32_to_cpu(rxon->flags &
				(RXON_FLG_TGJ_NARROW_BAND_MSK |
				 RXON_FLG_RADAR_DETECT_MSK));
		if (error)
			IWL_WARNING("check 24G fields %d | %d\n",
				    counter++, error);
	} else {
		/* 5 GHz requires short slot and forbids CCK */
		error |= (rxon->flags & RXON_FLG_SHORT_SLOT_MSK) ?
				0 : le32_to_cpu(RXON_FLG_SHORT_SLOT_MSK);
		if (error)
			IWL_WARNING("check 52 fields %d | %d\n",
				    counter++, error);
		error |= le32_to_cpu(rxon->flags & RXON_FLG_CCK_MSK);
		if (error)
			IWL_WARNING("check 52 CCK %d | %d\n",
				    counter++, error);
	}
	/* the multicast bit (LSB of octet 0) must be clear in our own
	 * node and BSSID addresses */
	error |= (rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1;
	if (error)
		IWL_WARNING("check mac addr %d | %d\n", counter++, error);

	/* make sure basic rates 6Mbps and 1Mbps are supported */
	error |= (((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0) &&
		  ((rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0));
	if (error)
		IWL_WARNING("check basic rate %d | %d\n", counter++, error);

	/* 802.11 association IDs are bounded by 2007 */
	error |= (le16_to_cpu(rxon->assoc_id) > 2007);
	if (error)
		IWL_WARNING("check assoc id %d | %d\n", counter++, error);

	/* mutually exclusive flag combinations */
	error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK))
		  == (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK));
	if (error)
		IWL_WARNING("check CCK and short slot %d | %d\n",
			    counter++, error);

	error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
		  == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK));
	if (error)
		IWL_WARNING("check CCK & auto detect %d | %d\n",
			    counter++, error);

	error |= ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK |
			RXON_FLG_TGG_PROTECT_MSK)) == RXON_FLG_TGG_PROTECT_MSK);
	if (error)
		IWL_WARNING("check TGG and auto detect %d | %d\n",
			    counter++, error);

	/* if diversity is disabled at least one antenna must be selected */
	if ((rxon->flags & RXON_FLG_DIS_DIV_MSK))
		error |= ((rxon->flags & (RXON_FLG_ANT_B_MSK |
					  RXON_FLG_ANT_A_MSK)) == 0);
	if (error)
		IWL_WARNING("check antenna %d %d\n", counter++, error);

	if (error)
		IWL_WARNING("Tuning to channel %d\n",
			    le16_to_cpu(rxon->channel));

	if (error) {
		IWL_ERROR("Not a valid iwl_rxon_assoc_cmd field values\n");
		return -1;
	}
	return 0;
}
967
968/**
969 * iwl_full_rxon_required - determine if RXON_ASSOC can be used in RXON commit
970 * @priv: staging_rxon is comapred to active_rxon
971 *
972 * If the RXON structure is changing sufficient to require a new
973 * tune or to clear and reset the RXON_FILTER_ASSOC_MSK then return 1
974 * to indicate a new tune is required.
975 */
976static int iwl_full_rxon_required(struct iwl_priv *priv)
977{
978
979 /* These items are only settable from the full RXON command */
980 if (!(priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) ||
981 compare_ether_addr(priv->staging_rxon.bssid_addr,
982 priv->active_rxon.bssid_addr) ||
983 compare_ether_addr(priv->staging_rxon.node_addr,
984 priv->active_rxon.node_addr) ||
985 compare_ether_addr(priv->staging_rxon.wlap_bssid_addr,
986 priv->active_rxon.wlap_bssid_addr) ||
987 (priv->staging_rxon.dev_type != priv->active_rxon.dev_type) ||
988 (priv->staging_rxon.channel != priv->active_rxon.channel) ||
989 (priv->staging_rxon.air_propagation !=
990 priv->active_rxon.air_propagation) ||
991 (priv->staging_rxon.assoc_id != priv->active_rxon.assoc_id))
992 return 1;
993
994 /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
995 * be updated with the RXON_ASSOC command -- however only some
996 * flag transitions are allowed using RXON_ASSOC */
997
998 /* Check if we are not switching bands */
999 if ((priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) !=
1000 (priv->active_rxon.flags & RXON_FLG_BAND_24G_MSK))
1001 return 1;
1002
1003 /* Check if we are switching association toggle */
1004 if ((priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) !=
1005 (priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK))
1006 return 1;
1007
1008 return 0;
1009}
1010
/**
 * iwl_send_rxon_assoc - send only the RXON_ASSOC subset of the staging RXON
 * @priv: driver private data
 *
 * Sends flags, filter_flags and the basic-rate sets from staging_rxon;
 * skips the command entirely when none of these differ from active_rxon.
 * Returns 0 on success or a negative errno.
 */
static int iwl_send_rxon_assoc(struct iwl_priv *priv)
{
	int rc = 0;
	struct iwl_rx_packet *res = NULL;
	struct iwl_rxon_assoc_cmd rxon_assoc;
	struct iwl_host_cmd cmd = {
		.id = REPLY_RXON_ASSOC,
		.len = sizeof(rxon_assoc),
		.meta.flags = CMD_WANT_SKB,
		.data = &rxon_assoc,
	};
	const struct iwl_rxon_cmd *rxon1 = &priv->staging_rxon;
	const struct iwl_rxon_cmd *rxon2 = &priv->active_rxon;

	/* nothing RXON_ASSOC can carry has changed -- skip the command */
	if ((rxon1->flags == rxon2->flags) &&
	    (rxon1->filter_flags == rxon2->filter_flags) &&
	    (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
	    (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
		IWL_DEBUG_INFO("Using current RXON_ASSOC. Not resending.\n");
		return 0;
	}

	rxon_assoc.flags = priv->staging_rxon.flags;
	rxon_assoc.filter_flags = priv->staging_rxon.filter_flags;
	rxon_assoc.ofdm_basic_rates = priv->staging_rxon.ofdm_basic_rates;
	rxon_assoc.cck_basic_rates = priv->staging_rxon.cck_basic_rates;
	rxon_assoc.reserved = 0;

	rc = iwl_send_cmd_sync(priv, &cmd);
	if (rc)
		return rc;

	/* the response SKB carries the command status in its header */
	res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
	if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
		IWL_ERROR("Bad return from REPLY_RXON_ASSOC command\n");
		rc = -EIO;
	}

	priv->alloc_rxb_skb--;
	dev_kfree_skb_any(cmd.meta.u.skb);

	return rc;
}
1054
/**
 * iwl_commit_rxon - commit staging_rxon to hardware
 *
 * The RXON command in staging_rxon is commited to the hardware and
 * the active_rxon structure is updated with the new data. This
 * function correctly transitions out of the RXON_ASSOC_MSK state if
 * a HW tune is required based on the RXON structure changes.
 */
static int iwl_commit_rxon(struct iwl_priv *priv)
{
	/* cast away the const for active_rxon in this function */
	struct iwl_rxon_cmd *active_rxon = (void *)&priv->active_rxon;
	int rc = 0;
	DECLARE_MAC_BUF(mac);

	if (!iwl_is_alive(priv))
		return -1;

	/* always get timestamp with Rx frame */
	priv->staging_rxon.flags |= RXON_FLG_TSF2HOST_MSK;

	/* select antenna */
	priv->staging_rxon.flags &=
	    ~(RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_SEL_MSK);
	priv->staging_rxon.flags |= iwl3945_get_antenna_flags(priv);

	rc = iwl_check_rxon_cmd(&priv->staging_rxon);
	if (rc) {
		IWL_ERROR("Invalid RXON configuration. Not committing.\n");
		return -EINVAL;
	}

	/* If we don't need to send a full RXON, we can use
	 * iwl_rxon_assoc_cmd which is used to reconfigure filter
	 * and other flags for the current radio configuration. */
	if (!iwl_full_rxon_required(priv)) {
		rc = iwl_send_rxon_assoc(priv);
		if (rc) {
			IWL_ERROR("Error setting RXON_ASSOC "
				  "configuration (%d).\n", rc);
			return rc;
		}

		/* Lightweight update succeeded; staging is now active */
		memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon));

		return 0;
	}

	/* If we are currently associated and the new config requires
	 * an RXON_ASSOC and the new config wants the associated mask enabled,
	 * we must clear the associated from the active configuration
	 * before we apply the new config */
	if (iwl_is_associated(priv) &&
	    (priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK)) {
		IWL_DEBUG_INFO("Toggling associated bit on current RXON\n");
		active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;

		rc = iwl_send_cmd_pdu(priv, REPLY_RXON,
				      sizeof(struct iwl_rxon_cmd),
				      &priv->active_rxon);

		/* If the mask clearing failed then we set
		 * active_rxon back to what it was previously */
		if (rc) {
			active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
			IWL_ERROR("Error clearing ASSOC_MSK on current "
				  "configuration (%d).\n", rc);
			return rc;
		}

		/* The RXON bit toggling will have cleared out the
		 * station table in the uCode, so blank it in the driver
		 * as well */
		iwl_clear_stations_table(priv);
	} else if (priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) {
		/* When switching from non-associated to associated, the
		 * uCode clears out the station table; so clear it in the
		 * driver as well */
		iwl_clear_stations_table(priv);
	}

	IWL_DEBUG_INFO("Sending RXON\n"
		       "* with%s RXON_FILTER_ASSOC_MSK\n"
		       "* channel = %d\n"
		       "* bssid = %s\n",
		       ((priv->staging_rxon.filter_flags &
			 RXON_FILTER_ASSOC_MSK) ? "" : "out"),
		       le16_to_cpu(priv->staging_rxon.channel),
		       print_mac(mac, priv->staging_rxon.bssid_addr));

	/* Apply the new configuration */
	rc = iwl_send_cmd_pdu(priv, REPLY_RXON,
			      sizeof(struct iwl_rxon_cmd), &priv->staging_rxon);
	if (rc) {
		IWL_ERROR("Error setting new configuration (%d).\n", rc);
		return rc;
	}

	memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon));

	/* If we issue a new RXON command which required a tune then we must
	 * send a new TXPOWER command or we won't be able to Tx any frames */
	rc = iwl_hw_reg_send_txpower(priv);
	if (rc) {
		IWL_ERROR("Error setting Tx power (%d).\n", rc);
		return rc;
	}

	/* Add the broadcast address so we can send broadcast frames */
	if (iwl_rxon_add_station(priv, BROADCAST_ADDR, 0) ==
	    IWL_INVALID_STATION) {
		IWL_ERROR("Error adding BROADCAST address for transmit.\n");
		return -EIO;
	}

	/* If we have set the ASSOC_MSK and we are in BSS mode then
	 * add the IWL_AP_ID to the station rate table */
	if (iwl_is_associated(priv) &&
	    (priv->iw_mode == IEEE80211_IF_TYPE_STA))
		if (iwl_rxon_add_station(priv, priv->active_rxon.bssid_addr, 1)
		    == IWL_INVALID_STATION) {
			IWL_ERROR("Error adding AP address for transmit.\n");
			return -EIO;
		}

	/* Init the hardware's rate fallback order based on the
	 * phymode */
	rc = iwl3945_init_hw_rate_table(priv);
	if (rc) {
		IWL_ERROR("Error setting HW rate table: %02X\n", rc);
		return -EIO;
	}

	return 0;
}
1190
1191static int iwl_send_bt_config(struct iwl_priv *priv)
1192{
1193 struct iwl_bt_cmd bt_cmd = {
1194 .flags = 3,
1195 .lead_time = 0xAA,
1196 .max_kill = 1,
1197 .kill_ack_mask = 0,
1198 .kill_cts_mask = 0,
1199 };
1200
1201 return iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG,
1202 sizeof(struct iwl_bt_cmd), &bt_cmd);
1203}
1204
1205static int iwl_send_scan_abort(struct iwl_priv *priv)
1206{
1207 int rc = 0;
1208 struct iwl_rx_packet *res;
1209 struct iwl_host_cmd cmd = {
1210 .id = REPLY_SCAN_ABORT_CMD,
1211 .meta.flags = CMD_WANT_SKB,
1212 };
1213
1214 /* If there isn't a scan actively going on in the hardware
1215 * then we are in between scan bands and not actually
1216 * actively scanning, so don't send the abort command */
1217 if (!test_bit(STATUS_SCAN_HW, &priv->status)) {
1218 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
1219 return 0;
1220 }
1221
1222 rc = iwl_send_cmd_sync(priv, &cmd);
1223 if (rc) {
1224 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
1225 return rc;
1226 }
1227
1228 res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
1229 if (res->u.status != CAN_ABORT_STATUS) {
1230 /* The scan abort will return 1 for success or
1231 * 2 for "failure". A failure condition can be
1232 * due to simply not being in an active scan which
1233 * can occur if we send the scan abort before we
1234 * the microcode has notified us that a scan is
1235 * completed. */
1236 IWL_DEBUG_INFO("SCAN_ABORT returned %d.\n", res->u.status);
1237 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
1238 clear_bit(STATUS_SCAN_HW, &priv->status);
1239 }
1240
1241 dev_kfree_skb_any(cmd.meta.u.skb);
1242
1243 return rc;
1244}
1245
/*
 * Async completion callback for CARD_STATE commands.  Returning 1
 * tells the command machinery we did not keep the skb, so it may be
 * freed by the caller.
 */
static int iwl_card_state_sync_callback(struct iwl_priv *priv,
					struct iwl_cmd *cmd,
					struct sk_buff *skb)
{
	return 1;
}
1252
1253/*
1254 * CARD_STATE_CMD
1255 *
1256 * Use: Sets the internal card state to enable, disable, or halt
1257 *
1258 * When in the 'enable' state the card operates as normal.
1259 * When in the 'disable' state, the card enters into a low power mode.
1260 * When in the 'halt' state, the card is shut down and must be fully
1261 * restarted to come back on.
1262 */
1263static int iwl_send_card_state(struct iwl_priv *priv, u32 flags, u8 meta_flag)
1264{
1265 struct iwl_host_cmd cmd = {
1266 .id = REPLY_CARD_STATE_CMD,
1267 .len = sizeof(u32),
1268 .data = &flags,
1269 .meta.flags = meta_flag,
1270 };
1271
1272 if (meta_flag & CMD_ASYNC)
1273 cmd.meta.u.callback = iwl_card_state_sync_callback;
1274
1275 return iwl_send_cmd(priv, &cmd);
1276}
1277
1278static int iwl_add_sta_sync_callback(struct iwl_priv *priv,
1279 struct iwl_cmd *cmd, struct sk_buff *skb)
1280{
1281 struct iwl_rx_packet *res = NULL;
1282
1283 if (!skb) {
1284 IWL_ERROR("Error: Response NULL in REPLY_ADD_STA.\n");
1285 return 1;
1286 }
1287
1288 res = (struct iwl_rx_packet *)skb->data;
1289 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
1290 IWL_ERROR("Bad return from REPLY_ADD_STA (0x%08X)\n",
1291 res->hdr.flags);
1292 return 1;
1293 }
1294
1295 switch (res->u.add_sta.status) {
1296 case ADD_STA_SUCCESS_MSK:
1297 break;
1298 default:
1299 break;
1300 }
1301
1302 /* We didn't cache the SKB; let the caller free it */
1303 return 1;
1304}
1305
1306int iwl_send_add_station(struct iwl_priv *priv,
1307 struct iwl_addsta_cmd *sta, u8 flags)
1308{
1309 struct iwl_rx_packet *res = NULL;
1310 int rc = 0;
1311 struct iwl_host_cmd cmd = {
1312 .id = REPLY_ADD_STA,
1313 .len = sizeof(struct iwl_addsta_cmd),
1314 .meta.flags = flags,
1315 .data = sta,
1316 };
1317
1318 if (flags & CMD_ASYNC)
1319 cmd.meta.u.callback = iwl_add_sta_sync_callback;
1320 else
1321 cmd.meta.flags |= CMD_WANT_SKB;
1322
1323 rc = iwl_send_cmd(priv, &cmd);
1324
1325 if (rc || (flags & CMD_ASYNC))
1326 return rc;
1327
1328 res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
1329 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
1330 IWL_ERROR("Bad return from REPLY_ADD_STA (0x%08X)\n",
1331 res->hdr.flags);
1332 rc = -EIO;
1333 }
1334
1335 if (rc == 0) {
1336 switch (res->u.add_sta.status) {
1337 case ADD_STA_SUCCESS_MSK:
1338 IWL_DEBUG_INFO("REPLY_ADD_STA PASSED\n");
1339 break;
1340 default:
1341 rc = -EIO;
1342 IWL_WARNING("REPLY_ADD_STA failed\n");
1343 break;
1344 }
1345 }
1346
1347 priv->alloc_rxb_skb--;
1348 dev_kfree_skb_any(cmd.meta.u.skb);
1349
1350 return rc;
1351}
1352
1353static int iwl_update_sta_key_info(struct iwl_priv *priv,
1354 struct ieee80211_key_conf *keyconf,
1355 u8 sta_id)
1356{
1357 unsigned long flags;
1358 __le16 key_flags = 0;
1359
1360 switch (keyconf->alg) {
1361 case ALG_CCMP:
1362 key_flags |= STA_KEY_FLG_CCMP;
1363 key_flags |= cpu_to_le16(
1364 keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
1365 key_flags &= ~STA_KEY_FLG_INVALID;
1366 break;
1367 case ALG_TKIP:
1368 case ALG_WEP:
1369 return -EINVAL;
1370 default:
1371 return -EINVAL;
1372 }
1373 spin_lock_irqsave(&priv->sta_lock, flags);
1374 priv->stations[sta_id].keyinfo.alg = keyconf->alg;
1375 priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
1376 memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key,
1377 keyconf->keylen);
1378
1379 memcpy(priv->stations[sta_id].sta.key.key, keyconf->key,
1380 keyconf->keylen);
1381 priv->stations[sta_id].sta.key.key_flags = key_flags;
1382 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
1383 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
1384
1385 spin_unlock_irqrestore(&priv->sta_lock, flags);
1386
1387 IWL_DEBUG_INFO("hwcrypto: modify ucode station key info\n");
1388 iwl_send_add_station(priv, &priv->stations[sta_id].sta, 0);
1389 return 0;
1390}
1391
1392static int iwl_clear_sta_key_info(struct iwl_priv *priv, u8 sta_id)
1393{
1394 unsigned long flags;
1395
1396 spin_lock_irqsave(&priv->sta_lock, flags);
1397 memset(&priv->stations[sta_id].keyinfo, 0, sizeof(struct iwl_hw_key));
1398 memset(&priv->stations[sta_id].sta.key, 0, sizeof(struct iwl_keyinfo));
1399 priv->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC;
1400 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
1401 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
1402 spin_unlock_irqrestore(&priv->sta_lock, flags);
1403
1404 IWL_DEBUG_INFO("hwcrypto: clear ucode station key info\n");
1405 iwl_send_add_station(priv, &priv->stations[sta_id].sta, 0);
1406 return 0;
1407}
1408
1409static void iwl_clear_free_frames(struct iwl_priv *priv)
1410{
1411 struct list_head *element;
1412
1413 IWL_DEBUG_INFO("%d frames on pre-allocated heap on clear.\n",
1414 priv->frames_count);
1415
1416 while (!list_empty(&priv->free_frames)) {
1417 element = priv->free_frames.next;
1418 list_del(element);
1419 kfree(list_entry(element, struct iwl_frame, list));
1420 priv->frames_count--;
1421 }
1422
1423 if (priv->frames_count) {
1424 IWL_WARNING("%d frames still in use. Did we lose one?\n",
1425 priv->frames_count);
1426 priv->frames_count = 0;
1427 }
1428}
1429
1430static struct iwl_frame *iwl_get_free_frame(struct iwl_priv *priv)
1431{
1432 struct iwl_frame *frame;
1433 struct list_head *element;
1434 if (list_empty(&priv->free_frames)) {
1435 frame = kzalloc(sizeof(*frame), GFP_KERNEL);
1436 if (!frame) {
1437 IWL_ERROR("Could not allocate frame!\n");
1438 return NULL;
1439 }
1440
1441 priv->frames_count++;
1442 return frame;
1443 }
1444
1445 element = priv->free_frames.next;
1446 list_del(element);
1447 return list_entry(element, struct iwl_frame, list);
1448}
1449
1450static void iwl_free_frame(struct iwl_priv *priv, struct iwl_frame *frame)
1451{
1452 memset(frame, 0, sizeof(*frame));
1453 list_add(&frame->list, &priv->free_frames);
1454}
1455
1456unsigned int iwl_fill_beacon_frame(struct iwl_priv *priv,
1457 struct ieee80211_hdr *hdr,
1458 const u8 *dest, int left)
1459{
1460
1461 if (!iwl_is_associated(priv) || !priv->ibss_beacon ||
1462 ((priv->iw_mode != IEEE80211_IF_TYPE_IBSS) &&
1463 (priv->iw_mode != IEEE80211_IF_TYPE_AP)))
1464 return 0;
1465
1466 if (priv->ibss_beacon->len > left)
1467 return 0;
1468
1469 memcpy(hdr, priv->ibss_beacon->data, priv->ibss_beacon->len);
1470
1471 return priv->ibss_beacon->len;
1472}
1473
1474static int iwl_rate_index_from_plcp(int plcp)
1475{
1476 int i = 0;
1477
1478 for (i = 0; i < IWL_RATE_COUNT; i++)
1479 if (iwl_rates[i].plcp == plcp)
1480 return i;
1481 return -1;
1482}
1483
1484static u8 iwl_rate_get_lowest_plcp(int rate_mask)
1485{
1486 u8 i;
1487
1488 for (i = IWL_RATE_1M_INDEX; i != IWL_RATE_INVALID;
1489 i = iwl_rates[i].next_ieee) {
1490 if (rate_mask & (1 << i))
1491 return iwl_rates[i].plcp;
1492 }
1493
1494 return IWL_RATE_INVALID;
1495}
1496
1497static int iwl_send_beacon_cmd(struct iwl_priv *priv)
1498{
1499 struct iwl_frame *frame;
1500 unsigned int frame_size;
1501 int rc;
1502 u8 rate;
1503
1504 frame = iwl_get_free_frame(priv);
1505
1506 if (!frame) {
1507 IWL_ERROR("Could not obtain free frame buffer for beacon "
1508 "command.\n");
1509 return -ENOMEM;
1510 }
1511
1512 if (!(priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK)) {
1513 rate = iwl_rate_get_lowest_plcp(priv->active_rate_basic &
1514 0xFF0);
1515 if (rate == IWL_INVALID_RATE)
1516 rate = IWL_RATE_6M_PLCP;
1517 } else {
1518 rate = iwl_rate_get_lowest_plcp(priv->active_rate_basic & 0xF);
1519 if (rate == IWL_INVALID_RATE)
1520 rate = IWL_RATE_1M_PLCP;
1521 }
1522
1523 frame_size = iwl_hw_get_beacon_cmd(priv, frame, rate);
1524
1525 rc = iwl_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
1526 &frame->u.cmd[0]);
1527
1528 iwl_free_frame(priv, frame);
1529
1530 return rc;
1531}
1532
1533/******************************************************************************
1534 *
1535 * EEPROM related functions
1536 *
1537 ******************************************************************************/
1538
1539static void get_eeprom_mac(struct iwl_priv *priv, u8 *mac)
1540{
1541 memcpy(mac, priv->eeprom.mac_address, 6);
1542}
1543
1544/**
1545 * iwl_eeprom_init - read EEPROM contents
1546 *
1547 * Load the EEPROM from adapter into priv->eeprom
1548 *
1549 * NOTE: This routine uses the non-debug IO access functions.
1550 */
1551int iwl_eeprom_init(struct iwl_priv *priv)
1552{
1553 u16 *e = (u16 *)&priv->eeprom;
1554 u32 gp = iwl_read32(priv, CSR_EEPROM_GP);
1555 u32 r;
1556 int sz = sizeof(priv->eeprom);
1557 int rc;
1558 int i;
1559 u16 addr;
1560
1561 /* The EEPROM structure has several padding buffers within it
1562 * and when adding new EEPROM maps is subject to programmer errors
1563 * which may be very difficult to identify without explicitly
1564 * checking the resulting size of the eeprom map. */
1565 BUILD_BUG_ON(sizeof(priv->eeprom) != IWL_EEPROM_IMAGE_SIZE);
1566
1567 if ((gp & CSR_EEPROM_GP_VALID_MSK) == CSR_EEPROM_GP_BAD_SIGNATURE) {
1568 IWL_ERROR("EEPROM not found, EEPROM_GP=0x%08x", gp);
1569 return -ENOENT;
1570 }
1571
1572 rc = iwl_eeprom_aqcuire_semaphore(priv);
1573 if (rc < 0) {
1574 IWL_ERROR("Failed to aqcuire EEPROM semaphore.\n");
1575 return -ENOENT;
1576 }
1577
1578 /* eeprom is an array of 16bit values */
1579 for (addr = 0; addr < sz; addr += sizeof(u16)) {
1580 _iwl_write32(priv, CSR_EEPROM_REG, addr << 1);
1581 _iwl_clear_bit(priv, CSR_EEPROM_REG, CSR_EEPROM_REG_BIT_CMD);
1582
1583 for (i = 0; i < IWL_EEPROM_ACCESS_TIMEOUT;
1584 i += IWL_EEPROM_ACCESS_DELAY) {
1585 r = _iwl_read_restricted(priv, CSR_EEPROM_REG);
1586 if (r & CSR_EEPROM_REG_READ_VALID_MSK)
1587 break;
1588 udelay(IWL_EEPROM_ACCESS_DELAY);
1589 }
1590
1591 if (!(r & CSR_EEPROM_REG_READ_VALID_MSK)) {
1592 IWL_ERROR("Time out reading EEPROM[%d]", addr);
1593 return -ETIMEDOUT;
1594 }
1595 e[addr / 2] = le16_to_cpu(r >> 16);
1596 }
1597
1598 return 0;
1599}
1600
1601/******************************************************************************
1602 *
1603 * Misc. internal state and helper functions
1604 *
1605 ******************************************************************************/
1606#ifdef CONFIG_IWLWIFI_DEBUG
1607
/**
 * iwl_report_frame - dump frame to syslog during debug sessions
 *
 * hack this function to show different aspects of received frames,
 * including selective frame dumps.
 * group100 parameter selects whether to show 1 out of 100 good frames.
 *
 * TODO: ieee80211_hdr stuff is common to 3945 and 4965, so frame type
 * info output is okay, but some of this stuff (e.g. iwl_rx_frame_stats)
 * is 3945-specific and gives bad output for 4965. Need to split the
 * functionality, keep common stuff here.
 */
void iwl_report_frame(struct iwl_priv *priv,
		      struct iwl_rx_packet *pkt,
		      struct ieee80211_hdr *header, int group100)
{
	u32 to_us;
	u32 print_summary = 0;
	u32 print_dump = 0;	/* set to 1 to dump all frames' contents */
	u32 hundred = 0;
	u32 dataframe = 0;
	u16 fc;
	u16 seq_ctl;
	u16 channel;
	u16 phy_flags;
	int rate_sym;
	u16 length;
	u16 status;
	u16 bcn_tmr;
	u32 tsf_low;
	u64 tsf;
	u8 rssi;
	u8 agc;
	u16 sig_avg;
	u16 noise_diff;
	struct iwl_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt);
	struct iwl_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
	struct iwl_rx_frame_end *rx_end = IWL_RX_END(pkt);
	u8 *data = IWL_RX_DATA(pkt);

	/* MAC header */
	fc = le16_to_cpu(header->frame_control);
	seq_ctl = le16_to_cpu(header->seq_ctrl);

	/* metadata */
	channel = le16_to_cpu(rx_hdr->channel);
	phy_flags = le16_to_cpu(rx_hdr->phy_flags);
	rate_sym = rx_hdr->rate;
	length = le16_to_cpu(rx_hdr->len);

	/* end-of-frame status and timestamp */
	status = le32_to_cpu(rx_end->status);
	bcn_tmr = le32_to_cpu(rx_end->beacon_timestamp);
	tsf_low = le64_to_cpu(rx_end->timestamp) & 0x0ffffffff;
	tsf = le64_to_cpu(rx_end->timestamp);

	/* signal statistics */
	rssi = rx_stats->rssi;
	agc = rx_stats->agc;
	sig_avg = le16_to_cpu(rx_stats->sig_avg);
	noise_diff = le16_to_cpu(rx_stats->noise_diff);

	to_us = !compare_ether_addr(header->addr1, priv->mac_addr);

	/* if data frame is to us and all is good,
	 * (optionally) print summary for only 1 out of every 100 */
	if (to_us && (fc & ~IEEE80211_FCTL_PROTECTED) ==
	    (IEEE80211_FCTL_FROMDS | IEEE80211_FTYPE_DATA)) {
		dataframe = 1;
		if (!group100)
			print_summary = 1;	/* print each frame */
		else if (priv->framecnt_to_us < 100) {
			priv->framecnt_to_us++;
			print_summary = 0;
		} else {
			priv->framecnt_to_us = 0;
			print_summary = 1;
			hundred = 1;
		}
	} else {
		/* print summary for all other frames */
		print_summary = 1;
	}

	if (print_summary) {
		char *title;
		u32 rate;

		if (hundred)
			title = "100Frames";
		else if (fc & IEEE80211_FCTL_RETRY)
			title = "Retry";
		else if (ieee80211_is_assoc_response(fc))
			title = "AscRsp";
		else if (ieee80211_is_reassoc_response(fc))
			title = "RasRsp";
		else if (ieee80211_is_probe_response(fc)) {
			title = "PrbRsp";
			print_dump = 1;	/* dump frame contents */
		} else if (ieee80211_is_beacon(fc)) {
			title = "Beacon";
			print_dump = 1;	/* dump frame contents */
		} else if (ieee80211_is_atim(fc))
			title = "ATIM";
		else if (ieee80211_is_auth(fc))
			title = "Auth";
		else if (ieee80211_is_deauth(fc))
			title = "DeAuth";
		else if (ieee80211_is_disassoc(fc))
			title = "DisAssoc";
		else
			title = "Frame";

		/* rate is u32; the -1 "unknown" return still compares
		 * equal after the implicit conversion */
		rate = iwl_rate_index_from_plcp(rate_sym);
		if (rate == -1)
			rate = 0;
		else
			rate = iwl_rates[rate].ieee / 2;

		/* print frame summary.
		 * MAC addresses show just the last byte (for brevity),
		 * but you can hack it to show more, if you'd like to. */
		if (dataframe)
			IWL_DEBUG_RX("%s: mhd=0x%04x, dst=0x%02x, "
				     "len=%u, rssi=%d, chnl=%d, rate=%u, \n",
				     title, fc, header->addr1[5],
				     length, rssi, channel, rate);
		else {
			/* src/dst addresses assume managed mode */
			IWL_DEBUG_RX("%s: 0x%04x, dst=0x%02x, "
				     "src=0x%02x, rssi=%u, tim=%lu usec, "
				     "phy=0x%02x, chnl=%d\n",
				     title, fc, header->addr1[5],
				     header->addr3[5], rssi,
				     tsf_low - priv->scan_start_tsf,
				     phy_flags, channel);
		}
	}
	if (print_dump)
		iwl_print_hex_dump(IWL_DL_RX, data, length);
}
1749#endif
1750
1751static void iwl_unset_hw_setting(struct iwl_priv *priv)
1752{
1753 if (priv->hw_setting.shared_virt)
1754 pci_free_consistent(priv->pci_dev,
1755 sizeof(struct iwl_shared),
1756 priv->hw_setting.shared_virt,
1757 priv->hw_setting.shared_phys);
1758}
1759
/**
 * iwl_supported_rate_to_ie - fill in the supported rate in IE field
 *
 * return : set the bit for each supported rate insert in ie
 */
static u16 iwl_supported_rate_to_ie(u8 *ie, u16 supported_rate,
				    u16 basic_rate, int max_count)
{
	u16 ret_rates = 0, bit;
	int i;
	u8 *rates;

	/* ie[0] is the running count of rate octets written; the rate
	 * octets themselves start at ie[1] */
	rates = &(ie[1]);

	for (bit = 1, i = 0; i < IWL_RATE_COUNT; i++, bit <<= 1) {
		if (bit & supported_rate) {
			ret_rates |= bit;
			/* 0x80 marks a basic (mandatory) rate per 802.11 */
			rates[*ie] = iwl_rates[i].ieee |
				((bit & basic_rate) ? 0x80 : 0x00);
			*ie = *ie + 1;
			if (*ie >= max_count)
				break;
		}
	}

	return ret_rates;
}
1787
/**
 * iwl_fill_probe_req - fill in all required fields and IE for probe request
 *
 * @frame: buffer to fill; @left: bytes available in it
 * @is_direct: non-zero to include a directed SSID IE for priv->essid
 *
 * Returns the number of bytes written, or 0 if @left is too small for
 * the header and mandatory IEs.  Also refreshes priv->active_rate and
 * priv->active_rate_basic from priv->rates_mask as a side effect.
 */
static u16 iwl_fill_probe_req(struct iwl_priv *priv,
			      struct ieee80211_mgmt *frame,
			      int left, int is_direct)
{
	int len = 0;
	u8 *pos = NULL;
	u16 ret_rates;

	/* Make sure there is enough space for the probe request,
	 * two mandatory IEs and the data */
	left -= 24;
	if (left < 0)
		return 0;
	len += 24;

	frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
	memcpy(frame->da, BROADCAST_ADDR, ETH_ALEN);
	memcpy(frame->sa, priv->mac_addr, ETH_ALEN);
	memcpy(frame->bssid, BROADCAST_ADDR, ETH_ALEN);
	frame->seq_ctrl = 0;

	/* fill in our indirect SSID IE */
	/* ...next IE... */

	/* zero-length (wildcard) SSID: 2 bytes of IE header only */
	left -= 2;
	if (left < 0)
		return 0;
	len += 2;
	pos = &(frame->u.probe_req.variable[0]);
	*pos++ = WLAN_EID_SSID;
	*pos++ = 0;

	/* fill in our direct SSID IE... */
	if (is_direct) {
		/* ...next IE... */
		left -= 2 + priv->essid_len;
		if (left < 0)
			return 0;
		/* ... fill it in... */
		*pos++ = WLAN_EID_SSID;
		*pos++ = priv->essid_len;
		memcpy(pos, priv->essid, priv->essid_len);
		pos += priv->essid_len;
		len += 2 + priv->essid_len;
	}

	/* fill in supported rate */
	/* ...next IE... */
	left -= 2;
	if (left < 0)
		return 0;
	/* ... fill it in... */
	*pos++ = WLAN_EID_SUPP_RATES;
	*pos = 0;	/* *pos is the octet count, updated by the helper */
	ret_rates = priv->active_rate = priv->rates_mask;
	priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK;

	iwl_supported_rate_to_ie(pos, priv->active_rate,
				 priv->active_rate_basic, left);
	len += 2 + *pos;
	pos += (*pos) + 1;
	/* rates that did not fit in the first IE */
	ret_rates = ~ret_rates & priv->active_rate;

	if (ret_rates == 0)
		goto fill_end;

	/* fill in supported extended rate */
	/* ...next IE... */
	left -= 2;
	if (left < 0)
		return 0;
	/* ... fill it in... */
	*pos++ = WLAN_EID_EXT_SUPP_RATES;
	*pos = 0;
	iwl_supported_rate_to_ie(pos, ret_rates, priv->active_rate_basic, left);
	if (*pos > 0)
		len += 2 + *pos;

 fill_end:
	return (u16)len;
}
1872
1873/*
1874 * QoS support
1875*/
1876#ifdef CONFIG_IWLWIFI_QOS
1877static int iwl_send_qos_params_command(struct iwl_priv *priv,
1878 struct iwl_qosparam_cmd *qos)
1879{
1880
1881 return iwl_send_cmd_pdu(priv, REPLY_QOS_PARAM,
1882 sizeof(struct iwl_qosparam_cmd), qos);
1883}
1884
/*
 * iwl_reset_qos - reload default EDCA parameters into def_qos_parm
 *
 * Chooses contention-window and AIFS defaults based on interface type
 * and whether only legacy (CCK) rates are active, then fills all four
 * access-category entries under priv->lock.  Does not send anything to
 * the uCode; see iwl_activate_qos() for that.
 */
static void iwl_reset_qos(struct iwl_priv *priv)
{
	u16 cw_min = 15;
	u16 cw_max = 1023;
	u8 aifs = 2;
	u8 is_legacy = 0;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&priv->lock, flags);
	priv->qos_data.qos_active = 0;

	if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS) {
		if (priv->qos_data.qos_enable)
			priv->qos_data.qos_active = 1;
		/* no OFDM rates active: legacy timing, wider CW */
		if (!(priv->active_rate & 0xfff0)) {
			cw_min = 31;
			is_legacy = 1;
		}
	} else if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
		if (priv->qos_data.qos_enable)
			priv->qos_data.qos_active = 1;
	} else if (!(priv->staging_rxon.flags & RXON_FLG_SHORT_SLOT_MSK)) {
		cw_min = 31;
		is_legacy = 1;
	}

	if (priv->qos_data.qos_active)
		aifs = 3;

	/* AC 0 (best effort) always gets the base parameters */
	priv->qos_data.def_qos_parm.ac[0].cw_min = cpu_to_le16(cw_min);
	priv->qos_data.def_qos_parm.ac[0].cw_max = cpu_to_le16(cw_max);
	priv->qos_data.def_qos_parm.ac[0].aifsn = aifs;
	priv->qos_data.def_qos_parm.ac[0].edca_txop = 0;
	priv->qos_data.def_qos_parm.ac[0].reserved1 = 0;

	if (priv->qos_data.qos_active) {
		/* AC 1 (background): same CW, larger AIFS */
		i = 1;
		priv->qos_data.def_qos_parm.ac[i].cw_min = cpu_to_le16(cw_min);
		priv->qos_data.def_qos_parm.ac[i].cw_max = cpu_to_le16(cw_max);
		priv->qos_data.def_qos_parm.ac[i].aifsn = 7;
		priv->qos_data.def_qos_parm.ac[i].edca_txop = 0;
		priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;

		/* AC 2 (video): halved CW min, TXOP limit */
		i = 2;
		priv->qos_data.def_qos_parm.ac[i].cw_min =
			cpu_to_le16((cw_min + 1) / 2 - 1);
		priv->qos_data.def_qos_parm.ac[i].cw_max =
			cpu_to_le16(cw_max);
		priv->qos_data.def_qos_parm.ac[i].aifsn = 2;
		if (is_legacy)
			priv->qos_data.def_qos_parm.ac[i].edca_txop =
				cpu_to_le16(6016);
		else
			priv->qos_data.def_qos_parm.ac[i].edca_txop =
				cpu_to_le16(3008);
		priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;

		/* AC 3 (voice): quartered CW min, halved CW max */
		i = 3;
		priv->qos_data.def_qos_parm.ac[i].cw_min =
			cpu_to_le16((cw_min + 1) / 4 - 1);
		priv->qos_data.def_qos_parm.ac[i].cw_max =
			cpu_to_le16((cw_max + 1) / 2 - 1);
		priv->qos_data.def_qos_parm.ac[i].aifsn = 2;
		priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
		if (is_legacy)
			priv->qos_data.def_qos_parm.ac[i].edca_txop =
				cpu_to_le16(3264);
		else
			priv->qos_data.def_qos_parm.ac[i].edca_txop =
				cpu_to_le16(1504);
	} else {
		/* QoS inactive: every AC mirrors the base parameters */
		for (i = 1; i < 4; i++) {
			priv->qos_data.def_qos_parm.ac[i].cw_min =
				cpu_to_le16(cw_min);
			priv->qos_data.def_qos_parm.ac[i].cw_max =
				cpu_to_le16(cw_max);
			priv->qos_data.def_qos_parm.ac[i].aifsn = aifs;
			priv->qos_data.def_qos_parm.ac[i].edca_txop = 0;
			priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
		}
	}
	IWL_DEBUG_QOS("set QoS to default \n");

	spin_unlock_irqrestore(&priv->lock, flags);
}
1971
1972static void iwl_activate_qos(struct iwl_priv *priv, u8 force)
1973{
1974 unsigned long flags;
1975
1976 if (priv == NULL)
1977 return;
1978
1979 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
1980 return;
1981
1982 if (!priv->qos_data.qos_enable)
1983 return;
1984
1985 spin_lock_irqsave(&priv->lock, flags);
1986 priv->qos_data.def_qos_parm.qos_flags = 0;
1987
1988 if (priv->qos_data.qos_cap.q_AP.queue_request &&
1989 !priv->qos_data.qos_cap.q_AP.txop_request)
1990 priv->qos_data.def_qos_parm.qos_flags |=
1991 QOS_PARAM_FLG_TXOP_TYPE_MSK;
1992
1993 if (priv->qos_data.qos_active)
1994 priv->qos_data.def_qos_parm.qos_flags |=
1995 QOS_PARAM_FLG_UPDATE_EDCA_MSK;
1996
1997 spin_unlock_irqrestore(&priv->lock, flags);
1998
1999 if (force || iwl_is_associated(priv)) {
2000 IWL_DEBUG_QOS("send QoS cmd with Qos active %d \n",
2001 priv->qos_data.qos_active);
2002
2003 iwl_send_qos_params_command(priv,
2004 &(priv->qos_data.def_qos_parm));
2005 }
2006}
2007
2008#endif /* CONFIG_IWLWIFI_QOS */
2009/*
2010 * Power management (not Tx power!) functions
2011 */
2012#define MSEC_TO_USEC 1024
2013
2014#define NOSLP __constant_cpu_to_le32(0)
2015#define SLP IWL_POWER_DRIVER_ALLOW_SLEEP_MSK
2016#define SLP_TIMEOUT(T) __constant_cpu_to_le32((T) * MSEC_TO_USEC)
2017#define SLP_VEC(X0, X1, X2, X3, X4) {__constant_cpu_to_le32(X0), \
2018 __constant_cpu_to_le32(X1), \
2019 __constant_cpu_to_le32(X2), \
2020 __constant_cpu_to_le32(X3), \
2021 __constant_cpu_to_le32(X4)}
2022
2023
2024/* default power management (not Tx power) table values */
/* for tim 0-10 */
/* One entry per power level (index 0 = CAM / no sleep).  Each entry is
 * { powertable command template, no_dtim flag }; the command holds the
 * sleep flag, two timeouts and the 5-entry sleep-interval vector. */
static struct iwl_power_vec_entry range_0[IWL_POWER_AC] = {
	{{NOSLP, SLP_TIMEOUT(0), SLP_TIMEOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
	{{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(500), SLP_VEC(1, 2, 3, 4, 4)}, 0},
	{{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(300), SLP_VEC(2, 4, 6, 7, 7)}, 0},
	{{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(100), SLP_VEC(2, 6, 9, 9, 10)}, 0},
	{{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(25), SLP_VEC(2, 7, 9, 9, 10)}, 1},
	{{SLP, SLP_TIMEOUT(25), SLP_TIMEOUT(25), SLP_VEC(4, 7, 10, 10, 10)}, 1}
};
2034
/* for tim > 10 */
/* Same layout as range_0; the final sleep interval is 0xFF (clamped
 * later against the DTIM period by iwl_update_power_cmd()). */
static struct iwl_power_vec_entry range_1[IWL_POWER_AC] = {
	{{NOSLP, SLP_TIMEOUT(0), SLP_TIMEOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
	{{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(500),
	  SLP_VEC(1, 2, 3, 4, 0xFF)}, 0},
	{{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(300),
	  SLP_VEC(2, 4, 6, 7, 0xFF)}, 0},
	{{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(100),
	  SLP_VEC(2, 6, 9, 9, 0xFF)}, 0},
	{{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(25), SLP_VEC(2, 7, 9, 9, 0xFF)}, 0},
	{{SLP, SLP_TIMEOUT(25), SLP_TIMEOUT(25),
	  SLP_VEC(4, 7, 10, 10, 0xFF)}, 0}
};
2048
2049int iwl_power_init_handle(struct iwl_priv *priv)
2050{
2051 int rc = 0, i;
2052 struct iwl_power_mgr *pow_data;
2053 int size = sizeof(struct iwl_power_vec_entry) * IWL_POWER_AC;
2054 u16 pci_pm;
2055
2056 IWL_DEBUG_POWER("Initialize power \n");
2057
2058 pow_data = &(priv->power_data);
2059
2060 memset(pow_data, 0, sizeof(*pow_data));
2061
2062 pow_data->active_index = IWL_POWER_RANGE_0;
2063 pow_data->dtim_val = 0xffff;
2064
2065 memcpy(&pow_data->pwr_range_0[0], &range_0[0], size);
2066 memcpy(&pow_data->pwr_range_1[0], &range_1[0], size);
2067
2068 rc = pci_read_config_word(priv->pci_dev, PCI_LINK_CTRL, &pci_pm);
2069 if (rc != 0)
2070 return 0;
2071 else {
2072 struct iwl_powertable_cmd *cmd;
2073
2074 IWL_DEBUG_POWER("adjust power command flags\n");
2075
2076 for (i = 0; i < IWL_POWER_AC; i++) {
2077 cmd = &pow_data->pwr_range_0[i].cmd;
2078
2079 if (pci_pm & 0x1)
2080 cmd->flags &= ~IWL_POWER_PCI_PM_MSK;
2081 else
2082 cmd->flags |= IWL_POWER_PCI_PM_MSK;
2083 }
2084 }
2085 return rc;
2086}
2087
/*
 * Fill @cmd with the power-table entry for @mode, then clamp every
 * sleep-interval entry so the device never sleeps across more DTIM
 * periods than the table entry allows.
 *
 * Returns 0 on success, -1 if @mode is out of range.
 */
static int iwl_update_power_cmd(struct iwl_priv *priv,
				struct iwl_powertable_cmd *cmd, u32 mode)
{
	int rc = 0, i;
	u8 skip;
	u32 max_sleep = 0;
	struct iwl_power_vec_entry *range;
	u8 period = 0;
	struct iwl_power_mgr *pow_data;

	if (mode > IWL_POWER_INDEX_5) {
		IWL_DEBUG_POWER("Error invalid power mode \n");
		return -1;
	}
	pow_data = &(priv->power_data);

	/* Select the power vector table for the active range.
	 * NOTE(review): the range-0 branch starts at element [0] but the
	 * range-1 branch starts at [1] -- confirm this asymmetry is
	 * intentional and not an off-by-one. */
	if (pow_data->active_index == IWL_POWER_RANGE_0)
		range = &pow_data->pwr_range_0[0];
	else
		range = &pow_data->pwr_range_1[1];

	memcpy(cmd, &range[mode].cmd, sizeof(struct iwl_powertable_cmd));

#ifdef IWL_MAC80211_DISABLE
	if (priv->assoc_network != NULL) {
		unsigned long flags;

		period = priv->assoc_network->tim.tim_period;
	}
#endif	/*IWL_MAC80211_DISABLE */
	skip = range[mode].no_dtim;

	/* Unknown DTIM period: assume 1 and never skip DTIM beacons */
	if (period == 0) {
		period = 1;
		skip = 0;
	}

	if (skip == 0) {
		/* Must wake for every DTIM beacon */
		max_sleep = period;
		cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK;
	} else {
		/* May sleep across DTIMs: round the largest table interval
		 * down to a whole number of DTIM periods */
		__le32 slp_itrvl = cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1];
		max_sleep = (le32_to_cpu(slp_itrvl) / period) * period;
		cmd->flags |= IWL_POWER_SLEEP_OVER_DTIM_MSK;
	}

	/* Clamp each vector entry to the computed maximum */
	for (i = 0; i < IWL_POWER_VEC_SIZE; i++) {
		if (le32_to_cpu(cmd->sleep_interval[i]) > max_sleep)
			cmd->sleep_interval[i] = cpu_to_le32(max_sleep);
	}

	IWL_DEBUG_POWER("Flags value = 0x%08X\n", cmd->flags);
	IWL_DEBUG_POWER("Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout));
	IWL_DEBUG_POWER("Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout));
	IWL_DEBUG_POWER("Sleep interval vector = { %d , %d , %d , %d , %d }\n",
			le32_to_cpu(cmd->sleep_interval[0]),
			le32_to_cpu(cmd->sleep_interval[1]),
			le32_to_cpu(cmd->sleep_interval[2]),
			le32_to_cpu(cmd->sleep_interval[3]),
			le32_to_cpu(cmd->sleep_interval[4]));

	return rc;
}
2151
2152static int iwl_send_power_mode(struct iwl_priv *priv, u32 mode)
2153{
2154 u32 final_mode = mode;
2155 int rc;
2156 struct iwl_powertable_cmd cmd;
2157
2158 /* If on battery, set to 3,
2159 * if plugged into AC power, set to CAM ("continuosly aware mode"),
2160 * else user level */
2161 switch (mode) {
2162 case IWL_POWER_BATTERY:
2163 final_mode = IWL_POWER_INDEX_3;
2164 break;
2165 case IWL_POWER_AC:
2166 final_mode = IWL_POWER_MODE_CAM;
2167 break;
2168 default:
2169 final_mode = mode;
2170 break;
2171 }
2172
2173 iwl_update_power_cmd(priv, &cmd, final_mode);
2174
2175 rc = iwl_send_cmd_pdu(priv, POWER_TABLE_CMD, sizeof(cmd), &cmd);
2176
2177 if (final_mode == IWL_POWER_MODE_CAM)
2178 clear_bit(STATUS_POWER_PMI, &priv->status);
2179 else
2180 set_bit(STATUS_POWER_PMI, &priv->status);
2181
2182 return rc;
2183}
2184
/*
 * iwl_is_network_packet - decide whether a received frame belongs to us
 *
 * Returns non-zero when the frame should be accepted.  Echoes of our
 * own transmissions are rejected; multicast/broadcast frames pass only
 * when addressed to our BSS/IBSS.  Modes other than IBSS and STA fall
 * through the switch and accept everything.
 * (compare_ether_addr() returns 0 on a match, hence the negations.)
 */
int iwl_is_network_packet(struct iwl_priv *priv, struct ieee80211_hdr *header)
{
	/* Filter incoming packets to determine if they are targeted toward
	 * this network, discarding packets coming from ourselves */
	switch (priv->iw_mode) {
	case IEEE80211_IF_TYPE_IBSS: /* Header: Dest. | Source    | BSSID */
		/* packets from our adapter are dropped (echo) */
		if (!compare_ether_addr(header->addr2, priv->mac_addr))
			return 0;
		/* {broad,multi}cast packets to our IBSS go through */
		if (is_multicast_ether_addr(header->addr1))
			return !compare_ether_addr(header->addr3, priv->bssid);
		/* packets to our adapter go through */
		return !compare_ether_addr(header->addr1, priv->mac_addr);
	case IEEE80211_IF_TYPE_STA: /* Header: Dest. | AP{BSSID} | Source */
		/* packets from our adapter are dropped (echo) */
		if (!compare_ether_addr(header->addr3, priv->mac_addr))
			return 0;
		/* {broad,multi}cast packets to our BSS go through */
		if (is_multicast_ether_addr(header->addr1))
			return !compare_ether_addr(header->addr2, priv->bssid);
		/* packets to our adapter go through */
		return !compare_ether_addr(header->addr1, priv->mac_addr);
	}

	return 1;
}
2212
/* Expand one TX_STATUS_FAIL_* enumerator into a switch case returning
 * its name as a string */
#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x

/*
 * iwl_get_tx_fail_reason - map a TX status word to a printable string
 *
 * Only the bits selected by TX_STATUS_MSK are examined; any value not
 * listed yields "UNKNOWN".
 */
const char *iwl_get_tx_fail_reason(u32 status)
{
	switch (status & TX_STATUS_MSK) {
	case TX_STATUS_SUCCESS:
		return "SUCCESS";
		TX_STATUS_ENTRY(SHORT_LIMIT);
		TX_STATUS_ENTRY(LONG_LIMIT);
		TX_STATUS_ENTRY(FIFO_UNDERRUN);
		TX_STATUS_ENTRY(MGMNT_ABORT);
		TX_STATUS_ENTRY(NEXT_FRAG);
		TX_STATUS_ENTRY(LIFE_EXPIRE);
		TX_STATUS_ENTRY(DEST_PS);
		TX_STATUS_ENTRY(ABORTED);
		TX_STATUS_ENTRY(BT_RETRY);
		TX_STATUS_ENTRY(STA_INVALID);
		TX_STATUS_ENTRY(FRAG_DROPPED);
		TX_STATUS_ENTRY(TID_DISABLE);
		TX_STATUS_ENTRY(FRAME_FLUSHED);
		TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL);
		TX_STATUS_ENTRY(TX_LOCKED);
		TX_STATUS_ENTRY(NO_BEACON_ON_RADAR);
	}

	return "UNKNOWN";
}
2240
2241/**
2242 * iwl_scan_cancel - Cancel any currently executing HW scan
2243 *
2244 * NOTE: priv->mutex is not required before calling this function
2245 */
2246static int iwl_scan_cancel(struct iwl_priv *priv)
2247{
2248 if (!test_bit(STATUS_SCAN_HW, &priv->status)) {
2249 clear_bit(STATUS_SCANNING, &priv->status);
2250 return 0;
2251 }
2252
2253 if (test_bit(STATUS_SCANNING, &priv->status)) {
2254 if (!test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
2255 IWL_DEBUG_SCAN("Queuing scan abort.\n");
2256 set_bit(STATUS_SCAN_ABORTING, &priv->status);
2257 queue_work(priv->workqueue, &priv->abort_scan);
2258
2259 } else
2260 IWL_DEBUG_SCAN("Scan abort already in progress.\n");
2261
2262 return test_bit(STATUS_SCANNING, &priv->status);
2263 }
2264
2265 return 0;
2266}
2267
2268/**
2269 * iwl_scan_cancel_timeout - Cancel any currently executing HW scan
2270 * @ms: amount of time to wait (in milliseconds) for scan to abort
2271 *
2272 * NOTE: priv->mutex must be held before calling this function
2273 */
2274static int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms)
2275{
2276 unsigned long now = jiffies;
2277 int ret;
2278
2279 ret = iwl_scan_cancel(priv);
2280 if (ret && ms) {
2281 mutex_unlock(&priv->mutex);
2282 while (!time_after(jiffies, now + msecs_to_jiffies(ms)) &&
2283 test_bit(STATUS_SCANNING, &priv->status))
2284 msleep(1);
2285 mutex_lock(&priv->mutex);
2286
2287 return test_bit(STATUS_SCANNING, &priv->status);
2288 }
2289
2290 return ret;
2291}
2292
/*
 * Reset the duplicate-packet detection state (last sequence/fragment
 * numbers and timestamp) and cancel any scan in progress.
 */
static void iwl_sequence_reset(struct iwl_priv *priv)
{
	/* Reset ieee stats */

	/* We don't reset the net_device_stats (ieee->stats) on
	 * re-association */

	priv->last_seq_num = -1;
	priv->last_frag_num = -1;
	priv->last_packet_time = 0;

	iwl_scan_cancel(priv);
}
2306
2307#define MAX_UCODE_BEACON_INTERVAL 1024
2308#define INTEL_CONN_LISTEN_INTERVAL __constant_cpu_to_le16(0xA)
2309
2310static __le16 iwl_adjust_beacon_interval(u16 beacon_val)
2311{
2312 u16 new_val = 0;
2313 u16 beacon_factor = 0;
2314
2315 beacon_factor =
2316 (beacon_val + MAX_UCODE_BEACON_INTERVAL)
2317 / MAX_UCODE_BEACON_INTERVAL;
2318 new_val = beacon_val / beacon_factor;
2319
2320 return cpu_to_le16(new_val);
2321}
2322
/*
 * Program priv->rxon_timing from the current state: snapshot the TSF
 * and beacon interval under priv->lock, bring the beacon interval into
 * the uCode-supported range, and set the initial beacon timer to the
 * time remaining until the next beacon (TSF modulo interval).
 */
static void iwl_setup_rxon_timing(struct iwl_priv *priv)
{
	u64 interval_tm_unit;
	u64 tsf, result;
	unsigned long flags;
	struct ieee80211_conf *conf = NULL;
	u16 beacon_int = 0;

	conf = ieee80211_get_hw_conf(priv->hw);

	/* Snapshot timestamp halves and beacon interval atomically */
	spin_lock_irqsave(&priv->lock, flags);
	priv->rxon_timing.timestamp.dw[1] = cpu_to_le32(priv->timestamp1);
	priv->rxon_timing.timestamp.dw[0] = cpu_to_le32(priv->timestamp0);

	priv->rxon_timing.listen_interval = INTEL_CONN_LISTEN_INTERVAL;

	/* Reassemble the 64-bit TSF from its two 32-bit halves */
	tsf = priv->timestamp1;
	tsf = ((tsf << 32) | priv->timestamp0);

	beacon_int = priv->beacon_int;
	spin_unlock_irqrestore(&priv->lock, flags);

	if (priv->iw_mode == IEEE80211_IF_TYPE_STA) {
		if (beacon_int == 0) {
			/* Interval not known yet: default to 100 TU
			 * (102400 usec) */
			priv->rxon_timing.beacon_interval = cpu_to_le16(100);
			priv->rxon_timing.beacon_init_val = cpu_to_le32(102400);
		} else {
			priv->rxon_timing.beacon_interval =
				cpu_to_le16(beacon_int);
			priv->rxon_timing.beacon_interval =
			    iwl_adjust_beacon_interval(
				le16_to_cpu(priv->rxon_timing.beacon_interval));
		}

		priv->rxon_timing.atim_window = 0;
	} else {
		priv->rxon_timing.beacon_interval =
			iwl_adjust_beacon_interval(conf->beacon_int);
		/* TODO: we need to get atim_window from upper stack
		 * for now we set to 0 */
		priv->rxon_timing.atim_window = 0;
	}

	/* Beacon interval in usec (1 TU = 1024 usec); do_div leaves the
	 * quotient in tsf and returns the remainder */
	interval_tm_unit =
		(le16_to_cpu(priv->rxon_timing.beacon_interval) * 1024);
	result = do_div(tsf, interval_tm_unit);
	priv->rxon_timing.beacon_init_val =
	    cpu_to_le32((u32) ((u64) interval_tm_unit - result));

	IWL_DEBUG_ASSOC
	    ("beacon interval %d beacon timer %d beacon tim %d\n",
		le16_to_cpu(priv->rxon_timing.beacon_interval),
		le32_to_cpu(priv->rxon_timing.beacon_init_val),
		le16_to_cpu(priv->rxon_timing.atim_window));
}
2378
/*
 * Kick off a HW scan: validate current state, mark STATUS_SCANNING and
 * queue the request_scan work item.
 *
 * Returns 0 on success (also for AP mode, which never scans), -EIO if
 * the radio is not ready, -EAGAIN if a scan is already running or an
 * abort is still pending.
 */
static int iwl_scan_initiate(struct iwl_priv *priv)
{
	if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
		IWL_ERROR("APs don't scan.\n");
		return 0;
	}

	if (!iwl_is_ready_rf(priv)) {
		IWL_DEBUG_SCAN("Aborting scan due to not ready.\n");
		return -EIO;
	}

	if (test_bit(STATUS_SCANNING, &priv->status)) {
		IWL_DEBUG_SCAN("Scan already in progress.\n");
		return -EAGAIN;
	}

	if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
		IWL_DEBUG_SCAN("Scan request while abort pending.  "
			       "Queuing.\n");
		return -EAGAIN;
	}

	IWL_DEBUG_INFO("Starting scan...\n");
	/* NOTE(review): literal 2 -- presumably selects both bands;
	 * confirm against the request_scan handler */
	priv->scan_bands = 2;
	set_bit(STATUS_SCANNING, &priv->status);
	priv->scan_start = jiffies;
	priv->scan_pass_start = priv->scan_start;

	queue_work(priv->workqueue, &priv->request_scan);

	return 0;
}
2412
2413static int iwl_set_rxon_hwcrypto(struct iwl_priv *priv, int hw_decrypt)
2414{
2415 struct iwl_rxon_cmd *rxon = &priv->staging_rxon;
2416
2417 if (hw_decrypt)
2418 rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
2419 else
2420 rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
2421
2422 return 0;
2423}
2424
/*
 * Rewrite the band-related staging RXON flags for @phymode: 802.11a
 * clears the 2.4 GHz / auto-detect / CCK flags and forces short slot;
 * 2.4 GHz derives short-slot from the association capability (never
 * used in IBSS) and enables band auto-detect with CCK cleared.
 */
static void iwl_set_flags_for_phymode(struct iwl_priv *priv, u8 phymode)
{
	if (phymode == MODE_IEEE80211A) {
		priv->staging_rxon.flags &=
		    ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
		      | RXON_FLG_CCK_MSK);
		priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
	} else {
		/* Copied from iwl_bg_post_associate() */
		if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
			priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
		else
			priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;

		/* Short slot is never used in IBSS */
		if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)
			priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;

		priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK;
		priv->staging_rxon.flags |= RXON_FLG_AUTO_DETECT_MSK;
		priv->staging_rxon.flags &= ~RXON_FLG_CCK_MSK;
	}
}
2447
2448/*
2449 * initilize rxon structure with default values fromm eeprom
2450 */
2451static void iwl_connection_init_rx_config(struct iwl_priv *priv)
2452{
2453 const struct iwl_channel_info *ch_info;
2454
2455 memset(&priv->staging_rxon, 0, sizeof(priv->staging_rxon));
2456
2457 switch (priv->iw_mode) {
2458 case IEEE80211_IF_TYPE_AP:
2459 priv->staging_rxon.dev_type = RXON_DEV_TYPE_AP;
2460 break;
2461
2462 case IEEE80211_IF_TYPE_STA:
2463 priv->staging_rxon.dev_type = RXON_DEV_TYPE_ESS;
2464 priv->staging_rxon.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
2465 break;
2466
2467 case IEEE80211_IF_TYPE_IBSS:
2468 priv->staging_rxon.dev_type = RXON_DEV_TYPE_IBSS;
2469 priv->staging_rxon.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
2470 priv->staging_rxon.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
2471 RXON_FILTER_ACCEPT_GRP_MSK;
2472 break;
2473
2474 case IEEE80211_IF_TYPE_MNTR:
2475 priv->staging_rxon.dev_type = RXON_DEV_TYPE_SNIFFER;
2476 priv->staging_rxon.filter_flags = RXON_FILTER_PROMISC_MSK |
2477 RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_ACCEPT_GRP_MSK;
2478 break;
2479 }
2480
2481#if 0
2482 /* TODO: Figure out when short_preamble would be set and cache from
2483 * that */
2484 if (!hw_to_local(priv->hw)->short_preamble)
2485 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
2486 else
2487 priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
2488#endif
2489
2490 ch_info = iwl_get_channel_info(priv, priv->phymode,
2491 le16_to_cpu(priv->staging_rxon.channel));
2492
2493 if (!ch_info)
2494 ch_info = &priv->channel_info[0];
2495
2496 /*
2497 * in some case A channels are all non IBSS
2498 * in this case force B/G channel
2499 */
2500 if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) &&
2501 !(is_channel_ibss(ch_info)))
2502 ch_info = &priv->channel_info[0];
2503
2504 priv->staging_rxon.channel = cpu_to_le16(ch_info->channel);
2505 if (is_channel_a_band(ch_info))
2506 priv->phymode = MODE_IEEE80211A;
2507 else
2508 priv->phymode = MODE_IEEE80211G;
2509
2510 iwl_set_flags_for_phymode(priv, priv->phymode);
2511
2512 priv->staging_rxon.ofdm_basic_rates =
2513 (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
2514 priv->staging_rxon.cck_basic_rates =
2515 (IWL_CCK_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
2516}
2517
/*
 * Switch the interface operating mode: validate that the current
 * channel supports IBSS (when requested), cancel any running scan
 * (waiting up to 100 ms), then rebuild and commit the staging RXON
 * for the new mode.
 *
 * Returns 0 on success, -EAGAIN if the radio is not ready or a scan
 * would not abort in time, -EINVAL for a non-IBSS channel.
 */
static int iwl_set_mode(struct iwl_priv *priv, int mode)
{
	if (!iwl_is_ready_rf(priv))
		return -EAGAIN;

	if (mode == IEEE80211_IF_TYPE_IBSS) {
		const struct iwl_channel_info *ch_info;

		ch_info = iwl_get_channel_info(priv,
			priv->phymode,
			le16_to_cpu(priv->staging_rxon.channel));

		if (!ch_info || !is_channel_ibss(ch_info)) {
			IWL_ERROR("channel %d not IBSS channel\n",
				  le16_to_cpu(priv->staging_rxon.channel));
			return -EINVAL;
		}
	}

	/* A scan must not be running while RXON is reconfigured */
	cancel_delayed_work(&priv->scan_check);
	if (iwl_scan_cancel_timeout(priv, 100)) {
		IWL_WARNING("Aborted scan still in progress after 100ms\n");
		IWL_DEBUG_MAC80211("leaving - scan abort failed.\n");
		return -EAGAIN;
	}

	priv->iw_mode = mode;

	iwl_connection_init_rx_config(priv);
	memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);

	/* Station table is mode-specific; start from scratch */
	iwl_clear_stations_table(priv);

	iwl_commit_rxon(priv);

	return 0;
}
2555
/*
 * Fill the security fields of a TX command from the station key info
 * selected by ctl->key_idx.  CCMP copies the key directly; WEP copies
 * the key at offset 3 of the key array (presumably the uCode's
 * expected layout -- confirm), encodes the key index in sec_ctl and
 * flags 13-byte keys as KEY128.  The TKIP branch is compiled out
 * (#if 0), so TKIP frames currently leave sec_ctl untouched.
 */
static void iwl_build_tx_cmd_hwcrypto(struct iwl_priv *priv,
				      struct ieee80211_tx_control *ctl,
				      struct iwl_cmd *cmd,
				      struct sk_buff *skb_frag,
				      int last_frag)
{
	struct iwl_hw_key *keyinfo = &priv->stations[ctl->key_idx].keyinfo;

	switch (keyinfo->alg) {
	case ALG_CCMP:
		cmd->cmd.tx.sec_ctl = TX_CMD_SEC_CCM;
		memcpy(cmd->cmd.tx.key, keyinfo->key, keyinfo->keylen);
		IWL_DEBUG_TX("tx_cmd with aes hwcrypto\n");
		break;

	case ALG_TKIP:
#if 0
		cmd->cmd.tx.sec_ctl = TX_CMD_SEC_TKIP;

		/* MIC trailer only goes on the last fragment */
		if (last_frag)
			memcpy(cmd->cmd.tx.tkip_mic.byte, skb_frag->tail - 8,
			       8);
		else
			memset(cmd->cmd.tx.tkip_mic.byte, 0, 8);
#endif
		break;

	case ALG_WEP:
		cmd->cmd.tx.sec_ctl = TX_CMD_SEC_WEP |
		    (ctl->key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT;

		if (keyinfo->keylen == 13)
			cmd->cmd.tx.sec_ctl |= TX_CMD_SEC_KEY128;

		memcpy(&cmd->cmd.tx.key[3], keyinfo->key, keyinfo->keylen);

		IWL_DEBUG_TX("Configuring packet for WEP encryption "
			     "with key %d\n", ctl->key_idx);
		break;

	case ALG_NONE:
		IWL_DEBUG_TX("Tx packet in the clear (encrypt requested).\n");
		break;

	default:
		printk(KERN_ERR "Unknown encode alg %d\n", keyinfo->alg);
		break;
	}
}
2605
2606/*
2607 * handle build REPLY_TX command notification.
2608 */
2609static void iwl_build_tx_cmd_basic(struct iwl_priv *priv,
2610 struct iwl_cmd *cmd,
2611 struct ieee80211_tx_control *ctrl,
2612 struct ieee80211_hdr *hdr,
2613 int is_unicast, u8 std_id)
2614{
2615 __le16 *qc;
2616 u16 fc = le16_to_cpu(hdr->frame_control);
2617 __le32 tx_flags = cmd->cmd.tx.tx_flags;
2618
2619 cmd->cmd.tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
2620 if (!(ctrl->flags & IEEE80211_TXCTL_NO_ACK)) {
2621 tx_flags |= TX_CMD_FLG_ACK_MSK;
2622 if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)
2623 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
2624 if (ieee80211_is_probe_response(fc) &&
2625 !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
2626 tx_flags |= TX_CMD_FLG_TSF_MSK;
2627 } else {
2628 tx_flags &= (~TX_CMD_FLG_ACK_MSK);
2629 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
2630 }
2631
2632 cmd->cmd.tx.sta_id = std_id;
2633 if (ieee80211_get_morefrag(hdr))
2634 tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
2635
2636 qc = ieee80211_get_qos_ctrl(hdr);
2637 if (qc) {
2638 cmd->cmd.tx.tid_tspec = (u8) (le16_to_cpu(*qc) & 0xf);
2639 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
2640 } else
2641 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
2642
2643 if (ctrl->flags & IEEE80211_TXCTL_USE_RTS_CTS) {
2644 tx_flags |= TX_CMD_FLG_RTS_MSK;
2645 tx_flags &= ~TX_CMD_FLG_CTS_MSK;
2646 } else if (ctrl->flags & IEEE80211_TXCTL_USE_CTS_PROTECT) {
2647 tx_flags &= ~TX_CMD_FLG_RTS_MSK;
2648 tx_flags |= TX_CMD_FLG_CTS_MSK;
2649 }
2650
2651 if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
2652 tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
2653
2654 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
2655 if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) {
2656 if ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ ||
2657 (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ)
2658 cmd->cmd.tx.timeout.pm_frame_timeout =
2659 cpu_to_le16(3);
2660 else
2661 cmd->cmd.tx.timeout.pm_frame_timeout =
2662 cpu_to_le16(2);
2663 } else
2664 cmd->cmd.tx.timeout.pm_frame_timeout = 0;
2665
2666 cmd->cmd.tx.driver_txop = 0;
2667 cmd->cmd.tx.tx_flags = tx_flags;
2668 cmd->cmd.tx.next_frame_len = 0;
2669}
2670
2671static int iwl_get_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
2672{
2673 int sta_id;
2674 u16 fc = le16_to_cpu(hdr->frame_control);
2675
2676 /* If this frame is broadcast or not data then use the broadcast
2677 * station id */
2678 if (((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA) ||
2679 is_multicast_ether_addr(hdr->addr1))
2680 return priv->hw_setting.bcast_sta_id;
2681
2682 switch (priv->iw_mode) {
2683
2684 /* If this frame is part of a BSS network (we're a station), then
2685 * we use the AP's station id */
2686 case IEEE80211_IF_TYPE_STA:
2687 return IWL_AP_ID;
2688
2689 /* If we are an AP, then find the station, or use BCAST */
2690 case IEEE80211_IF_TYPE_AP:
2691 sta_id = iwl_hw_find_station(priv, hdr->addr1);
2692 if (sta_id != IWL_INVALID_STATION)
2693 return sta_id;
2694 return priv->hw_setting.bcast_sta_id;
2695
2696 /* If this frame is part of a IBSS network, then we use the
2697 * target specific station id */
0795af57
JP
2698 case IEEE80211_IF_TYPE_IBSS: {
2699 DECLARE_MAC_BUF(mac);
2700
b481de9c
ZY
2701 sta_id = iwl_hw_find_station(priv, hdr->addr1);
2702 if (sta_id != IWL_INVALID_STATION)
2703 return sta_id;
2704
2705 sta_id = iwl_add_station(priv, hdr->addr1, 0, CMD_ASYNC);
2706
2707 if (sta_id != IWL_INVALID_STATION)
2708 return sta_id;
2709
0795af57 2710 IWL_DEBUG_DROP("Station %s not in station map. "
b481de9c 2711 "Defaulting to broadcast...\n",
0795af57 2712 print_mac(mac, hdr->addr1));
b481de9c
ZY
2713 iwl_print_hex_dump(IWL_DL_DROP, (u8 *) hdr, sizeof(*hdr));
2714 return priv->hw_setting.bcast_sta_id;
0795af57 2715 }
b481de9c
ZY
2716 default:
2717 IWL_WARNING("Unkown mode of operation: %d", priv->iw_mode);
2718 return priv->hw_setting.bcast_sta_id;
2719 }
2720}
2721
2722/*
2723 * start REPLY_TX command process
2724 */
2725static int iwl_tx_skb(struct iwl_priv *priv,
2726 struct sk_buff *skb, struct ieee80211_tx_control *ctl)
2727{
2728 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2729 struct iwl_tfd_frame *tfd;
2730 u32 *control_flags;
2731 int txq_id = ctl->queue;
2732 struct iwl_tx_queue *txq = NULL;
2733 struct iwl_queue *q = NULL;
2734 dma_addr_t phys_addr;
2735 dma_addr_t txcmd_phys;
2736 struct iwl_cmd *out_cmd = NULL;
2737 u16 len, idx, len_org;
2738 u8 id, hdr_len, unicast;
2739 u8 sta_id;
2740 u16 seq_number = 0;
2741 u16 fc;
2742 __le16 *qc;
2743 u8 wait_write_ptr = 0;
2744 unsigned long flags;
2745 int rc;
2746
2747 spin_lock_irqsave(&priv->lock, flags);
2748 if (iwl_is_rfkill(priv)) {
2749 IWL_DEBUG_DROP("Dropping - RF KILL\n");
2750 goto drop_unlock;
2751 }
2752
2753 if (!priv->interface_id) {
2754 IWL_DEBUG_DROP("Dropping - !priv->interface_id\n");
2755 goto drop_unlock;
2756 }
2757
2758 if ((ctl->tx_rate & 0xFF) == IWL_INVALID_RATE) {
2759 IWL_ERROR("ERROR: No TX rate available.\n");
2760 goto drop_unlock;
2761 }
2762
2763 unicast = !is_multicast_ether_addr(hdr->addr1);
2764 id = 0;
2765
2766 fc = le16_to_cpu(hdr->frame_control);
2767
2768#ifdef CONFIG_IWLWIFI_DEBUG
2769 if (ieee80211_is_auth(fc))
2770 IWL_DEBUG_TX("Sending AUTH frame\n");
2771 else if (ieee80211_is_assoc_request(fc))
2772 IWL_DEBUG_TX("Sending ASSOC frame\n");
2773 else if (ieee80211_is_reassoc_request(fc))
2774 IWL_DEBUG_TX("Sending REASSOC frame\n");
2775#endif
2776
2777 if (!iwl_is_associated(priv) &&
2778 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)) {
2779 IWL_DEBUG_DROP("Dropping - !iwl_is_associated\n");
2780 goto drop_unlock;
2781 }
2782
2783 spin_unlock_irqrestore(&priv->lock, flags);
2784
2785 hdr_len = ieee80211_get_hdrlen(fc);
2786 sta_id = iwl_get_sta_id(priv, hdr);
2787 if (sta_id == IWL_INVALID_STATION) {
0795af57
JP
2788 DECLARE_MAC_BUF(mac);
2789
2790 IWL_DEBUG_DROP("Dropping - INVALID STATION: %s\n",
2791 print_mac(mac, hdr->addr1));
b481de9c
ZY
2792 goto drop;
2793 }
2794
2795 IWL_DEBUG_RATE("station Id %d\n", sta_id);
2796
2797 qc = ieee80211_get_qos_ctrl(hdr);
2798 if (qc) {
2799 u8 tid = (u8)(le16_to_cpu(*qc) & 0xf);
2800 seq_number = priv->stations[sta_id].tid[tid].seq_number &
2801 IEEE80211_SCTL_SEQ;
2802 hdr->seq_ctrl = cpu_to_le16(seq_number) |
2803 (hdr->seq_ctrl &
2804 __constant_cpu_to_le16(IEEE80211_SCTL_FRAG));
2805 seq_number += 0x10;
2806 }
2807 txq = &priv->txq[txq_id];
2808 q = &txq->q;
2809
2810 spin_lock_irqsave(&priv->lock, flags);
2811
2812 tfd = &txq->bd[q->first_empty];
2813 memset(tfd, 0, sizeof(*tfd));
2814 control_flags = (u32 *) tfd;
2815 idx = get_cmd_index(q, q->first_empty, 0);
2816
2817 memset(&(txq->txb[q->first_empty]), 0, sizeof(struct iwl_tx_info));
2818 txq->txb[q->first_empty].skb[0] = skb;
2819 memcpy(&(txq->txb[q->first_empty].status.control),
2820 ctl, sizeof(struct ieee80211_tx_control));
2821 out_cmd = &txq->cmd[idx];
2822 memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
2823 memset(&out_cmd->cmd.tx, 0, sizeof(out_cmd->cmd.tx));
2824 out_cmd->hdr.cmd = REPLY_TX;
2825 out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
2826 INDEX_TO_SEQ(q->first_empty)));
2827 /* copy frags header */
2828 memcpy(out_cmd->cmd.tx.hdr, hdr, hdr_len);
2829
2830 /* hdr = (struct ieee80211_hdr *)out_cmd->cmd.tx.hdr; */
2831 len = priv->hw_setting.tx_cmd_len +
2832 sizeof(struct iwl_cmd_header) + hdr_len;
2833
2834 len_org = len;
2835 len = (len + 3) & ~3;
2836
2837 if (len_org != len)
2838 len_org = 1;
2839 else
2840 len_org = 0;
2841
2842 txcmd_phys = txq->dma_addr_cmd + sizeof(struct iwl_cmd) * idx +
2843 offsetof(struct iwl_cmd, hdr);
2844
2845 iwl_hw_txq_attach_buf_to_tfd(priv, tfd, txcmd_phys, len);
2846
2847 if (!(ctl->flags & IEEE80211_TXCTL_DO_NOT_ENCRYPT))
2848 iwl_build_tx_cmd_hwcrypto(priv, ctl, out_cmd, skb, 0);
2849
2850 /* 802.11 null functions have no payload... */
2851 len = skb->len - hdr_len;
2852 if (len) {
2853 phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
2854 len, PCI_DMA_TODEVICE);
2855 iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, len);
2856 }
2857
2858 /* If there is no payload, then only one TFD is used */
2859 if (!len)
2860 *control_flags = TFD_CTL_COUNT_SET(1);
2861 else
2862 *control_flags = TFD_CTL_COUNT_SET(2) |
2863 TFD_CTL_PAD_SET(U32_PAD(len));
2864
2865 len = (u16)skb->len;
2866 out_cmd->cmd.tx.len = cpu_to_le16(len);
2867
2868 /* TODO need this for burst mode later on */
2869 iwl_build_tx_cmd_basic(priv, out_cmd, ctl, hdr, unicast, sta_id);
2870
2871 /* set is_hcca to 0; it probably will never be implemented */
2872 iwl_hw_build_tx_cmd_rate(priv, out_cmd, ctl, hdr, sta_id, 0);
2873
2874 out_cmd->cmd.tx.tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
2875 out_cmd->cmd.tx.tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;
2876
2877 if (!ieee80211_get_morefrag(hdr)) {
2878 txq->need_update = 1;
2879 if (qc) {
2880 u8 tid = (u8)(le16_to_cpu(*qc) & 0xf);
2881 priv->stations[sta_id].tid[tid].seq_number = seq_number;
2882 }
2883 } else {
2884 wait_write_ptr = 1;
2885 txq->need_update = 0;
2886 }
2887
2888 iwl_print_hex_dump(IWL_DL_TX, out_cmd->cmd.payload,
2889 sizeof(out_cmd->cmd.tx));
2890
2891 iwl_print_hex_dump(IWL_DL_TX, (u8 *)out_cmd->cmd.tx.hdr,
2892 ieee80211_get_hdrlen(fc));
2893
2894 q->first_empty = iwl_queue_inc_wrap(q->first_empty, q->n_bd);
2895 rc = iwl_tx_queue_update_write_ptr(priv, txq);
2896 spin_unlock_irqrestore(&priv->lock, flags);
2897
2898 if (rc)
2899 return rc;
2900
2901 if ((iwl_queue_space(q) < q->high_mark)
2902 && priv->mac80211_registered) {
2903 if (wait_write_ptr) {
2904 spin_lock_irqsave(&priv->lock, flags);
2905 txq->need_update = 1;
2906 iwl_tx_queue_update_write_ptr(priv, txq);
2907 spin_unlock_irqrestore(&priv->lock, flags);
2908 }
2909
2910 ieee80211_stop_queue(priv->hw, ctl->queue);
2911 }
2912
2913 return 0;
2914
2915drop_unlock:
2916 spin_unlock_irqrestore(&priv->lock, flags);
2917drop:
2918 return -1;
2919}
2920
/*
 * Rebuild priv->active_rate / active_rate_basic from the mac80211 rate
 * table for the current phymode, then derive the CCK and OFDM basic
 * rate masks for the staging RXON (falling back to defaults when no
 * basic rates of that class are configured).
 */
static void iwl_set_rate(struct iwl_priv *priv)
{
	const struct ieee80211_hw_mode *hw = NULL;
	struct ieee80211_rate *rate;
	int i;

	hw = iwl_get_hw_mode(priv, priv->phymode);

	priv->active_rate = 0;
	priv->active_rate_basic = 0;

	IWL_DEBUG_RATE("Setting rates for 802.11%c\n",
		       hw->mode == MODE_IEEE80211A ?
		       'a' : ((hw->mode == MODE_IEEE80211B) ? 'b' : 'g'));

	/* Collect supported (and basic) rates into bitmasks keyed by
	 * the driver rate index (rate->val) */
	for (i = 0; i < hw->num_rates; i++) {
		rate = &(hw->rates[i]);
		if ((rate->val < IWL_RATE_COUNT) &&
		    (rate->flags & IEEE80211_RATE_SUPPORTED)) {
			IWL_DEBUG_RATE("Adding rate index %d (plcp %d)%s\n",
				       rate->val, iwl_rates[rate->val].plcp,
				       (rate->flags & IEEE80211_RATE_BASIC) ?
				       "*" : "");
			priv->active_rate |= (1 << rate->val);
			if (rate->flags & IEEE80211_RATE_BASIC)
				priv->active_rate_basic |= (1 << rate->val);
		} else
			IWL_DEBUG_RATE("Not adding rate %d (plcp %d)\n",
				       rate->val, iwl_rates[rate->val].plcp);
	}

	IWL_DEBUG_RATE("Set active_rate = %0x, active_rate_basic = %0x\n",
		       priv->active_rate, priv->active_rate_basic);

	/*
	 * If a basic rate is configured, then use it (adding IWL_RATE_1M_MASK)
	 * otherwise set it to the default of all CCK rates and 6, 12, 24 for
	 * OFDM
	 */
	if (priv->active_rate_basic & IWL_CCK_BASIC_RATES_MASK)
		priv->staging_rxon.cck_basic_rates =
		    ((priv->active_rate_basic &
		      IWL_CCK_RATES_MASK) >> IWL_FIRST_CCK_RATE) & 0xF;
	else
		priv->staging_rxon.cck_basic_rates =
		    (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;

	if (priv->active_rate_basic & IWL_OFDM_BASIC_RATES_MASK)
		priv->staging_rxon.ofdm_basic_rates =
		    ((priv->active_rate_basic &
		      (IWL_OFDM_BASIC_RATES_MASK | IWL_RATE_6M_MASK)) >>
		      IWL_FIRST_OFDM_RATE) & 0xFF;
	else
		priv->staging_rxon.ofdm_basic_rates =
		   (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
}
2977
/*
 * Software RF-kill toggle.  A non-zero @disable_radio turns the radio
 * off: cancel scans, set the SW RF-kill bit in GP1 and tell the uCode
 * to power down.  Zero turns it back on: clear the SW RF-kill bit,
 * nudge the uCode awake and queue a restart -- unless the HW RF-kill
 * switch is still asserted, in which case the radio stays off.
 */
static void iwl_radio_kill_sw(struct iwl_priv *priv, int disable_radio)
{
	unsigned long flags;

	/* Nothing to do when already in the requested state */
	if (!!disable_radio == test_bit(STATUS_RF_KILL_SW, &priv->status))
		return;

	IWL_DEBUG_RF_KILL("Manual SW RF KILL set to: RADIO %s\n",
			  disable_radio ? "OFF" : "ON");

	if (disable_radio) {
		iwl_scan_cancel(priv);
		/* FIXME: This is a workaround for AP */
		if (priv->iw_mode != IEEE80211_IF_TYPE_AP) {
			spin_lock_irqsave(&priv->lock, flags);
			iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
				    CSR_UCODE_SW_BIT_RFKILL);
			spin_unlock_irqrestore(&priv->lock, flags);
			iwl_send_card_state(priv, CARD_STATE_CMD_DISABLE, 0);
			set_bit(STATUS_RF_KILL_SW, &priv->status);
		}
		return;
	}

	spin_lock_irqsave(&priv->lock, flags);
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	clear_bit(STATUS_RF_KILL_SW, &priv->status);
	spin_unlock_irqrestore(&priv->lock, flags);

	/* wake up ucode */
	msleep(10);

	spin_lock_irqsave(&priv->lock, flags);
	iwl_read32(priv, CSR_UCODE_DRV_GP1);
	/* Dummy grab/release of restricted access -- presumably pokes
	 * the device out of sleep; confirm against the CSR docs */
	if (!iwl_grab_restricted_access(priv))
		iwl_release_restricted_access(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
		IWL_DEBUG_RF_KILL("Can not turn radio back on - "
				  "disabled by HW switch\n");
		return;
	}

	queue_work(priv->workqueue, &priv->restart);
	return;
}
3026
/*
 * Translate the hardware decryption result for a received protected
 * frame into mac80211 RX flags: RX_FLAG_MMIC_ERROR for a bad TKIP
 * MIC/ICV, RX_FLAG_DECRYPTED when decryption succeeded.  No-op when
 * HW decryption is disabled or the frame is not protected.
 */
void iwl_set_decrypted_flag(struct iwl_priv *priv, struct sk_buff *skb,
			    u32 decrypt_res, struct ieee80211_rx_status *stats)
{
	u16 fc =
	    le16_to_cpu(((struct ieee80211_hdr *)skb->data)->frame_control);

	if (priv->active_rxon.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK)
		return;

	if (!(fc & IEEE80211_FCTL_PROTECTED))
		return;

	IWL_DEBUG_RX("decrypt_res:0x%x\n", decrypt_res);
	switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
	case RX_RES_STATUS_SEC_TYPE_TKIP:
		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
		    RX_RES_STATUS_BAD_ICV_MIC)
			stats->flag |= RX_FLAG_MMIC_ERROR;
		/* fall through: TKIP also takes the generic
		 * decrypt-OK check below */
	case RX_RES_STATUS_SEC_TYPE_WEP:
	case RX_RES_STATUS_SEC_TYPE_CCMP:
		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
		    RX_RES_STATUS_DECRYPT_OK) {
			IWL_DEBUG_RX("hw decrypt successfully!!!\n");
			stats->flag |= RX_FLAG_DECRYPTED;
		}
		break;

	default:
		break;
	}
}
3058
/*
 * Re-package a received frame with a radiotap header for monitor mode
 * and hand it to mac80211.  The frame data is moved to make room for
 * the radiotap header in the same skb; rxb->skb is consumed (set to
 * NULL) on success.  Oversized frames are dropped.
 */
void iwl_handle_data_packet_monitor(struct iwl_priv *priv,
				    struct iwl_rx_mem_buffer *rxb,
				    void *data, short len,
				    struct ieee80211_rx_status *stats,
				    u16 phy_flags)
{
	struct iwl_rt_rx_hdr *iwl_rt;

	/* First cache any information we need before we overwrite
	 * the information provided in the skb from the hardware */
	s8 signal = stats->ssi;
	s8 noise = 0;
	int rate = stats->rate;
	u64 tsf = stats->mactime;
	__le16 phy_flags_hw = cpu_to_le16(phy_flags);

	/* We received data from the HW, so stop the watchdog */
	if (len > IWL_RX_BUF_SIZE - sizeof(*iwl_rt)) {
		IWL_DEBUG_DROP("Dropping too large packet in monitor\n");
		return;
	}

	/* copy the frame data to write after where the radiotap header goes */
	iwl_rt = (void *)rxb->skb->data;
	memmove(iwl_rt->payload, data, len);

	iwl_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
	iwl_rt->rt_hdr.it_pad = 0; /* always good to zero */

	/* total header + data */
	iwl_rt->rt_hdr.it_len = cpu_to_le16(sizeof(*iwl_rt));

	/* Set the size of the skb to the size of the frame */
	skb_put(rxb->skb, sizeof(*iwl_rt) + len);

	/* Big bitfield of all the fields we provide in radiotap */
	iwl_rt->rt_hdr.it_present =
	    cpu_to_le32((1 << IEEE80211_RADIOTAP_TSFT) |
			(1 << IEEE80211_RADIOTAP_FLAGS) |
			(1 << IEEE80211_RADIOTAP_RATE) |
			(1 << IEEE80211_RADIOTAP_CHANNEL) |
			(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
			(1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
			(1 << IEEE80211_RADIOTAP_ANTENNA));

	/* Zero the flags, we'll add to them as we go */
	iwl_rt->rt_flags = 0;

	iwl_rt->rt_tsf = cpu_to_le64(tsf);

	/* Convert to dBm */
	iwl_rt->rt_dbmsignal = signal;
	iwl_rt->rt_dbmnoise = noise;

	/* Convert the channel frequency and set the flags */
	iwl_rt->rt_channelMHz = cpu_to_le16(stats->freq);
	if (!(phy_flags_hw & RX_RES_PHY_FLAGS_BAND_24_MSK))
		iwl_rt->rt_chbitmask =
		    cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
	else if (phy_flags_hw & RX_RES_PHY_FLAGS_MOD_CCK_MSK)
		iwl_rt->rt_chbitmask =
		    cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
	else	/* 802.11g */
		iwl_rt->rt_chbitmask =
		    cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ));

	/* Map the hardware PLCP value back to a rate index; 0 when the
	 * PLCP value is unrecognized */
	rate = iwl_rate_index_from_plcp(rate);
	if (rate == -1)
		iwl_rt->rt_rate = 0;
	else
		iwl_rt->rt_rate = iwl_rates[rate].ieee;

	/* antenna number */
	iwl_rt->rt_antenna =
	    le16_to_cpu(phy_flags_hw & RX_RES_PHY_FLAGS_ANTENNA_MSK) >> 4;

	/* set the preamble flag if we have it */
	if (phy_flags_hw & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
		iwl_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;

	IWL_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);

	stats->flag |= RX_FLAG_RADIOTAP;
	ieee80211_rx_irqsafe(priv->hw, rxb->skb, stats);
	rxb->skb = NULL;
}
3145
3146
/* Window within which a repeated seq/frag pair counts as a retry */
#define IWL_PACKET_RETRY_TIME HZ

/*
 * Detect retransmitted duplicate frames by comparing a frame's
 * sequence and fragment numbers against per-source state: a
 * MAC-hashed list in IBSS mode, single per-priv fields in STA mode
 * (other modes never report duplicates).  Returns 1 when the frame is
 * a duplicate or out-of-order fragment that should be dropped,
 * 0 otherwise.
 */
int is_duplicate_packet(struct iwl_priv *priv, struct ieee80211_hdr *header)
{
	u16 sc = le16_to_cpu(header->seq_ctrl);
	u16 seq = (sc & IEEE80211_SCTL_SEQ) >> 4;
	u16 frag = sc & IEEE80211_SCTL_FRAG;
	u16 *last_seq, *last_frag;
	unsigned long *last_time;

	switch (priv->iw_mode) {
	case IEEE80211_IF_TYPE_IBSS:{
		struct list_head *p;
		struct iwl_ibss_seq *entry = NULL;
		u8 *mac = header->addr2;
		int index = mac[5] & (IWL_IBSS_MAC_HASH_SIZE - 1);

		/* Look up (or create) the per-source entry in the
		 * hash bucket for this MAC */
		__list_for_each(p, &priv->ibss_mac_hash[index]) {
			entry =
				list_entry(p, struct iwl_ibss_seq, list);
			if (!compare_ether_addr(entry->mac, mac))
				break;
		}
		if (p == &priv->ibss_mac_hash[index]) {
			/* First frame from this source: record it and
			 * accept */
			entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
			if (!entry) {
				IWL_ERROR
					("Cannot malloc new mac entry\n");
				return 0;
			}
			memcpy(entry->mac, mac, ETH_ALEN);
			entry->seq_num = seq;
			entry->frag_num = frag;
			entry->packet_time = jiffies;
			list_add(&entry->list,
				 &priv->ibss_mac_hash[index]);
			return 0;
		}
		last_seq = &entry->seq_num;
		last_frag = &entry->frag_num;
		last_time = &entry->packet_time;
		break;
	}
	case IEEE80211_IF_TYPE_STA:
		last_seq = &priv->last_seq_num;
		last_frag = &priv->last_frag_num;
		last_time = &priv->last_packet_time;
		break;
	default:
		return 0;
	}
	/* Same sequence number seen recently: same fragment is a
	 * retry, a non-consecutive fragment is out of order */
	if ((*last_seq == seq) &&
	    time_after(*last_time + IWL_PACKET_RETRY_TIME, jiffies)) {
		if (*last_frag == frag)
			goto drop;
		if (*last_frag + 1 != frag)
			/* out-of-order fragment */
			goto drop;
	} else
		*last_seq = seq;

	*last_frag = frag;
	*last_time = jiffies;
	return 0;

 drop:
	return 1;
}
3215
3216#ifdef CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT
3217
3218#include "iwl-spectrum.h"
3219
3220#define BEACON_TIME_MASK_LOW 0x00FFFFFF
3221#define BEACON_TIME_MASK_HIGH 0xFF000000
3222#define TIME_UNIT 1024
3223
3224/*
3225 * extended beacon time format
3226 * time in usec will be changed into a 32-bit value in 8:24 format
3227 * the high 1 byte is the beacon counts
3228 * the lower 3 bytes is the time in usec within one beacon interval
3229 */
3230
3231static u32 iwl_usecs_to_beacons(u32 usec, u32 beacon_interval)
3232{
3233 u32 quot;
3234 u32 rem;
3235 u32 interval = beacon_interval * 1024;
3236
3237 if (!interval || !usec)
3238 return 0;
3239
3240 quot = (usec / interval) & (BEACON_TIME_MASK_HIGH >> 24);
3241 rem = (usec % interval) & BEACON_TIME_MASK_LOW;
3242
3243 return (quot << 24) + rem;
3244}
3245
3246/* base is usually what we get from ucode with each received frame,
3247 * the same as HW timer counter counting down
3248 */
3249
3250static __le32 iwl_add_beacon_time(u32 base, u32 addon, u32 beacon_interval)
3251{
3252 u32 base_low = base & BEACON_TIME_MASK_LOW;
3253 u32 addon_low = addon & BEACON_TIME_MASK_LOW;
3254 u32 interval = beacon_interval * TIME_UNIT;
3255 u32 res = (base & BEACON_TIME_MASK_HIGH) +
3256 (addon & BEACON_TIME_MASK_HIGH);
3257
3258 if (base_low > addon_low)
3259 res += base_low - addon_low;
3260 else if (base_low < addon_low) {
3261 res += interval + base_low - addon_low;
3262 res += (1 << 24);
3263 } else
3264 res += (1 << 24);
3265
3266 return cpu_to_le32(res);
3267}
3268
3269static int iwl_get_measurement(struct iwl_priv *priv,
3270 struct ieee80211_measurement_params *params,
3271 u8 type)
3272{
3273 struct iwl_spectrum_cmd spectrum;
3274 struct iwl_rx_packet *res;
3275 struct iwl_host_cmd cmd = {
3276 .id = REPLY_SPECTRUM_MEASUREMENT_CMD,
3277 .data = (void *)&spectrum,
3278 .meta.flags = CMD_WANT_SKB,
3279 };
3280 u32 add_time = le64_to_cpu(params->start_time);
3281 int rc;
3282 int spectrum_resp_status;
3283 int duration = le16_to_cpu(params->duration);
3284
3285 if (iwl_is_associated(priv))
3286 add_time =
3287 iwl_usecs_to_beacons(
3288 le64_to_cpu(params->start_time) - priv->last_tsf,
3289 le16_to_cpu(priv->rxon_timing.beacon_interval));
3290
3291 memset(&spectrum, 0, sizeof(spectrum));
3292
3293 spectrum.channel_count = cpu_to_le16(1);
3294 spectrum.flags =
3295 RXON_FLG_TSF2HOST_MSK | RXON_FLG_ANT_A_MSK | RXON_FLG_DIS_DIV_MSK;
3296 spectrum.filter_flags = MEASUREMENT_FILTER_FLAG;
3297 cmd.len = sizeof(spectrum);
3298 spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len));
3299
3300 if (iwl_is_associated(priv))
3301 spectrum.start_time =
3302 iwl_add_beacon_time(priv->last_beacon_time,
3303 add_time,
3304 le16_to_cpu(priv->rxon_timing.beacon_interval));
3305 else
3306 spectrum.start_time = 0;
3307
3308 spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT);
3309 spectrum.channels[0].channel = params->channel;
3310 spectrum.channels[0].type = type;
3311 if (priv->active_rxon.flags & RXON_FLG_BAND_24G_MSK)
3312 spectrum.flags |= RXON_FLG_BAND_24G_MSK |
3313 RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK;
3314
3315 rc = iwl_send_cmd_sync(priv, &cmd);
3316 if (rc)
3317 return rc;
3318
3319 res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
3320 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
3321 IWL_ERROR("Bad return from REPLY_RX_ON_ASSOC command\n");
3322 rc = -EIO;
3323 }
3324
3325 spectrum_resp_status = le16_to_cpu(res->u.spectrum.status);
3326 switch (spectrum_resp_status) {
3327 case 0: /* Command will be handled */
3328 if (res->u.spectrum.id != 0xff) {
3329 IWL_DEBUG_INFO
3330 ("Replaced existing measurement: %d\n",
3331 res->u.spectrum.id);
3332 priv->measurement_status &= ~MEASUREMENT_READY;
3333 }
3334 priv->measurement_status |= MEASUREMENT_ACTIVE;
3335 rc = 0;
3336 break;
3337
3338 case 1: /* Command will not be handled */
3339 rc = -EAGAIN;
3340 break;
3341 }
3342
3343 dev_kfree_skb_any(cmd.meta.u.skb);
3344
3345 return rc;
3346}
3347#endif
3348
3349static void iwl_txstatus_to_ieee(struct iwl_priv *priv,
3350 struct iwl_tx_info *tx_sta)
3351{
3352
3353 tx_sta->status.ack_signal = 0;
3354 tx_sta->status.excessive_retries = 0;
3355 tx_sta->status.queue_length = 0;
3356 tx_sta->status.queue_number = 0;
3357
3358 if (in_interrupt())
3359 ieee80211_tx_status_irqsafe(priv->hw,
3360 tx_sta->skb[0], &(tx_sta->status));
3361 else
3362 ieee80211_tx_status(priv->hw,
3363 tx_sta->skb[0], &(tx_sta->status));
3364
3365 tx_sta->skb[0] = NULL;
3366}
3367
3368/**
3369 * iwl_tx_queue_reclaim - Reclaim Tx queue entries no more used by NIC.
3370 *
3371 * When FW advances 'R' index, all entries between old and
3372 * new 'R' index need to be reclaimed. As result, some free space
3373 * forms. If there is enough free space (> low mark), wake Tx queue.
3374 */
3375int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
3376{
3377 struct iwl_tx_queue *txq = &priv->txq[txq_id];
3378 struct iwl_queue *q = &txq->q;
3379 int nfreed = 0;
3380
3381 if ((index >= q->n_bd) || (x2_queue_used(q, index) == 0)) {
3382 IWL_ERROR("Read index for DMA queue txq id (%d), index %d, "
3383 "is out of range [0-%d] %d %d.\n", txq_id,
3384 index, q->n_bd, q->first_empty, q->last_used);
3385 return 0;
3386 }
3387
3388 for (index = iwl_queue_inc_wrap(index, q->n_bd);
3389 q->last_used != index;
3390 q->last_used = iwl_queue_inc_wrap(q->last_used, q->n_bd)) {
3391 if (txq_id != IWL_CMD_QUEUE_NUM) {
3392 iwl_txstatus_to_ieee(priv,
3393 &(txq->txb[txq->q.last_used]));
3394 iwl_hw_txq_free_tfd(priv, txq);
3395 } else if (nfreed > 1) {
3396 IWL_ERROR("HCMD skipped: index (%d) %d %d\n", index,
3397 q->first_empty, q->last_used);
3398 queue_work(priv->workqueue, &priv->restart);
3399 }
3400 nfreed++;
3401 }
3402
3403 if (iwl_queue_space(q) > q->low_mark && (txq_id >= 0) &&
3404 (txq_id != IWL_CMD_QUEUE_NUM) &&
3405 priv->mac80211_registered)
3406 ieee80211_wake_queue(priv->hw, txq_id);
3407
3408
3409 return nfreed;
3410}
3411
3412static int iwl_is_tx_success(u32 status)
3413{
3414 return (status & 0xFF) == 0x1;
3415}
3416
3417/******************************************************************************
3418 *
3419 * Generic RX handler implementations
3420 *
3421 ******************************************************************************/
/* Handle REPLY_TX: translate the uCode tx response into a mac80211
 * tx status for the originating frame and reclaim its queue entries. */
static void iwl_rx_reply_tx(struct iwl_priv *priv,
			    struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
	/* The reply's sequence field encodes which queue/slot it is for */
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct ieee80211_tx_status *tx_status;
	struct iwl_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
	u32 status = le32_to_cpu(tx_resp->status);

	/* Sanity-check the decoded index before touching the ring */
	if ((index >= txq->q.n_bd) || (x2_queue_used(&txq->q, index) == 0)) {
		IWL_ERROR("Read index for DMA queue txq_id (%d) index %d "
			  "is out of range [0-%d] %d %d\n", txq_id,
			  index, txq->q.n_bd, txq->q.first_empty,
			  txq->q.last_used);
		return;
	}

	/* Fill in the status record that iwl_txstatus_to_ieee() will
	 * hand to mac80211 during the reclaim below */
	tx_status = &(txq->txb[txq->q.last_used].status);

	tx_status->retry_count = tx_resp->failure_frame;
	tx_status->queue_number = status;
	tx_status->queue_length = tx_resp->bt_kill_count;
	tx_status->queue_length |= tx_resp->failure_rts;

	tx_status->flags =
	    iwl_is_tx_success(status) ? IEEE80211_TX_STATUS_ACK : 0;

	/* Map the PLCP rate from the response back to a rate index */
	tx_status->control.tx_rate = iwl_rate_index_from_plcp(tx_resp->rate);

	IWL_DEBUG_TX("Tx queue %d Status %s (0x%08x) plcp rate %d retries %d\n",
			txq_id, iwl_get_tx_fail_reason(status), status,
			tx_resp->rate, tx_resp->failure_frame);

	IWL_DEBUG_TX_REPLY("Tx queue reclaim %d\n", index);
	if (index != -1)
		iwl_tx_queue_reclaim(priv, txq_id, index);

	if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK))
		IWL_ERROR("TODO: Implement Tx ABORT REQUIRED!!!\n");
}
3465
3466
3467static void iwl_rx_reply_alive(struct iwl_priv *priv,
3468 struct iwl_rx_mem_buffer *rxb)
3469{
3470 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3471 struct iwl_alive_resp *palive;
3472 struct delayed_work *pwork;
3473
3474 palive = &pkt->u.alive_frame;
3475
3476 IWL_DEBUG_INFO("Alive ucode status 0x%08X revision "
3477 "0x%01X 0x%01X\n",
3478 palive->is_valid, palive->ver_type,
3479 palive->ver_subtype);
3480
3481 if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
3482 IWL_DEBUG_INFO("Initialization Alive received.\n");
3483 memcpy(&priv->card_alive_init,
3484 &pkt->u.alive_frame,
3485 sizeof(struct iwl_init_alive_resp));
3486 pwork = &priv->init_alive_start;
3487 } else {
3488 IWL_DEBUG_INFO("Runtime Alive received.\n");
3489 memcpy(&priv->card_alive, &pkt->u.alive_frame,
3490 sizeof(struct iwl_alive_resp));
3491 pwork = &priv->alive_start;
3492 iwl_disable_events(priv);
3493 }
3494
3495 /* We delay the ALIVE response by 5ms to
3496 * give the HW RF Kill time to activate... */
3497 if (palive->is_valid == UCODE_VALID_OK)
3498 queue_delayed_work(priv->workqueue, pwork,
3499 msecs_to_jiffies(5));
3500 else
3501 IWL_WARNING("uCode did not respond OK.\n");
3502}
3503
3504static void iwl_rx_reply_add_sta(struct iwl_priv *priv,
3505 struct iwl_rx_mem_buffer *rxb)
3506{
3507 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3508
3509 IWL_DEBUG_RX("Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status);
3510 return;
3511}
3512
3513static void iwl_rx_reply_error(struct iwl_priv *priv,
3514 struct iwl_rx_mem_buffer *rxb)
3515{
3516 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3517
3518 IWL_ERROR("Error Reply type 0x%08X cmd %s (0x%02X) "
3519 "seq 0x%04X ser 0x%08X\n",
3520 le32_to_cpu(pkt->u.err_resp.error_type),
3521 get_cmd_string(pkt->u.err_resp.cmd_id),
3522 pkt->u.err_resp.cmd_id,
3523 le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
3524 le32_to_cpu(pkt->u.err_resp.error_info));
3525}
3526
3527#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x
3528
3529static void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
3530{
3531 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3532 struct iwl_rxon_cmd *rxon = (void *)&priv->active_rxon;
3533 struct iwl_csa_notification *csa = &(pkt->u.csa_notif);
3534 IWL_DEBUG_11H("CSA notif: channel %d, status %d\n",
3535 le16_to_cpu(csa->channel), le32_to_cpu(csa->status));
3536 rxon->channel = csa->channel;
3537 priv->staging_rxon.channel = csa->channel;
3538}
3539
/* Spectrum measurement progress notification: a zero state marks the
 * start of a measurement; a non-zero state carries the final report. */
static void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
					  struct iwl_rx_mem_buffer *rxb)
{
#ifdef CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT
	struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
	struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif);

	if (report->state) {
		/* Measurement finished: stash the report for readers */
		memcpy(&priv->measure_report, report, sizeof(*report));
		priv->measurement_status |= MEASUREMENT_READY;
	} else {
		IWL_DEBUG(IWL_DL_11H | IWL_DL_INFO,
			  "Spectrum Measure Notification: Start\n");
	}
#endif
}
3557
/* PM sleep notification: debug-log the sleep mode and wakeup source. */
static void iwl_rx_pm_sleep_notif(struct iwl_priv *priv,
				  struct iwl_rx_mem_buffer *rxb)
{
#ifdef CONFIG_IWLWIFI_DEBUG
	struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
	struct iwl_sleep_notification *notif = &(pkt->u.sleep_notif);

	IWL_DEBUG_RX("sleep mode: %d, src: %d\n",
		     notif->pm_sleep_mode, notif->pm_wakeup_src);
#endif
}
3568
3569static void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
3570 struct iwl_rx_mem_buffer *rxb)
3571{
3572 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3573 IWL_DEBUG_RADIO("Dumping %d bytes of unhandled "
3574 "notification for %s:\n",
3575 le32_to_cpu(pkt->len), get_cmd_string(pkt->hdr.cmd));
3576 iwl_print_hex_dump(IWL_DL_RADIO, pkt->u.raw, le32_to_cpu(pkt->len));
3577}
3578
3579static void iwl_bg_beacon_update(struct work_struct *work)
3580{
3581 struct iwl_priv *priv =
3582 container_of(work, struct iwl_priv, beacon_update);
3583 struct sk_buff *beacon;
3584
3585 /* Pull updated AP beacon from mac80211. will fail if not in AP mode */
3586 beacon = ieee80211_beacon_get(priv->hw, priv->interface_id, NULL);
3587
3588 if (!beacon) {
3589 IWL_ERROR("update beacon failed\n");
3590 return;
3591 }
3592
3593 mutex_lock(&priv->mutex);
3594 /* new beacon skb is allocated every time; dispose previous.*/
3595 if (priv->ibss_beacon)
3596 dev_kfree_skb(priv->ibss_beacon);
3597
3598 priv->ibss_beacon = beacon;
3599 mutex_unlock(&priv->mutex);
3600
3601 iwl_send_beacon_cmd(priv);
3602}
3603
/* Handle BEACON_NOTIFICATION: status of our last transmitted beacon.
 * In AP mode, schedule a refresh of the beacon template from mac80211. */
static void iwl_rx_beacon_notif(struct iwl_priv *priv,
				struct iwl_rx_mem_buffer *rxb)
{
#ifdef CONFIG_IWLWIFI_DEBUG
	struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
	struct iwl_beacon_notif *beacon = &(pkt->u.beacon_status);
	u8 rate = beacon->beacon_notify_hdr.rate;

	IWL_DEBUG_RX("beacon status %x retries %d iss %d "
		"tsf %d %d rate %d\n",
		le32_to_cpu(beacon->beacon_notify_hdr.status) & TX_STATUS_MSK,
		beacon->beacon_notify_hdr.failure_frame,
		le32_to_cpu(beacon->ibss_mgr_status),
		le32_to_cpu(beacon->high_tsf),
		le32_to_cpu(beacon->low_tsf), rate);
#endif

	/* Only refresh the beacon while operating as AP and not shutting
	 * down; the actual work runs in iwl_bg_beacon_update() */
	if ((priv->iw_mode == IEEE80211_IF_TYPE_AP) &&
	    (!test_bit(STATUS_EXIT_PENDING, &priv->status)))
		queue_work(priv->workqueue, &priv->beacon_update);
}
3625
/* Service response to REPLY_SCAN_CMD (0x80) */
static void iwl_rx_reply_scan(struct iwl_priv *priv,
			      struct iwl_rx_mem_buffer *rxb)
{
#ifdef CONFIG_IWLWIFI_DEBUG
	struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
	struct iwl_scanreq_notification *req_notif =
	    (struct iwl_scanreq_notification *)pkt->u.raw;

	IWL_DEBUG_RX("Scan request status = 0x%x\n", req_notif->status);
#endif
}
3638
3639/* Service SCAN_START_NOTIFICATION (0x82) */
3640static void iwl_rx_scan_start_notif(struct iwl_priv *priv,
3641 struct iwl_rx_mem_buffer *rxb)
3642{
3643 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3644 struct iwl_scanstart_notification *notif =
3645 (struct iwl_scanstart_notification *)pkt->u.raw;
3646 priv->scan_start_tsf = le32_to_cpu(notif->tsf_low);
3647 IWL_DEBUG_SCAN("Scan start: "
3648 "%d [802.11%s] "
3649 "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n",
3650 notif->channel,
3651 notif->band ? "bg" : "a",
3652 notif->tsf_high,
3653 notif->tsf_low, notif->status, notif->beacon_timer);
3654}
3655
3656/* Service SCAN_RESULTS_NOTIFICATION (0x83) */
3657static void iwl_rx_scan_results_notif(struct iwl_priv *priv,
3658 struct iwl_rx_mem_buffer *rxb)
3659{
3660 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3661 struct iwl_scanresults_notification *notif =
3662 (struct iwl_scanresults_notification *)pkt->u.raw;
3663
3664 IWL_DEBUG_SCAN("Scan ch.res: "
3665 "%d [802.11%s] "
3666 "(TSF: 0x%08X:%08X) - %d "
3667 "elapsed=%lu usec (%dms since last)\n",
3668 notif->channel,
3669 notif->band ? "bg" : "a",
3670 le32_to_cpu(notif->tsf_high),
3671 le32_to_cpu(notif->tsf_low),
3672 le32_to_cpu(notif->statistics[0]),
3673 le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf,
3674 jiffies_to_msecs(elapsed_jiffies
3675 (priv->last_scan_jiffies, jiffies)));
3676
3677 priv->last_scan_jiffies = jiffies;
3678}
3679
/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
				       struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
	struct iwl_scancomplete_notification *scan_notif = (void *)pkt->u.raw;

	IWL_DEBUG_SCAN("Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
		       scan_notif->scanned_channels,
		       scan_notif->tsf_low,
		       scan_notif->tsf_high, scan_notif->status);

	/* The HW is no longer scanning */
	clear_bit(STATUS_SCAN_HW, &priv->status);

	/* The scan completion notification came in, so kill that timer... */
	cancel_delayed_work(&priv->scan_check);

	IWL_DEBUG_INFO("Scan pass on %sGHz took %dms\n",
		       (priv->scan_bands == 2) ? "2.4" : "5.2",
		       jiffies_to_msecs(elapsed_jiffies
					(priv->scan_pass_start, jiffies)));

	/* Remove this scanned band from the list
	 * of pending bands to scan */
	priv->scan_bands--;

	/* If a request to abort was given, or the scan did not succeed
	 * then we reset the scan state machine and terminate,
	 * re-queuing another scan if one has been requested */
	if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
		IWL_DEBUG_INFO("Aborted scan completed.\n");
		clear_bit(STATUS_SCAN_ABORTING, &priv->status);
	} else {
		/* If there are more bands on this scan pass reschedule */
		if (priv->scan_bands > 0)
			goto reschedule;
	}

	/* All requested bands done (or the scan aborted): mark scanning
	 * off and notify the completion work item */
	priv->last_scan_jiffies = jiffies;
	IWL_DEBUG_INFO("Setting scan to off\n");

	clear_bit(STATUS_SCANNING, &priv->status);

	IWL_DEBUG_INFO("Scan took %dms\n",
		jiffies_to_msecs(elapsed_jiffies(priv->scan_start, jiffies)));

	queue_work(priv->workqueue, &priv->scan_completed);

	return;

reschedule:
	/* Start the next band's pass right away */
	priv->scan_pass_start = jiffies;
	queue_work(priv->workqueue, &priv->request_scan);
}
3735
/* Handle notification from uCode that card's power state is changing
 * due to software, hardware, or critical temperature RFKILL */
static void iwl_rx_card_state_notif(struct iwl_priv *priv,
				    struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
	u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
	/* snapshot of the status bits before updating them, so we can
	 * tell below whether the rfkill state actually changed */
	unsigned long status = priv->status;

	IWL_DEBUG_RF_KILL("Card state received: HW:%s SW:%s\n",
			  (flags & HW_CARD_DISABLED) ? "Kill" : "On",
			  (flags & SW_CARD_DISABLED) ? "Kill" : "On");

	/* Block further host commands while the radio is being killed */
	iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	if (flags & HW_CARD_DISABLED)
		set_bit(STATUS_RF_KILL_HW, &priv->status);
	else
		clear_bit(STATUS_RF_KILL_HW, &priv->status);


	if (flags & SW_CARD_DISABLED)
		set_bit(STATUS_RF_KILL_SW, &priv->status);
	else
		clear_bit(STATUS_RF_KILL_SW, &priv->status);

	/* A scan cannot continue across an rfkill transition */
	iwl_scan_cancel(priv);

	/* Schedule the rf_kill work only if the state really changed;
	 * otherwise just wake anyone waiting on the command queue */
	if ((test_bit(STATUS_RF_KILL_HW, &status) !=
	     test_bit(STATUS_RF_KILL_HW, &priv->status)) ||
	    (test_bit(STATUS_RF_KILL_SW, &status) !=
	     test_bit(STATUS_RF_KILL_SW, &priv->status)))
		queue_work(priv->workqueue, &priv->rf_kill);
	else
		wake_up_interruptible(&priv->wait_command_queue);
}
3773
3774/**
3775 * iwl_setup_rx_handlers - Initialize Rx handler callbacks
3776 *
3777 * Setup the RX handlers for each of the reply types sent from the uCode
3778 * to the host.
3779 *
3780 * This function chains into the hardware specific files for them to setup
3781 * any hardware specific handlers as well.
3782 */
3783static void iwl_setup_rx_handlers(struct iwl_priv *priv)
3784{
3785 priv->rx_handlers[REPLY_ALIVE] = iwl_rx_reply_alive;
3786 priv->rx_handlers[REPLY_ADD_STA] = iwl_rx_reply_add_sta;
3787 priv->rx_handlers[REPLY_ERROR] = iwl_rx_reply_error;
3788 priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_rx_csa;
3789 priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
3790 iwl_rx_spectrum_measure_notif;
3791 priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif;
3792 priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
3793 iwl_rx_pm_debug_statistics_notif;
3794 priv->rx_handlers[BEACON_NOTIFICATION] = iwl_rx_beacon_notif;
3795
3796 /* NOTE: iwl_rx_statistics is different based on whether
3797 * the build is for the 3945 or the 4965. See the
3798 * corresponding implementation in iwl-XXXX.c
3799 *
3800 * The same handler is used for both the REPLY to a
3801 * discrete statistics request from the host as well as
3802 * for the periodic statistics notification from the uCode
3803 */
3804 priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl_hw_rx_statistics;
3805 priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl_hw_rx_statistics;
3806
3807 priv->rx_handlers[REPLY_SCAN_CMD] = iwl_rx_reply_scan;
3808 priv->rx_handlers[SCAN_START_NOTIFICATION] = iwl_rx_scan_start_notif;
3809 priv->rx_handlers[SCAN_RESULTS_NOTIFICATION] =
3810 iwl_rx_scan_results_notif;
3811 priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] =
3812 iwl_rx_scan_complete_notif;
3813 priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl_rx_card_state_notif;
3814 priv->rx_handlers[REPLY_TX] = iwl_rx_reply_tx;
3815
3816 /* Setup hardware specific Rx handlers */
3817 iwl_hw_rx_handler_setup(priv);
3818}
3819
3820/**
3821 * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
3822 * @rxb: Rx buffer to reclaim
3823 *
3824 * If an Rx buffer has an async callback associated with it the callback
3825 * will be executed. The attached skb (if present) will only be freed
3826 * if the callback returns 1
3827 */
3828static void iwl_tx_cmd_complete(struct iwl_priv *priv,
3829 struct iwl_rx_mem_buffer *rxb)
3830{
3831 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
3832 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
3833 int txq_id = SEQ_TO_QUEUE(sequence);
3834 int index = SEQ_TO_INDEX(sequence);
3835 int huge = sequence & SEQ_HUGE_FRAME;
3836 int cmd_index;
3837 struct iwl_cmd *cmd;
3838
3839 /* If a Tx command is being handled and it isn't in the actual
3840 * command queue then there a command routing bug has been introduced
3841 * in the queue management code. */
3842 if (txq_id != IWL_CMD_QUEUE_NUM)
3843 IWL_ERROR("Error wrong command queue %d command id 0x%X\n",
3844 txq_id, pkt->hdr.cmd);
3845 BUG_ON(txq_id != IWL_CMD_QUEUE_NUM);
3846
3847 cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge);
3848 cmd = &priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];
3849
3850 /* Input error checking is done when commands are added to queue. */
3851 if (cmd->meta.flags & CMD_WANT_SKB) {
3852 cmd->meta.source->u.skb = rxb->skb;
3853 rxb->skb = NULL;
3854 } else if (cmd->meta.u.callback &&
3855 !cmd->meta.u.callback(priv, cmd, rxb->skb))
3856 rxb->skb = NULL;
3857
3858 iwl_tx_queue_reclaim(priv, txq_id, index);
3859
3860 if (!(cmd->meta.flags & CMD_ASYNC)) {
3861 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
3862 wake_up_interruptible(&priv->wait_command_queue);
3863 }
3864}
3865
3866/************************** RX-FUNCTIONS ****************************/
3867/*
3868 * Rx theory of operation
3869 *
3870 * The host allocates 32 DMA target addresses and passes the host address
3871 * to the firmware at register IWL_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
3872 * 0 to 31
3873 *
3874 * Rx Queue Indexes
3875 * The host/firmware share two index registers for managing the Rx buffers.
3876 *
3877 * The READ index maps to the first position that the firmware may be writing
3878 * to -- the driver can read up to (but not including) this position and get
3879 * good data.
3880 * The READ index is managed by the firmware once the card is enabled.
3881 *
3882 * The WRITE index maps to the last position the driver has read from -- the
3883 * position preceding WRITE is the last slot the firmware can place a packet.
3884 *
3885 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
3886 * WRITE = READ.
3887 *
3888 * During initialization the host sets up the READ queue position to the first
3889 * INDEX position, and WRITE to the last (READ - 1 wrapped)
3890 *
3891 * When the firmware places a packet in a buffer it will advance the READ index
3892 * and fire the RX interrupt. The driver can then query the READ index and
3893 * process as many packets as possible, moving the WRITE index forward as it
3894 * resets the Rx queue buffers with new memory.
3895 *
3896 * The management in the driver is as follows:
3897 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
3898 * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
 * to replenish the iwl->rxq->rx_free.
3900 * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the
3901 * iwl->rxq is replenished and the READ INDEX is updated (updating the
3902 * 'processed' and 'read' driver indexes as well)
3903 * + A received packet is processed and handed to the kernel network stack,
3904 * detached from the iwl->rxq. The driver 'processed' index is updated.
3905 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
3906 * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
3907 * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
3908 * were enough free buffers and RX_STALLED is set it is cleared.
3909 *
3910 *
3911 * Driver sequence:
3912 *
3913 * iwl_rx_queue_alloc() Allocates rx_free
3914 * iwl_rx_replenish() Replenishes rx_free list from rx_used, and calls
3915 * iwl_rx_queue_restock
3916 * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx
3917 * queue, updates firmware pointers, and updates
3918 * the WRITE index. If insufficient rx_free buffers
3919 * are available, schedules iwl_rx_replenish
3920 *
3921 * -- enable interrupts --
3922 * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the
3923 * READ INDEX, detaching the SKB from the pool.
3924 * Moves the packet buffer from queue to rx_used.
3925 * Calls iwl_rx_queue_restock to refill any empty
3926 * slots.
3927 * ...
3928 *
3929 */
3930
3931/**
3932 * iwl_rx_queue_space - Return number of free slots available in queue.
3933 */
3934static int iwl_rx_queue_space(const struct iwl_rx_queue *q)
3935{
3936 int s = q->read - q->write;
3937 if (s <= 0)
3938 s += RX_QUEUE_SIZE;
3939 /* keep some buffer to not confuse full and empty queue */
3940 s -= 2;
3941 if (s < 0)
3942 s = 0;
3943 return s;
3944}
3945
3946/**
3947 * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
3948 *
3949 * NOTE: This function has 3945 and 4965 specific code sections
3950 * but is declared in base due to the majority of the
3951 * implementation being the same (only a numeric constant is
3952 * different)
3953 *
3954 */
3955int iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q)
3956{
3957 u32 reg = 0;
3958 int rc = 0;
3959 unsigned long flags;
3960
3961 spin_lock_irqsave(&q->lock, flags);
3962
3963 if (q->need_update == 0)
3964 goto exit_unlock;
3965
3966 if (test_bit(STATUS_POWER_PMI, &priv->status)) {
3967 reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
3968
3969 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
3970 iwl_set_bit(priv, CSR_GP_CNTRL,
3971 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
3972 goto exit_unlock;
3973 }
3974
3975 rc = iwl_grab_restricted_access(priv);
3976 if (rc)
3977 goto exit_unlock;
3978
3979 iwl_write_restricted(priv, FH_RSCSR_CHNL0_WPTR,
3980 q->write & ~0x7);
3981 iwl_release_restricted_access(priv);
3982 } else
3983 iwl_write32(priv, FH_RSCSR_CHNL0_WPTR, q->write & ~0x7);
3984
3985
3986 q->need_update = 0;
3987
3988 exit_unlock:
3989 spin_unlock_irqrestore(&q->lock, flags);
3990 return rc;
3991}
3992
3993/**
3994 * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer pointer.
3995 *
3996 * NOTE: This function has 3945 and 4965 specific code paths in it.
3997 */
3998static inline __le32 iwl_dma_addr2rbd_ptr(struct iwl_priv *priv,
3999 dma_addr_t dma_addr)
4000{
4001 return cpu_to_le32((u32)dma_addr);
4002}
4003
4004/**
4005 * iwl_rx_queue_restock - refill RX queue from pre-allocated pool
4006 *
4007 * If there are slots in the RX queue that need to be restocked,
4008 * and we have free pre-allocated buffers, fill the ranks as much
4009 * as we can pulling from rx_free.
4010 *
4011 * This moves the 'write' index forward to catch up with 'processed', and
4012 * also updates the memory address in the firmware to reference the new
4013 * target buffer.
4014 */
4015int iwl_rx_queue_restock(struct iwl_priv *priv)
4016{
4017 struct iwl_rx_queue *rxq = &priv->rxq;
4018 struct list_head *element;
4019 struct iwl_rx_mem_buffer *rxb;
4020 unsigned long flags;
4021 int write, rc;
4022
4023 spin_lock_irqsave(&rxq->lock, flags);
4024 write = rxq->write & ~0x7;
4025 while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
4026 element = rxq->rx_free.next;
4027 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
4028 list_del(element);
4029 rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(priv, rxb->dma_addr);
4030 rxq->queue[rxq->write] = rxb;
4031 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
4032 rxq->free_count--;
4033 }
4034 spin_unlock_irqrestore(&rxq->lock, flags);
4035 /* If the pre-allocated buffer pool is dropping low, schedule to
4036 * refill it */
4037 if (rxq->free_count <= RX_LOW_WATERMARK)
4038 queue_work(priv->workqueue, &priv->rx_replenish);
4039
4040
4041 /* If we've added more space for the firmware to place data, tell it */
4042 if ((write != (rxq->write & ~0x7))
4043 || (abs(rxq->write - rxq->read) > 7)) {
4044 spin_lock_irqsave(&rxq->lock, flags);
4045 rxq->need_update = 1;
4046 spin_unlock_irqrestore(&rxq->lock, flags);
4047 rc = iwl_rx_queue_update_write_ptr(priv, rxq);
4048 if (rc)
4049 return rc;
4050 }
4051
4052 return 0;
4053}
4054
4055/**
4056 * iwl_rx_replensih - Move all used packet from rx_used to rx_free
4057 *
4058 * When moving to rx_free an SKB is allocated for the slot.
4059 *
4060 * Also restock the Rx queue via iwl_rx_queue_restock.
4061 * This is called as a scheduled work item (except for during intialization)
4062 */
4063void iwl_rx_replenish(void *data)
4064{
4065 struct iwl_priv *priv = data;
4066 struct iwl_rx_queue *rxq = &priv->rxq;
4067 struct list_head *element;
4068 struct iwl_rx_mem_buffer *rxb;
4069 unsigned long flags;
4070 spin_lock_irqsave(&rxq->lock, flags);
4071 while (!list_empty(&rxq->rx_used)) {
4072 element = rxq->rx_used.next;
4073 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
4074 rxb->skb =
4075 alloc_skb(IWL_RX_BUF_SIZE, __GFP_NOWARN | GFP_ATOMIC);
4076 if (!rxb->skb) {
4077 if (net_ratelimit())
4078 printk(KERN_CRIT DRV_NAME
4079 ": Can not allocate SKB buffers\n");
4080 /* We don't reschedule replenish work here -- we will
4081 * call the restock method and if it still needs
4082 * more buffers it will schedule replenish */
4083 break;
4084 }
4085 priv->alloc_rxb_skb++;
4086 list_del(element);
4087 rxb->dma_addr =
4088 pci_map_single(priv->pci_dev, rxb->skb->data,
4089 IWL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
4090 list_add_tail(&rxb->list, &rxq->rx_free);
4091 rxq->free_count++;
4092 }
4093 spin_unlock_irqrestore(&rxq->lock, flags);
4094
4095 spin_lock_irqsave(&priv->lock, flags);
4096 iwl_rx_queue_restock(priv);
4097 spin_unlock_irqrestore(&priv->lock, flags);
4098}
4099
/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
 * If an SKB has been detached, the POOL needs to have its SKB set to NULL.
 * This free routine walks the list of POOL entries and if SKB is set to
 * non NULL it is unmapped and freed
 */
void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
{
	int i;
	for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
		if (rxq->pool[i].skb != NULL) {
			/* Undo the streaming mapping made in replenish
			 * before handing the skb back to the allocator */
			pci_unmap_single(priv->pci_dev,
					 rxq->pool[i].dma_addr,
					 IWL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(rxq->pool[i].skb);
		}
	}

	/* Release the coherent buffer-descriptor array (4 bytes/entry) */
	pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd,
			    rxq->dma_addr);
	rxq->bd = NULL;
}
4121
/**
 * iwl_rx_queue_alloc - Allocate and initialize the driver's Rx queue
 *
 * Allocates the coherent buffer-descriptor array (4 bytes per entry) and
 * places every pool entry on rx_used; no SKBs are attached yet -- that is
 * done later by iwl_rx_replenish().
 *
 * Returns 0 on success, -ENOMEM if the descriptor array allocation fails.
 */
int iwl_rx_queue_alloc(struct iwl_priv *priv)
{
	struct iwl_rx_queue *rxq = &priv->rxq;
	struct pci_dev *dev = priv->pci_dev;
	int i;

	spin_lock_init(&rxq->lock);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);
	rxq->bd = pci_alloc_consistent(dev, 4 * RX_QUEUE_SIZE, &rxq->dma_addr);
	if (!rxq->bd)
		return -ENOMEM;
	/* Fill the rx_used queue with _all_ of the Rx buffers */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->free_count = 0;
	rxq->need_update = 0;
	return 0;
}
4144
/**
 * iwl_rx_queue_reset - Return all Rx buffers to rx_used, freeing any SKBs
 *
 * Unlike iwl_rx_queue_free(), the descriptor array is kept; only the
 * per-buffer SKBs (if any) are unmapped and freed, and the queue indices
 * are rewound so a subsequent replenish/restock starts from scratch.
 */
void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
{
	unsigned long flags;
	int i;
	spin_lock_irqsave(&rxq->lock, flags);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);
	/* Fill the rx_used queue with _all_ of the Rx buffers */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		/* In the reset function, these buffers may have been allocated
		 * to an SKB, so we need to unmap and free potential storage */
		if (rxq->pool[i].skb != NULL) {
			pci_unmap_single(priv->pci_dev,
					 rxq->pool[i].dma_addr,
					 IWL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
			priv->alloc_rxb_skb--;
			dev_kfree_skb(rxq->pool[i].skb);
			rxq->pool[i].skb = NULL;
		}
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
	}

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->free_count = 0;
	spin_unlock_irqrestore(&rxq->lock, flags);
}
4173
/* Convert linear signal-to-noise ratio into dB.
 * Valid indices are 0..99; callers must not index past the end. */
static u8 ratio2dB[100] = {
/*	 0   1   2   3   4   5   6   7   8   9 */
	 0,  0,  6, 10, 12, 14, 16, 17, 18, 19, /* 00 - 09 */
	20, 21, 22, 22, 23, 23, 24, 25, 26, 26, /* 10 - 19 */
	26, 26, 26, 27, 27, 28, 28, 28, 29, 29, /* 20 - 29 */
	29, 30, 30, 30, 31, 31, 31, 31, 32, 32, /* 30 - 39 */
	32, 32, 32, 33, 33, 33, 33, 33, 34, 34, /* 40 - 49 */
	34, 34, 34, 34, 35, 35, 35, 35, 35, 35, /* 50 - 59 */
	36, 36, 36, 36, 36, 36, 36, 37, 37, 37, /* 60 - 69 */
	37, 37, 37, 37, 37, 38, 38, 38, 38, 38, /* 70 - 79 */
	38, 38, 38, 38, 38, 39, 39, 39, 39, 39, /* 80 - 89 */
	39, 39, 39, 39, 39, 40, 40, 40, 40, 40  /* 90 - 99 */
};

/* Calculates a relative dB value from a ratio of linear
 * (i.e. not dB) signal levels.
 * Conversion assumes that levels are voltages (20*log), not powers (10*log).
 *
 * NOTE: the thresholds below are ">=", not ">".  With ">" a ratio of
 * exactly 100 fell through to ratio2dB[100], and a ratio of exactly 1000
 * produced ratio2dB[1000/10] == ratio2dB[100] -- both one element past
 * the end of the 100-entry table (out-of-bounds read). */
int iwl_calc_db_from_ratio(int sig_ratio)
{
	/* Anything at or above 1000:1 just report as 60 dB */
	if (sig_ratio >= 1000)
		return 60;

	/* At or above 100:1, divide by 10 and use table,
	 * add 20 dB to make up for divide by 10 */
	if (sig_ratio >= 100)
		return (20 + (int)ratio2dB[sig_ratio/10]);

	/* We shouldn't see this */
	if (sig_ratio < 1)
		return 0;

	/* Use table for ratios 1:1 - 99:1 */
	return (int)ratio2dB[sig_ratio];
}
4210
#define PERFECT_RSSI (-20) /* dBm */
#define WORST_RSSI (-95) /* dBm */
#define RSSI_RANGE (PERFECT_RSSI - WORST_RSSI)

/* Calculate an indication of rx signal quality (a percentage, not dBm!).
 * See http://www.ces.clemson.edu/linux/signal_quality.shtml for info
 * about formulas used below.
 *
 * With a noise measurement available, quality is derived from the SNR
 * (signal dBm - noise dBm): >= 40 dB SNR is 100%, negative SNR is 0%,
 * and the 0-40 dB range is scaled linearly onto 0-100%.  Without a noise
 * value, a least-squares fit against a reference system's percentage
 * display is applied to the raw signal level.  Result is clamped to
 * the range 0..100. */
int iwl_calc_sig_qual(int rssi_dbm, int noise_dbm)
{
	int qual;

	if (noise_dbm) {
		int snr = rssi_dbm - noise_dbm;

		/* Great signal: saturate at 100% */
		if (snr >= 40)
			return 100;
		/* Signal below the noise floor: report 0% */
		if (snr < 0)
			return 0;
		/* Scale SNR 0..40 dB onto 0..100 (5/2 = 100/40) */
		qual = (snr * 5) / 2;
	} else {
		int drop = PERFECT_RSSI - rssi_dbm;

		/* Least-squares fit of signal level to percentage */
		qual = (100 * (RSSI_RANGE * RSSI_RANGE) -
			drop * (15 * RSSI_RANGE + 62 * drop)) /
		       (RSSI_RANGE * RSSI_RANGE);
	}

	if (qual > 100)
		return 100;
	if (qual < 1)
		return 0;
	return qual;
}
4251
4252/**
4253 * iwl_rx_handle - Main entry function for receiving responses from the uCode
4254 *
4255 * Uses the priv->rx_handlers callback function array to invoke
4256 * the appropriate handlers, including command responses,
4257 * frame-received notifications, and other notifications.
4258 */
4259static void iwl_rx_handle(struct iwl_priv *priv)
4260{
4261 struct iwl_rx_mem_buffer *rxb;
4262 struct iwl_rx_packet *pkt;
4263 struct iwl_rx_queue *rxq = &priv->rxq;
4264 u32 r, i;
4265 int reclaim;
4266 unsigned long flags;
4267
4268 r = iwl_hw_get_rx_read(priv);
4269 i = rxq->read;
4270
4271 /* Rx interrupt, but nothing sent from uCode */
4272 if (i == r)
4273 IWL_DEBUG(IWL_DL_RX | IWL_DL_ISR, "r = %d, i = %d\n", r, i);
4274
4275 while (i != r) {
4276 rxb = rxq->queue[i];
4277
4278 /* If an RXB doesn't have a queue slot associated with it
4279 * then a bug has been introduced in the queue refilling
4280 * routines -- catch it here */
4281 BUG_ON(rxb == NULL);
4282
4283 rxq->queue[i] = NULL;
4284
4285 pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
4286 IWL_RX_BUF_SIZE,
4287 PCI_DMA_FROMDEVICE);
4288 pkt = (struct iwl_rx_packet *)rxb->skb->data;
4289
4290 /* Reclaim a command buffer only if this packet is a response
4291 * to a (driver-originated) command.
4292 * If the packet (e.g. Rx frame) originated from uCode,
4293 * there is no command buffer to reclaim.
4294 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
4295 * but apparently a few don't get set; catch them here. */
4296 reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
4297 (pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
4298 (pkt->hdr.cmd != REPLY_TX);
4299
4300 /* Based on type of command response or notification,
4301 * handle those that need handling via function in
4302 * rx_handlers table. See iwl_setup_rx_handlers() */
4303 if (priv->rx_handlers[pkt->hdr.cmd]) {
4304 IWL_DEBUG(IWL_DL_HOST_COMMAND | IWL_DL_RX | IWL_DL_ISR,
4305 "r = %d, i = %d, %s, 0x%02x\n", r, i,
4306 get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
4307 priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
4308 } else {
4309 /* No handling needed */
4310 IWL_DEBUG(IWL_DL_HOST_COMMAND | IWL_DL_RX | IWL_DL_ISR,
4311 "r %d i %d No handler needed for %s, 0x%02x\n",
4312 r, i, get_cmd_string(pkt->hdr.cmd),
4313 pkt->hdr.cmd);
4314 }
4315
4316 if (reclaim) {
4317 /* Invoke any callbacks, transfer the skb to caller,
4318 * and fire off the (possibly) blocking iwl_send_cmd()
4319 * as we reclaim the driver command queue */
4320 if (rxb && rxb->skb)
4321 iwl_tx_cmd_complete(priv, rxb);
4322 else
4323 IWL_WARNING("Claim null rxb?\n");
4324 }
4325
4326 /* For now we just don't re-use anything. We can tweak this
4327 * later to try and re-use notification packets and SKBs that
4328 * fail to Rx correctly */
4329 if (rxb->skb != NULL) {
4330 priv->alloc_rxb_skb--;
4331 dev_kfree_skb_any(rxb->skb);
4332 rxb->skb = NULL;
4333 }
4334
4335 pci_unmap_single(priv->pci_dev, rxb->dma_addr,
4336 IWL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
4337 spin_lock_irqsave(&rxq->lock, flags);
4338 list_add_tail(&rxb->list, &priv->rxq.rx_used);
4339 spin_unlock_irqrestore(&rxq->lock, flags);
4340 i = (i + 1) & RX_QUEUE_MASK;
4341 }
4342
4343 /* Backtrack one entry */
4344 priv->rxq.read = i;
4345 iwl_rx_queue_restock(priv);
4346}
4347
/**
 * iwl_tx_queue_update_write_ptr - Tell the device about a Tx queue's
 *	new write pointer, honoring power-save state
 *
 * If the NIC may be asleep (STATUS_POWER_PMI), first check MAC_SLEEP and,
 * if sleeping, only request a wakeup -- the wakeup interrupt will retrigger
 * this path later.  Otherwise the write pointer is written directly.
 *
 * Returns 0 on success or if no update was needed, else the error from
 * iwl_grab_restricted_access().
 */
int iwl_tx_queue_update_write_ptr(struct iwl_priv *priv,
				  struct iwl_tx_queue *txq)
{
	u32 reg = 0;
	int rc = 0;
	int txq_id = txq->q.id;

	if (txq->need_update == 0)
		return rc;

	/* if we're trying to save power */
	if (test_bit(STATUS_POWER_PMI, &priv->status)) {
		/* wake up nic if it's powered down ...
		 * uCode will wake up, and interrupt us again, so next
		 * time we'll skip this part. */
		reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO("Requesting wakeup, GP1 = 0x%x\n", reg);
			iwl_set_bit(priv, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			/* need_update stays set; retried after wakeup IRQ */
			return rc;
		}

		/* restore this queue's parameters in nic hardware. */
		rc = iwl_grab_restricted_access(priv);
		if (rc)
			return rc;
		iwl_write_restricted(priv, HBUS_TARG_WRPTR,
				     txq->q.first_empty | (txq_id << 8));
		iwl_release_restricted_access(priv);

	/* else not in power-save mode, uCode will never sleep when we're
	 * trying to tx (during RFKILL, we're not trying to tx). */
	} else
		iwl_write32(priv, HBUS_TARG_WRPTR,
			    txq->q.first_empty | (txq_id << 8));

	txq->need_update = 0;

	return rc;
}
4390
#ifdef CONFIG_IWLWIFI_DEBUG
/* Dump the fields of an RXON command to the radio debug log.
 * (Stray version-control blame residue inside this function body has been
 * removed; it was not valid C and broke compilation of the extracted file.) */
static void iwl_print_rx_config_cmd(struct iwl_rxon_cmd *rxon)
{
	DECLARE_MAC_BUF(mac);

	IWL_DEBUG_RADIO("RX CONFIG:\n");
	iwl_print_hex_dump(IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
	IWL_DEBUG_RADIO("u16 channel: 0x%x\n", le16_to_cpu(rxon->channel));
	IWL_DEBUG_RADIO("u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
	IWL_DEBUG_RADIO("u32 filter_flags: 0x%08x\n",
			le32_to_cpu(rxon->filter_flags));
	IWL_DEBUG_RADIO("u8 dev_type: 0x%x\n", rxon->dev_type);
	IWL_DEBUG_RADIO("u8 ofdm_basic_rates: 0x%02x\n",
			rxon->ofdm_basic_rates);
	IWL_DEBUG_RADIO("u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates);
	IWL_DEBUG_RADIO("u8[6] node_addr: %s\n",
			print_mac(mac, rxon->node_addr));
	IWL_DEBUG_RADIO("u8[6] bssid_addr: %s\n",
			print_mac(mac, rxon->bssid_addr));
	IWL_DEBUG_RADIO("u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id));
}
#endif
4413
/* Unmask the full driver interrupt set (CSR_INI_SET_MASK) and note the
 * enabled state in priv->status */
static void iwl_enable_interrupts(struct iwl_priv *priv)
{
	IWL_DEBUG_ISR("Enabling interrupts\n");
	set_bit(STATUS_INT_ENABLED, &priv->status);
	iwl_write32(priv, CSR_INT_MASK, CSR_INI_SET_MASK);
}
4420
/* Mask all device interrupts and acknowledge anything already pending,
 * so nothing fires until iwl_enable_interrupts() is called again */
static inline void iwl_disable_interrupts(struct iwl_priv *priv)
{
	clear_bit(STATUS_INT_ENABLED, &priv->status);

	/* disable interrupts from uCode/NIC to host */
	iwl_write32(priv, CSR_INT_MASK, 0x00000000);

	/* acknowledge/clear/reset any interrupts still pending
	 * from uCode or flow handler (Rx/Tx DMA) */
	iwl_write32(priv, CSR_INT, 0xffffffff);
	iwl_write32(priv, CSR_FH_INT_STATUS, 0xffffffff);
	IWL_DEBUG_ISR("Disabled interrupts\n");
}
4434
/* Map a uCode error-log description code (1..6) to a human-readable
 * name; anything else is reported as "UNKNOWN". */
static const char *desc_lookup(int i)
{
	static const char * const err_names[] = {
		"FAIL",			/* 1 */
		"BAD_PARAM",		/* 2 */
		"BAD_CHECKSUM",		/* 3 */
		"NMI_INTERRUPT",	/* 4 */
		"SYSASSERT",		/* 5 */
		"FATAL_ERROR",		/* 6 */
	};

	if (i >= 1 && i <= 6)
		return err_names[i - 1];

	return "UNKNOWN";
}
4454
/* Error-log layout in SRAM: one u32 count, then 7-u32 records */
#define ERROR_START_OFFSET  (1 * sizeof(u32))
#define ERROR_ELEM_SIZE     (7 * sizeof(u32))

/* Read the uCode error-event table out of device SRAM and print each
 * record (description, timestamp, program counters, link registers,
 * extra data word) to the error log. */
static void iwl_dump_nic_error_log(struct iwl_priv *priv)
{
	u32 i;
	u32 desc, time, count, base, data1;
	u32 blink1, blink2, ilink1, ilink2;
	int rc;

	base = le32_to_cpu(priv->card_alive.error_event_table_ptr);

	if (!iwl_hw_valid_rtc_data_addr(base)) {
		IWL_ERROR("Not valid error log pointer 0x%08X\n", base);
		return;
	}

	/* SRAM reads require restricted (direct) access to the NIC */
	rc = iwl_grab_restricted_access(priv);
	if (rc) {
		IWL_WARNING("Can not read from adapter at this time.\n");
		return;
	}

	/* First word of the table is the record count */
	count = iwl_read_restricted_mem(priv, base);

	if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
		IWL_ERROR("Start IWL Error Log Dump:\n");
		IWL_ERROR("Status: 0x%08lX, Config: %08X count: %d\n",
			  priv->status, priv->config, count);
	}

	IWL_ERROR("Desc       Time       asrtPC  blink2 "
		  "ilink1  nmiPC   Line\n");
	for (i = ERROR_START_OFFSET;
	     i < (count * ERROR_ELEM_SIZE) + ERROR_START_OFFSET;
	     i += ERROR_ELEM_SIZE) {
		desc = iwl_read_restricted_mem(priv, base + i);
		time =
		    iwl_read_restricted_mem(priv, base + i + 1 * sizeof(u32));
		blink1 =
		    iwl_read_restricted_mem(priv, base + i + 2 * sizeof(u32));
		blink2 =
		    iwl_read_restricted_mem(priv, base + i + 3 * sizeof(u32));
		ilink1 =
		    iwl_read_restricted_mem(priv, base + i + 4 * sizeof(u32));
		ilink2 =
		    iwl_read_restricted_mem(priv, base + i + 5 * sizeof(u32));
		data1 =
		    iwl_read_restricted_mem(priv, base + i + 6 * sizeof(u32));

		IWL_ERROR
		    ("%-13s (#%d) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n",
		     desc_lookup(desc), desc, time, blink1, blink2,
		     ilink1, ilink2, data1);
	}

	iwl_release_restricted_access(priv);

}
4514
/* Event log data starts after a 4-u32 header (capacity, mode, wraps,
 * next-entry index) */
#define EVENT_START_OFFSET  (4 * sizeof(u32))

/**
 * iwl_print_event_log - Dump error event log to syslog
 *
 * Prints num_events entries starting at index start_idx.  Each entry is
 * 2 u32s (event id + data) in mode 0, or 3 u32s (id + timestamp + data)
 * otherwise.
 *
 * NOTE: Must be called with iwl_grab_restricted_access() already obtained!
 */
static void iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
				u32 num_events, u32 mode)
{
	u32 i;
	u32 base;       /* SRAM byte address of event log header */
	u32 event_size;	/* 2 u32s, or 3 u32s if timestamp recorded */
	u32 ptr;        /* SRAM byte address of log data */
	u32 ev, time, data; /* event log data */

	if (num_events == 0)
		return;

	base = le32_to_cpu(priv->card_alive.log_event_table_ptr);

	if (mode == 0)
		event_size = 2 * sizeof(u32);
	else
		event_size = 3 * sizeof(u32);

	ptr = base + EVENT_START_OFFSET + (start_idx * event_size);

	/* "time" is actually "data" for mode 0 (no timestamp).
	 * place event id # at far right for easier visual parsing. */
	for (i = 0; i < num_events; i++) {
		ev = iwl_read_restricted_mem(priv, ptr);
		ptr += sizeof(u32);
		time = iwl_read_restricted_mem(priv, ptr);
		ptr += sizeof(u32);
		if (mode == 0)
			IWL_ERROR("0x%08x\t%04u\n", time, ev); /* data, ev */
		else {
			data = iwl_read_restricted_mem(priv, ptr);
			ptr += sizeof(u32);
			IWL_ERROR("%010u\t0x%08x\t%04u\n", time, data, ev);
		}
	}
}
4559
/* Read the uCode event-log header from SRAM and print the log contents,
 * starting with the oldest entry if the log has wrapped. */
static void iwl_dump_nic_event_log(struct iwl_priv *priv)
{
	int rc;
	u32 base;       /* SRAM byte address of event log header */
	u32 capacity;   /* event log capacity in # entries */
	u32 mode;       /* 0 - no timestamp, 1 - timestamp recorded */
	u32 num_wraps;  /* # times uCode wrapped to top of log */
	u32 next_entry; /* index of next entry to be written by uCode */
	u32 size;       /* # entries that we'll print */

	base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
	if (!iwl_hw_valid_rtc_data_addr(base)) {
		IWL_ERROR("Invalid event log pointer 0x%08X\n", base);
		return;
	}

	/* SRAM reads require restricted (direct) access to the NIC */
	rc = iwl_grab_restricted_access(priv);
	if (rc) {
		IWL_WARNING("Can not read from adapter at this time.\n");
		return;
	}

	/* event log header */
	capacity = iwl_read_restricted_mem(priv, base);
	mode = iwl_read_restricted_mem(priv, base + (1 * sizeof(u32)));
	num_wraps = iwl_read_restricted_mem(priv, base + (2 * sizeof(u32)));
	next_entry = iwl_read_restricted_mem(priv, base + (3 * sizeof(u32)));

	size = num_wraps ? capacity : next_entry;

	/* bail out if nothing in log */
	if (size == 0) {
		IWL_ERROR("Start IPW Event Log Dump: nothing in log\n");
		iwl_release_restricted_access(priv);
		return;
	}

	IWL_ERROR("Start IPW Event Log Dump: display count %d, wraps %d\n",
		  size, num_wraps);

	/* if uCode has wrapped back to top of log, start at the oldest entry,
	 * i.e the next one that uCode would fill. */
	if (num_wraps)
		iwl_print_event_log(priv, next_entry,
				    capacity - next_entry, mode);

	/* (then/else) start at top of log */
	iwl_print_event_log(priv, 0, next_entry, mode);

	iwl_release_restricted_access(priv);
}
4611
4612/**
4613 * iwl_irq_handle_error - called for HW or SW error interrupt from card
4614 */
4615static void iwl_irq_handle_error(struct iwl_priv *priv)
4616{
4617 /* Set the FW error flag -- cleared on iwl_down */
4618 set_bit(STATUS_FW_ERROR, &priv->status);
4619
4620 /* Cancel currently queued command. */
4621 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
4622
4623#ifdef CONFIG_IWLWIFI_DEBUG
4624 if (iwl_debug_level & IWL_DL_FW_ERRORS) {
4625 iwl_dump_nic_error_log(priv);
4626 iwl_dump_nic_event_log(priv);
4627 iwl_print_rx_config_cmd(&priv->staging_rxon);
4628 }
4629#endif
4630
4631 wake_up_interruptible(&priv->wait_command_queue);
4632
4633 /* Keep the restart process from trying to send host
4634 * commands by clearing the INIT status bit */
4635 clear_bit(STATUS_READY, &priv->status);
4636
4637 if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
4638 IWL_DEBUG(IWL_DL_INFO | IWL_DL_FW_ERRORS,
4639 "Restarting adapter due to uCode error.\n");
4640
4641 if (iwl_is_associated(priv)) {
4642 memcpy(&priv->recovery_rxon, &priv->active_rxon,
4643 sizeof(priv->recovery_rxon));
4644 priv->error_recovering = 1;
4645 }
4646 queue_work(priv->workqueue, &priv->restart);
4647 }
4648}
4649
/* Re-establish the association lost across a uCode error restart:
 * replays the RXON snapshot saved by iwl_irq_handle_error() (first
 * unassociated, then committed) and re-adds the AP station entry. */
static void iwl_error_recovery(struct iwl_priv *priv)
{
	unsigned long flags;

	memcpy(&priv->staging_rxon, &priv->recovery_rxon,
	       sizeof(priv->staging_rxon));
	priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	iwl_commit_rxon(priv);

	iwl_rxon_add_station(priv, priv->bssid, 1);

	spin_lock_irqsave(&priv->lock, flags);
	priv->assoc_id = le16_to_cpu(priv->staging_rxon.assoc_id);
	priv->error_recovering = 0;
	spin_unlock_irqrestore(&priv->lock, flags);
}
4666
/* Bottom half of the interrupt path: scheduled by iwl_isr() with device
 * interrupts masked.  Reads and acks CSR_INT / CSR_FH_INT_STATUS, services
 * each discovered condition (HW/SW error, RF-kill, CT-kill, wakeup, Rx,
 * Tx), then re-enables interrupts.  Runs under priv->lock throughout. */
static void iwl_irq_tasklet(struct iwl_priv *priv)
{
	u32 inta, handled = 0;
	u32 inta_fh;
	unsigned long flags;
#ifdef CONFIG_IWLWIFI_DEBUG
	u32 inta_mask;
#endif

	spin_lock_irqsave(&priv->lock, flags);

	/* Ack/clear/reset pending uCode interrupts.
	 * Note:  Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
	 *  and will clear only when CSR_FH_INT_STATUS gets cleared. */
	inta = iwl_read32(priv, CSR_INT);
	iwl_write32(priv, CSR_INT, inta);

	/* Ack/clear/reset pending flow-handler (DMA) interrupts.
	 * Any new interrupts that happen after this, either while we're
	 * in this tasklet, or later, will show up in next ISR/tasklet. */
	inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
	iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh);

#ifdef CONFIG_IWLWIFI_DEBUG
	if (iwl_debug_level & IWL_DL_ISR) {
		inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
		IWL_DEBUG_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
			      inta, inta_mask, inta_fh);
	}
#endif

	/* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
	 * atomic, make sure that inta covers all the interrupts that
	 * we've discovered, even if FH interrupt came in just after
	 * reading CSR_INT. */
	if (inta_fh & CSR_FH_INT_RX_MASK)
		inta |= CSR_INT_BIT_FH_RX;
	if (inta_fh & CSR_FH_INT_TX_MASK)
		inta |= CSR_INT_BIT_FH_TX;

	/* Now service all interrupt bits discovered above. */
	if (inta & CSR_INT_BIT_HW_ERR) {
		IWL_ERROR("Microcode HW error detected.  Restarting.\n");

		/* Tell the device to stop sending interrupts */
		iwl_disable_interrupts(priv);

		iwl_irq_handle_error(priv);

		handled |= CSR_INT_BIT_HW_ERR;

		/* Fatal path: interrupts stay disabled until restart */
		spin_unlock_irqrestore(&priv->lock, flags);

		return;
	}

#ifdef CONFIG_IWLWIFI_DEBUG
	if (iwl_debug_level & (IWL_DL_ISR)) {
		/* NIC fires this, but we don't use it, redundant with WAKEUP */
		if (inta & CSR_INT_BIT_MAC_CLK_ACTV)
			IWL_DEBUG_ISR("Microcode started or stopped.\n");

		/* Alive notification via Rx interrupt will do the real work */
		if (inta & CSR_INT_BIT_ALIVE)
			IWL_DEBUG_ISR("Alive interrupt\n");
	}
#endif
	/* Safely ignore these bits for debug checks below */
	inta &= ~(CSR_INT_BIT_MAC_CLK_ACTV | CSR_INT_BIT_ALIVE);

	/* HW RF KILL switch toggled (4965 only) */
	if (inta & CSR_INT_BIT_RF_KILL) {
		int hw_rf_kill = 0;
		if (!(iwl_read32(priv, CSR_GP_CNTRL) &
				CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
			hw_rf_kill = 1;

		IWL_DEBUG(IWL_DL_INFO | IWL_DL_RF_KILL | IWL_DL_ISR,
				"RF_KILL bit toggled to %s.\n",
				hw_rf_kill ? "disable radio":"enable radio");

		/* Queue restart only if RF_KILL switch was set to "kill"
		 *   when we loaded driver, and is now set to "enable".
		 * After we're Alive, RF_KILL gets handled by
		 *   iwl_rx_card_state_notif() */
		if (!hw_rf_kill && !test_bit(STATUS_ALIVE, &priv->status))
			queue_work(priv->workqueue, &priv->restart);

		handled |= CSR_INT_BIT_RF_KILL;
	}

	/* Chip got too hot and stopped itself (4965 only) */
	if (inta & CSR_INT_BIT_CT_KILL) {
		IWL_ERROR("Microcode CT kill error detected.\n");
		handled |= CSR_INT_BIT_CT_KILL;
	}

	/* Error detected by uCode */
	if (inta & CSR_INT_BIT_SW_ERR) {
		IWL_ERROR("Microcode SW error detected.  Restarting 0x%X.\n",
			  inta);
		iwl_irq_handle_error(priv);
		handled |= CSR_INT_BIT_SW_ERR;
	}

	/* uCode wakes up after power-down sleep */
	if (inta & CSR_INT_BIT_WAKEUP) {
		IWL_DEBUG_ISR("Wakeup interrupt\n");
		/* Flush any write pointers deferred while the NIC slept */
		iwl_rx_queue_update_write_ptr(priv, &priv->rxq);
		iwl_tx_queue_update_write_ptr(priv, &priv->txq[0]);
		iwl_tx_queue_update_write_ptr(priv, &priv->txq[1]);
		iwl_tx_queue_update_write_ptr(priv, &priv->txq[2]);
		iwl_tx_queue_update_write_ptr(priv, &priv->txq[3]);
		iwl_tx_queue_update_write_ptr(priv, &priv->txq[4]);
		iwl_tx_queue_update_write_ptr(priv, &priv->txq[5]);

		handled |= CSR_INT_BIT_WAKEUP;
	}

	/* All uCode command responses, including Tx command responses,
	 * Rx "responses" (frame-received notification), and other
	 * notifications from uCode come through here*/
	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
		iwl_rx_handle(priv);
		handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
	}

	if (inta & CSR_INT_BIT_FH_TX) {
		IWL_DEBUG_ISR("Tx interrupt\n");

		iwl_write32(priv, CSR_FH_INT_STATUS, (1 << 6));
		if (!iwl_grab_restricted_access(priv)) {
			iwl_write_restricted(priv,
					     FH_TCSR_CREDIT
					     (ALM_FH_SRVC_CHNL), 0x0);
			iwl_release_restricted_access(priv);
		}
		handled |= CSR_INT_BIT_FH_TX;
	}

	if (inta & ~handled)
		IWL_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled);

	if (inta & ~CSR_INI_SET_MASK) {
		IWL_WARNING("Disabled INTA bits 0x%08x were pending\n",
			    inta & ~CSR_INI_SET_MASK);
		IWL_WARNING("   with FH_INT = 0x%08x\n", inta_fh);
	}

	/* Re-enable all interrupts */
	iwl_enable_interrupts(priv);

#ifdef CONFIG_IWLWIFI_DEBUG
	if (iwl_debug_level & (IWL_DL_ISR)) {
		inta = iwl_read32(priv, CSR_INT);
		inta_mask = iwl_read32(priv, CSR_INT_MASK);
		inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
		IWL_DEBUG_ISR("End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
			"flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
	}
#endif
	spin_unlock_irqrestore(&priv->lock, flags);
}
4830
/* Hard IRQ handler: masks device interrupts, checks whether this device
 * actually raised the interrupt (it may share the IRQ line), and if so
 * defers the real work to iwl_irq_tasklet().  Interrupts are re-enabled
 * here only when there was nothing to service. */
static irqreturn_t iwl_isr(int irq, void *data)
{
	struct iwl_priv *priv = data;
	u32 inta, inta_mask;
	u32 inta_fh;
	if (!priv)
		return IRQ_NONE;

	spin_lock(&priv->lock);

	/* Disable (but don't clear!) interrupts here to avoid
	 *    back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the tasklet will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here. */
	inta_mask = iwl_read32(priv, CSR_INT_MASK);  /* just for debug */
	iwl_write32(priv, CSR_INT_MASK, 0x00000000);

	/* Discover which interrupts are active/pending */
	inta = iwl_read32(priv, CSR_INT);
	inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);

	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	if (!inta && !inta_fh) {
		IWL_DEBUG_ISR("Ignore interrupt, inta == 0, inta_fh == 0\n");
		goto none;
	}

	if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
		/* Hardware disappeared */
		IWL_WARNING("HARDWARE GONE?? INTA == 0x%080x\n", inta);
		goto none;
	}

	IWL_DEBUG_ISR("ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
		      inta, inta_mask, inta_fh);

	/* iwl_irq_tasklet() will service interrupts and re-enable them */
	tasklet_schedule(&priv->irq_tasklet);
	spin_unlock(&priv->lock);

	return IRQ_HANDLED;

 none:
	/* re-enable interrupts here since we don't have anything to service. */
	iwl_enable_interrupts(priv);
	spin_unlock(&priv->lock);
	return IRQ_NONE;
}
4881
4882/************************** EEPROM BANDS ****************************
4883 *
4884 * The iwl_eeprom_band definitions below provide the mapping from the
4885 * EEPROM contents to the specific channel number supported for each
4886 * band.
4887 *
4888 * For example, iwl_priv->eeprom.band_3_channels[4] from the band_3
4889 * definition below maps to physical channel 42 in the 5.2GHz spectrum.
4890 * The specific geography and calibration information for that channel
4891 * is contained in the eeprom map itself.
4892 *
4893 * During init, we copy the eeprom information and channel map
4894 * information into priv->channel_info_24/52 and priv->channel_map_24/52
4895 *
4896 * channel_map_24/52 provides the index in the channel_info array for a
4897 * given channel. We have to have two separate maps as there is channel
4898 * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and
4899 * band_2
4900 *
4901 * A value of 0xff stored in the channel_map indicates that the channel
4902 * is not supported by the hardware at all.
4903 *
4904 * A value of 0xfe in the channel_map indicates that the channel is not
4905 * valid for Tx with the current hardware. This means that
4906 * while the system can tune and receive on a given channel, it may not
4907 * be able to associate or transmit any frames on that
4908 * channel. There is no corresponding channel information for that
4909 * entry.
4910 *
4911 *********************************************************************/
4912
/* 2.4 GHz */
static const u8 iwl_eeprom_band_1[14] = {
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
};

/* 5.2 GHz bands */
/* NOTE(review): the low entries (7, 8, 11, 12, 16) look like 5 GHz
 * channel numbering from the EEPROM layout, not 2.4 GHz channels --
 * confirm against the EEPROM band_2 definition */
static const u8 iwl_eeprom_band_2[] = {
	183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
};

static const u8 iwl_eeprom_band_3[] = {	/* 5205-5320MHz */
	34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
};

static const u8 iwl_eeprom_band_4[] = {	/* 5500-5700MHz */
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
};

static const u8 iwl_eeprom_band_5[] = {	/* 5725-5825MHz */
	145, 149, 153, 157, 161, 165
};
4934
4935static void iwl_init_band_reference(const struct iwl_priv *priv, int band,
4936 int *eeprom_ch_count,
4937 const struct iwl_eeprom_channel
4938 **eeprom_ch_info,
4939 const u8 **eeprom_ch_index)
4940{
4941 switch (band) {
4942 case 1: /* 2.4GHz band */
4943 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_1);
4944 *eeprom_ch_info = priv->eeprom.band_1_channels;
4945 *eeprom_ch_index = iwl_eeprom_band_1;
4946 break;
4947 case 2: /* 5.2GHz band */
4948 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_2);
4949 *eeprom_ch_info = priv->eeprom.band_2_channels;
4950 *eeprom_ch_index = iwl_eeprom_band_2;
4951 break;
4952 case 3: /* 5.2GHz band */
4953 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_3);
4954 *eeprom_ch_info = priv->eeprom.band_3_channels;
4955 *eeprom_ch_index = iwl_eeprom_band_3;
4956 break;
4957 case 4: /* 5.2GHz band */
4958 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_4);
4959 *eeprom_ch_info = priv->eeprom.band_4_channels;
4960 *eeprom_ch_index = iwl_eeprom_band_4;
4961 break;
4962 case 5: /* 5.2GHz band */
4963 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_5);
4964 *eeprom_ch_info = priv->eeprom.band_5_channels;
4965 *eeprom_ch_index = iwl_eeprom_band_5;
4966 break;
4967 default:
4968 BUG();
4969 return;
4970 }
4971}
4972
4973const struct iwl_channel_info *iwl_get_channel_info(const struct iwl_priv *priv,
4974 int phymode, u16 channel)
4975{
4976 int i;
4977
4978 switch (phymode) {
4979 case MODE_IEEE80211A:
4980 for (i = 14; i < priv->channel_count; i++) {
4981 if (priv->channel_info[i].channel == channel)
4982 return &priv->channel_info[i];
4983 }
4984 break;
4985
4986 case MODE_IEEE80211B:
4987 case MODE_IEEE80211G:
4988 if (channel >= 1 && channel <= 14)
4989 return &priv->channel_info[channel - 1];
4990 break;
4991
4992 }
4993
4994 return NULL;
4995}
4996
/* Expands to the flag's name (for logging) when set in the current
 * channel's EEPROM flags, else to the empty string */
#define CHECK_AND_PRINT(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \
			    ? # x " " : "")

/* Build priv->channel_info from the EEPROM's five band tables: copies
 * each channel's regulatory flags and max power, initializes the
 * run-time tx-power fields, and raises user_txpower_limit to the
 * highest per-channel max.  Returns 0 on success, -EINVAL for an
 * unsupported EEPROM version, -ENOMEM or -EIO on failure. */
static int iwl_init_channel_map(struct iwl_priv *priv)
{
	int eeprom_ch_count = 0;
	const u8 *eeprom_ch_index = NULL;
	const struct iwl_eeprom_channel *eeprom_ch_info = NULL;
	int band, ch;
	struct iwl_channel_info *ch_info;

	if (priv->channel_count) {
		IWL_DEBUG_INFO("Channel map already initialized.\n");
		return 0;
	}

	if (priv->eeprom.version < 0x2f) {
		IWL_WARNING("Unsupported EEPROM version: 0x%04X\n",
			    priv->eeprom.version);
		return -EINVAL;
	}

	IWL_DEBUG_INFO("Initializing regulatory info from EEPROM\n");

	priv->channel_count =
	    ARRAY_SIZE(iwl_eeprom_band_1) +
	    ARRAY_SIZE(iwl_eeprom_band_2) +
	    ARRAY_SIZE(iwl_eeprom_band_3) +
	    ARRAY_SIZE(iwl_eeprom_band_4) +
	    ARRAY_SIZE(iwl_eeprom_band_5);

	IWL_DEBUG_INFO("Parsing data for %d channels.\n", priv->channel_count);

	priv->channel_info = kzalloc(sizeof(struct iwl_channel_info) *
				     priv->channel_count, GFP_KERNEL);
	if (!priv->channel_info) {
		IWL_ERROR("Could not allocate channel_info\n");
		priv->channel_count = 0;
		return -ENOMEM;
	}

	ch_info = priv->channel_info;

	/* Loop through the 5 EEPROM bands adding them in order to the
	 * channel map we maintain (that contains additional information than
	 * what just in the EEPROM) */
	for (band = 1; band <= 5; band++) {

		iwl_init_band_reference(priv, band, &eeprom_ch_count,
					&eeprom_ch_info, &eeprom_ch_index);

		/* Loop through each band adding each of the channels */
		for (ch = 0; ch < eeprom_ch_count; ch++) {
			ch_info->channel = eeprom_ch_index[ch];
			ch_info->phymode = (band == 1) ? MODE_IEEE80211B :
			    MODE_IEEE80211A;

			/* permanently store EEPROM's channel regulatory flags
			 *   and max power in channel info database. */
			ch_info->eeprom = eeprom_ch_info[ch];

			/* Copy the run-time flags so they are there even on
			 * invalid channels */
			ch_info->flags = eeprom_ch_info[ch].flags;

			/* Invalid channels keep their flags but get no
			 * tx-power initialization */
			if (!(is_channel_valid(ch_info))) {
				IWL_DEBUG_INFO("Ch. %d Flags %x [%sGHz] - "
					       "No traffic\n",
					       ch_info->channel,
					       ch_info->flags,
					       is_channel_a_band(ch_info) ?
					       "5.2" : "2.4");
				ch_info++;
				continue;
			}

			/* Initialize regulatory-based run-time data */
			ch_info->max_power_avg = ch_info->curr_txpow =
			    eeprom_ch_info[ch].max_power_avg;
			ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
			ch_info->min_power = 0;

			IWL_DEBUG_INFO("Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x"
				       " %ddBm): Ad-Hoc %ssupported\n",
				       ch_info->channel,
				       is_channel_a_band(ch_info) ?
				       "5.2" : "2.4",
				       CHECK_AND_PRINT(IBSS),
				       CHECK_AND_PRINT(ACTIVE),
				       CHECK_AND_PRINT(RADAR),
				       CHECK_AND_PRINT(WIDE),
				       CHECK_AND_PRINT(NARROW),
				       CHECK_AND_PRINT(DFS),
				       eeprom_ch_info[ch].flags,
				       eeprom_ch_info[ch].max_power_avg,
				       ((eeprom_ch_info[ch].
					 flags & EEPROM_CHANNEL_IBSS)
					&& !(eeprom_ch_info[ch].
					     flags & EEPROM_CHANNEL_RADAR))
				       ? "" : "not ");

			/* Set the user_txpower_limit to the highest power
			 * supported by any channel */
			if (eeprom_ch_info[ch].max_power_avg >
			    priv->user_txpower_limit)
				priv->user_txpower_limit =
				    eeprom_ch_info[ch].max_power_avg;

			ch_info++;
		}
	}

	if (iwl3945_txpower_set_from_eeprom(priv))
		return -EIO;

	return 0;
}
5114
5115/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after
5116 * sending probe req. This should be set long enough to hear probe responses
5117 * from more than one AP. */
5118#define IWL_ACTIVE_DWELL_TIME_24 (20) /* all times in msec */
5119#define IWL_ACTIVE_DWELL_TIME_52 (10)
5120
5121/* For faster active scanning, scan will move to the next channel if fewer than
5122 * PLCP_QUIET_THRESH packets are heard on this channel within
5123 * ACTIVE_QUIET_TIME after sending probe request. This shortens the dwell
5124 * time if it's a quiet channel (nothing responded to our probe, and there's
5125 * no other traffic).
5126 * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */
5127#define IWL_PLCP_QUIET_THRESH __constant_cpu_to_le16(1) /* packets */
5128#define IWL_ACTIVE_QUIET_TIME __constant_cpu_to_le16(5) /* msec */
5129
5130/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel.
5131 * Must be set longer than active dwell time.
5132 * For the most reliable scan, set > AP beacon interval (typically 100msec). */
5133#define IWL_PASSIVE_DWELL_TIME_24 (20) /* all times in msec */
5134#define IWL_PASSIVE_DWELL_TIME_52 (10)
5135#define IWL_PASSIVE_DWELL_BASE (100)
5136#define IWL_CHANNEL_TUNE_TIME 5
5137
5138static inline u16 iwl_get_active_dwell_time(struct iwl_priv *priv, int phymode)
5139{
5140 if (phymode == MODE_IEEE80211A)
5141 return IWL_ACTIVE_DWELL_TIME_52;
5142 else
5143 return IWL_ACTIVE_DWELL_TIME_24;
5144}
5145
5146static u16 iwl_get_passive_dwell_time(struct iwl_priv *priv, int phymode)
5147{
5148 u16 active = iwl_get_active_dwell_time(priv, phymode);
5149 u16 passive = (phymode != MODE_IEEE80211A) ?
5150 IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 :
5151 IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52;
5152
5153 if (iwl_is_associated(priv)) {
5154 /* If we're associated, we clamp the maximum passive
5155 * dwell time to be 98% of the beacon interval (minus
5156 * 2 * channel tune time) */
5157 passive = priv->beacon_int;
5158 if ((passive > IWL_PASSIVE_DWELL_BASE) || !passive)
5159 passive = IWL_PASSIVE_DWELL_BASE;
5160 passive = (passive * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
5161 }
5162
5163 if (passive <= active)
5164 passive = active + 1;
5165
5166 return passive;
5167}
5168
/**
 * iwl_get_channels_for_scan - fill a SCAN command's channel list
 * @priv: driver private data
 * @phymode: which hw mode's channel list to scan
 * @is_active: non-zero to request active scanning where permitted
 * @direct_mask: direct-probe bitmask, shifted into active channel type
 * @scan_ch: output array of scan-channel entries to fill
 *
 * Walks the hw mode's channels, skipping the currently-tuned channel
 * when associated and any channel invalid for this SKU, and writes one
 * iwl_scan_channel entry (type, dwell times, default tx power) per
 * surviving channel.  Returns the number of entries written.
 */
static int iwl_get_channels_for_scan(struct iwl_priv *priv, int phymode,
				     u8 is_active, u8 direct_mask,
				     struct iwl_scan_channel *scan_ch)
{
	const struct ieee80211_channel *channels = NULL;
	const struct ieee80211_hw_mode *hw_mode;
	const struct iwl_channel_info *ch_info;
	u16 passive_dwell = 0;
	u16 active_dwell = 0;
	int added, i;

	hw_mode = iwl_get_hw_mode(priv, phymode);
	if (!hw_mode)
		return 0;

	channels = hw_mode->channels;

	/* Dwell times are per-phymode, computed once for all channels. */
	active_dwell = iwl_get_active_dwell_time(priv, phymode);
	passive_dwell = iwl_get_passive_dwell_time(priv, phymode);

	for (i = 0, added = 0; i < hw_mode->num_channels; i++) {
		if (channels[i].chan ==
		    le16_to_cpu(priv->active_rxon.channel)) {
			/* When associated, the current channel stays with
			 * the AP; do not scan it. */
			if (iwl_is_associated(priv)) {
				IWL_DEBUG_SCAN
				    ("Skipping current channel %d\n",
				     le16_to_cpu(priv->active_rxon.channel));
				continue;
			}
		} else if (priv->only_active_channel)
			continue;

		scan_ch->channel = channels[i].chan;

		ch_info = iwl_get_channel_info(priv, phymode, scan_ch->channel);
		if (!is_channel_valid(ch_info)) {
			IWL_DEBUG_SCAN("Channel %d is INVALID for this SKU.\n",
				       scan_ch->channel);
			continue;
		}

		/* Active scan only where requested AND the channel's
		 * regulatory flags allow it. */
		if (!is_active || is_channel_passive(ch_info) ||
		    !(channels[i].flag & IEEE80211_CHAN_W_ACTIVE_SCAN))
			scan_ch->type = 0;	/* passive */
		else
			scan_ch->type = 1;	/* active */

		/* Direct-probe SSID bits only apply to active channels. */
		if (scan_ch->type & 1)
			scan_ch->type |= (direct_mask << 1);

		/* NOTE(review): bit 7 appears to mark narrow channels for
		 * uCode -- confirm against the SCAN command spec. */
		if (is_channel_narrow(ch_info))
			scan_ch->type |= (1 << 7);

		scan_ch->active_dwell = cpu_to_le16(active_dwell);
		scan_ch->passive_dwell = cpu_to_le16(passive_dwell);

		/* Set power levels to defaults */
		scan_ch->tpc.dsp_atten = 110;
		/* scan_pwr_info->tpc.dsp_atten; */

		/*scan_pwr_info->tpc.tx_gain; */
		if (phymode == MODE_IEEE80211A)
			scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3;
		else {
			scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3));
			/* NOTE: if we were doing 6Mb OFDM for scans we'd use
			 * power level
			 scan_ch->tpc.tx_gain = ((1<<5) | (2 << 3)) | 3;
			 */
		}

		IWL_DEBUG_SCAN("Scanning %d [%s %d]\n",
			       scan_ch->channel,
			       (scan_ch->type & 1) ? "ACTIVE" : "PASSIVE",
			       (scan_ch->type & 1) ?
			       active_dwell : passive_dwell);

		scan_ch++;
		added++;
	}

	IWL_DEBUG_SCAN("total channels to scan %d \n", added);
	return added;
}
5253
5254static void iwl_reset_channel_flag(struct iwl_priv *priv)
5255{
5256 int i, j;
5257 for (i = 0; i < 3; i++) {
5258 struct ieee80211_hw_mode *hw_mode = (void *)&priv->modes[i];
5259 for (j = 0; j < hw_mode->num_channels; j++)
5260 hw_mode->channels[j].flag = hw_mode->channels[j].val;
5261 }
5262}
5263
5264static void iwl_init_hw_rates(struct iwl_priv *priv,
5265 struct ieee80211_rate *rates)
5266{
5267 int i;
5268
5269 for (i = 0; i < IWL_RATE_COUNT; i++) {
5270 rates[i].rate = iwl_rates[i].ieee * 5;
5271 rates[i].val = i; /* Rate scaling will work on indexes */
5272 rates[i].val2 = i;
5273 rates[i].flags = IEEE80211_RATE_SUPPORTED;
5274 /* Only OFDM have the bits-per-symbol set */
5275 if ((i <= IWL_LAST_OFDM_RATE) && (i >= IWL_FIRST_OFDM_RATE))
5276 rates[i].flags |= IEEE80211_RATE_OFDM;
5277 else {
5278 /*
5279 * If CCK 1M then set rate flag to CCK else CCK_2
5280 * which is CCK | PREAMBLE2
5281 */
5282 rates[i].flags |= (iwl_rates[i].plcp == 10) ?
5283 IEEE80211_RATE_CCK : IEEE80211_RATE_CCK_2;
5284 }
5285
5286 /* Set up which ones are basic rates... */
5287 if (IWL_BASIC_RATES_MASK & (1 << i))
5288 rates[i].flags |= IEEE80211_RATE_BASIC;
5289 }
5290}
5291
5292/**
5293 * iwl_init_geos - Initialize mac80211's geo/channel info based from eeprom
5294 */
/**
 * iwl_init_geos - Initialize mac80211's geo/channel info based from eeprom
 *
 * Allocates the three hw modes (A/B/G), the shared channel array and the
 * rate array, distributes priv->channel_info entries into the per-mode
 * channel lists, translates EEPROM channel flags into mac80211 channel
 * flags, registers the populated modes with mac80211, and sets
 * STATUS_GEO_CONFIGURED.  Returns 0 on success or -ENOMEM.
 */
static int iwl_init_geos(struct iwl_priv *priv)
{
	struct iwl_channel_info *ch;
	struct ieee80211_hw_mode *modes;
	struct ieee80211_channel *channels;
	struct ieee80211_channel *geo_ch;
	struct ieee80211_rate *rates;
	int i = 0;
	enum {
		A = 0,
		B = 1,
		G = 2,
	};
	int mode_count = 3;

	/* Idempotent: just re-assert the status bit if already set up. */
	if (priv->modes) {
		IWL_DEBUG_INFO("Geography modes already initialized.\n");
		set_bit(STATUS_GEO_CONFIGURED, &priv->status);
		return 0;
	}

	modes = kzalloc(sizeof(struct ieee80211_hw_mode) * mode_count,
			GFP_KERNEL);
	if (!modes)
		return -ENOMEM;

	channels = kzalloc(sizeof(struct ieee80211_channel) *
			   priv->channel_count, GFP_KERNEL);
	if (!channels) {
		kfree(modes);
		return -ENOMEM;
	}

	rates = kzalloc((sizeof(struct ieee80211_rate) * (IWL_MAX_RATES + 1)),
			GFP_KERNEL);
	if (!rates) {
		kfree(modes);
		kfree(channels);
		return -ENOMEM;
	}

	/* 0 = 802.11a
	 * 1 = 802.11b
	 * 2 = 802.11g
	 */

	/* 5.2GHz channels start after the 2.4GHz channels */
	modes[A].mode = MODE_IEEE80211A;
	modes[A].channels = &channels[ARRAY_SIZE(iwl_eeprom_band_1)];
	modes[A].rates = rates;
	modes[A].num_rates = 8;	/* just OFDM */
	modes[A].num_channels = 0;

	modes[B].mode = MODE_IEEE80211B;
	modes[B].channels = channels;
	modes[B].rates = &rates[8];
	modes[B].num_rates = 4;	/* just CCK */
	modes[B].num_channels = 0;

	/* G shares B's channel array and spans the full rate table. */
	modes[G].mode = MODE_IEEE80211G;
	modes[G].channels = channels;
	modes[G].rates = rates;
	modes[G].num_rates = 12;	/* OFDM & CCK */
	modes[G].num_channels = 0;

	priv->ieee_channels = channels;
	priv->ieee_rates = rates;

	iwl_init_hw_rates(priv, rates);

	for (i = 0, geo_ch = channels; i < priv->channel_count; i++) {
		ch = &priv->channel_info[i];

		if (!is_channel_valid(ch)) {
			IWL_DEBUG_INFO("Channel %d [%sGHz] is restricted -- "
				       "skipping.\n",
				       ch->channel, is_channel_a_band(ch) ?
				       "5.2" : "2.4");
			continue;
		}

		/* 2.4 GHz channels are shared between B and G, so a B
		 * channel also bumps G's count. */
		if (is_channel_a_band(ch))
			geo_ch = &modes[A].channels[modes[A].num_channels++];
		else {
			geo_ch = &modes[B].channels[modes[B].num_channels++];
			modes[G].num_channels++;
		}

		geo_ch->freq = ieee80211chan2mhz(ch->channel);
		geo_ch->chan = ch->channel;
		geo_ch->power_level = ch->max_power_avg;
		geo_ch->antenna_max = 0xff;

		/* NOTE(review): always true here -- invalid channels were
		 * skipped above with continue. */
		if (is_channel_valid(ch)) {
			geo_ch->flag = IEEE80211_CHAN_W_SCAN;
			if (ch->flags & EEPROM_CHANNEL_IBSS)
				geo_ch->flag |= IEEE80211_CHAN_W_IBSS;

			if (ch->flags & EEPROM_CHANNEL_ACTIVE)
				geo_ch->flag |= IEEE80211_CHAN_W_ACTIVE_SCAN;

			if (ch->flags & EEPROM_CHANNEL_RADAR)
				geo_ch->flag |= IEEE80211_CHAN_W_RADAR_DETECT;

			if (ch->max_power_avg > priv->max_channel_txpower_limit)
				priv->max_channel_txpower_limit =
				    ch->max_power_avg;
		}

		/* Save flags so iwl_reset_channel_flag() can restore them. */
		geo_ch->val = geo_ch->flag;
	}

	if ((modes[A].num_channels == 0) && priv->is_abg) {
		printk(KERN_INFO DRV_NAME
		       ": Incorrectly detected BG card as ABG. Please send "
		       "your PCI ID 0x%04X:0x%04X to maintainer.\n",
		       priv->pci_dev->device, priv->pci_dev->subsystem_device);
		priv->is_abg = 0;
	}

	printk(KERN_INFO DRV_NAME
	       ": Tunable channels: %d 802.11bg, %d 802.11a channels\n",
	       modes[G].num_channels, modes[A].num_channels);

	/*
	 * NOTE: We register these in preference of order -- the
	 * stack doesn't currently (as of 7.0.6 / Apr 24 '07) pick
	 * a phymode based on rates or AP capabilities but seems to
	 * configure it purely on if the channel being configured
	 * is supported by a mode -- and the first match is taken
	 */

	if (modes[G].num_channels)
		ieee80211_register_hwmode(priv->hw, &modes[G]);
	if (modes[B].num_channels)
		ieee80211_register_hwmode(priv->hw, &modes[B]);
	if (modes[A].num_channels)
		ieee80211_register_hwmode(priv->hw, &modes[A]);

	priv->modes = modes;
	set_bit(STATUS_GEO_CONFIGURED, &priv->status);

	return 0;
}
5439
5440/******************************************************************************
5441 *
5442 * uCode download functions
5443 *
5444 ******************************************************************************/
5445
5446static void iwl_dealloc_ucode_pci(struct iwl_priv *priv)
5447{
5448 if (priv->ucode_code.v_addr != NULL) {
5449 pci_free_consistent(priv->pci_dev,
5450 priv->ucode_code.len,
5451 priv->ucode_code.v_addr,
5452 priv->ucode_code.p_addr);
5453 priv->ucode_code.v_addr = NULL;
5454 }
5455 if (priv->ucode_data.v_addr != NULL) {
5456 pci_free_consistent(priv->pci_dev,
5457 priv->ucode_data.len,
5458 priv->ucode_data.v_addr,
5459 priv->ucode_data.p_addr);
5460 priv->ucode_data.v_addr = NULL;
5461 }
5462 if (priv->ucode_data_backup.v_addr != NULL) {
5463 pci_free_consistent(priv->pci_dev,
5464 priv->ucode_data_backup.len,
5465 priv->ucode_data_backup.v_addr,
5466 priv->ucode_data_backup.p_addr);
5467 priv->ucode_data_backup.v_addr = NULL;
5468 }
5469 if (priv->ucode_init.v_addr != NULL) {
5470 pci_free_consistent(priv->pci_dev,
5471 priv->ucode_init.len,
5472 priv->ucode_init.v_addr,
5473 priv->ucode_init.p_addr);
5474 priv->ucode_init.v_addr = NULL;
5475 }
5476 if (priv->ucode_init_data.v_addr != NULL) {
5477 pci_free_consistent(priv->pci_dev,
5478 priv->ucode_init_data.len,
5479 priv->ucode_init_data.v_addr,
5480 priv->ucode_init_data.p_addr);
5481 priv->ucode_init_data.v_addr = NULL;
5482 }
5483 if (priv->ucode_boot.v_addr != NULL) {
5484 pci_free_consistent(priv->pci_dev,
5485 priv->ucode_boot.len,
5486 priv->ucode_boot.v_addr,
5487 priv->ucode_boot.p_addr);
5488 priv->ucode_boot.v_addr = NULL;
5489 }
5490}
5491
5492/**
5493 * iwl_verify_inst_full - verify runtime uCode image in card vs. host,
5494 * looking at all data.
5495 */
/**
 * iwl_verify_inst_full - compare every word of an image against inst SRAM
 * @image: host copy of the uCode instruction image (little-endian words)
 * @len: image length in bytes
 *
 * Reads instruction SRAM back through the auto-incrementing target-memory
 * port and compares word by word, logging up to 20 mismatches.
 * Returns 0 if the image matches, -EIO on any mismatch, or the error
 * from iwl_grab_restricted_access().
 */
static int iwl_verify_inst_full(struct iwl_priv *priv, __le32 * image, u32 len)
{
	u32 val;
	u32 save_len = len;
	int rc = 0;
	u32 errcnt;

	IWL_DEBUG_INFO("ucode inst image size is %u\n", len);

	rc = iwl_grab_restricted_access(priv);
	if (rc)
		return rc;

	/* Set read address once; HBUS_TARG_MEM_RDAT auto-increments. */
	iwl_write_restricted(priv, HBUS_TARG_MEM_RADDR, RTC_INST_LOWER_BOUND);

	errcnt = 0;
	for (; len > 0; len -= sizeof(u32), image++) {
		/* read data comes through single port, auto-incr addr */
		/* NOTE: Use the debugless read so we don't flood kernel log
		 * if IWL_DL_IO is set */
		val = _iwl_read_restricted(priv, HBUS_TARG_MEM_RDAT);
		if (val != le32_to_cpu(*image)) {
			IWL_ERROR("uCode INST section is invalid at "
				  "offset 0x%x, is 0x%x, s/b 0x%x\n",
				  save_len - len, val, le32_to_cpu(*image));
			rc = -EIO;
			errcnt++;
			/* Cap log spam; 20 bad words is diagnostic enough. */
			if (errcnt >= 20)
				break;
		}
	}

	iwl_release_restricted_access(priv);

	if (!errcnt)
		IWL_DEBUG_INFO
		    ("ucode image in INSTRUCTION memory is good\n");

	return rc;
}
5536
5537
5538/**
5539 * iwl_verify_inst_sparse - verify runtime uCode image in card vs. host,
5540 * using sample data 100 bytes apart. If these sample points are good,
5541 * it's a pretty good bet that everything between them is good, too.
5542 */
/**
 * iwl_verify_inst_sparse - spot-check a uCode image against inst SRAM
 * @image: host copy of the uCode instruction image (little-endian words)
 * @len: image length in bytes
 *
 * Samples one word every 100 bytes; if the samples match, the image
 * between them is almost certainly intact too.  Returns 0 if all samples
 * match, -EIO after 3 mismatches, or the error from
 * iwl_grab_restricted_access().
 */
static int iwl_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
{
	u32 val;
	int rc = 0;
	u32 errcnt = 0;
	u32 i;

	IWL_DEBUG_INFO("ucode inst image size is %u\n", len);

	rc = iwl_grab_restricted_access(priv);
	if (rc)
		return rc;

	for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
		/* read data comes through single port, auto-incr addr */
		/* NOTE: Use the debugless read so we don't flood kernel log
		 * if IWL_DL_IO is set */
		iwl_write_restricted(priv, HBUS_TARG_MEM_RADDR,
			i + RTC_INST_LOWER_BOUND);
		val = _iwl_read_restricted(priv, HBUS_TARG_MEM_RDAT);
		if (val != le32_to_cpu(*image)) {
#if 0 /* Enable this if you want to see details */
			IWL_ERROR("uCode INST section is invalid at "
				  "offset 0x%x, is 0x%x, s/b 0x%x\n",
				  i, val, *image);
#endif
			rc = -EIO;
			errcnt++;
			/* Mismatch is expected when a different image is
			 * loaded; bail quickly so the caller can try the
			 * next candidate. */
			if (errcnt >= 3)
				break;
		}
	}

	iwl_release_restricted_access(priv);

	return rc;
}
5580
5581
5582/**
5583 * iwl_verify_ucode - determine which instruction image is in SRAM,
5584 * and verify its contents
5585 */
/**
 * iwl_verify_ucode - determine which instruction image is in SRAM,
 * and verify its contents
 *
 * Sparse-checks each of the three host images (bootstrap, initialize,
 * runtime) against instruction SRAM; returns 0 on the first match.
 * If none match, runs a full word-by-word check of the bootstrap image
 * purely to log diagnostic detail, and returns that error.
 */
static int iwl_verify_ucode(struct iwl_priv *priv)
{
	__le32 *image;
	u32 len;
	int rc = 0;

	/* Try bootstrap */
	image = (__le32 *)priv->ucode_boot.v_addr;
	len = priv->ucode_boot.len;
	rc = iwl_verify_inst_sparse(priv, image, len);
	if (rc == 0) {
		IWL_DEBUG_INFO("Bootstrap uCode is good in inst SRAM\n");
		return 0;
	}

	/* Try initialize */
	image = (__le32 *)priv->ucode_init.v_addr;
	len = priv->ucode_init.len;
	rc = iwl_verify_inst_sparse(priv, image, len);
	if (rc == 0) {
		IWL_DEBUG_INFO("Initialize uCode is good in inst SRAM\n");
		return 0;
	}

	/* Try runtime/protocol */
	image = (__le32 *)priv->ucode_code.v_addr;
	len = priv->ucode_code.len;
	rc = iwl_verify_inst_sparse(priv, image, len);
	if (rc == 0) {
		IWL_DEBUG_INFO("Runtime uCode is good in inst SRAM\n");
		return 0;
	}

	IWL_ERROR("NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");

	/* Show first several data entries in instruction SRAM.
	 * Selection of bootstrap image is arbitrary. */
	image = (__le32 *)priv->ucode_boot.v_addr;
	len = priv->ucode_boot.len;
	rc = iwl_verify_inst_full(priv, image, len);

	return rc;
}
5629
5630
/**
 * iwl_verify_bsm - check contents of special bootstrap uCode SRAM
 *
 * Reads back the BSM SRAM region word by word and compares it against
 * the host bootstrap image.  Caller must already hold restricted access.
 * Returns 0 if the SRAM matches, -EIO on the first mismatch.
 */
static int iwl_verify_bsm(struct iwl_priv *priv)
{
	__le32 *image = priv->ucode_boot.v_addr;
	u32 len = priv->ucode_boot.len;
	u32 reg;
	u32 val;

	IWL_DEBUG_INFO("Begin verify bsm\n");

	/* verify BSM SRAM contents */
	/* NOTE(review): this read's result is never used (val is
	 * overwritten in the loop); presumably a priming read -- confirm
	 * before removing. */
	val = iwl_read_restricted_reg(priv, BSM_WR_DWCOUNT_REG);
	for (reg = BSM_SRAM_LOWER_BOUND;
	     reg < BSM_SRAM_LOWER_BOUND + len;
	     reg += sizeof(u32), image ++) {
		val = iwl_read_restricted_reg(priv, reg);
		if (val != le32_to_cpu(*image)) {
			IWL_ERROR("BSM uCode verification failed at "
				  "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
				  BSM_SRAM_LOWER_BOUND,
				  reg - BSM_SRAM_LOWER_BOUND, len,
				  val, le32_to_cpu(*image));
			return -EIO;
		}
	}

	IWL_DEBUG_INFO("BSM bootstrap uCode image OK\n");

	return 0;
}
5661
5662/**
5663 * iwl_load_bsm - Load bootstrap instructions
5664 *
5665 * BSM operation:
5666 *
5667 * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
5668 * in special SRAM that does not power down during RFKILL. When powering back
5669 * up after power-saving sleeps (or during initial uCode load), the BSM loads
5670 * the bootstrap program into the on-board processor, and starts it.
5671 *
5672 * The bootstrap program loads (via DMA) instructions and data for a new
5673 * program from host DRAM locations indicated by the host driver in the
5674 * BSM_DRAM_* registers. Once the new program is loaded, it starts
5675 * automatically.
5676 *
5677 * When initializing the NIC, the host driver points the BSM to the
5678 * "initialize" uCode image. This uCode sets up some internal data, then
5679 * notifies host via "initialize alive" that it is complete.
5680 *
5681 * The host then replaces the BSM_DRAM_* pointer values to point to the
5682 * normal runtime uCode instructions and a backup uCode data cache buffer
5683 * (filled initially with starting data values for the on-board processor),
5684 * then triggers the "initialize" uCode to load and launch the runtime uCode,
5685 * which begins normal operation.
5686 *
5687 * When doing a power-save shutdown, runtime uCode saves data SRAM into
5688 * the backup data cache in DRAM before SRAM is powered down.
5689 *
5690 * When powering back up, the BSM loads the bootstrap program. This reloads
5691 * the runtime uCode instructions and the backup data cache into SRAM,
5692 * and re-launches the runtime uCode from where it left off.
5693 */
5694static int iwl_load_bsm(struct iwl_priv *priv)
5695{
5696 __le32 *image = priv->ucode_boot.v_addr;
5697 u32 len = priv->ucode_boot.len;
5698 dma_addr_t pinst;
5699 dma_addr_t pdata;
5700 u32 inst_len;
5701 u32 data_len;
5702 int rc;
5703 int i;
5704 u32 done;
5705 u32 reg_offset;
5706
5707 IWL_DEBUG_INFO("Begin load bsm\n");
5708
5709 /* make sure bootstrap program is no larger than BSM's SRAM size */
5710 if (len > IWL_MAX_BSM_SIZE)
5711 return -EINVAL;
5712
5713 /* Tell bootstrap uCode where to find the "Initialize" uCode
5714 * in host DRAM ... bits 31:0 for 3945, bits 35:4 for 4965.
5715 * NOTE: iwl_initialize_alive_start() will replace these values,
5716 * after the "initialize" uCode has run, to point to
5717 * runtime/protocol instructions and backup data cache. */
5718 pinst = priv->ucode_init.p_addr;
5719 pdata = priv->ucode_init_data.p_addr;
5720 inst_len = priv->ucode_init.len;
5721 data_len = priv->ucode_init_data.len;
5722
5723 rc = iwl_grab_restricted_access(priv);
5724 if (rc)
5725 return rc;
5726
5727 iwl_write_restricted_reg(priv, BSM_DRAM_INST_PTR_REG, pinst);
5728 iwl_write_restricted_reg(priv, BSM_DRAM_DATA_PTR_REG, pdata);
5729 iwl_write_restricted_reg(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
5730 iwl_write_restricted_reg(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
5731
5732 /* Fill BSM memory with bootstrap instructions */
5733 for (reg_offset = BSM_SRAM_LOWER_BOUND;
5734 reg_offset < BSM_SRAM_LOWER_BOUND + len;
5735 reg_offset += sizeof(u32), image++)
5736 _iwl_write_restricted_reg(priv, reg_offset,
5737 le32_to_cpu(*image));
5738
5739 rc = iwl_verify_bsm(priv);
5740 if (rc) {
5741 iwl_release_restricted_access(priv);
5742 return rc;
5743 }
5744
5745 /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
5746 iwl_write_restricted_reg(priv, BSM_WR_MEM_SRC_REG, 0x0);
5747 iwl_write_restricted_reg(priv, BSM_WR_MEM_DST_REG,
5748 RTC_INST_LOWER_BOUND);
5749 iwl_write_restricted_reg(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
5750
5751 /* Load bootstrap code into instruction SRAM now,
5752 * to prepare to load "initialize" uCode */
5753 iwl_write_restricted_reg(priv, BSM_WR_CTRL_REG,
5754 BSM_WR_CTRL_REG_BIT_START);
5755
5756 /* Wait for load of bootstrap uCode to finish */
5757 for (i = 0; i < 100; i++) {
5758 done = iwl_read_restricted_reg(priv, BSM_WR_CTRL_REG);
5759 if (!(done & BSM_WR_CTRL_REG_BIT_START))
5760 break;
5761 udelay(10);
5762 }
5763 if (i < 100)
5764 IWL_DEBUG_INFO("BSM write complete, poll %d iterations\n", i);
5765 else {
5766 IWL_ERROR("BSM write did not complete!\n");
5767 return -EIO;
5768 }
5769
5770 /* Enable future boot loads whenever power management unit triggers it
5771 * (e.g. when powering back up after power-save shutdown) */
5772 iwl_write_restricted_reg(priv, BSM_WR_CTRL_REG,
5773 BSM_WR_CTRL_REG_BIT_START_EN);
5774
5775 iwl_release_restricted_access(priv);
5776
5777 return 0;
5778}
5779
/* Clear CSR_RESET entirely so the NIC's on-board processor can run. */
static void iwl_nic_start(struct iwl_priv *priv)
{
	/* Remove all resets to allow NIC to operate */
	iwl_write32(priv, CSR_RESET, 0);
}
5785
5786/**
5787 * iwl_read_ucode - Read uCode images from disk file.
5788 *
5789 * Copy into buffers for card to fetch via bus-mastering
5790 */
/**
 * iwl_read_ucode - Read uCode images from disk file.
 *
 * Fetches the firmware file via request_firmware(), validates the header
 * and the five image sizes against file size and SRAM limits, allocates a
 * PCI-consistent buffer per image (plus a backup copy of runtime data),
 * and copies each image out of the file.
 *
 * Returns 0 on success; on failure returns the request_firmware() error,
 * -EINVAL for a malformed/oversized file, or -ENOMEM (after freeing any
 * partial allocations).
 */
static int iwl_read_ucode(struct iwl_priv *priv)
{
	struct iwl_ucode *ucode;
	int rc = 0;
	const struct firmware *ucode_raw;
	/* firmware file name contains uCode/driver compatibility version */
	const char *name = "iwlwifi-3945" IWL3945_UCODE_API ".ucode";
	u8 *src;
	size_t len;
	u32 ver, inst_size, data_size, init_size, init_data_size, boot_size;

	/* Ask kernel firmware_class module to get the boot firmware off disk.
	 * request_firmware() is synchronous, file is in memory on return. */
	rc = request_firmware(&ucode_raw, name, &priv->pci_dev->dev);
	if (rc < 0) {
		IWL_ERROR("%s firmware file req failed: Reason %d\n", name, rc);
		goto error;
	}

	IWL_DEBUG_INFO("Got firmware '%s' file (%zd bytes) from disk\n",
		       name, ucode_raw->size);

	/* Make sure that we got at least our header! */
	if (ucode_raw->size < sizeof(*ucode)) {
		IWL_ERROR("File size way too small!\n");
		rc = -EINVAL;
		goto err_release;
	}

	/* Data from ucode file: header followed by uCode images */
	ucode = (void *)ucode_raw->data;

	/* Image sizes in the header are little-endian on disk. */
	ver = le32_to_cpu(ucode->ver);
	inst_size = le32_to_cpu(ucode->inst_size);
	data_size = le32_to_cpu(ucode->data_size);
	init_size = le32_to_cpu(ucode->init_size);
	init_data_size = le32_to_cpu(ucode->init_data_size);
	boot_size = le32_to_cpu(ucode->boot_size);

	IWL_DEBUG_INFO("f/w package hdr ucode version = 0x%x\n", ver);
	IWL_DEBUG_INFO("f/w package hdr runtime inst size = %u\n",
		       inst_size);
	IWL_DEBUG_INFO("f/w package hdr runtime data size = %u\n",
		       data_size);
	IWL_DEBUG_INFO("f/w package hdr init inst size = %u\n",
		       init_size);
	IWL_DEBUG_INFO("f/w package hdr init data size = %u\n",
		       init_data_size);
	IWL_DEBUG_INFO("f/w package hdr boot inst size = %u\n",
		       boot_size);

	/* Verify size of file vs. image size info in file's header */
	if (ucode_raw->size < sizeof(*ucode) +
		inst_size + data_size + init_size +
		init_data_size + boot_size) {

		IWL_DEBUG_INFO("uCode file size %d too small\n",
			       (int)ucode_raw->size);
		rc = -EINVAL;
		goto err_release;
	}

	/* Verify that uCode images will fit in card's SRAM */
	if (inst_size > IWL_MAX_INST_SIZE) {
		IWL_DEBUG_INFO("uCode instr len %d too large to fit in card\n",
			       (int)inst_size);
		rc = -EINVAL;
		goto err_release;
	}

	if (data_size > IWL_MAX_DATA_SIZE) {
		IWL_DEBUG_INFO("uCode data len %d too large to fit in card\n",
			       (int)data_size);
		rc = -EINVAL;
		goto err_release;
	}
	if (init_size > IWL_MAX_INST_SIZE) {
		IWL_DEBUG_INFO
		    ("uCode init instr len %d too large to fit in card\n",
		     (int)init_size);
		rc = -EINVAL;
		goto err_release;
	}
	if (init_data_size > IWL_MAX_DATA_SIZE) {
		IWL_DEBUG_INFO
		    ("uCode init data len %d too large to fit in card\n",
		     (int)init_data_size);
		rc = -EINVAL;
		goto err_release;
	}
	if (boot_size > IWL_MAX_BSM_SIZE) {
		IWL_DEBUG_INFO
		    ("uCode boot instr len %d too large to fit in bsm\n",
		     (int)boot_size);
		rc = -EINVAL;
		goto err_release;
	}

	/* Allocate ucode buffers for card's bus-master loading ... */

	/* Runtime instructions and 2 copies of data:
	 * 1) unmodified from disk
	 * 2) backup cache for save/restore during power-downs */
	priv->ucode_code.len = inst_size;
	priv->ucode_code.v_addr =
	    pci_alloc_consistent(priv->pci_dev,
				 priv->ucode_code.len,
				 &(priv->ucode_code.p_addr));

	priv->ucode_data.len = data_size;
	priv->ucode_data.v_addr =
	    pci_alloc_consistent(priv->pci_dev,
				 priv->ucode_data.len,
				 &(priv->ucode_data.p_addr));

	priv->ucode_data_backup.len = data_size;
	priv->ucode_data_backup.v_addr =
	    pci_alloc_consistent(priv->pci_dev,
				 priv->ucode_data_backup.len,
				 &(priv->ucode_data_backup.p_addr));


	/* Initialization instructions and data */
	priv->ucode_init.len = init_size;
	priv->ucode_init.v_addr =
	    pci_alloc_consistent(priv->pci_dev,
				 priv->ucode_init.len,
				 &(priv->ucode_init.p_addr));

	priv->ucode_init_data.len = init_data_size;
	priv->ucode_init_data.v_addr =
	    pci_alloc_consistent(priv->pci_dev,
				 priv->ucode_init_data.len,
				 &(priv->ucode_init_data.p_addr));

	/* Bootstrap (instructions only, no data) */
	priv->ucode_boot.len = boot_size;
	priv->ucode_boot.v_addr =
	    pci_alloc_consistent(priv->pci_dev,
				 priv->ucode_boot.len,
				 &(priv->ucode_boot.p_addr));

	/* Check all six allocations together; partial success is
	 * unwound by iwl_dealloc_ucode_pci below. */
	if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr ||
	    !priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr ||
	    !priv->ucode_boot.v_addr || !priv->ucode_data_backup.v_addr)
		goto err_pci_alloc;

	/* Copy images into buffers for card's bus-master reads ... */

	/* Runtime instructions (first block of data in file) */
	src = &ucode->data[0];
	len = priv->ucode_code.len;
	IWL_DEBUG_INFO("Copying (but not loading) uCode instr len %d\n",
		       (int)len);
	memcpy(priv->ucode_code.v_addr, src, len);
	IWL_DEBUG_INFO("uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
		priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr);

	/* Runtime data (2nd block)
	 * NOTE: Copy into backup buffer will be done in iwl_up()  */
	src = &ucode->data[inst_size];
	len = priv->ucode_data.len;
	IWL_DEBUG_INFO("Copying (but not loading) uCode data len %d\n",
		       (int)len);
	memcpy(priv->ucode_data.v_addr, src, len);
	memcpy(priv->ucode_data_backup.v_addr, src, len);

	/* Initialization instructions (3rd block) */
	if (init_size) {
		src = &ucode->data[inst_size + data_size];
		len = priv->ucode_init.len;
		IWL_DEBUG_INFO("Copying (but not loading) init instr len %d\n",
			       (int)len);
		memcpy(priv->ucode_init.v_addr, src, len);
	}

	/* Initialization data (4th block) */
	if (init_data_size) {
		src = &ucode->data[inst_size + data_size + init_size];
		len = priv->ucode_init_data.len;
		IWL_DEBUG_INFO("Copying (but not loading) init data len %d\n",
			       (int)len);
		memcpy(priv->ucode_init_data.v_addr, src, len);
	}

	/* Bootstrap instructions (5th block) */
	src = &ucode->data[inst_size + data_size + init_size + init_data_size];
	len = priv->ucode_boot.len;
	IWL_DEBUG_INFO("Copying (but not loading) boot instr len %d\n",
		       (int)len);
	memcpy(priv->ucode_boot.v_addr, src, len);

	/* We have our copies now, allow OS release its copies */
	release_firmware(ucode_raw);
	return 0;

 err_pci_alloc:
	IWL_ERROR("failed to allocate pci memory\n");
	rc = -ENOMEM;
	iwl_dealloc_ucode_pci(priv);

 err_release:
	release_firmware(ucode_raw);

 error:
	return rc;
}
5998
5999
6000/**
6001 * iwl_set_ucode_ptrs - Set uCode address location
6002 *
6003 * Tell initialization uCode where to find runtime uCode.
6004 *
6005 * BSM registers initially contain pointers to initialization uCode.
6006 * We need to replace them to load runtime uCode inst and data,
6007 * and to save runtime data when powering down.
6008 */
/* See the kernel-doc comment above: repoints the BSM DRAM registers from
 * the "initialize" image to the runtime image and backup data cache,
 * under priv->lock with restricted access held.  Returns 0 or the
 * restricted-access error. */
static int iwl_set_ucode_ptrs(struct iwl_priv *priv)
{
	dma_addr_t pinst;
	dma_addr_t pdata;
	int rc = 0;
	unsigned long flags;

	/* bits 31:0 for 3945 */
	pinst = priv->ucode_code.p_addr;
	pdata = priv->ucode_data_backup.p_addr;

	/* Lock ordering: spinlock first, then restricted access;
	 * released in reverse order below. */
	spin_lock_irqsave(&priv->lock, flags);
	rc = iwl_grab_restricted_access(priv);
	if (rc) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return rc;
	}

	/* Tell bootstrap uCode where to find image to load */
	iwl_write_restricted_reg(priv, BSM_DRAM_INST_PTR_REG, pinst);
	iwl_write_restricted_reg(priv, BSM_DRAM_DATA_PTR_REG, pdata);
	iwl_write_restricted_reg(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
				 priv->ucode_data.len);

	/* Inst bytecount must be last to set up, bit 31 signals uCode
	 * that all new ptr/size info is in place */
	iwl_write_restricted_reg(priv, BSM_DRAM_INST_BYTECOUNT_REG,
				 priv->ucode_code.len | BSM_DRAM_INST_LOAD);

	iwl_release_restricted_access(priv);

	spin_unlock_irqrestore(&priv->lock, flags);

	IWL_DEBUG_INFO("Runtime uCode pointers are set.\n");

	return rc;
}
6046
6047/**
6048 * iwl_init_alive_start - Called after REPLY_ALIVE notification receieved
6049 *
6050 * Called after REPLY_ALIVE notification received from "initialize" uCode.
6051 *
6052 * The 4965 "initialize" ALIVE reply contains calibration data for:
6053 * Voltage, temperature, and MIMO tx gain correction, now stored in priv
6054 * (3945 does not contain this data).
6055 *
6056 * Tell "initialize" uCode to go ahead and load the runtime uCode.
6057*/
6058static void iwl_init_alive_start(struct iwl_priv *priv)
6059{
6060 /* Check alive response for "valid" sign from uCode */
6061 if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
6062 /* We had an error bringing up the hardware, so take it
6063 * all the way back down so we can try again */
6064 IWL_DEBUG_INFO("Initialize Alive failed.\n");
6065 goto restart;
6066 }
6067
6068 /* Bootstrap uCode has loaded initialize uCode ... verify inst image.
6069 * This is a paranoid check, because we would not have gotten the
6070 * "initialize" alive if code weren't properly loaded. */
6071 if (iwl_verify_ucode(priv)) {
6072 /* Runtime instruction load was bad;
6073 * take it all the way back down so we can try again */
6074 IWL_DEBUG_INFO("Bad \"initialize\" uCode load.\n");
6075 goto restart;
6076 }
6077
6078 /* Send pointers to protocol/runtime uCode image ... init code will
6079 * load and launch runtime uCode, which will send us another "Alive"
6080 * notification. */
6081 IWL_DEBUG_INFO("Initialization Alive received.\n");
6082 if (iwl_set_ucode_ptrs(priv)) {
6083 /* Runtime instruction load won't happen;
6084 * take it all the way back down so we can try again */
6085 IWL_DEBUG_INFO("Couldn't set up uCode pointers.\n");
6086 goto restart;
6087 }
6088 return;
6089
6090 restart:
6091 queue_work(priv->workqueue, &priv->restart);
6092}
6093
6094
/**
 * iwl_alive_start - called after REPLY_ALIVE notification received
 *     from protocol/runtime uCode (initialization uCode's
 *     Alive gets handled by iwl_init_alive_start()).
 *
 * Completes driver bring-up: verifies the runtime image, reads the
 * rfkill state, registers with mac80211 (first time only), and commits
 * the initial RXON configuration.  Runs with priv->mutex held.
 */
static void iwl_alive_start(struct iwl_priv *priv)
{
	int rc = 0;
	int thermal_spin = 0;
	u32 rfkill;

	IWL_DEBUG_INFO("Runtime Alive received.\n");

	if (priv->card_alive.is_valid != UCODE_VALID_OK) {
		/* We had an error bringing up the hardware, so take it
		 * all the way back down so we can try again */
		IWL_DEBUG_INFO("Alive failed.\n");
		goto restart;
	}

	/* Initialize uCode has loaded Runtime uCode ... verify inst image.
	 * This is a paranoid check, because we would not have gotten the
	 * "runtime" alive if code weren't properly loaded. */
	if (iwl_verify_ucode(priv)) {
		/* Runtime instruction load was bad;
		 * take it all the way back down so we can try again */
		IWL_DEBUG_INFO("Bad runtime uCode load.\n");
		goto restart;
	}

	iwl_clear_stations_table(priv);

	/* NOTE(review): other grab/release pairs in this file are wrapped
	 * in priv->lock (see iwl_set_ucode_ptrs, __iwl_down); this call
	 * site is not -- confirm that is intentional here. */
	rc = iwl_grab_restricted_access(priv);
	if (rc) {
		IWL_WARNING("Can not read rfkill status from adapter\n");
		return;
	}

	/* Bit 0 of APMG_RFKILL_REG set means the radio is enabled */
	rfkill = iwl_read_restricted_reg(priv, APMG_RFKILL_REG);
	IWL_DEBUG_INFO("RFKILL status: 0x%x\n", rfkill);
	iwl_release_restricted_access(priv);

	if (rfkill & 0x1) {
		clear_bit(STATUS_RF_KILL_HW, &priv->status);
		/* if rfkill is not on, then wait for thermal
		 * sensor in adapter to kick in */
		while (iwl_hw_get_temperature(priv) == 0) {
			thermal_spin++;
			udelay(10);
		}

		if (thermal_spin)
			IWL_DEBUG_INFO("Thermal calibration took %dus\n",
				       thermal_spin * 10);
	} else
		set_bit(STATUS_RF_KILL_HW, &priv->status);

	/* After the ALIVE response, we can process host commands */
	set_bit(STATUS_ALIVE, &priv->status);

	/* Clear out the uCode error bit if it is set */
	clear_bit(STATUS_FW_ERROR, &priv->status);

	rc = iwl_init_channel_map(priv);
	if (rc) {
		IWL_ERROR("initializing regulatory failed: %d\n", rc);
		return;
	}

	iwl_init_geos(priv);

	if (iwl_is_rfkill(priv))
		return;

	if (!priv->mac80211_registered) {
		/* Unlock so any user space entry points can call back into
		 * the driver without a deadlock... */
		mutex_unlock(&priv->mutex);
		iwl_rate_control_register(priv->hw);
		rc = ieee80211_register_hw(priv->hw);
		priv->hw->conf.beacon_int = 100;
		mutex_lock(&priv->mutex);

		if (rc) {
			IWL_ERROR("Failed to register network "
				  "device (error %d)\n", rc);
			return;
		}

		priv->mac80211_registered = 1;

		iwl_reset_channel_flag(priv);
	} else
		/* Already registered on an earlier bring-up: just
		 * re-enable the tx queues. */
		ieee80211_start_queues(priv->hw);

	priv->active_rate = priv->rates_mask;
	priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK;

	iwl_send_power_mode(priv, IWL_POWER_LEVEL(priv->power_mode));

	if (iwl_is_associated(priv)) {
		struct iwl_rxon_cmd *active_rxon =
		    (struct iwl_rxon_cmd *)(&priv->active_rxon);

		/* Restart from the previous association: reuse the active
		 * RXON as staging, but drop the ASSOC filter flag so the
		 * device starts unassociated. */
		memcpy(&priv->staging_rxon, &priv->active_rxon,
		       sizeof(priv->staging_rxon));
		active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	} else {
		/* Initialize our rx_config data */
		iwl_connection_init_rx_config(priv);
		memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);
	}

	/* Configure BT coexistence */
	iwl_send_bt_config(priv);

	/* Configure the adapter for unassociated operation */
	iwl_commit_rxon(priv);

	/* At this point, the NIC is initialized and operational */
	priv->notif_missed_beacons = 0;
	set_bit(STATUS_READY, &priv->status);

	iwl3945_reg_txpower_periodic(priv);

	IWL_DEBUG_INFO("ALIVE processing complete.\n");

	if (priv->error_recovering)
		iwl_error_recovery(priv);

	return;

 restart:
	queue_work(priv->workqueue, &priv->restart);
}
6230
6231static void iwl_cancel_deferred_work(struct iwl_priv *priv);
6232
/*
 * __iwl_down - take the NIC fully down.
 *
 * Cancels deferred work, disables interrupts, stops tx/rx queues, then
 * resets the on-board processor.  Caller must hold priv->mutex (see
 * iwl_down()).  Status bits other than RF-kill / suspend (/ FW error)
 * are cleared so a later __iwl_up() starts from a clean slate.
 */
static void __iwl_down(struct iwl_priv *priv)
{
	unsigned long flags;
	int exit_pending = test_bit(STATUS_EXIT_PENDING, &priv->status);
	struct ieee80211_conf *conf = NULL;

	IWL_DEBUG_INFO(DRV_NAME " is going down\n");

	/* NOTE(review): conf is fetched but not used below in this
	 * function -- presumably left over; confirm before removing. */
	conf = ieee80211_get_hw_conf(priv->hw);

	/* Set EXIT_PENDING temporarily so concurrent work items bail out
	 * while we tear things down; restored below if we are not
	 * actually unloading the module. */
	if (!exit_pending)
		set_bit(STATUS_EXIT_PENDING, &priv->status);

	iwl_clear_stations_table(priv);

	/* Unblock any waiting calls */
	wake_up_interruptible_all(&priv->wait_command_queue);

	iwl_cancel_deferred_work(priv);

	/* Wipe out the EXIT_PENDING status bit if we are not actually
	 * exiting the module */
	if (!exit_pending)
		clear_bit(STATUS_EXIT_PENDING, &priv->status);

	/* stop and reset the on-board processor */
	iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

	/* tell the device to stop sending interrupts */
	iwl_disable_interrupts(priv);

	if (priv->mac80211_registered)
		ieee80211_stop_queues(priv->hw);

	/* If we have not previously called iwl_init() then
	 * clear all bits but the RF Kill and SUSPEND bits and return */
	/* (<< binds tighter than |, so each test_bit() result is shifted
	 * back into its own bit position before being OR'd together.) */
	if (!iwl_is_init(priv)) {
		priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) <<
					STATUS_RF_KILL_HW |
			       test_bit(STATUS_RF_KILL_SW, &priv->status) <<
					STATUS_RF_KILL_SW |
			       test_bit(STATUS_IN_SUSPEND, &priv->status) <<
					STATUS_IN_SUSPEND;
		goto exit;
	}

	/* ...otherwise clear out all the status bits but the RF Kill and
	 * SUSPEND bits and continue taking the NIC down. */
	priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
				STATUS_RF_KILL_HW |
			test_bit(STATUS_RF_KILL_SW, &priv->status) <<
				STATUS_RF_KILL_SW |
			test_bit(STATUS_IN_SUSPEND, &priv->status) <<
				STATUS_IN_SUSPEND |
			test_bit(STATUS_FW_ERROR, &priv->status) <<
				STATUS_FW_ERROR;

	spin_lock_irqsave(&priv->lock, flags);
	iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	spin_unlock_irqrestore(&priv->lock, flags);

	iwl_hw_txq_ctx_stop(priv);
	iwl_hw_rxq_stop(priv);

	/* Turn off the DMA clock request before resetting */
	spin_lock_irqsave(&priv->lock, flags);
	if (!iwl_grab_restricted_access(priv)) {
		iwl_write_restricted_reg(priv, APMG_CLK_DIS_REG,
					 APMG_CLK_VAL_DMA_CLK_RQT);
		iwl_release_restricted_access(priv);
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	udelay(5);

	iwl_hw_nic_stop_master(priv);
	iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
	iwl_hw_nic_reset(priv);

 exit:
	memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp));

	if (priv->ibss_beacon)
		dev_kfree_skb(priv->ibss_beacon);
	priv->ibss_beacon = NULL;

	/* clear out any free frames */
	iwl_clear_free_frames(priv);
}
6321
/* Serialized wrapper: take priv->mutex around the full shutdown path. */
static void iwl_down(struct iwl_priv *priv)
{
	mutex_lock(&priv->mutex);
	__iwl_down(priv);
	mutex_unlock(&priv->mutex);
}
6328
6329#define MAX_HW_RESTARTS 5
6330
6331static int __iwl_up(struct iwl_priv *priv)
6332{
0795af57 6333 DECLARE_MAC_BUF(mac);
b481de9c
ZY
6334 int rc, i;
6335
6336 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
6337 IWL_WARNING("Exit pending; will not bring the NIC up\n");
6338 return -EIO;
6339 }
6340
6341 if (test_bit(STATUS_RF_KILL_SW, &priv->status)) {
6342 IWL_WARNING("Radio disabled by SW RF kill (module "
6343 "parameter)\n");
6344 return 0;
6345 }
6346
6347 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
6348
6349 rc = iwl_hw_nic_init(priv);
6350 if (rc) {
6351 IWL_ERROR("Unable to int nic\n");
6352 return rc;
6353 }
6354
6355 /* make sure rfkill handshake bits are cleared */
6356 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
6357 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
6358 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
6359
6360 /* clear (again), then enable host interrupts */
6361 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
6362 iwl_enable_interrupts(priv);
6363
6364 /* really make sure rfkill handshake bits are cleared */
6365 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
6366 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
6367
6368 /* Copy original ucode data image from disk into backup cache.
6369 * This will be used to initialize the on-board processor's
6370 * data SRAM for a clean start when the runtime program first loads. */
6371 memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr,
6372 priv->ucode_data.len);
6373
6374 for (i = 0; i < MAX_HW_RESTARTS; i++) {
6375
6376 iwl_clear_stations_table(priv);
6377
6378 /* load bootstrap state machine,
6379 * load bootstrap program into processor's memory,
6380 * prepare to load the "initialize" uCode */
6381 rc = iwl_load_bsm(priv);
6382
6383 if (rc) {
6384 IWL_ERROR("Unable to set up bootstrap uCode: %d\n", rc);
6385 continue;
6386 }
6387
6388 /* start card; "initialize" will load runtime ucode */
6389 iwl_nic_start(priv);
6390
6391 /* MAC Address location in EEPROM same for 3945/4965 */
6392 get_eeprom_mac(priv, priv->mac_addr);
0795af57
JP
6393 IWL_DEBUG_INFO("MAC address: %s\n",
6394 print_mac(mac, priv->mac_addr));
b481de9c
ZY
6395
6396 SET_IEEE80211_PERM_ADDR(priv->hw, priv->mac_addr);
6397
6398 IWL_DEBUG_INFO(DRV_NAME " is coming up\n");
6399
6400 return 0;
6401 }
6402
6403 set_bit(STATUS_EXIT_PENDING, &priv->status);
6404 __iwl_down(priv);
6405
6406 /* tried to restart and config the device for as long as our
6407 * patience could withstand */
6408 IWL_ERROR("Unable to initialize device after %d attempts.\n", i);
6409 return -EIO;
6410}
6411
6412
6413/*****************************************************************************
6414 *
6415 * Workqueue callbacks
6416 *
6417 *****************************************************************************/
6418
6419static void iwl_bg_init_alive_start(struct work_struct *data)
6420{
6421 struct iwl_priv *priv =
6422 container_of(data, struct iwl_priv, init_alive_start.work);
6423
6424 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6425 return;
6426
6427 mutex_lock(&priv->mutex);
6428 iwl_init_alive_start(priv);
6429 mutex_unlock(&priv->mutex);
6430}
6431
6432static void iwl_bg_alive_start(struct work_struct *data)
6433{
6434 struct iwl_priv *priv =
6435 container_of(data, struct iwl_priv, alive_start.work);
6436
6437 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6438 return;
6439
6440 mutex_lock(&priv->mutex);
6441 iwl_alive_start(priv);
6442 mutex_unlock(&priv->mutex);
6443}
6444
6445static void iwl_bg_rf_kill(struct work_struct *work)
6446{
6447 struct iwl_priv *priv = container_of(work, struct iwl_priv, rf_kill);
6448
6449 wake_up_interruptible(&priv->wait_command_queue);
6450
6451 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6452 return;
6453
6454 mutex_lock(&priv->mutex);
6455
6456 if (!iwl_is_rfkill(priv)) {
6457 IWL_DEBUG(IWL_DL_INFO | IWL_DL_RF_KILL,
6458 "HW and/or SW RF Kill no longer active, restarting "
6459 "device\n");
6460 if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
6461 queue_work(priv->workqueue, &priv->restart);
6462 } else {
6463
6464 if (!test_bit(STATUS_RF_KILL_HW, &priv->status))
6465 IWL_DEBUG_RF_KILL("Can not turn radio back on - "
6466 "disabled by SW switch\n");
6467 else
6468 IWL_WARNING("Radio Frequency Kill Switch is On:\n"
6469 "Kill switch must be turned off for "
6470 "wireless networking to work.\n");
6471 }
6472 mutex_unlock(&priv->mutex);
6473}
6474
6475#define IWL_SCAN_CHECK_WATCHDOG (7 * HZ)
6476
6477static void iwl_bg_scan_check(struct work_struct *data)
6478{
6479 struct iwl_priv *priv =
6480 container_of(data, struct iwl_priv, scan_check.work);
6481
6482 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6483 return;
6484
6485 mutex_lock(&priv->mutex);
6486 if (test_bit(STATUS_SCANNING, &priv->status) ||
6487 test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
6488 IWL_DEBUG(IWL_DL_INFO | IWL_DL_SCAN,
6489 "Scan completion watchdog resetting adapter (%dms)\n",
6490 jiffies_to_msecs(IWL_SCAN_CHECK_WATCHDOG));
6491 if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
6492 queue_work(priv->workqueue, &priv->restart);
6493 }
6494 mutex_unlock(&priv->mutex);
6495}
6496
6497static void iwl_bg_request_scan(struct work_struct *data)
6498{
6499 struct iwl_priv *priv =
6500 container_of(data, struct iwl_priv, request_scan);
6501 struct iwl_host_cmd cmd = {
6502 .id = REPLY_SCAN_CMD,
6503 .len = sizeof(struct iwl_scan_cmd),
6504 .meta.flags = CMD_SIZE_HUGE,
6505 };
6506 int rc = 0;
6507 struct iwl_scan_cmd *scan;
6508 struct ieee80211_conf *conf = NULL;
6509 u8 direct_mask;
6510 int phymode;
6511
6512 conf = ieee80211_get_hw_conf(priv->hw);
6513
6514 mutex_lock(&priv->mutex);
6515
6516 if (!iwl_is_ready(priv)) {
6517 IWL_WARNING("request scan called when driver not ready.\n");
6518 goto done;
6519 }
6520
6521 /* Make sure the scan wasn't cancelled before this queued work
6522 * was given the chance to run... */
6523 if (!test_bit(STATUS_SCANNING, &priv->status))
6524 goto done;
6525
6526 /* This should never be called or scheduled if there is currently
6527 * a scan active in the hardware. */
6528 if (test_bit(STATUS_SCAN_HW, &priv->status)) {
6529 IWL_DEBUG_INFO("Multiple concurrent scan requests in parallel. "
6530 "Ignoring second request.\n");
6531 rc = -EIO;
6532 goto done;
6533 }
6534
6535 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
6536 IWL_DEBUG_SCAN("Aborting scan due to device shutdown\n");
6537 goto done;
6538 }
6539
6540 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
6541 IWL_DEBUG_HC("Scan request while abort pending. Queuing.\n");
6542 goto done;
6543 }
6544
6545 if (iwl_is_rfkill(priv)) {
6546 IWL_DEBUG_HC("Aborting scan due to RF Kill activation\n");
6547 goto done;
6548 }
6549
6550 if (!test_bit(STATUS_READY, &priv->status)) {
6551 IWL_DEBUG_HC("Scan request while uninitialized. Queuing.\n");
6552 goto done;
6553 }
6554
6555 if (!priv->scan_bands) {
6556 IWL_DEBUG_HC("Aborting scan due to no requested bands\n");
6557 goto done;
6558 }
6559
6560 if (!priv->scan) {
6561 priv->scan = kmalloc(sizeof(struct iwl_scan_cmd) +
6562 IWL_MAX_SCAN_SIZE, GFP_KERNEL);
6563 if (!priv->scan) {
6564 rc = -ENOMEM;
6565 goto done;
6566 }
6567 }
6568 scan = priv->scan;
6569 memset(scan, 0, sizeof(struct iwl_scan_cmd) + IWL_MAX_SCAN_SIZE);
6570
6571 scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
6572 scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
6573
6574 if (iwl_is_associated(priv)) {
6575 u16 interval = 0;
6576 u32 extra;
6577 u32 suspend_time = 100;
6578 u32 scan_suspend_time = 100;
6579 unsigned long flags;
6580
6581 IWL_DEBUG_INFO("Scanning while associated...\n");
6582
6583 spin_lock_irqsave(&priv->lock, flags);
6584 interval = priv->beacon_int;
6585 spin_unlock_irqrestore(&priv->lock, flags);
6586
6587 scan->suspend_time = 0;
6588 scan->max_out_time = cpu_to_le32(600 * 1024);
6589 if (!interval)
6590 interval = suspend_time;
6591 /*
6592 * suspend time format:
6593 * 0-19: beacon interval in usec (time before exec.)
6594 * 20-23: 0
6595 * 24-31: number of beacons (suspend between channels)
6596 */
6597
6598 extra = (suspend_time / interval) << 24;
6599 scan_suspend_time = 0xFF0FFFFF &
6600 (extra | ((suspend_time % interval) * 1024));
6601
6602 scan->suspend_time = cpu_to_le32(scan_suspend_time);
6603 IWL_DEBUG_SCAN("suspend_time 0x%X beacon interval %d\n",
6604 scan_suspend_time, interval);
6605 }
6606
6607 /* We should add the ability for user to lock to PASSIVE ONLY */
6608 if (priv->one_direct_scan) {
6609 IWL_DEBUG_SCAN
6610 ("Kicking off one direct scan for '%s'\n",
6611 iwl_escape_essid(priv->direct_ssid,
6612 priv->direct_ssid_len));
6613 scan->direct_scan[0].id = WLAN_EID_SSID;
6614 scan->direct_scan[0].len = priv->direct_ssid_len;
6615 memcpy(scan->direct_scan[0].ssid,
6616 priv->direct_ssid, priv->direct_ssid_len);
6617 direct_mask = 1;
6618 } else if (!iwl_is_associated(priv)) {
6619 scan->direct_scan[0].id = WLAN_EID_SSID;
6620 scan->direct_scan[0].len = priv->essid_len;
6621 memcpy(scan->direct_scan[0].ssid, priv->essid, priv->essid_len);
6622 direct_mask = 1;
6623 } else
6624 direct_mask = 0;
6625
6626 /* We don't build a direct scan probe request; the uCode will do
6627 * that based on the direct_mask added to each channel entry */
6628 scan->tx_cmd.len = cpu_to_le16(
6629 iwl_fill_probe_req(priv, (struct ieee80211_mgmt *)scan->data,
6630 IWL_MAX_SCAN_SIZE - sizeof(scan), 0));
6631 scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
6632 scan->tx_cmd.sta_id = priv->hw_setting.bcast_sta_id;
6633 scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
6634
6635 /* flags + rate selection */
6636
6637 switch (priv->scan_bands) {
6638 case 2:
6639 scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
6640 scan->tx_cmd.rate = IWL_RATE_1M_PLCP;
6641 scan->good_CRC_th = 0;
6642 phymode = MODE_IEEE80211G;
6643 break;
6644
6645 case 1:
6646 scan->tx_cmd.rate = IWL_RATE_6M_PLCP;
6647 scan->good_CRC_th = IWL_GOOD_CRC_TH;
6648 phymode = MODE_IEEE80211A;
6649 break;
6650
6651 default:
6652 IWL_WARNING("Invalid scan band count\n");
6653 goto done;
6654 }
6655
6656 /* select Rx antennas */
6657 scan->flags |= iwl3945_get_antenna_flags(priv);
6658
6659 if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR)
6660 scan->filter_flags = RXON_FILTER_PROMISC_MSK;
6661
6662 if (direct_mask)
6663 IWL_DEBUG_SCAN
6664 ("Initiating direct scan for %s.\n",
6665 iwl_escape_essid(priv->essid, priv->essid_len));
6666 else
6667 IWL_DEBUG_SCAN("Initiating indirect scan.\n");
6668
6669 scan->channel_count =
6670 iwl_get_channels_for_scan(
6671 priv, phymode, 1, /* active */
6672 direct_mask,
6673 (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]);
6674
6675 cmd.len += le16_to_cpu(scan->tx_cmd.len) +
6676 scan->channel_count * sizeof(struct iwl_scan_channel);
6677 cmd.data = scan;
6678 scan->len = cpu_to_le16(cmd.len);
6679
6680 set_bit(STATUS_SCAN_HW, &priv->status);
6681 rc = iwl_send_cmd_sync(priv, &cmd);
6682 if (rc)
6683 goto done;
6684
6685 queue_delayed_work(priv->workqueue, &priv->scan_check,
6686 IWL_SCAN_CHECK_WATCHDOG);
6687
6688 mutex_unlock(&priv->mutex);
6689 return;
6690
6691 done:
6692 /* inform mac80211 sacn aborted */
6693 queue_work(priv->workqueue, &priv->scan_completed);
6694 mutex_unlock(&priv->mutex);
6695}
6696
6697static void iwl_bg_up(struct work_struct *data)
6698{
6699 struct iwl_priv *priv = container_of(data, struct iwl_priv, up);
6700
6701 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6702 return;
6703
6704 mutex_lock(&priv->mutex);
6705 __iwl_up(priv);
6706 mutex_unlock(&priv->mutex);
6707}
6708
6709static void iwl_bg_restart(struct work_struct *data)
6710{
6711 struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);
6712
6713 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6714 return;
6715
6716 iwl_down(priv);
6717 queue_work(priv->workqueue, &priv->up);
6718}
6719
6720static void iwl_bg_rx_replenish(struct work_struct *data)
6721{
6722 struct iwl_priv *priv =
6723 container_of(data, struct iwl_priv, rx_replenish);
6724
6725 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6726 return;
6727
6728 mutex_lock(&priv->mutex);
6729 iwl_rx_replenish(priv);
6730 mutex_unlock(&priv->mutex);
6731}
6732
6733static void iwl_bg_post_associate(struct work_struct *data)
6734{
6735 struct iwl_priv *priv = container_of(data, struct iwl_priv,
6736 post_associate.work);
6737
6738 int rc = 0;
6739 struct ieee80211_conf *conf = NULL;
0795af57 6740 DECLARE_MAC_BUF(mac);
b481de9c
ZY
6741
6742 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
6743 IWL_ERROR("%s Should not be called in AP mode\n", __FUNCTION__);
6744 return;
6745 }
6746
6747
0795af57
JP
6748 IWL_DEBUG_ASSOC("Associated as %d to: %s\n",
6749 priv->assoc_id,
6750 print_mac(mac, priv->active_rxon.bssid_addr));
b481de9c
ZY
6751
6752 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6753 return;
6754
6755 mutex_lock(&priv->mutex);
6756
6757 conf = ieee80211_get_hw_conf(priv->hw);
6758
6759 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
6760 iwl_commit_rxon(priv);
6761
6762 memset(&priv->rxon_timing, 0, sizeof(struct iwl_rxon_time_cmd));
6763 iwl_setup_rxon_timing(priv);
6764 rc = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING,
6765 sizeof(priv->rxon_timing), &priv->rxon_timing);
6766 if (rc)
6767 IWL_WARNING("REPLY_RXON_TIMING failed - "
6768 "Attempting to continue.\n");
6769
6770 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
6771
6772 priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id);
6773
6774 IWL_DEBUG_ASSOC("assoc id %d beacon interval %d\n",
6775 priv->assoc_id, priv->beacon_int);
6776
6777 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
6778 priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
6779 else
6780 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
6781
6782 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) {
6783 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
6784 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
6785 else
6786 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
6787
6788 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)
6789 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
6790
6791 }
6792
6793 iwl_commit_rxon(priv);
6794
6795 switch (priv->iw_mode) {
6796 case IEEE80211_IF_TYPE_STA:
6797 iwl_rate_scale_init(priv->hw, IWL_AP_ID);
6798 break;
6799
6800 case IEEE80211_IF_TYPE_IBSS:
6801
6802 /* clear out the station table */
6803 iwl_clear_stations_table(priv);
6804
6805 iwl_rxon_add_station(priv, BROADCAST_ADDR, 0);
6806 iwl_rxon_add_station(priv, priv->bssid, 0);
6807 iwl3945_sync_sta(priv, IWL_STA_ID,
6808 (priv->phymode == MODE_IEEE80211A)?
6809 IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP,
6810 CMD_ASYNC);
6811 iwl_rate_scale_init(priv->hw, IWL_STA_ID);
6812 iwl_send_beacon_cmd(priv);
6813
6814 break;
6815
6816 default:
6817 IWL_ERROR("%s Should not be called in %d mode\n",
6818 __FUNCTION__, priv->iw_mode);
6819 break;
6820 }
6821
6822 iwl_sequence_reset(priv);
6823
6824#ifdef CONFIG_IWLWIFI_QOS
6825 iwl_activate_qos(priv, 0);
6826#endif /* CONFIG_IWLWIFI_QOS */
6827 mutex_unlock(&priv->mutex);
6828}
6829
6830static void iwl_bg_abort_scan(struct work_struct *work)
6831{
6832 struct iwl_priv *priv = container_of(work, struct iwl_priv,
6833 abort_scan);
6834
6835 if (!iwl_is_ready(priv))
6836 return;
6837
6838 mutex_lock(&priv->mutex);
6839
6840 set_bit(STATUS_SCAN_ABORTING, &priv->status);
6841 iwl_send_scan_abort(priv);
6842
6843 mutex_unlock(&priv->mutex);
6844}
6845
6846static void iwl_bg_scan_completed(struct work_struct *work)
6847{
6848 struct iwl_priv *priv =
6849 container_of(work, struct iwl_priv, scan_completed);
6850
6851 IWL_DEBUG(IWL_DL_INFO | IWL_DL_SCAN, "SCAN complete scan\n");
6852
6853 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6854 return;
6855
6856 ieee80211_scan_completed(priv->hw);
6857
6858 /* Since setting the TXPOWER may have been deferred while
6859 * performing the scan, fire one off */
6860 mutex_lock(&priv->mutex);
6861 iwl_hw_reg_send_txpower(priv);
6862 mutex_unlock(&priv->mutex);
6863}
6864
6865/*****************************************************************************
6866 *
6867 * mac80211 entry point functions
6868 *
6869 *****************************************************************************/
6870
6871static int iwl_mac_open(struct ieee80211_hw *hw)
6872{
6873 struct iwl_priv *priv = hw->priv;
6874
6875 IWL_DEBUG_MAC80211("enter\n");
6876
6877 /* we should be verifying the device is ready to be opened */
6878 mutex_lock(&priv->mutex);
6879
6880 priv->is_open = 1;
6881
6882 if (!iwl_is_rfkill(priv))
6883 ieee80211_start_queues(priv->hw);
6884
6885 mutex_unlock(&priv->mutex);
6886 IWL_DEBUG_MAC80211("leave\n");
6887 return 0;
6888}
6889
6890static int iwl_mac_stop(struct ieee80211_hw *hw)
6891{
6892 struct iwl_priv *priv = hw->priv;
6893
6894 IWL_DEBUG_MAC80211("enter\n");
6895 priv->is_open = 0;
6896 /*netif_stop_queue(dev); */
6897 flush_workqueue(priv->workqueue);
6898 IWL_DEBUG_MAC80211("leave\n");
6899
6900 return 0;
6901}
6902
6903static int iwl_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
6904 struct ieee80211_tx_control *ctl)
6905{
6906 struct iwl_priv *priv = hw->priv;
6907
6908 IWL_DEBUG_MAC80211("enter\n");
6909
6910 if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) {
6911 IWL_DEBUG_MAC80211("leave - monitor\n");
6912 return -1;
6913 }
6914
6915 IWL_DEBUG_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
6916 ctl->tx_rate);
6917
6918 if (iwl_tx_skb(priv, skb, ctl))
6919 dev_kfree_skb_any(skb);
6920
6921 IWL_DEBUG_MAC80211("leave\n");
6922 return 0;
6923}
6924
6925static int iwl_mac_add_interface(struct ieee80211_hw *hw,
6926 struct ieee80211_if_init_conf *conf)
6927{
6928 struct iwl_priv *priv = hw->priv;
6929 unsigned long flags;
0795af57 6930 DECLARE_MAC_BUF(mac);
b481de9c
ZY
6931
6932 IWL_DEBUG_MAC80211("enter: id %d, type %d\n", conf->if_id, conf->type);
6933 if (conf->mac_addr)
0795af57
JP
6934 IWL_DEBUG_MAC80211("enter: MAC %s\n",
6935 print_mac(mac, conf->mac_addr));
b481de9c
ZY
6936
6937 if (priv->interface_id) {
6938 IWL_DEBUG_MAC80211("leave - interface_id != 0\n");
6939 return 0;
6940 }
6941
6942 spin_lock_irqsave(&priv->lock, flags);
6943 priv->interface_id = conf->if_id;
6944
6945 spin_unlock_irqrestore(&priv->lock, flags);
6946
6947 mutex_lock(&priv->mutex);
6948 iwl_set_mode(priv, conf->type);
6949
6950 IWL_DEBUG_MAC80211("leave\n");
6951 mutex_unlock(&priv->mutex);
6952
6953 return 0;
6954}
6955
/**
 * iwl_mac_config - mac80211 config callback
 *
 * We ignore conf->flags & IEEE80211_CONF_SHORT_SLOT_TIME since it seems to
 * be set inappropriately and the driver currently sets the hardware up to
 * use it whenever needed.
 *
 * Validates the requested channel, programs channel/phymode/rates into
 * the staging RXON, handles the radio-enable state, and commits the RXON
 * if it changed.  Returns 0, -EIO (not ready / rfkill) or -EINVAL
 * (invalid channel).
 */
static int iwl_mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf)
{
	struct iwl_priv *priv = hw->priv;
	const struct iwl_channel_info *ch_info;
	unsigned long flags;

	mutex_lock(&priv->mutex);
	IWL_DEBUG_MAC80211("enter to channel %d\n", conf->channel);

	if (!iwl_is_ready(priv)) {
		IWL_DEBUG_MAC80211("leave - not ready\n");
		mutex_unlock(&priv->mutex);
		return -EIO;
	}

	/* TODO: Figure out how to get ieee80211_local->sta_scanning w/ only
	 * what is exposed through include/ declrations */
	/* Defer config while a HW scan is in flight (unless HW scan is
	 * disabled by module parameter) */
	if (unlikely(!iwl_param_disable_hw_scan &&
		     test_bit(STATUS_SCANNING, &priv->status))) {
		IWL_DEBUG_MAC80211("leave - scanning\n");
		mutex_unlock(&priv->mutex);
		return 0;
	}

	spin_lock_irqsave(&priv->lock, flags);

	ch_info = iwl_get_channel_info(priv, conf->phymode, conf->channel);
	if (!is_channel_valid(ch_info)) {
		IWL_DEBUG_SCAN("Channel %d [%d] is INVALID for this SKU.\n",
			       conf->channel, conf->phymode);
		IWL_DEBUG_MAC80211("leave - invalid channel\n");
		spin_unlock_irqrestore(&priv->lock, flags);
		mutex_unlock(&priv->mutex);
		return -EINVAL;
	}

	iwl_set_rxon_channel(priv, conf->phymode, conf->channel);

	iwl_set_flags_for_phymode(priv, conf->phymode);

	/* The list of supported rates and rate mask can be different
	 * for each phymode; since the phymode may have changed, reset
	 * the rate mask to what mac80211 lists */
	iwl_set_rate(priv);

	spin_unlock_irqrestore(&priv->lock, flags);

#ifdef IEEE80211_CONF_CHANNEL_SWITCH
	if (conf->flags & IEEE80211_CONF_CHANNEL_SWITCH) {
		iwl_hw_channel_switch(priv, conf->channel);
		mutex_unlock(&priv->mutex);
		return 0;
	}
#endif

	/* Sync SW rfkill state with mac80211's radio_enabled flag */
	iwl_radio_kill_sw(priv, !conf->radio_enabled);

	if (!conf->radio_enabled) {
		IWL_DEBUG_MAC80211("leave - radio disabled\n");
		mutex_unlock(&priv->mutex);
		return 0;
	}

	if (iwl_is_rfkill(priv)) {
		IWL_DEBUG_MAC80211("leave - RF kill\n");
		mutex_unlock(&priv->mutex);
		return -EIO;
	}

	/* NOTE(review): iwl_set_rate() was already called above under the
	 * spinlock -- the second call looks redundant; confirm intent. */
	iwl_set_rate(priv);

	/* Only push a new RXON if it actually differs from the active one */
	if (memcmp(&priv->active_rxon,
		   &priv->staging_rxon, sizeof(priv->staging_rxon)))
		iwl_commit_rxon(priv);
	else
		IWL_DEBUG_INFO("No re-sending same RXON configuration.\n");

	IWL_DEBUG_MAC80211("leave\n");

	mutex_unlock(&priv->mutex);

	return 0;
}
7046
7047static void iwl_config_ap(struct iwl_priv *priv)
7048{
7049 int rc = 0;
7050
7051 if (priv->status & STATUS_EXIT_PENDING)
7052 return;
7053
7054 /* The following should be done only at AP bring up */
7055 if ((priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) == 0) {
7056
7057 /* RXON - unassoc (to set timing command) */
7058 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
7059 iwl_commit_rxon(priv);
7060
7061 /* RXON Timing */
7062 memset(&priv->rxon_timing, 0, sizeof(struct iwl_rxon_time_cmd));
7063 iwl_setup_rxon_timing(priv);
7064 rc = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING,
7065 sizeof(priv->rxon_timing), &priv->rxon_timing);
7066 if (rc)
7067 IWL_WARNING("REPLY_RXON_TIMING failed - "
7068 "Attempting to continue.\n");
7069
7070 /* FIXME: what should be the assoc_id for AP? */
7071 priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id);
7072 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
7073 priv->staging_rxon.flags |=
7074 RXON_FLG_SHORT_PREAMBLE_MSK;
7075 else
7076 priv->staging_rxon.flags &=
7077 ~RXON_FLG_SHORT_PREAMBLE_MSK;
7078
7079 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) {
7080 if (priv->assoc_capability &
7081 WLAN_CAPABILITY_SHORT_SLOT_TIME)
7082 priv->staging_rxon.flags |=
7083 RXON_FLG_SHORT_SLOT_MSK;
7084 else
7085 priv->staging_rxon.flags &=
7086 ~RXON_FLG_SHORT_SLOT_MSK;
7087
7088 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)
7089 priv->staging_rxon.flags &=
7090 ~RXON_FLG_SHORT_SLOT_MSK;
7091 }
7092 /* restore RXON assoc */
7093 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
7094 iwl_commit_rxon(priv);
7095 iwl_rxon_add_station(priv, BROADCAST_ADDR, 0);
7096 iwl_send_beacon_cmd(priv);
7097 } else
7098 iwl_send_beacon_cmd(priv);
7099
7100 /* FIXME - we need to add code here to detect a totally new
7101 * configuration, reset the AP, unassoc, rxon timing, assoc,
7102 * clear sta table, add BCAST sta... */
7103}
7104
/*
 * mac80211 config_interface callback: apply interface-level settings
 * (BSSID, SSID, beacon) and (re)program RXON accordingly.
 *
 * Returns 0 on success or when the request is ignored (scan in
 * progress, wrong interface id); -EIO for a NULL conf; -EAGAIN when a
 * background scan could not be aborted in time.
 */
static int iwl_mac_config_interface(struct ieee80211_hw *hw, int if_id,
				    struct ieee80211_if_conf *conf)
{
	struct iwl_priv *priv = hw->priv;
	DECLARE_MAC_BUF(mac);
	unsigned long flags;
	int rc;

	if (conf == NULL)
		return -EIO;

	/* In AP mode hostapd must supply both a beacon template and an
	 * SSID before we can bring the AP up. */
	if ((priv->iw_mode == IEEE80211_IF_TYPE_AP) &&
	    (!conf->beacon || !conf->ssid_len)) {
		IWL_DEBUG_MAC80211
		    ("Leaving in AP mode because HostAPD is not ready.\n");
		return 0;
	}

	mutex_lock(&priv->mutex);

	IWL_DEBUG_MAC80211("enter: interface id %d\n", if_id);
	if (conf->bssid)
		IWL_DEBUG_MAC80211("bssid: %s\n",
				   print_mac(mac, conf->bssid));

	/* While a scan is in flight, defer reconfiguration unless the
	 * hardware does its own probe filtering. */
	if (unlikely(test_bit(STATUS_SCANNING, &priv->status)) &&
	    !(priv->hw->flags & IEEE80211_HW_NO_PROBE_FILTERING)) {
		IWL_DEBUG_MAC80211("leave - scanning\n");
		mutex_unlock(&priv->mutex);
		return 0;
	}

	if (priv->interface_id != if_id) {
		IWL_DEBUG_MAC80211("leave - interface_id != if_id\n");
		mutex_unlock(&priv->mutex);
		return 0;
	}

	if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
		/* An AP with no BSSID beacons under its own MAC address. */
		if (!conf->bssid) {
			conf->bssid = priv->mac_addr;
			memcpy(priv->bssid, priv->mac_addr, ETH_ALEN);
			IWL_DEBUG_MAC80211("bssid was set to: %s\n",
					   print_mac(mac, conf->bssid));
		}
		/* Adopt the new beacon template, dropping any old one. */
		if (priv->ibss_beacon)
			dev_kfree_skb(priv->ibss_beacon);

		priv->ibss_beacon = conf->beacon;
	}

	if (conf->bssid && !is_zero_ether_addr(conf->bssid) &&
	    !is_multicast_ether_addr(conf->bssid)) {
		/* If there is currently a HW scan going on in the background
		 * then we need to cancel it else the RXON below will fail. */
		if (iwl_scan_cancel_timeout(priv, 100)) {
			IWL_WARNING("Aborted scan still in progress "
				    "after 100ms\n");
			IWL_DEBUG_MAC80211("leaving - scan abort failed.\n");
			mutex_unlock(&priv->mutex);
			return -EAGAIN;
		}
		memcpy(priv->staging_rxon.bssid_addr, conf->bssid, ETH_ALEN);

		/* TODO: Audit driver for usage of these members and see
		 * if mac80211 deprecates them (priv->bssid looks like it
		 * shouldn't be there, but I haven't scanned the IBSS code
		 * to verify) - jpk */
		memcpy(priv->bssid, conf->bssid, ETH_ALEN);

		if (priv->iw_mode == IEEE80211_IF_TYPE_AP)
			iwl_config_ap(priv);
		else {
			priv->staging_rxon.filter_flags |=
						RXON_FILTER_ASSOC_MSK;
			rc = iwl_commit_rxon(priv);
			/* NOTE(review): the station is added only when
			 * iwl_commit_rxon() returns non-zero - looks
			 * inverted; preserved as-is, confirm against
			 * iwl_commit_rxon()'s return convention. */
			if ((priv->iw_mode == IEEE80211_IF_TYPE_STA) && rc)
				iwl_rxon_add_station(
					priv, priv->active_rxon.bssid_addr, 1);
		}

	} else {
		/* No usable BSSID: drop the association filter flag. */
		priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
		iwl_commit_rxon(priv);
	}

	/* Record the SSID under the spinlock; it is read from other
	 * contexts. */
	spin_lock_irqsave(&priv->lock, flags);
	if (!conf->ssid_len)
		memset(priv->essid, 0, IW_ESSID_MAX_SIZE);
	else
		memcpy(priv->essid, conf->ssid, conf->ssid_len);

	priv->essid_len = conf->ssid_len;
	spin_unlock_irqrestore(&priv->lock, flags);

	IWL_DEBUG_MAC80211("leave\n");
	mutex_unlock(&priv->mutex);

	return 0;
}
7205
7206static void iwl_mac_remove_interface(struct ieee80211_hw *hw,
7207 struct ieee80211_if_init_conf *conf)
7208{
7209 struct iwl_priv *priv = hw->priv;
7210
7211 IWL_DEBUG_MAC80211("enter\n");
7212
7213 mutex_lock(&priv->mutex);
7214 if (priv->interface_id == conf->if_id) {
7215 priv->interface_id = 0;
7216 memset(priv->bssid, 0, ETH_ALEN);
7217 memset(priv->essid, 0, IW_ESSID_MAX_SIZE);
7218 priv->essid_len = 0;
7219 }
7220 mutex_unlock(&priv->mutex);
7221
7222 IWL_DEBUG_MAC80211("leave\n");
7223
7224}
7225
/* Minimum spacing enforced between two consecutive scans. */
#define IWL_DELAY_NEXT_SCAN (HZ*2)

/*
 * mac80211 hw_scan callback.  A non-zero ssid/len requests a directed
 * scan for that SSID.  Returns 0 when the scan was initiated, -EIO
 * when the device is not ready or we are in AP mode, -EAGAIN when
 * called within IWL_DELAY_NEXT_SCAN of the previous scan.
 */
static int iwl_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t len)
{
	int rc = 0;
	unsigned long flags;
	struct iwl_priv *priv = hw->priv;

	IWL_DEBUG_MAC80211("enter\n");

	spin_lock_irqsave(&priv->lock, flags);

	if (!iwl_is_ready_rf(priv)) {
		rc = -EIO;
		IWL_DEBUG_MAC80211("leave - not ready or exit pending\n");
		goto out_unlock;
	}

	if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {	/* APs don't scan */
		rc = -EIO;
		IWL_ERROR("ERROR: APs don't scan\n");
		goto out_unlock;
	}

	/* if we just finished scan ask for delay */
	if (priv->last_scan_jiffies &&
	    time_after(priv->last_scan_jiffies + IWL_DELAY_NEXT_SCAN,
		       jiffies)) {
		rc = -EAGAIN;
		goto out_unlock;
	}
	if (len) {
		IWL_DEBUG_SCAN("direct scan for "
			       "%s [%d]\n ",
			       iwl_escape_essid(ssid, len), (int)len);

		/* Stash the requested SSID (truncated to the driver
		 * maximum) for the scan worker to pick up. */
		priv->one_direct_scan = 1;
		priv->direct_ssid_len = (u8)
		    min((u8) len, (u8) IW_ESSID_MAX_SIZE);
		memcpy(priv->direct_ssid, ssid, priv->direct_ssid_len);
	}

	rc = iwl_scan_initiate(priv);

	IWL_DEBUG_MAC80211("leave\n");

out_unlock:
	spin_unlock_irqrestore(&priv->lock, flags);

	return rc;
}
7276
/*
 * mac80211 set_key callback: program or clear a hardware pairwise key
 * for an already-known station, then re-commit RXON with hardware
 * crypto enabled/disabled to match.
 *
 * Only pairwise keys are supported (a zero addr is rejected), and only
 * when the hwcrypto module parameter is set.
 */
static int iwl_mac_set_key(struct ieee80211_hw *hw, set_key_cmd cmd,
			   const u8 *local_addr, const u8 *addr,
			   struct ieee80211_key_conf *key)
{
	struct iwl_priv *priv = hw->priv;
	int rc = 0;
	u8 sta_id;

	IWL_DEBUG_MAC80211("enter\n");

	if (!iwl_param_hwcrypto) {
		IWL_DEBUG_MAC80211("leave - hwcrypto disabled\n");
		return -EOPNOTSUPP;
	}

	if (is_zero_ether_addr(addr))
		/* only support pairwise keys */
		return -EOPNOTSUPP;

	/* The station must already be known to the hardware. */
	sta_id = iwl_hw_find_station(priv, addr);
	if (sta_id == IWL_INVALID_STATION) {
		DECLARE_MAC_BUF(mac);

		IWL_DEBUG_MAC80211("leave - %s not in station map.\n",
				   print_mac(mac, addr));
		return -EINVAL;
	}

	mutex_lock(&priv->mutex);

	switch (cmd) {
	case SET_KEY:
		rc = iwl_update_sta_key_info(priv, key, sta_id);
		if (!rc) {
			iwl_set_rxon_hwcrypto(priv, 1);
			iwl_commit_rxon(priv);
			/* Hand the station index back to mac80211 and
			 * let it generate IVs for us. */
			key->hw_key_idx = sta_id;
			IWL_DEBUG_MAC80211("set_key success, using hwcrypto\n");
			key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
		}
		break;
	case DISABLE_KEY:
		rc = iwl_clear_sta_key_info(priv, sta_id);
		if (!rc) {
			iwl_set_rxon_hwcrypto(priv, 0);
			iwl_commit_rxon(priv);
			IWL_DEBUG_MAC80211("disable hwcrypto key\n");
		}
		break;
	default:
		rc = -EINVAL;
	}

	IWL_DEBUG_MAC80211("leave\n");
	mutex_unlock(&priv->mutex);

	return rc;
}
7335
/*
 * mac80211 conf_tx callback: record per-AC QoS parameters (CW min/max,
 * AIFS, TXOP) and push them to the device when QoS is active.
 *
 * Returns 0 even for out-of-range queues (silently ignored); -EIO only
 * when the radio is not ready.
 */
static int iwl_mac_conf_tx(struct ieee80211_hw *hw, int queue,
			   const struct ieee80211_tx_queue_params *params)
{
	struct iwl_priv *priv = hw->priv;
#ifdef CONFIG_IWLWIFI_QOS
	unsigned long flags;
	int q;
#endif /* CONFIG_IWLWIFI_QOS */

	IWL_DEBUG_MAC80211("enter\n");

	if (!iwl_is_ready_rf(priv)) {
		IWL_DEBUG_MAC80211("leave - RF not ready\n");
		return -EIO;
	}

	if (queue >= AC_NUM) {
		IWL_DEBUG_MAC80211("leave - queue >= AC_NUM %d\n", queue);
		return 0;
	}

#ifdef CONFIG_IWLWIFI_QOS
	if (!priv->qos_data.qos_enable) {
		priv->qos_data.qos_active = 0;
		IWL_DEBUG_MAC80211("leave - qos not enabled\n");
		return 0;
	}
	/* mac80211's queue numbering is the reverse of the firmware's
	 * AC ordering. */
	q = AC_NUM - 1 - queue;

	spin_lock_irqsave(&priv->lock, flags);

	priv->qos_data.def_qos_parm.ac[q].cw_min = cpu_to_le16(params->cw_min);
	priv->qos_data.def_qos_parm.ac[q].cw_max = cpu_to_le16(params->cw_max);
	priv->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
	/* burst_time is scaled by 100 here - presumably 100us units for
	 * the firmware's edca_txop; confirm against the command spec. */
	priv->qos_data.def_qos_parm.ac[q].edca_txop =
			cpu_to_le16((params->burst_time * 100));

	priv->qos_data.def_qos_parm.ac[q].reserved1 = 0;
	priv->qos_data.qos_active = 1;

	spin_unlock_irqrestore(&priv->lock, flags);

	mutex_lock(&priv->mutex);
	if (priv->iw_mode == IEEE80211_IF_TYPE_AP)
		iwl_activate_qos(priv, 1);
	else if (priv->assoc_id && iwl_is_associated(priv))
		iwl_activate_qos(priv, 0);

	mutex_unlock(&priv->mutex);

#endif /*CONFIG_IWLWIFI_QOS */

	IWL_DEBUG_MAC80211("leave\n");
	return 0;
}
7391
/*
 * mac80211 get_tx_stats callback: report per-AC TX queue occupancy.
 * For each AC: len = slots in use, limit = window minus high-water
 * mark, count = total window size.
 */
static int iwl_mac_get_tx_stats(struct ieee80211_hw *hw,
				struct ieee80211_tx_queue_stats *stats)
{
	struct iwl_priv *priv = hw->priv;
	int i, avail;
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;
	unsigned long flags;

	IWL_DEBUG_MAC80211("enter\n");

	if (!iwl_is_ready_rf(priv)) {
		IWL_DEBUG_MAC80211("leave - RF not ready\n");
		return -EIO;
	}

	/* Snapshot all queues under the lock for a consistent view. */
	spin_lock_irqsave(&priv->lock, flags);

	for (i = 0; i < AC_NUM; i++) {
		txq = &priv->txq[i];
		q = &txq->q;
		avail = iwl_queue_space(q);

		stats->data[i].len = q->n_window - avail;
		stats->data[i].limit = q->n_window - q->high_mark;
		stats->data[i].count = q->n_window;

	}
	spin_unlock_irqrestore(&priv->lock, flags);

	IWL_DEBUG_MAC80211("leave\n");

	return 0;
}
7426
/*
 * mac80211 get_stats callback.  Low-level statistics are not gathered
 * by this driver; *stats is left untouched and 0 is returned.
 */
static int iwl_mac_get_stats(struct ieee80211_hw *hw,
			     struct ieee80211_low_level_stats *stats)
{
	IWL_DEBUG_MAC80211("enter\n");
	IWL_DEBUG_MAC80211("leave\n");

	return 0;
}
7435
7436static u64 iwl_mac_get_tsf(struct ieee80211_hw *hw)
7437{
7438 IWL_DEBUG_MAC80211("enter\n");
7439 IWL_DEBUG_MAC80211("leave\n");
7440
7441 return 0;
7442}
7443
/*
 * mac80211 reset_tsf callback.  Clears all association state, drops
 * any cached IBSS beacon, and - in IBSS mode with the radio ready -
 * refreshes the rate setup.
 */
static void iwl_mac_reset_tsf(struct ieee80211_hw *hw)
{
	struct iwl_priv *priv = hw->priv;
	unsigned long flags;

	mutex_lock(&priv->mutex);
	IWL_DEBUG_MAC80211("enter\n");

#ifdef CONFIG_IWLWIFI_QOS
	iwl_reset_qos(priv);
#endif
	cancel_delayed_work(&priv->post_associate);

	spin_lock_irqsave(&priv->lock, flags);
	priv->assoc_id = 0;
	priv->assoc_capability = 0;
	priv->call_post_assoc_from_beacon = 0;

	/* new association get rid of ibss beacon skb */
	if (priv->ibss_beacon)
		dev_kfree_skb(priv->ibss_beacon);

	priv->ibss_beacon = NULL;

	priv->beacon_int = priv->hw->conf.beacon_int;
	priv->timestamp1 = 0;
	priv->timestamp0 = 0;
	/* NOTE(review): beacon_int is forced to 0 in STA mode -
	 * presumably refilled from the AP's beacons later; confirm. */
	if ((priv->iw_mode == IEEE80211_IF_TYPE_STA))
		priv->beacon_int = 0;

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Per mac80211.h: This is only used in IBSS mode... */
	if (priv->iw_mode != IEEE80211_IF_TYPE_IBSS) {
		IWL_DEBUG_MAC80211("leave - not in IBSS\n");
		mutex_unlock(&priv->mutex);
		return;
	}

	if (!iwl_is_ready_rf(priv)) {
		IWL_DEBUG_MAC80211("leave - not ready\n");
		mutex_unlock(&priv->mutex);
		return;
	}

	priv->only_active_channel = 0;

	iwl_set_rate(priv);

	mutex_unlock(&priv->mutex);

	IWL_DEBUG_MAC80211("leave\n");

}
7498
/*
 * mac80211 beacon_update callback (IBSS only): adopt the new beacon
 * skb (taking ownership), reset QoS/association state and schedule
 * the post_associate work to reprogram the device.
 */
static int iwl_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb,
				 struct ieee80211_tx_control *control)
{
	struct iwl_priv *priv = hw->priv;
	unsigned long flags;

	mutex_lock(&priv->mutex);
	IWL_DEBUG_MAC80211("enter\n");

	if (!iwl_is_ready_rf(priv)) {
		IWL_DEBUG_MAC80211("leave - RF not ready\n");
		mutex_unlock(&priv->mutex);
		return -EIO;
	}

	if (priv->iw_mode != IEEE80211_IF_TYPE_IBSS) {
		IWL_DEBUG_MAC80211("leave - not IBSS\n");
		mutex_unlock(&priv->mutex);
		return -EIO;
	}

	spin_lock_irqsave(&priv->lock, flags);

	/* Replace (and free) any previously cached beacon. */
	if (priv->ibss_beacon)
		dev_kfree_skb(priv->ibss_beacon);

	priv->ibss_beacon = skb;

	priv->assoc_id = 0;

	IWL_DEBUG_MAC80211("leave\n");
	spin_unlock_irqrestore(&priv->lock, flags);

#ifdef CONFIG_IWLWIFI_QOS
	iwl_reset_qos(priv);
#endif

	queue_work(priv->workqueue, &priv->post_associate.work);

	mutex_unlock(&priv->mutex);

	return 0;
}
7542
7543/*****************************************************************************
7544 *
7545 * sysfs attributes
7546 *
7547 *****************************************************************************/
7548
7549#ifdef CONFIG_IWLWIFI_DEBUG
7550
7551/*
7552 * The following adds a new attribute to the sysfs representation
7553 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/iwl/)
7554 * used for controlling the debug level.
7555 *
7556 * See the level definitions in iwl for details.
7557 */
7558
7559static ssize_t show_debug_level(struct device_driver *d, char *buf)
7560{
7561 return sprintf(buf, "0x%08X\n", iwl_debug_level);
7562}
7563static ssize_t store_debug_level(struct device_driver *d,
7564 const char *buf, size_t count)
7565{
7566 char *p = (char *)buf;
7567 u32 val;
7568
7569 val = simple_strtoul(p, &p, 0);
7570 if (p == buf)
7571 printk(KERN_INFO DRV_NAME
7572 ": %s is not in hex or decimal form.\n", buf);
7573 else
7574 iwl_debug_level = val;
7575
7576 return strnlen(buf, count);
7577}
7578
7579static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO,
7580 show_debug_level, store_debug_level);
7581
7582#endif /* CONFIG_IWLWIFI_DEBUG */
7583
7584static ssize_t show_rf_kill(struct device *d,
7585 struct device_attribute *attr, char *buf)
7586{
7587 /*
7588 * 0 - RF kill not enabled
7589 * 1 - SW based RF kill active (sysfs)
7590 * 2 - HW based RF kill active
7591 * 3 - Both HW and SW based RF kill active
7592 */
7593 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
7594 int val = (test_bit(STATUS_RF_KILL_SW, &priv->status) ? 0x1 : 0x0) |
7595 (test_bit(STATUS_RF_KILL_HW, &priv->status) ? 0x2 : 0x0);
7596
7597 return sprintf(buf, "%i\n", val);
7598}
7599
7600static ssize_t store_rf_kill(struct device *d,
7601 struct device_attribute *attr,
7602 const char *buf, size_t count)
7603{
7604 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
7605
7606 mutex_lock(&priv->mutex);
7607 iwl_radio_kill_sw(priv, buf[0] == '1');
7608 mutex_unlock(&priv->mutex);
7609
7610 return count;
7611}
7612
7613static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill);
7614
7615static ssize_t show_temperature(struct device *d,
7616 struct device_attribute *attr, char *buf)
7617{
7618 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
7619
7620 if (!iwl_is_alive(priv))
7621 return -EAGAIN;
7622
7623 return sprintf(buf, "%d\n", iwl_hw_get_temperature(priv));
7624}
7625
7626static DEVICE_ATTR(temperature, S_IRUGO, show_temperature, NULL);
7627
7628static ssize_t show_rs_window(struct device *d,
7629 struct device_attribute *attr,
7630 char *buf)
7631{
7632 struct iwl_priv *priv = d->driver_data;
7633 return iwl_fill_rs_info(priv->hw, buf, IWL_AP_ID);
7634}
7635static DEVICE_ATTR(rs_window, S_IRUGO, show_rs_window, NULL);
7636
7637static ssize_t show_tx_power(struct device *d,
7638 struct device_attribute *attr, char *buf)
7639{
7640 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
7641 return sprintf(buf, "%d\n", priv->user_txpower_limit);
7642}
7643
7644static ssize_t store_tx_power(struct device *d,
7645 struct device_attribute *attr,
7646 const char *buf, size_t count)
7647{
7648 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
7649 char *p = (char *)buf;
7650 u32 val;
7651
7652 val = simple_strtoul(p, &p, 10);
7653 if (p == buf)
7654 printk(KERN_INFO DRV_NAME
7655 ": %s is not in decimal form.\n", buf);
7656 else
7657 iwl_hw_reg_set_txpower(priv, val);
7658
7659 return count;
7660}
7661
7662static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, show_tx_power, store_tx_power);
7663
7664static ssize_t show_flags(struct device *d,
7665 struct device_attribute *attr, char *buf)
7666{
7667 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
7668
7669 return sprintf(buf, "0x%04X\n", priv->active_rxon.flags);
7670}
7671
/*
 * sysfs 'flags' store: parse a new RXON flags word and commit it if it
 * differs from the staged value.  Any running scan is cancelled first,
 * since the commit cannot proceed while scanning.
 */
static ssize_t store_flags(struct device *d,
			   struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
	u32 flags = simple_strtoul(buf, NULL, 0);

	mutex_lock(&priv->mutex);
	if (le32_to_cpu(priv->staging_rxon.flags) != flags) {
		/* Cancel any currently running scans... */
		if (iwl_scan_cancel_timeout(priv, 100))
			IWL_WARNING("Could not cancel scan.\n");
		else {
			IWL_DEBUG_INFO("Committing rxon.flags = 0x%04X\n",
				       flags);
			priv->staging_rxon.flags = cpu_to_le32(flags);
			iwl_commit_rxon(priv);
		}
	}
	mutex_unlock(&priv->mutex);

	return count;
}
7695
7696static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, show_flags, store_flags);
7697
7698static ssize_t show_filter_flags(struct device *d,
7699 struct device_attribute *attr, char *buf)
7700{
7701 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
7702
7703 return sprintf(buf, "0x%04X\n",
7704 le32_to_cpu(priv->active_rxon.filter_flags));
7705}
7706
/*
 * sysfs 'filter_flags' store: parse a new RXON filter-flags word and
 * commit it if it differs from the staged value.  Any running scan is
 * cancelled first, since the commit cannot proceed while scanning.
 */
static ssize_t store_filter_flags(struct device *d,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
	u32 filter_flags = simple_strtoul(buf, NULL, 0);

	mutex_lock(&priv->mutex);
	if (le32_to_cpu(priv->staging_rxon.filter_flags) != filter_flags) {
		/* Cancel any currently running scans... */
		if (iwl_scan_cancel_timeout(priv, 100))
			IWL_WARNING("Could not cancel scan.\n");
		else {
			IWL_DEBUG_INFO("Committing rxon.filter_flags = "
				       "0x%04X\n", filter_flags);
			priv->staging_rxon.filter_flags =
				cpu_to_le32(filter_flags);
			iwl_commit_rxon(priv);
		}
	}
	mutex_unlock(&priv->mutex);

	return count;
}
7731
7732static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, show_filter_flags,
7733 store_filter_flags);
7734
7735static ssize_t show_tune(struct device *d,
7736 struct device_attribute *attr, char *buf)
7737{
7738 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
7739
7740 return sprintf(buf, "0x%04X\n",
7741 (priv->phymode << 8) |
7742 le16_to_cpu(priv->active_rxon.channel));
7743}
7744
7745static void iwl_set_flags_for_phymode(struct iwl_priv *priv, u8 phymode);
7746
/*
 * sysfs 'tune' store: value encodes (phymode << 8) | channel, matching
 * show_tune().  Validates the combination, cancels any scan, then
 * retunes and commits RXON.  Returns -EINVAL for invalid combinations.
 */
static ssize_t store_tune(struct device *d,
			  struct device_attribute *attr,
			  const char *buf, size_t count)
{
	struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
	char *p = (char *)buf;
	u16 tune = simple_strtoul(p, &p, 0);
	u8 phymode = (tune >> 8) & 0xff;
	u16 channel = tune & 0xff;

	IWL_DEBUG_INFO("Tune request to:%d channel:%d\n", phymode, channel);

	mutex_lock(&priv->mutex);
	if ((le16_to_cpu(priv->staging_rxon.channel) != channel) ||
	    (priv->phymode != phymode)) {
		const struct iwl_channel_info *ch_info;

		/* Reject phymode/channel pairs the EEPROM doesn't know. */
		ch_info = iwl_get_channel_info(priv, phymode, channel);
		if (!ch_info) {
			IWL_WARNING("Requested invalid phymode/channel "
				    "combination: %d %d\n", phymode, channel);
			mutex_unlock(&priv->mutex);
			return -EINVAL;
		}

		/* Cancel any currently running scans... */
		if (iwl_scan_cancel_timeout(priv, 100))
			IWL_WARNING("Could not cancel scan.\n");
		else {
			IWL_DEBUG_INFO("Committing phymode and "
				       "rxon.channel = %d %d\n",
				       phymode, channel);

			iwl_set_rxon_channel(priv, phymode, channel);
			iwl_set_flags_for_phymode(priv, phymode);

			iwl_set_rate(priv);
			iwl_commit_rxon(priv);
		}
	}
	mutex_unlock(&priv->mutex);

	return count;
}
7791
7792static DEVICE_ATTR(tune, S_IWUSR | S_IRUGO, show_tune, store_tune);
7793
7794#ifdef CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT
7795
/*
 * sysfs 'measurement' show: hex-dump the latest spectrum measurement
 * report.  Returns 0 (empty read) until a report is flagged
 * MEASUREMENT_READY; reading consumes the report.
 */
static ssize_t show_measurement(struct device *d,
				struct device_attribute *attr, char *buf)
{
	struct iwl_priv *priv = dev_get_drvdata(d);
	struct iwl_spectrum_notification measure_report;
	u32 size = sizeof(measure_report), len = 0, ofs = 0;
	u8 *data = (u8 *) & measure_report;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	if (!(priv->measurement_status & MEASUREMENT_READY)) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return 0;
	}
	/* Copy under the lock, format outside it. */
	memcpy(&measure_report, &priv->measure_report, size);
	priv->measurement_status = 0;
	spin_unlock_irqrestore(&priv->lock, flags);

	/* 16 bytes per newline-terminated line, bounded by the sysfs
	 * PAGE_SIZE buffer. */
	while (size && (PAGE_SIZE - len)) {
		hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
				   PAGE_SIZE - len, 1);
		len = strlen(buf);
		if (PAGE_SIZE - len)
			buf[len++] = '\n';

		ofs += 16;
		size -= min(size, 16U);
	}

	return len;
}
7827
7828static ssize_t store_measurement(struct device *d,
7829 struct device_attribute *attr,
7830 const char *buf, size_t count)
7831{
7832 struct iwl_priv *priv = dev_get_drvdata(d);
7833 struct ieee80211_measurement_params params = {
7834 .channel = le16_to_cpu(priv->active_rxon.channel),
7835 .start_time = cpu_to_le64(priv->last_tsf),
7836 .duration = cpu_to_le16(1),
7837 };
7838 u8 type = IWL_MEASURE_BASIC;
7839 u8 buffer[32];
7840 u8 channel;
7841
7842 if (count) {
7843 char *p = buffer;
7844 strncpy(buffer, buf, min(sizeof(buffer), count));
7845 channel = simple_strtoul(p, NULL, 0);
7846 if (channel)
7847 params.channel = channel;
7848
7849 p = buffer;
7850 while (*p && *p != ' ')
7851 p++;
7852 if (*p)
7853 type = simple_strtoul(p + 1, NULL, 0);
7854 }
7855
7856 IWL_DEBUG_INFO("Invoking measurement of type %d on "
7857 "channel %d (for '%s')\n", type, params.channel, buf);
7858 iwl_get_measurement(priv, &params, type);
7859
7860 return count;
7861}
7862
7863static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR,
7864 show_measurement, store_measurement);
7865#endif /* CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT */
7866
7867static ssize_t show_rate(struct device *d,
7868 struct device_attribute *attr, char *buf)
7869{
7870 struct iwl_priv *priv = dev_get_drvdata(d);
7871 unsigned long flags;
7872 int i;
7873
7874 spin_lock_irqsave(&priv->sta_lock, flags);
7875 if (priv->iw_mode == IEEE80211_IF_TYPE_STA)
7876 i = priv->stations[IWL_AP_ID].current_rate.s.rate;
7877 else
7878 i = priv->stations[IWL_STA_ID].current_rate.s.rate;
7879 spin_unlock_irqrestore(&priv->sta_lock, flags);
7880
7881 i = iwl_rate_index_from_plcp(i);
7882 if (i == -1)
7883 return sprintf(buf, "0\n");
7884
7885 return sprintf(buf, "%d%s\n",
7886 (iwl_rates[i].ieee >> 1),
7887 (iwl_rates[i].ieee & 0x1) ? ".5" : "");
7888}
7889
7890static DEVICE_ATTR(rate, S_IRUSR, show_rate, NULL);
7891
/*
 * sysfs 'retry_rate' store: set the TX retry limit, clamping parses
 * that come out non-positive to a minimum of 1.
 */
static ssize_t store_retry_rate(struct device *d,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct iwl_priv *priv = dev_get_drvdata(d);

	priv->retry_rate = simple_strtoul(buf, NULL, 0);
	if (priv->retry_rate <= 0)
		priv->retry_rate = 1;

	return count;
}
7904
7905static ssize_t show_retry_rate(struct device *d,
7906 struct device_attribute *attr, char *buf)
7907{
7908 struct iwl_priv *priv = dev_get_drvdata(d);
7909 return sprintf(buf, "%d", priv->retry_rate);
7910}
7911
7912static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, show_retry_rate,
7913 store_retry_rate);
7914
/*
 * sysfs 'power_level' store: select a power-save level.  Out-of-range
 * values fall back to IWL_POWER_AC; valid levels get IWL_POWER_ENABLED
 * OR'ed in.  Returns count on success, -EAGAIN when the device is not
 * ready, or the error from iwl_send_power_mode().
 */
static ssize_t store_power_level(struct device *d,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct iwl_priv *priv = dev_get_drvdata(d);
	int rc;
	int mode;

	mode = simple_strtoul(buf, NULL, 0);
	mutex_lock(&priv->mutex);

	if (!iwl_is_ready(priv)) {
		rc = -EAGAIN;
		goto out;
	}

	if ((mode < 1) || (mode > IWL_POWER_LIMIT) || (mode == IWL_POWER_AC))
		mode = IWL_POWER_AC;
	else
		mode |= IWL_POWER_ENABLED;

	/* Only touch the hardware when the mode actually changes. */
	if (mode != priv->power_mode) {
		rc = iwl_send_power_mode(priv, IWL_POWER_LEVEL(mode));
		if (rc) {
			IWL_DEBUG_MAC80211("failed setting power mode.\n");
			goto out;
		}
		priv->power_mode = mode;
	}

	rc = count;

 out:
	mutex_unlock(&priv->mutex);
	return rc;
}
7951
#define MAX_WX_STRING 80

/* Values are in microsecond */
/* Power-save receive timeout per level (indexed by level - 1,
 * see show_power_level()). */
static const s32 timeout_duration[] = {
	350000,
	250000,
	75000,
	37000,
	25000,
};
/* Power-save sleep period per level (indexed by level - 1). */
static const s32 period_duration[] = {
	400000,
	700000,
	1000000,
	1000000,
	1000000
};
7969
/*
 * sysfs 'power_level' show: print the current level plus a human-
 * readable tag (AC/BATTERY, or timeout/period for custom levels), and
 * " OFF" when power save is not enabled.
 */
static ssize_t show_power_level(struct device *d,
				struct device_attribute *attr, char *buf)
{
	struct iwl_priv *priv = dev_get_drvdata(d);
	int level = IWL_POWER_LEVEL(priv->power_mode);
	char *p = buf;

	p += sprintf(p, "%d ", level);
	switch (level) {
	case IWL_POWER_MODE_CAM:
	case IWL_POWER_AC:
		p += sprintf(p, "(AC)");
		break;
	case IWL_POWER_BATTERY:
		p += sprintf(p, "(BATTERY)");
		break;
	default:
		/* Table entries are microseconds; print milliseconds. */
		p += sprintf(p,
			     "(Timeout %dms, Period %dms)",
			     timeout_duration[level - 1] / 1000,
			     period_duration[level - 1] / 1000);
	}

	if (!(priv->power_mode & IWL_POWER_ENABLED))
		p += sprintf(p, " OFF\n");
	else
		p += sprintf(p, " \n");

	/* NOTE(review): the +1 counts the terminating NUL in the
	 * returned length - looks off by one for sysfs; confirm. */
	return (p - buf + 1);

}
8001
8002static DEVICE_ATTR(power_level, S_IWUSR | S_IRUSR, show_power_level,
8003 store_power_level);
8004
/*
 * sysfs 'channels' show: list every supported 2.4GHz and 5.2GHz
 * channel with its TX power limit, radar/IBSS restrictions and scan
 * type.  Returns -EAGAIN until the device is ready.
 */
static ssize_t show_channels(struct device *d,
			     struct device_attribute *attr, char *buf)
{
	struct iwl_priv *priv = dev_get_drvdata(d);
	int len = 0, i;
	struct ieee80211_channel *channels = NULL;
	const struct ieee80211_hw_mode *hw_mode = NULL;
	int count = 0;

	if (!iwl_is_ready(priv))
		return -EAGAIN;

	/* Prefer the G-mode channel list; fall back to B. */
	hw_mode = iwl_get_hw_mode(priv, MODE_IEEE80211G);
	if (!hw_mode)
		hw_mode = iwl_get_hw_mode(priv, MODE_IEEE80211B);
	if (hw_mode) {
		channels = hw_mode->channels;
		count = hw_mode->num_channels;
	}

	/* NOTE(review): output length is not bounded against PAGE_SIZE;
	 * a large channel list could overrun the sysfs buffer. */
	len +=
	    sprintf(&buf[len],
		    "Displaying %d channels in 2.4GHz band "
		    "(802.11bg):\n", count);

	for (i = 0; i < count; i++)
		len += sprintf(&buf[len], "%d: %ddBm: BSS%s%s, %s.\n",
			       channels[i].chan,
			       channels[i].power_level,
			       channels[i].
			       flag & IEEE80211_CHAN_W_RADAR_DETECT ?
			       " (IEEE 802.11h required)" : "",
			       (!(channels[i].flag & IEEE80211_CHAN_W_IBSS)
				|| (channels[i].
				    flag &
				    IEEE80211_CHAN_W_RADAR_DETECT)) ? "" :
			       ", IBSS",
			       channels[i].
			       flag & IEEE80211_CHAN_W_ACTIVE_SCAN ?
			       "active/passive" : "passive only");

	hw_mode = iwl_get_hw_mode(priv, MODE_IEEE80211A);
	if (hw_mode) {
		channels = hw_mode->channels;
		count = hw_mode->num_channels;
	} else {
		channels = NULL;
		count = 0;
	}

	len += sprintf(&buf[len], "Displaying %d channels in 5.2GHz band "
		       "(802.11a):\n", count);

	for (i = 0; i < count; i++)
		len += sprintf(&buf[len], "%d: %ddBm: BSS%s%s, %s.\n",
			       channels[i].chan,
			       channels[i].power_level,
			       channels[i].
			       flag & IEEE80211_CHAN_W_RADAR_DETECT ?
			       " (IEEE 802.11h required)" : "",
			       (!(channels[i].flag & IEEE80211_CHAN_W_IBSS)
				|| (channels[i].
				    flag &
				    IEEE80211_CHAN_W_RADAR_DETECT)) ? "" :
			       ", IBSS",
			       channels[i].
			       flag & IEEE80211_CHAN_W_ACTIVE_SCAN ?
			       "active/passive" : "passive only");

	return len;
}
8076
8077static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL);
8078
/*
 * sysfs 'statistics' show: request fresh statistics from the uCode,
 * then hex-dump the cached statistics notification.
 */
static ssize_t show_statistics(struct device *d,
			       struct device_attribute *attr, char *buf)
{
	struct iwl_priv *priv = dev_get_drvdata(d);
	u32 size = sizeof(struct iwl_notif_statistics);
	u32 len = 0, ofs = 0;
	u8 *data = (u8 *) & priv->statistics;
	int rc = 0;

	if (!iwl_is_alive(priv))
		return -EAGAIN;

	mutex_lock(&priv->mutex);
	rc = iwl_send_statistics_request(priv);
	mutex_unlock(&priv->mutex);

	if (rc) {
		len = sprintf(buf,
			      "Error sending statistics request: 0x%08X\n", rc);
		return len;
	}

	/* 16 bytes per newline-terminated line, bounded by PAGE_SIZE. */
	while (size && (PAGE_SIZE - len)) {
		hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
				   PAGE_SIZE - len, 1);
		len = strlen(buf);
		if (PAGE_SIZE - len)
			buf[len++] = '\n';

		ofs += 16;
		size -= min(size, 16U);
	}

	return len;
}
8114
8115static DEVICE_ATTR(statistics, S_IRUGO, show_statistics, NULL);
8116
8117static ssize_t show_antenna(struct device *d,
8118 struct device_attribute *attr, char *buf)
8119{
8120 struct iwl_priv *priv = dev_get_drvdata(d);
8121
8122 if (!iwl_is_alive(priv))
8123 return -EAGAIN;
8124
8125 return sprintf(buf, "%d\n", priv->antenna);
8126}
8127
8128static ssize_t store_antenna(struct device *d,
8129 struct device_attribute *attr,
8130 const char *buf, size_t count)
8131{
8132 int ant;
8133 struct iwl_priv *priv = dev_get_drvdata(d);
8134
8135 if (count == 0)
8136 return 0;
8137
8138 if (sscanf(buf, "%1i", &ant) != 1) {
8139 IWL_DEBUG_INFO("not in hex or decimal form.\n");
8140 return count;
8141 }
8142
8143 if ((ant >= 0) && (ant <= 2)) {
8144 IWL_DEBUG_INFO("Setting antenna select to %d.\n", ant);
8145 priv->antenna = (enum iwl_antenna)ant;
8146 } else
8147 IWL_DEBUG_INFO("Bad antenna select value %d.\n", ant);
8148
8149
8150 return count;
8151}
8152
8153static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, show_antenna, store_antenna);
8154
8155static ssize_t show_status(struct device *d,
8156 struct device_attribute *attr, char *buf)
8157{
8158 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
8159 if (!iwl_is_alive(priv))
8160 return -EAGAIN;
8161 return sprintf(buf, "0x%08x\n", (int)priv->status);
8162}
8163
8164static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
8165
8166static ssize_t dump_error_log(struct device *d,
8167 struct device_attribute *attr,
8168 const char *buf, size_t count)
8169{
8170 char *p = (char *)buf;
8171
8172 if (p[0] == '1')
8173 iwl_dump_nic_error_log((struct iwl_priv *)d->driver_data);
8174
8175 return strnlen(buf, count);
8176}
8177
8178static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, dump_error_log);
8179
8180static ssize_t dump_event_log(struct device *d,
8181 struct device_attribute *attr,
8182 const char *buf, size_t count)
8183{
8184 char *p = (char *)buf;
8185
8186 if (p[0] == '1')
8187 iwl_dump_nic_event_log((struct iwl_priv *)d->driver_data);
8188
8189 return strnlen(buf, count);
8190}
8191
8192static DEVICE_ATTR(dump_events, S_IWUSR, NULL, dump_event_log);
8193
8194/*****************************************************************************
8195 *
8196 * driver setup and teardown
8197 *
8198 *****************************************************************************/
8199
/*
 * Create the driver workqueue and initialize every work item, delayed
 * work item and the IRQ tasklet.  Must run before interrupts can
 * schedule any of these.
 */
static void iwl_setup_deferred_work(struct iwl_priv *priv)
{
	/* NOTE(review): create_workqueue() can return NULL; the result
	 * is not checked here - confirm callers cope with allocation
	 * failure. */
	priv->workqueue = create_workqueue(DRV_NAME);

	init_waitqueue_head(&priv->wait_command_queue);

	INIT_WORK(&priv->up, iwl_bg_up);
	INIT_WORK(&priv->restart, iwl_bg_restart);
	INIT_WORK(&priv->rx_replenish, iwl_bg_rx_replenish);
	INIT_WORK(&priv->scan_completed, iwl_bg_scan_completed);
	INIT_WORK(&priv->request_scan, iwl_bg_request_scan);
	INIT_WORK(&priv->abort_scan, iwl_bg_abort_scan);
	INIT_WORK(&priv->rf_kill, iwl_bg_rf_kill);
	INIT_WORK(&priv->beacon_update, iwl_bg_beacon_update);
	INIT_DELAYED_WORK(&priv->post_associate, iwl_bg_post_associate);
	INIT_DELAYED_WORK(&priv->init_alive_start, iwl_bg_init_alive_start);
	INIT_DELAYED_WORK(&priv->alive_start, iwl_bg_alive_start);
	INIT_DELAYED_WORK(&priv->scan_check, iwl_bg_scan_check);

	iwl_hw_setup_deferred_work(priv);

	tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
		     iwl_irq_tasklet, (unsigned long)priv);
}
8224
/* Cancel the deferred work scheduled by iwl_setup_deferred_work(). */
static void iwl_cancel_deferred_work(struct iwl_priv *priv)
{
	iwl_hw_cancel_deferred_work(priv);

	cancel_delayed_work(&priv->scan_check);
	cancel_delayed_work(&priv->alive_start);
	cancel_delayed_work(&priv->post_associate);
	/* beacon_update may currently be running: cancel synchronously. */
	cancel_work_sync(&priv->beacon_update);
}
8234
/* sysfs attributes exported per device; handlers are defined above. */
static struct attribute *iwl_sysfs_entries[] = {
	&dev_attr_antenna.attr,
	&dev_attr_channels.attr,
	&dev_attr_dump_errors.attr,
	&dev_attr_dump_events.attr,
	&dev_attr_flags.attr,
	&dev_attr_filter_flags.attr,
#ifdef CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT
	&dev_attr_measurement.attr,
#endif
	&dev_attr_power_level.attr,
	&dev_attr_rate.attr,
	&dev_attr_retry_rate.attr,
	&dev_attr_rf_kill.attr,
	&dev_attr_rs_window.attr,
	&dev_attr_statistics.attr,
	&dev_attr_status.attr,
	&dev_attr_temperature.attr,
	&dev_attr_tune.attr,
	&dev_attr_tx_power.attr,

	NULL
};
8258
/* Group wrapper so the table above can be (un)registered in one call */
static struct attribute_group iwl_attribute_group = {
	.name = NULL,	/* put in device directory */
	.attrs = iwl_sysfs_entries,
};
8263
/*
 * mac80211 callback table.  Deliberately NOT const: iwl_pci_probe() clears
 * .hw_scan at runtime when the disable_hw_scan module parameter is set.
 */
static struct ieee80211_ops iwl_hw_ops = {
	.tx = iwl_mac_tx,
	.open = iwl_mac_open,
	.stop = iwl_mac_stop,
	.add_interface = iwl_mac_add_interface,
	.remove_interface = iwl_mac_remove_interface,
	.config = iwl_mac_config,
	.config_interface = iwl_mac_config_interface,
	.set_key = iwl_mac_set_key,
	.get_stats = iwl_mac_get_stats,
	.get_tx_stats = iwl_mac_get_tx_stats,
	.conf_tx = iwl_mac_conf_tx,
	.get_tsf = iwl_mac_get_tsf,
	.reset_tsf = iwl_mac_reset_tsf,
	.beacon_update = iwl_mac_beacon_update,
	.hw_scan = iwl_mac_hw_scan
};
8281
8282static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
8283{
8284 int err = 0;
8285 u32 pci_id;
8286 struct iwl_priv *priv;
8287 struct ieee80211_hw *hw;
8288 int i;
8289
8290 if (iwl_param_disable_hw_scan) {
8291 IWL_DEBUG_INFO("Disabling hw_scan\n");
8292 iwl_hw_ops.hw_scan = NULL;
8293 }
8294
8295 if ((iwl_param_queues_num > IWL_MAX_NUM_QUEUES) ||
8296 (iwl_param_queues_num < IWL_MIN_NUM_QUEUES)) {
8297 IWL_ERROR("invalid queues_num, should be between %d and %d\n",
8298 IWL_MIN_NUM_QUEUES, IWL_MAX_NUM_QUEUES);
8299 err = -EINVAL;
8300 goto out;
8301 }
8302
8303 /* mac80211 allocates memory for this device instance, including
8304 * space for this driver's private structure */
8305 hw = ieee80211_alloc_hw(sizeof(struct iwl_priv), &iwl_hw_ops);
8306 if (hw == NULL) {
8307 IWL_ERROR("Can not allocate network device\n");
8308 err = -ENOMEM;
8309 goto out;
8310 }
8311 SET_IEEE80211_DEV(hw, &pdev->dev);
8312
8313 IWL_DEBUG_INFO("*** LOAD DRIVER ***\n");
8314 priv = hw->priv;
8315 priv->hw = hw;
8316
8317 priv->pci_dev = pdev;
8318 priv->antenna = (enum iwl_antenna)iwl_param_antenna;
8319#ifdef CONFIG_IWLWIFI_DEBUG
8320 iwl_debug_level = iwl_param_debug;
8321 atomic_set(&priv->restrict_refcnt, 0);
8322#endif
8323 priv->retry_rate = 1;
8324
8325 priv->ibss_beacon = NULL;
8326
8327 /* Tell mac80211 and its clients (e.g. Wireless Extensions)
8328 * the range of signal quality values that we'll provide.
8329 * Negative values for level/noise indicate that we'll provide dBm.
8330 * For WE, at least, non-0 values here *enable* display of values
8331 * in app (iwconfig). */
8332 hw->max_rssi = -20; /* signal level, negative indicates dBm */
8333 hw->max_noise = -20; /* noise level, negative indicates dBm */
8334 hw->max_signal = 100; /* link quality indication (%) */
8335
8336 /* Tell mac80211 our Tx characteristics */
8337 hw->flags = IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE;
8338
8339 hw->queues = 4;
8340
8341 spin_lock_init(&priv->lock);
8342 spin_lock_init(&priv->power_data.lock);
8343 spin_lock_init(&priv->sta_lock);
8344 spin_lock_init(&priv->hcmd_lock);
8345
8346 for (i = 0; i < IWL_IBSS_MAC_HASH_SIZE; i++)
8347 INIT_LIST_HEAD(&priv->ibss_mac_hash[i]);
8348
8349 INIT_LIST_HEAD(&priv->free_frames);
8350
8351 mutex_init(&priv->mutex);
8352 if (pci_enable_device(pdev)) {
8353 err = -ENODEV;
8354 goto out_ieee80211_free_hw;
8355 }
8356
8357 pci_set_master(pdev);
8358
8359 iwl_clear_stations_table(priv);
8360
8361 priv->data_retry_limit = -1;
8362 priv->ieee_channels = NULL;
8363 priv->ieee_rates = NULL;
8364 priv->phymode = -1;
8365
8366 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
8367 if (!err)
8368 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
8369 if (err) {
8370 printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n");
8371 goto out_pci_disable_device;
8372 }
8373
8374 pci_set_drvdata(pdev, priv);
8375 err = pci_request_regions(pdev, DRV_NAME);
8376 if (err)
8377 goto out_pci_disable_device;
8378 /* We disable the RETRY_TIMEOUT register (0x41) to keep
8379 * PCI Tx retries from interfering with C3 CPU state */
8380 pci_write_config_byte(pdev, 0x41, 0x00);
8381 priv->hw_base = pci_iomap(pdev, 0, 0);
8382 if (!priv->hw_base) {
8383 err = -ENODEV;
8384 goto out_pci_release_regions;
8385 }
8386
8387 IWL_DEBUG_INFO("pci_resource_len = 0x%08llx\n",
8388 (unsigned long long) pci_resource_len(pdev, 0));
8389 IWL_DEBUG_INFO("pci_resource_base = %p\n", priv->hw_base);
8390
8391 /* Initialize module parameter values here */
8392
8393 if (iwl_param_disable) {
8394 set_bit(STATUS_RF_KILL_SW, &priv->status);
8395 IWL_DEBUG_INFO("Radio disabled.\n");
8396 }
8397
8398 priv->iw_mode = IEEE80211_IF_TYPE_STA;
8399
8400 pci_id =
8401 (priv->pci_dev->device << 16) | priv->pci_dev->subsystem_device;
8402
8403 switch (pci_id) {
8404 case 0x42221005: /* 0x4222 0x8086 0x1005 is BG SKU */
8405 case 0x42221034: /* 0x4222 0x8086 0x1034 is BG SKU */
8406 case 0x42271014: /* 0x4227 0x8086 0x1014 is BG SKU */
8407 case 0x42221044: /* 0x4222 0x8086 0x1044 is BG SKU */
8408 priv->is_abg = 0;
8409 break;
8410
8411 /*
8412 * Rest are assumed ABG SKU -- if this is not the
8413 * case then the card will get the wrong 'Detected'
8414 * line in the kernel log however the code that
8415 * initializes the GEO table will detect no A-band
8416 * channels and remove the is_abg mask.
8417 */
8418 default:
8419 priv->is_abg = 1;
8420 break;
8421 }
8422
8423 printk(KERN_INFO DRV_NAME
8424 ": Detected Intel PRO/Wireless 3945%sBG Network Connection\n",
8425 priv->is_abg ? "A" : "");
8426
8427 /* Device-specific setup */
8428 if (iwl_hw_set_hw_setting(priv)) {
8429 IWL_ERROR("failed to set hw settings\n");
8430 mutex_unlock(&priv->mutex);
8431 goto out_iounmap;
8432 }
8433
8434#ifdef CONFIG_IWLWIFI_QOS
8435 if (iwl_param_qos_enable)
8436 priv->qos_data.qos_enable = 1;
8437
8438 iwl_reset_qos(priv);
8439
8440 priv->qos_data.qos_active = 0;
8441 priv->qos_data.qos_cap.val = 0;
8442#endif /* CONFIG_IWLWIFI_QOS */
8443
8444 iwl_set_rxon_channel(priv, MODE_IEEE80211G, 6);
8445 iwl_setup_deferred_work(priv);
8446 iwl_setup_rx_handlers(priv);
8447
8448 priv->rates_mask = IWL_RATES_MASK;
8449 /* If power management is turned on, default to AC mode */
8450 priv->power_mode = IWL_POWER_AC;
8451 priv->user_txpower_limit = IWL_DEFAULT_TX_POWER;
8452
8453 pci_enable_msi(pdev);
8454
8455 err = request_irq(pdev->irq, iwl_isr, IRQF_SHARED, DRV_NAME, priv);
8456 if (err) {
8457 IWL_ERROR("Error allocating IRQ %d\n", pdev->irq);
8458 goto out_disable_msi;
8459 }
8460
8461 mutex_lock(&priv->mutex);
8462
8463 err = sysfs_create_group(&pdev->dev.kobj, &iwl_attribute_group);
8464 if (err) {
8465 IWL_ERROR("failed to create sysfs device attributes\n");
8466 mutex_unlock(&priv->mutex);
8467 goto out_release_irq;
8468 }
8469
8470 /* fetch ucode file from disk, alloc and copy to bus-master buffers ...
8471 * ucode filename and max sizes are card-specific. */
8472 err = iwl_read_ucode(priv);
8473 if (err) {
8474 IWL_ERROR("Could not read microcode: %d\n", err);
8475 mutex_unlock(&priv->mutex);
8476 goto out_pci_alloc;
8477 }
8478
8479 mutex_unlock(&priv->mutex);
8480
8481 IWL_DEBUG_INFO("Queing UP work.\n");
8482
8483 queue_work(priv->workqueue, &priv->up);
8484
8485 return 0;
8486
8487 out_pci_alloc:
8488 iwl_dealloc_ucode_pci(priv);
8489
8490 sysfs_remove_group(&pdev->dev.kobj, &iwl_attribute_group);
8491
8492 out_release_irq:
8493 free_irq(pdev->irq, priv);
8494
8495 out_disable_msi:
8496 pci_disable_msi(pdev);
8497 destroy_workqueue(priv->workqueue);
8498 priv->workqueue = NULL;
8499 iwl_unset_hw_setting(priv);
8500
8501 out_iounmap:
8502 pci_iounmap(pdev, priv->hw_base);
8503 out_pci_release_regions:
8504 pci_release_regions(pdev);
8505 out_pci_disable_device:
8506 pci_disable_device(pdev);
8507 pci_set_drvdata(pdev, NULL);
8508 out_ieee80211_free_hw:
8509 ieee80211_free_hw(priv->hw);
8510 out:
8511 return err;
8512}
8513
/*
 * PCI remove: take the device down and release every resource acquired in
 * iwl_pci_probe(), in reverse order.  The teardown sequence is order
 * sensitive -- see the inline comments.
 */
static void iwl_pci_remove(struct pci_dev *pdev)
{
	struct iwl_priv *priv = pci_get_drvdata(pdev);
	struct list_head *p, *q;
	int i;

	/* drvdata is cleared on probe failure, so remove becomes a no-op */
	if (!priv)
		return;

	IWL_DEBUG_INFO("*** UNLOAD DRIVER ***\n");

	/* Stop the device under the mutex; EXIT_PENDING prevents restarts */
	mutex_lock(&priv->mutex);
	set_bit(STATUS_EXIT_PENDING, &priv->status);
	__iwl_down(priv);
	mutex_unlock(&priv->mutex);

	/* Free MAC hash list for ADHOC */
	for (i = 0; i < IWL_IBSS_MAC_HASH_SIZE; i++) {
		list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) {
			list_del(p);
			kfree(list_entry(p, struct iwl_ibss_seq, list));
		}
	}

	sysfs_remove_group(&pdev->dev.kobj, &iwl_attribute_group);

	iwl_dealloc_ucode_pci(priv);

	/* Rx/Tx DMA rings */
	if (priv->rxq.bd)
		iwl_rx_queue_free(priv, &priv->rxq);
	iwl_hw_txq_ctx_free(priv);

	iwl_unset_hw_setting(priv);
	iwl_clear_stations_table(priv);

	if (priv->mac80211_registered) {
		ieee80211_unregister_hw(priv->hw);
		iwl_rate_control_unregister(priv->hw);
	}

	/* ieee80211_unregister_hw calls iwl_mac_stop, which flushes
	 * priv->workqueue... so we can't take down the workqueue
	 * until now... */
	destroy_workqueue(priv->workqueue);
	priv->workqueue = NULL;

	/* Release the interrupt and PCI resources last */
	free_irq(pdev->irq, priv);
	pci_disable_msi(pdev);
	pci_iounmap(pdev, priv->hw_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	kfree(priv->channel_info);

	kfree(priv->ieee_channels);
	kfree(priv->ieee_rates);

	if (priv->ibss_beacon)
		dev_kfree_skb(priv->ibss_beacon);

	/* Frees priv itself (embedded in the hw allocation) */
	ieee80211_free_hw(priv->hw);
}
8577
8578#ifdef CONFIG_PM
8579
/*
 * PM suspend hook: mark the driver as suspending, power the NIC down,
 * stop mac80211's Tx queues and put the PCI device into D3hot.
 * Always returns 0 (suspend is not refused).
 */
static int iwl_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct iwl_priv *priv = pci_get_drvdata(pdev);

	mutex_lock(&priv->mutex);

	/* Tells __iwl_down()/up paths this is a suspend, not a full exit */
	set_bit(STATUS_IN_SUSPEND, &priv->status);

	/* Take down the device; powers it off, etc. */
	__iwl_down(priv);

	if (priv->mac80211_registered)
		ieee80211_stop_queues(priv->hw);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	mutex_unlock(&priv->mutex);

	return 0;
}
8602
/*
 * Re-initialize the NIC after suspend and schedule the "up" work to bring
 * the interface back.  Caller (iwl_pci_resume) holds priv->mutex.  The
 * register-write sequence below is order sensitive -- do not reorder.
 */
static void iwl_resume(struct iwl_priv *priv)
{
	unsigned long flags;

	/* The following is a temporary work around due to the
	 * suspend / resume not fully initializing the NIC correctly.
	 * Without all of the following, resume will not attempt to take
	 * down the NIC (it shouldn't really need to) and will just try
	 * and bring the NIC back up. However that fails during the
	 * ucode verification process. This then causes iwl_down to be
	 * called *after* iwl_hw_nic_init() has succeeded -- which
	 * then lets the next init sequence succeed. So, we've
	 * replicated all of that NIC init code here... */

	/* Ack/clear any pending interrupt causes */
	iwl_write32(priv, CSR_INT, 0xFFFFFFFF);

	iwl_hw_nic_init(priv);

	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
	iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	/* tell the device to stop sending interrupts */
	iwl_disable_interrupts(priv);

	spin_lock_irqsave(&priv->lock, flags);
	iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Disable the DMA clock request while the MAC is accessible */
	if (!iwl_grab_restricted_access(priv)) {
		iwl_write_restricted_reg(priv, APMG_CLK_DIS_REG,
					 APMG_CLK_VAL_DMA_CLK_RQT);
		iwl_release_restricted_access(priv);
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	udelay(5);

	iwl_hw_nic_reset(priv);

	/* Bring the device back up */
	clear_bit(STATUS_IN_SUSPEND, &priv->status);
	queue_work(priv->workqueue, &priv->up);
}
8649
8650static int iwl_pci_resume(struct pci_dev *pdev)
8651{
8652 struct iwl_priv *priv = pci_get_drvdata(pdev);
8653 int err;
8654
8655 printk(KERN_INFO "Coming out of suspend...\n");
8656
8657 mutex_lock(&priv->mutex);
8658
8659 pci_set_power_state(pdev, PCI_D0);
8660 err = pci_enable_device(pdev);
8661 pci_restore_state(pdev);
8662
8663 /*
8664 * Suspend/Resume resets the PCI configuration space, so we have to
8665 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
8666 * from interfering with C3 CPU state. pci_restore_state won't help
8667 * here since it only restores the first 64 bytes pci config header.
8668 */
8669 pci_write_config_byte(pdev, 0x41, 0x00);
8670
8671 iwl_resume(priv);
8672 mutex_unlock(&priv->mutex);
8673
8674 return 0;
8675}
8676
8677#endif /* CONFIG_PM */
8678
8679/*****************************************************************************
8680 *
8681 * driver and module entry point
8682 *
8683 *****************************************************************************/
8684
/* PCI driver hooks; suspend/resume compiled in only with CONFIG_PM */
static struct pci_driver iwl_driver = {
	.name = DRV_NAME,
	.id_table = iwl_hw_card_ids,
	.probe = iwl_pci_probe,
	.remove = __devexit_p(iwl_pci_remove),
#ifdef CONFIG_PM
	.suspend = iwl_pci_suspend,
	.resume = iwl_pci_resume,
#endif
};
8695
8696static int __init iwl_init(void)
8697{
8698
8699 int ret;
8700 printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
8701 printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");
8702 ret = pci_register_driver(&iwl_driver);
8703 if (ret) {
8704 IWL_ERROR("Unable to initialize PCI module\n");
8705 return ret;
8706 }
8707#ifdef CONFIG_IWLWIFI_DEBUG
8708 ret = driver_create_file(&iwl_driver.driver, &driver_attr_debug_level);
8709 if (ret) {
8710 IWL_ERROR("Unable to create driver sysfs file\n");
8711 pci_unregister_driver(&iwl_driver);
8712 return ret;
8713 }
8714#endif
8715
8716 return ret;
8717}
8718
/* Module exit: remove the debug attribute (debug builds) and unregister
 * the PCI driver, which triggers iwl_pci_remove() for each bound device. */
static void __exit iwl_exit(void)
{
#ifdef CONFIG_IWLWIFI_DEBUG
	driver_remove_file(&iwl_driver.driver, &driver_attr_debug_level);
#endif
	pci_unregister_driver(&iwl_driver);
}
8726
/* Module parameters -- all world-readable (0444), set at load time only */
module_param_named(antenna, iwl_param_antenna, int, 0444);
MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
module_param_named(disable, iwl_param_disable, int, 0444);
MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
module_param_named(hwcrypto, iwl_param_hwcrypto, int, 0444);
/* NOTE(review): the trailing "\n" inside this description string appears
 * verbatim in modinfo output; consider dropping it. */
MODULE_PARM_DESC(hwcrypto,
		 "using hardware crypto engine (default 0 [software])\n");
module_param_named(debug, iwl_param_debug, int, 0444);
MODULE_PARM_DESC(debug, "debug output mask");
module_param_named(disable_hw_scan, iwl_param_disable_hw_scan, int, 0444);
MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)");

module_param_named(queues_num, iwl_param_queues_num, int, 0444);
MODULE_PARM_DESC(queues_num, "number of hw queues.");

/* QoS */
module_param_named(qos_enable, iwl_param_qos_enable, int, 0444);
MODULE_PARM_DESC(qos_enable, "enable all QoS functionality");

/* exit listed before init is unusual but harmless -- the macros merely
 * record the entry points */
module_exit(iwl_exit);
module_init(iwl_init);
This page took 0.363068 seconds and 5 git commands to generate.