iwlegacy: s/STATUS_/S_/
[deliverable/linux.git] / drivers / net / wireless / iwlegacy / 4965-mac.c
CommitLineData
be663ab6
WYG
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31
32#include <linux/kernel.h>
33#include <linux/module.h>
34#include <linux/init.h>
35#include <linux/pci.h>
36#include <linux/pci-aspm.h>
37#include <linux/slab.h>
38#include <linux/dma-mapping.h>
39#include <linux/delay.h>
40#include <linux/sched.h>
41#include <linux/skbuff.h>
42#include <linux/netdevice.h>
be663ab6
WYG
43#include <linux/firmware.h>
44#include <linux/etherdevice.h>
45#include <linux/if_arp.h>
46
47#include <net/mac80211.h>
48
49#include <asm/div64.h>
50
51#define DRV_NAME "iwl4965"
52
53#include "iwl-eeprom.h"
54#include "iwl-dev.h"
55#include "iwl-core.h"
56#include "iwl-io.h"
57#include "iwl-helpers.h"
58#include "iwl-sta.h"
af038f40 59#include "4965.h"
be663ab6
WYG
60
61
62/******************************************************************************
63 *
64 * module boiler plate
65 *
66 ******************************************************************************/
67
68/*
69 * module name, copyright, version, etc.
70 */
71#define DRV_DESCRIPTION "Intel(R) Wireless WiFi 4965 driver for Linux"
72
d3175167 73#ifdef CONFIG_IWLEGACY_DEBUG
be663ab6
WYG
74#define VD "d"
75#else
76#define VD
77#endif
78
79#define DRV_VERSION IWLWIFI_VERSION VD
80
81
82MODULE_DESCRIPTION(DRV_DESCRIPTION);
83MODULE_VERSION(DRV_VERSION);
84MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
85MODULE_LICENSE("GPL");
86MODULE_ALIAS("iwl4965");
87
fcb74588
SG
88void il4965_check_abort_status(struct il_priv *il,
89 u8 frame_count, u32 status)
90{
91 if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
92 IL_ERR("Tx flush command to flush out all frames\n");
a6766ccd 93 if (!test_bit(S_EXIT_PENDING, &il->status))
fcb74588
SG
94 queue_work(il->workqueue, &il->tx_flush);
95 }
96}
97
98/*
99 * EEPROM
100 */
101struct il_mod_params il4965_mod_params = {
102 .amsdu_size_8K = 1,
103 .restart_fw = 1,
104 /* the rest are 0 by default */
105};
106
107void il4965_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq)
108{
109 unsigned long flags;
110 int i;
111 spin_lock_irqsave(&rxq->lock, flags);
112 INIT_LIST_HEAD(&rxq->rx_free);
113 INIT_LIST_HEAD(&rxq->rx_used);
114 /* Fill the rx_used queue with _all_ of the Rx buffers */
115 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
116 /* In the reset function, these buffers may have been allocated
117 * to an SKB, so we need to unmap and free potential storage */
118 if (rxq->pool[i].page != NULL) {
119 pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
120 PAGE_SIZE << il->hw_params.rx_page_order,
121 PCI_DMA_FROMDEVICE);
122 __il_free_pages(il, rxq->pool[i].page);
123 rxq->pool[i].page = NULL;
124 }
125 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
126 }
127
128 for (i = 0; i < RX_QUEUE_SIZE; i++)
129 rxq->queue[i] = NULL;
130
131 /* Set us so that we have processed and used all buffers, but have
132 * not restocked the Rx queue with fresh buffers */
133 rxq->read = rxq->write = 0;
134 rxq->write_actual = 0;
135 rxq->free_count = 0;
136 spin_unlock_irqrestore(&rxq->lock, flags);
137}
138
139int il4965_rx_init(struct il_priv *il, struct il_rx_queue *rxq)
140{
141 u32 rb_size;
142 const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
143 u32 rb_timeout = 0;
144
145 if (il->cfg->mod_params->amsdu_size_8K)
146 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
147 else
148 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
149
150 /* Stop Rx DMA */
151 il_wr(il, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
152
153 /* Reset driver's Rx queue write idx */
154 il_wr(il, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
155
156 /* Tell device where to find RBD circular buffer in DRAM */
157 il_wr(il, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
158 (u32)(rxq->bd_dma >> 8));
159
160 /* Tell device where in DRAM to update its Rx status */
161 il_wr(il, FH_RSCSR_CHNL0_STTS_WPTR_REG,
162 rxq->rb_stts_dma >> 4);
163
164 /* Enable Rx DMA
165 * Direct rx interrupts to hosts
166 * Rx buffer size 4 or 8k
167 * RB timeout 0x10
168 * 256 RBDs
169 */
170 il_wr(il, FH_MEM_RCSR_CHNL0_CONFIG_REG,
171 FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
172 FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
173 FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
174 rb_size|
175 (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
176 (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
177
178 /* Set interrupt coalescing timer to default (2048 usecs) */
179 il_write8(il, CSR_INT_COALESCING, IL_HOST_INT_TIMEOUT_DEF);
180
181 return 0;
182}
183
184static void il4965_set_pwr_vmain(struct il_priv *il)
185{
186/*
187 * (for documentation purposes)
188 * to set power to V_AUX, do:
189
190 if (pci_pme_capable(il->pci_dev, PCI_D3cold))
191 il_set_bits_mask_prph(il, APMG_PS_CTRL_REG,
192 APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
193 ~APMG_PS_CTRL_MSK_PWR_SRC);
194 */
195
196 il_set_bits_mask_prph(il, APMG_PS_CTRL_REG,
197 APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
198 ~APMG_PS_CTRL_MSK_PWR_SRC);
199}
200
201int il4965_hw_nic_init(struct il_priv *il)
202{
203 unsigned long flags;
204 struct il_rx_queue *rxq = &il->rxq;
205 int ret;
206
207 /* nic_init */
208 spin_lock_irqsave(&il->lock, flags);
209 il->cfg->ops->lib->apm_ops.init(il);
210
211 /* Set interrupt coalescing calibration timer to default (512 usecs) */
212 il_write8(il, CSR_INT_COALESCING, IL_HOST_INT_CALIB_TIMEOUT_DEF);
213
214 spin_unlock_irqrestore(&il->lock, flags);
215
216 il4965_set_pwr_vmain(il);
217
218 il->cfg->ops->lib->apm_ops.config(il);
219
220 /* Allocate the RX queue, or reset if it is already allocated */
221 if (!rxq->bd) {
222 ret = il_rx_queue_alloc(il);
223 if (ret) {
224 IL_ERR("Unable to initialize Rx queue\n");
225 return -ENOMEM;
226 }
227 } else
228 il4965_rx_queue_reset(il, rxq);
229
230 il4965_rx_replenish(il);
231
232 il4965_rx_init(il, rxq);
233
234 spin_lock_irqsave(&il->lock, flags);
235
236 rxq->need_update = 1;
237 il_rx_queue_update_write_ptr(il, rxq);
238
239 spin_unlock_irqrestore(&il->lock, flags);
240
241 /* Allocate or reset and init all Tx and Command queues */
242 if (!il->txq) {
243 ret = il4965_txq_ctx_alloc(il);
244 if (ret)
245 return ret;
246 } else
247 il4965_txq_ctx_reset(il);
248
a6766ccd 249 set_bit(S_INIT, &il->status);
fcb74588
SG
250
251 return 0;
252}
253
254/**
255 * il4965_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
256 */
257static inline __le32 il4965_dma_addr2rbd_ptr(struct il_priv *il,
258 dma_addr_t dma_addr)
259{
260 return cpu_to_le32((u32)(dma_addr >> 8));
261}
262
263/**
264 * il4965_rx_queue_restock - refill RX queue from pre-allocated pool
265 *
266 * If there are slots in the RX queue that need to be restocked,
267 * and we have free pre-allocated buffers, fill the ranks as much
268 * as we can, pulling from rx_free.
269 *
270 * This moves the 'write' idx forward to catch up with 'processed', and
271 * also updates the memory address in the firmware to reference the new
272 * target buffer.
273 */
274void il4965_rx_queue_restock(struct il_priv *il)
275{
276 struct il_rx_queue *rxq = &il->rxq;
277 struct list_head *element;
278 struct il_rx_buf *rxb;
279 unsigned long flags;
280
281 spin_lock_irqsave(&rxq->lock, flags);
282 while (il_rx_queue_space(rxq) > 0 && rxq->free_count) {
283 /* The overwritten rxb must be a used one */
284 rxb = rxq->queue[rxq->write];
285 BUG_ON(rxb && rxb->page);
286
287 /* Get next free Rx buffer, remove from free list */
288 element = rxq->rx_free.next;
289 rxb = list_entry(element, struct il_rx_buf, list);
290 list_del(element);
291
292 /* Point to Rx buffer via next RBD in circular buffer */
293 rxq->bd[rxq->write] = il4965_dma_addr2rbd_ptr(il,
294 rxb->page_dma);
295 rxq->queue[rxq->write] = rxb;
296 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
297 rxq->free_count--;
298 }
299 spin_unlock_irqrestore(&rxq->lock, flags);
300 /* If the pre-allocated buffer pool is dropping low, schedule to
301 * refill it */
302 if (rxq->free_count <= RX_LOW_WATERMARK)
303 queue_work(il->workqueue, &il->rx_replenish);
304
305
306 /* If we've added more space for the firmware to place data, tell it.
307 * Increment device's write pointer in multiples of 8. */
308 if (rxq->write_actual != (rxq->write & ~0x7)) {
309 spin_lock_irqsave(&rxq->lock, flags);
310 rxq->need_update = 1;
311 spin_unlock_irqrestore(&rxq->lock, flags);
312 il_rx_queue_update_write_ptr(il, rxq);
313 }
314}
315
316/**
317 * il4965_rx_replenish - Move all used packet from rx_used to rx_free
318 *
319 * When moving to rx_free an SKB is allocated for the slot.
320 *
321 * Also restock the Rx queue via il_rx_queue_restock.
322 * This is called as a scheduled work item (except for during initialization)
323 */
324static void il4965_rx_allocate(struct il_priv *il, gfp_t priority)
325{
326 struct il_rx_queue *rxq = &il->rxq;
327 struct list_head *element;
328 struct il_rx_buf *rxb;
329 struct page *page;
330 unsigned long flags;
331 gfp_t gfp_mask = priority;
332
333 while (1) {
334 spin_lock_irqsave(&rxq->lock, flags);
335 if (list_empty(&rxq->rx_used)) {
336 spin_unlock_irqrestore(&rxq->lock, flags);
337 return;
338 }
339 spin_unlock_irqrestore(&rxq->lock, flags);
340
341 if (rxq->free_count > RX_LOW_WATERMARK)
342 gfp_mask |= __GFP_NOWARN;
343
344 if (il->hw_params.rx_page_order > 0)
345 gfp_mask |= __GFP_COMP;
346
347 /* Alloc a new receive buffer */
348 page = alloc_pages(gfp_mask, il->hw_params.rx_page_order);
349 if (!page) {
350 if (net_ratelimit())
351 D_INFO("alloc_pages failed, "
352 "order: %d\n",
353 il->hw_params.rx_page_order);
354
355 if (rxq->free_count <= RX_LOW_WATERMARK &&
356 net_ratelimit())
357 IL_ERR(
358 "Failed to alloc_pages with %s. "
359 "Only %u free buffers remaining.\n",
360 priority == GFP_ATOMIC ?
361 "GFP_ATOMIC" : "GFP_KERNEL",
362 rxq->free_count);
363 /* We don't reschedule replenish work here -- we will
364 * call the restock method and if it still needs
365 * more buffers it will schedule replenish */
366 return;
367 }
368
369 spin_lock_irqsave(&rxq->lock, flags);
370
371 if (list_empty(&rxq->rx_used)) {
372 spin_unlock_irqrestore(&rxq->lock, flags);
373 __free_pages(page, il->hw_params.rx_page_order);
374 return;
375 }
376 element = rxq->rx_used.next;
377 rxb = list_entry(element, struct il_rx_buf, list);
378 list_del(element);
379
380 spin_unlock_irqrestore(&rxq->lock, flags);
381
382 BUG_ON(rxb->page);
383 rxb->page = page;
384 /* Get physical address of the RB */
385 rxb->page_dma = pci_map_page(il->pci_dev, page, 0,
386 PAGE_SIZE << il->hw_params.rx_page_order,
387 PCI_DMA_FROMDEVICE);
388 /* dma address must be no more than 36 bits */
389 BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
390 /* and also 256 byte aligned! */
391 BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
392
393 spin_lock_irqsave(&rxq->lock, flags);
394
395 list_add_tail(&rxb->list, &rxq->rx_free);
396 rxq->free_count++;
397 il->alloc_rxb_page++;
398
399 spin_unlock_irqrestore(&rxq->lock, flags);
400 }
401}
402
403void il4965_rx_replenish(struct il_priv *il)
404{
405 unsigned long flags;
406
407 il4965_rx_allocate(il, GFP_KERNEL);
408
409 spin_lock_irqsave(&il->lock, flags);
410 il4965_rx_queue_restock(il);
411 spin_unlock_irqrestore(&il->lock, flags);
412}
413
414void il4965_rx_replenish_now(struct il_priv *il)
415{
416 il4965_rx_allocate(il, GFP_ATOMIC);
417
418 il4965_rx_queue_restock(il);
419}
420
421/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
422 * If an SKB has been detached, the POOL needs to have its SKB set to NULL
423 * This free routine walks the list of POOL entries and if SKB is set to
424 * non NULL it is unmapped and freed
425 */
426void il4965_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq)
427{
428 int i;
429 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
430 if (rxq->pool[i].page != NULL) {
431 pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
432 PAGE_SIZE << il->hw_params.rx_page_order,
433 PCI_DMA_FROMDEVICE);
434 __il_free_pages(il, rxq->pool[i].page);
435 rxq->pool[i].page = NULL;
436 }
437 }
438
439 dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
440 rxq->bd_dma);
441 dma_free_coherent(&il->pci_dev->dev, sizeof(struct il_rb_status),
442 rxq->rb_stts, rxq->rb_stts_dma);
443 rxq->bd = NULL;
444 rxq->rb_stts = NULL;
445}
446
447int il4965_rxq_stop(struct il_priv *il)
448{
449
450 /* stop Rx DMA */
451 il_wr(il, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
452 il_poll_bit(il, FH_MEM_RSSR_RX_STATUS_REG,
453 FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
454
455 return 0;
456}
457
458int il4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
459{
460 int idx = 0;
461 int band_offset = 0;
462
463 /* HT rate format: mac80211 wants an MCS number, which is just LSB */
464 if (rate_n_flags & RATE_MCS_HT_MSK) {
465 idx = (rate_n_flags & 0xff);
466 return idx;
467 /* Legacy rate format, search for match in table */
468 } else {
469 if (band == IEEE80211_BAND_5GHZ)
470 band_offset = IL_FIRST_OFDM_RATE;
471 for (idx = band_offset; idx < RATE_COUNT_LEGACY; idx++)
472 if (il_rates[idx].plcp == (rate_n_flags & 0xFF))
473 return idx - band_offset;
474 }
475
476 return -1;
477}
478
479static int il4965_calc_rssi(struct il_priv *il,
480 struct il_rx_phy_res *rx_resp)
481{
482 /* data from PHY/DSP regarding signal strength, etc.,
483 * contents are always there, not configurable by host. */
484 struct il4965_rx_non_cfg_phy *ncphy =
485 (struct il4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
486 u32 agc = (le16_to_cpu(ncphy->agc_info) & IL49_AGC_DB_MASK)
487 >> IL49_AGC_DB_POS;
488
489 u32 valid_antennae =
490 (le16_to_cpu(rx_resp->phy_flags) & IL49_RX_PHY_FLAGS_ANTENNAE_MASK)
491 >> IL49_RX_PHY_FLAGS_ANTENNAE_OFFSET;
492 u8 max_rssi = 0;
493 u32 i;
494
495 /* Find max rssi among 3 possible receivers.
496 * These values are measured by the digital signal processor (DSP).
497 * They should stay fairly constant even as the signal strength varies,
498 * if the radio's automatic gain control (AGC) is working right.
499 * AGC value (see below) will provide the "interesting" info. */
500 for (i = 0; i < 3; i++)
501 if (valid_antennae & (1 << i))
502 max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);
503
504 D_STATS("Rssi In A %d B %d C %d Max %d AGC dB %d\n",
505 ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
506 max_rssi, agc);
507
508 /* dBm = max_rssi dB - agc dB - constant.
509 * Higher AGC (higher radio gain) means lower signal. */
510 return max_rssi - agc - IL4965_RSSI_OFFSET;
511}
512
513
514static u32 il4965_translate_rx_status(struct il_priv *il, u32 decrypt_in)
515{
516 u32 decrypt_out = 0;
517
518 if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
519 RX_RES_STATUS_STATION_FOUND)
520 decrypt_out |= (RX_RES_STATUS_STATION_FOUND |
521 RX_RES_STATUS_NO_STATION_INFO_MISMATCH);
522
523 decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);
524
525 /* packet was not encrypted */
526 if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
527 RX_RES_STATUS_SEC_TYPE_NONE)
528 return decrypt_out;
529
530 /* packet was encrypted with unknown alg */
531 if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
532 RX_RES_STATUS_SEC_TYPE_ERR)
533 return decrypt_out;
534
535 /* decryption was not done in HW */
536 if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
537 RX_MPDU_RES_STATUS_DEC_DONE_MSK)
538 return decrypt_out;
539
540 switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {
541
542 case RX_RES_STATUS_SEC_TYPE_CCMP:
543 /* alg is CCM: check MIC only */
544 if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
545 /* Bad MIC */
546 decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
547 else
548 decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
549
550 break;
551
552 case RX_RES_STATUS_SEC_TYPE_TKIP:
553 if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
554 /* Bad TTAK */
555 decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
556 break;
557 }
558 /* fall through if TTAK OK */
559 default:
560 if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
561 decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
562 else
563 decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
564 break;
565 }
566
567 D_RX("decrypt_in:0x%x decrypt_out = 0x%x\n",
568 decrypt_in, decrypt_out);
569
570 return decrypt_out;
571}
572
573static void il4965_pass_packet_to_mac80211(struct il_priv *il,
574 struct ieee80211_hdr *hdr,
575 u16 len,
576 u32 ampdu_status,
577 struct il_rx_buf *rxb,
578 struct ieee80211_rx_status *stats)
579{
580 struct sk_buff *skb;
581 __le16 fc = hdr->frame_control;
582
583 /* We only process data packets if the interface is open */
584 if (unlikely(!il->is_open)) {
585 D_DROP(
586 "Dropping packet while interface is not open.\n");
587 return;
588 }
589
590 /* In case of HW accelerated crypto and bad decryption, drop */
591 if (!il->cfg->mod_params->sw_crypto &&
592 il_set_decrypted_flag(il, hdr, ampdu_status, stats))
593 return;
594
595 skb = dev_alloc_skb(128);
596 if (!skb) {
597 IL_ERR("dev_alloc_skb failed\n");
598 return;
599 }
600
601 skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);
602
603 il_update_stats(il, false, fc, len);
604 memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
605
606 ieee80211_rx(il->hw, skb);
607 il->alloc_rxb_page--;
608 rxb->page = NULL;
609}
610
611/* Called for REPLY_RX (legacy ABG frames), or
612 * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
613void il4965_rx_reply_rx(struct il_priv *il,
614 struct il_rx_buf *rxb)
615{
616 struct ieee80211_hdr *header;
617 struct ieee80211_rx_status rx_status;
618 struct il_rx_pkt *pkt = rxb_addr(rxb);
619 struct il_rx_phy_res *phy_res;
620 __le32 rx_pkt_status;
621 struct il_rx_mpdu_res_start *amsdu;
622 u32 len;
623 u32 ampdu_status;
624 u32 rate_n_flags;
625
626 /**
627 * REPLY_RX and REPLY_RX_MPDU_CMD are handled differently.
628 * REPLY_RX: physical layer info is in this buffer
629 * REPLY_RX_MPDU_CMD: physical layer info was sent in separate
630 * command and cached in il->last_phy_res
631 *
632 * Here we set up local variables depending on which command is
633 * received.
634 */
635 if (pkt->hdr.cmd == REPLY_RX) {
636 phy_res = (struct il_rx_phy_res *)pkt->u.raw;
637 header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res)
638 + phy_res->cfg_phy_cnt);
639
640 len = le16_to_cpu(phy_res->byte_count);
641 rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) +
642 phy_res->cfg_phy_cnt + len);
643 ampdu_status = le32_to_cpu(rx_pkt_status);
644 } else {
645 if (!il->_4965.last_phy_res_valid) {
646 IL_ERR("MPDU frame without cached PHY data\n");
647 return;
648 }
649 phy_res = &il->_4965.last_phy_res;
650 amsdu = (struct il_rx_mpdu_res_start *)pkt->u.raw;
651 header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
652 len = le16_to_cpu(amsdu->byte_count);
653 rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len);
654 ampdu_status = il4965_translate_rx_status(il,
655 le32_to_cpu(rx_pkt_status));
656 }
657
658 if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
659 D_DROP("dsp size out of range [0,20]: %d/n",
660 phy_res->cfg_phy_cnt);
661 return;
662 }
663
664 if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
665 !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
666 D_RX("Bad CRC or FIFO: 0x%08X.\n",
667 le32_to_cpu(rx_pkt_status));
668 return;
669 }
670
671 /* This will be used in several places later */
672 rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
673
674 /* rx_status carries information about the packet to mac80211 */
675 rx_status.mactime = le64_to_cpu(phy_res->timestamp);
676 rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
677 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
678 rx_status.freq =
679 ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
680 rx_status.band);
681 rx_status.rate_idx =
682 il4965_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
683 rx_status.flag = 0;
684
685 /* TSF isn't reliable. In order to allow smooth user experience,
686 * this W/A doesn't propagate it to the mac80211 */
687 /*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/
688
689 il->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
690
691 /* Find max signal strength (dBm) among 3 antenna/receiver chains */
692 rx_status.signal = il4965_calc_rssi(il, phy_res);
693
694 il_dbg_log_rx_data_frame(il, len, header);
695 D_STATS("Rssi %d, TSF %llu\n",
696 rx_status.signal, (unsigned long long)rx_status.mactime);
697
698 /*
699 * "antenna number"
700 *
701 * It seems that the antenna field in the phy flags value
702 * is actually a bit field. This is undefined by radiotap,
703 * it wants an actual antenna number but I always get "7"
704 * for most legacy frames I receive indicating that the
705 * same frame was received on all three RX chains.
706 *
707 * I think this field should be removed in favor of a
708 * new 802.11n radiotap field "RX chains" that is defined
709 * as a bitmask.
710 */
711 rx_status.antenna =
712 (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK)
713 >> RX_RES_PHY_FLAGS_ANTENNA_POS;
714
715 /* set the preamble flag if appropriate */
716 if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
717 rx_status.flag |= RX_FLAG_SHORTPRE;
718
719 /* Set up the HT phy flags */
720 if (rate_n_flags & RATE_MCS_HT_MSK)
721 rx_status.flag |= RX_FLAG_HT;
722 if (rate_n_flags & RATE_MCS_HT40_MSK)
723 rx_status.flag |= RX_FLAG_40MHZ;
724 if (rate_n_flags & RATE_MCS_SGI_MSK)
725 rx_status.flag |= RX_FLAG_SHORT_GI;
726
727 il4965_pass_packet_to_mac80211(il, header, len, ampdu_status,
728 rxb, &rx_status);
729}
730
731/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
732 * This will be used later in il_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
733void il4965_rx_reply_rx_phy(struct il_priv *il,
734 struct il_rx_buf *rxb)
735{
736 struct il_rx_pkt *pkt = rxb_addr(rxb);
737 il->_4965.last_phy_res_valid = true;
738 memcpy(&il->_4965.last_phy_res, pkt->u.raw,
739 sizeof(struct il_rx_phy_res));
740}
741
742static int il4965_get_channels_for_scan(struct il_priv *il,
743 struct ieee80211_vif *vif,
744 enum ieee80211_band band,
745 u8 is_active, u8 n_probes,
746 struct il_scan_channel *scan_ch)
747{
748 struct ieee80211_channel *chan;
749 const struct ieee80211_supported_band *sband;
750 const struct il_channel_info *ch_info;
751 u16 passive_dwell = 0;
752 u16 active_dwell = 0;
753 int added, i;
754 u16 channel;
755
756 sband = il_get_hw_mode(il, band);
757 if (!sband)
758 return 0;
759
760 active_dwell = il_get_active_dwell_time(il, band, n_probes);
761 passive_dwell = il_get_passive_dwell_time(il, band, vif);
762
763 if (passive_dwell <= active_dwell)
764 passive_dwell = active_dwell + 1;
765
766 for (i = 0, added = 0; i < il->scan_request->n_channels; i++) {
767 chan = il->scan_request->channels[i];
768
769 if (chan->band != band)
770 continue;
771
772 channel = chan->hw_value;
773 scan_ch->channel = cpu_to_le16(channel);
774
775 ch_info = il_get_channel_info(il, band, channel);
776 if (!il_is_channel_valid(ch_info)) {
777 D_SCAN(
778 "Channel %d is INVALID for this band.\n",
779 channel);
780 continue;
781 }
782
783 if (!is_active || il_is_channel_passive(ch_info) ||
784 (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
785 scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
786 else
787 scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
788
789 if (n_probes)
790 scan_ch->type |= IL_SCAN_PROBE_MASK(n_probes);
791
792 scan_ch->active_dwell = cpu_to_le16(active_dwell);
793 scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
794
795 /* Set txpower levels to defaults */
796 scan_ch->dsp_atten = 110;
797
798 /* NOTE: if we were doing 6Mb OFDM for scans we'd use
799 * power level:
800 * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
801 */
802 if (band == IEEE80211_BAND_5GHZ)
803 scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
804 else
805 scan_ch->tx_gain = ((1 << 5) | (5 << 3));
806
807 D_SCAN("Scanning ch=%d prob=0x%X [%s %d]\n",
808 channel, le32_to_cpu(scan_ch->type),
809 (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
810 "ACTIVE" : "PASSIVE",
811 (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
812 active_dwell : passive_dwell);
813
814 scan_ch++;
815 added++;
816 }
817
818 D_SCAN("total channels to scan %d\n", added);
819 return added;
820}
821
af038f40
SG
822static inline u32 il4965_ant_idx_to_flags(u8 ant_idx)
823{
824 return BIT(ant_idx) << RATE_MCS_ANT_POS;
825}
826
fcb74588
SG
827int il4965_request_scan(struct il_priv *il, struct ieee80211_vif *vif)
828{
829 struct il_host_cmd cmd = {
830 .id = REPLY_SCAN_CMD,
831 .len = sizeof(struct il_scan_cmd),
832 .flags = CMD_SIZE_HUGE,
833 };
834 struct il_scan_cmd *scan;
835 struct il_rxon_context *ctx = &il->ctx;
836 u32 rate_flags = 0;
837 u16 cmd_len;
838 u16 rx_chain = 0;
839 enum ieee80211_band band;
840 u8 n_probes = 0;
841 u8 rx_ant = il->hw_params.valid_rx_ant;
842 u8 rate;
843 bool is_active = false;
844 int chan_mod;
845 u8 active_chains;
846 u8 scan_tx_antennas = il->hw_params.valid_tx_ant;
847 int ret;
848
849 lockdep_assert_held(&il->mutex);
850
851 if (vif)
852 ctx = il_rxon_ctx_from_vif(vif);
853
854 if (!il->scan_cmd) {
855 il->scan_cmd = kmalloc(sizeof(struct il_scan_cmd) +
856 IL_MAX_SCAN_SIZE, GFP_KERNEL);
857 if (!il->scan_cmd) {
858 D_SCAN(
859 "fail to allocate memory for scan\n");
860 return -ENOMEM;
861 }
862 }
863 scan = il->scan_cmd;
864 memset(scan, 0, sizeof(struct il_scan_cmd) + IL_MAX_SCAN_SIZE);
865
866 scan->quiet_plcp_th = IL_PLCP_QUIET_THRESH;
867 scan->quiet_time = IL_ACTIVE_QUIET_TIME;
868
869 if (il_is_any_associated(il)) {
870 u16 interval;
871 u32 extra;
872 u32 suspend_time = 100;
873 u32 scan_suspend_time = 100;
874
875 D_INFO("Scanning while associated...\n");
876 interval = vif->bss_conf.beacon_int;
877
878 scan->suspend_time = 0;
879 scan->max_out_time = cpu_to_le32(200 * 1024);
880 if (!interval)
881 interval = suspend_time;
882
883 extra = (suspend_time / interval) << 22;
884 scan_suspend_time = (extra |
885 ((suspend_time % interval) * 1024));
886 scan->suspend_time = cpu_to_le32(scan_suspend_time);
887 D_SCAN("suspend_time 0x%X beacon interval %d\n",
888 scan_suspend_time, interval);
889 }
890
891 if (il->scan_request->n_ssids) {
892 int i, p = 0;
893 D_SCAN("Kicking off active scan\n");
894 for (i = 0; i < il->scan_request->n_ssids; i++) {
895 /* always does wildcard anyway */
896 if (!il->scan_request->ssids[i].ssid_len)
897 continue;
898 scan->direct_scan[p].id = WLAN_EID_SSID;
899 scan->direct_scan[p].len =
900 il->scan_request->ssids[i].ssid_len;
901 memcpy(scan->direct_scan[p].ssid,
902 il->scan_request->ssids[i].ssid,
903 il->scan_request->ssids[i].ssid_len);
904 n_probes++;
905 p++;
906 }
907 is_active = true;
908 } else
909 D_SCAN("Start passive scan.\n");
910
911 scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
912 scan->tx_cmd.sta_id = ctx->bcast_sta_id;
913 scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
914
915 switch (il->scan_band) {
916 case IEEE80211_BAND_2GHZ:
917 scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
918 chan_mod = le32_to_cpu(
919 il->ctx.active.flags &
920 RXON_FLG_CHANNEL_MODE_MSK)
921 >> RXON_FLG_CHANNEL_MODE_POS;
922 if (chan_mod == CHANNEL_MODE_PURE_40) {
923 rate = RATE_6M_PLCP;
924 } else {
925 rate = RATE_1M_PLCP;
926 rate_flags = RATE_MCS_CCK_MSK;
927 }
928 break;
929 case IEEE80211_BAND_5GHZ:
930 rate = RATE_6M_PLCP;
931 break;
932 default:
933 IL_WARN("Invalid scan band\n");
934 return -EIO;
935 }
936
937 /*
938 * If active scanning is requested but a certain channel is
939 * marked passive, we can do active scanning if we detect
940 * transmissions.
941 *
942 * There is an issue with some firmware versions that triggers
943 * a sysassert on a "good CRC threshold" of zero (== disabled),
944 * on a radar channel even though this means that we should NOT
945 * send probes.
946 *
947 * The "good CRC threshold" is the number of frames that we
948 * need to receive during our dwell time on a channel before
949 * sending out probes -- setting this to a huge value will
950 * mean we never reach it, but at the same time work around
951 * the aforementioned issue. Thus use IL_GOOD_CRC_TH_NEVER
952 * here instead of IL_GOOD_CRC_TH_DISABLED.
953 */
954 scan->good_CRC_th = is_active ? IL_GOOD_CRC_TH_DEFAULT :
955 IL_GOOD_CRC_TH_NEVER;
956
957 band = il->scan_band;
958
959 if (il->cfg->scan_rx_antennas[band])
960 rx_ant = il->cfg->scan_rx_antennas[band];
961
962 il->scan_tx_ant[band] = il4965_toggle_tx_ant(il,
963 il->scan_tx_ant[band],
964 scan_tx_antennas);
965 rate_flags |= il4965_ant_idx_to_flags(il->scan_tx_ant[band]);
966 scan->tx_cmd.rate_n_flags = il4965_hw_set_rate_n_flags(rate, rate_flags);
967
968 /* In power save mode use one chain, otherwise use all chains */
a6766ccd 969 if (test_bit(S_POWER_PMI, &il->status)) {
fcb74588
SG
970 /* rx_ant has been set to all valid chains previously */
971 active_chains = rx_ant &
972 ((u8)(il->chain_noise_data.active_chains));
973 if (!active_chains)
974 active_chains = rx_ant;
975
976 D_SCAN("chain_noise_data.active_chains: %u\n",
977 il->chain_noise_data.active_chains);
978
979 rx_ant = il4965_first_antenna(active_chains);
980 }
981
982 /* MIMO is not used here, but value is required */
983 rx_chain |= il->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
984 rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
985 rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
986 rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
987 scan->rx_chain = cpu_to_le16(rx_chain);
988
989 cmd_len = il_fill_probe_req(il,
990 (struct ieee80211_mgmt *)scan->data,
991 vif->addr,
992 il->scan_request->ie,
993 il->scan_request->ie_len,
994 IL_MAX_SCAN_SIZE - sizeof(*scan));
995 scan->tx_cmd.len = cpu_to_le16(cmd_len);
996
997 scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK |
998 RXON_FILTER_BCON_AWARE_MSK);
999
1000 scan->channel_count = il4965_get_channels_for_scan(il, vif, band,
1001 is_active, n_probes,
1002 (void *)&scan->data[cmd_len]);
1003 if (scan->channel_count == 0) {
1004 D_SCAN("channel count %d\n", scan->channel_count);
1005 return -EIO;
1006 }
1007
1008 cmd.len += le16_to_cpu(scan->tx_cmd.len) +
1009 scan->channel_count * sizeof(struct il_scan_channel);
1010 cmd.data = scan;
1011 scan->len = cpu_to_le16(cmd.len);
1012
a6766ccd 1013 set_bit(S_SCAN_HW, &il->status);
fcb74588
SG
1014
1015 ret = il_send_cmd_sync(il, &cmd);
1016 if (ret)
a6766ccd 1017 clear_bit(S_SCAN_HW, &il->status);
fcb74588
SG
1018
1019 return ret;
1020}
1021
1022int il4965_manage_ibss_station(struct il_priv *il,
1023 struct ieee80211_vif *vif, bool add)
1024{
1025 struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
1026
1027 if (add)
1028 return il4965_add_bssid_station(il, vif_priv->ctx,
1029 vif->bss_conf.bssid,
1030 &vif_priv->ibss_bssid_sta_id);
1031 return il_remove_station(il, vif_priv->ibss_bssid_sta_id,
1032 vif->bss_conf.bssid);
1033}
1034
1035void il4965_free_tfds_in_queue(struct il_priv *il,
1036 int sta_id, int tid, int freed)
1037{
1038 lockdep_assert_held(&il->sta_lock);
1039
1040 if (il->stations[sta_id].tid[tid].tfds_in_queue >= freed)
1041 il->stations[sta_id].tid[tid].tfds_in_queue -= freed;
1042 else {
1043 D_TX("free more than tfds_in_queue (%u:%d)\n",
1044 il->stations[sta_id].tid[tid].tfds_in_queue,
1045 freed);
1046 il->stations[sta_id].tid[tid].tfds_in_queue = 0;
1047 }
1048}
1049
1050#define IL_TX_QUEUE_MSK 0xfffff
1051
1052static bool il4965_is_single_rx_stream(struct il_priv *il)
1053{
1054 return il->current_ht_config.smps == IEEE80211_SMPS_STATIC ||
1055 il->current_ht_config.single_chain_sufficient;
1056}
1057
1058#define IL_NUM_RX_CHAINS_MULTIPLE 3
1059#define IL_NUM_RX_CHAINS_SINGLE 2
1060#define IL_NUM_IDLE_CHAINS_DUAL 2
1061#define IL_NUM_IDLE_CHAINS_SINGLE 1
1062
1063/*
1064 * Determine how many receiver/antenna chains to use.
1065 *
1066 * More provides better reception via diversity. Fewer saves power
1067 * at the expense of throughput, but only when not in powersave to
1068 * start with.
1069 *
1070 * MIMO (dual stream) requires at least 2, but works better with 3.
1071 * This does not determine *which* chains to use, just how many.
1072 */
1073static int il4965_get_active_rx_chain_count(struct il_priv *il)
1074{
1075 /* # of Rx chains to use when expecting MIMO. */
1076 if (il4965_is_single_rx_stream(il))
1077 return IL_NUM_RX_CHAINS_SINGLE;
1078 else
1079 return IL_NUM_RX_CHAINS_MULTIPLE;
1080}
1081
1082/*
1083 * When we are in power saving mode, unless device support spatial
1084 * multiplexing power save, use the active count for rx chain count.
1085 */
1086static int
1087il4965_get_idle_rx_chain_count(struct il_priv *il, int active_cnt)
1088{
1089 /* # Rx chains when idling, depending on SMPS mode */
1090 switch (il->current_ht_config.smps) {
1091 case IEEE80211_SMPS_STATIC:
1092 case IEEE80211_SMPS_DYNAMIC:
1093 return IL_NUM_IDLE_CHAINS_SINGLE;
1094 case IEEE80211_SMPS_OFF:
1095 return active_cnt;
1096 default:
1097 WARN(1, "invalid SMPS mode %d",
1098 il->current_ht_config.smps);
1099 return active_cnt;
1100 }
1101}
1102
1103/* up to 4 chains */
1104static u8 il4965_count_chain_bitmap(u32 chain_bitmap)
1105{
1106 u8 res;
1107 res = (chain_bitmap & BIT(0)) >> 0;
1108 res += (chain_bitmap & BIT(1)) >> 1;
1109 res += (chain_bitmap & BIT(2)) >> 2;
1110 res += (chain_bitmap & BIT(3)) >> 3;
1111 return res;
1112}
1113
1114/**
1115 * il4965_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
1116 *
1117 * Selects how many and which Rx receivers/antennas/chains to use.
1118 * This should not be used for scan command ... it puts data in wrong place.
1119 */
1120void il4965_set_rxon_chain(struct il_priv *il, struct il_rxon_context *ctx)
1121{
1122 bool is_single = il4965_is_single_rx_stream(il);
a6766ccd 1123 bool is_cam = !test_bit(S_POWER_PMI, &il->status);
fcb74588
SG
1124 u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
1125 u32 active_chains;
1126 u16 rx_chain;
1127
1128 /* Tell uCode which antennas are actually connected.
1129 * Before first association, we assume all antennas are connected.
1130 * Just after first association, il4965_chain_noise_calibration()
1131 * checks which antennas actually *are* connected. */
1132 if (il->chain_noise_data.active_chains)
1133 active_chains = il->chain_noise_data.active_chains;
1134 else
1135 active_chains = il->hw_params.valid_rx_ant;
1136
1137 rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS;
1138
1139 /* How many receivers should we use? */
1140 active_rx_cnt = il4965_get_active_rx_chain_count(il);
1141 idle_rx_cnt = il4965_get_idle_rx_chain_count(il, active_rx_cnt);
1142
1143
1144 /* correct rx chain count according hw settings
1145 * and chain noise calibration
1146 */
1147 valid_rx_cnt = il4965_count_chain_bitmap(active_chains);
1148 if (valid_rx_cnt < active_rx_cnt)
1149 active_rx_cnt = valid_rx_cnt;
1150
1151 if (valid_rx_cnt < idle_rx_cnt)
1152 idle_rx_cnt = valid_rx_cnt;
1153
1154 rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
1155 rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS;
1156
1157 ctx->staging.rx_chain = cpu_to_le16(rx_chain);
1158
1159 if (!is_single && active_rx_cnt >= IL_NUM_RX_CHAINS_SINGLE && is_cam)
1160 ctx->staging.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
1161 else
1162 ctx->staging.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;
1163
1164 D_ASSOC("rx_chain=0x%X active=%d idle=%d\n",
1165 ctx->staging.rx_chain,
1166 active_rx_cnt, idle_rx_cnt);
1167
1168 WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 ||
1169 active_rx_cnt < idle_rx_cnt);
1170}
1171
1172u8 il4965_toggle_tx_ant(struct il_priv *il, u8 ant, u8 valid)
1173{
1174 int i;
1175 u8 ind = ant;
1176
1177 for (i = 0; i < RATE_ANT_NUM - 1; i++) {
1178 ind = (ind + 1) < RATE_ANT_NUM ? ind + 1 : 0;
1179 if (valid & BIT(ind))
1180 return ind;
1181 }
1182 return ant;
1183}
1184
1185static const char *il4965_get_fh_string(int cmd)
1186{
1187 switch (cmd) {
1188 IL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
1189 IL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
1190 IL_CMD(FH_RSCSR_CHNL0_WPTR);
1191 IL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
1192 IL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
1193 IL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
1194 IL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
1195 IL_CMD(FH_TSSR_TX_STATUS_REG);
1196 IL_CMD(FH_TSSR_TX_ERROR_REG);
1197 default:
1198 return "UNKNOWN";
1199 }
1200}
1201
1202int il4965_dump_fh(struct il_priv *il, char **buf, bool display)
1203{
1204 int i;
1205#ifdef CONFIG_IWLEGACY_DEBUG
1206 int pos = 0;
1207 size_t bufsz = 0;
1208#endif
1209 static const u32 fh_tbl[] = {
1210 FH_RSCSR_CHNL0_STTS_WPTR_REG,
1211 FH_RSCSR_CHNL0_RBDCB_BASE_REG,
1212 FH_RSCSR_CHNL0_WPTR,
1213 FH_MEM_RCSR_CHNL0_CONFIG_REG,
1214 FH_MEM_RSSR_SHARED_CTRL_REG,
1215 FH_MEM_RSSR_RX_STATUS_REG,
1216 FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
1217 FH_TSSR_TX_STATUS_REG,
1218 FH_TSSR_TX_ERROR_REG
1219 };
1220#ifdef CONFIG_IWLEGACY_DEBUG
1221 if (display) {
1222 bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
1223 *buf = kmalloc(bufsz, GFP_KERNEL);
1224 if (!*buf)
1225 return -ENOMEM;
1226 pos += scnprintf(*buf + pos, bufsz - pos,
1227 "FH register values:\n");
1228 for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
1229 pos += scnprintf(*buf + pos, bufsz - pos,
1230 " %34s: 0X%08x\n",
1231 il4965_get_fh_string(fh_tbl[i]),
1232 il_rd(il, fh_tbl[i]));
1233 }
1234 return pos;
1235 }
1236#endif
1237 IL_ERR("FH register values:\n");
1238 for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
1239 IL_ERR(" %34s: 0X%08x\n",
1240 il4965_get_fh_string(fh_tbl[i]),
1241 il_rd(il, fh_tbl[i]));
1242 }
1243 return 0;
1244}
a1751b22
SG
1245void il4965_rx_missed_beacon_notif(struct il_priv *il,
1246 struct il_rx_buf *rxb)
1247
1248{
1249 struct il_rx_pkt *pkt = rxb_addr(rxb);
1250 struct il_missed_beacon_notif *missed_beacon;
1251
1252 missed_beacon = &pkt->u.missed_beacon;
1253 if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
1254 il->missed_beacon_threshold) {
1255 D_CALIB(
1256 "missed bcn cnsq %d totl %d rcd %d expctd %d\n",
1257 le32_to_cpu(missed_beacon->consecutive_missed_beacons),
1258 le32_to_cpu(missed_beacon->total_missed_becons),
1259 le32_to_cpu(missed_beacon->num_recvd_beacons),
1260 le32_to_cpu(missed_beacon->num_expected_beacons));
a6766ccd 1261 if (!test_bit(S_SCANNING, &il->status))
a1751b22
SG
1262 il4965_init_sensitivity(il);
1263 }
1264}
1265
1266/* Calculate noise level, based on measurements during network silence just
1267 * before arriving beacon. This measurement can be done only if we know
1268 * exactly when to expect beacons, therefore only when we're associated. */
1269static void il4965_rx_calc_noise(struct il_priv *il)
1270{
1271 struct stats_rx_non_phy *rx_info;
1272 int num_active_rx = 0;
1273 int total_silence = 0;
1274 int bcn_silence_a, bcn_silence_b, bcn_silence_c;
1275 int last_rx_noise;
1276
1277 rx_info = &(il->_4965.stats.rx.general);
1278 bcn_silence_a =
1279 le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
1280 bcn_silence_b =
1281 le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
1282 bcn_silence_c =
1283 le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
1284
1285 if (bcn_silence_a) {
1286 total_silence += bcn_silence_a;
1287 num_active_rx++;
1288 }
1289 if (bcn_silence_b) {
1290 total_silence += bcn_silence_b;
1291 num_active_rx++;
1292 }
1293 if (bcn_silence_c) {
1294 total_silence += bcn_silence_c;
1295 num_active_rx++;
1296 }
1297
1298 /* Average among active antennas */
1299 if (num_active_rx)
1300 last_rx_noise = (total_silence / num_active_rx) - 107;
1301 else
1302 last_rx_noise = IL_NOISE_MEAS_NOT_AVAILABLE;
1303
1304 D_CALIB("inband silence a %u, b %u, c %u, dBm %d\n",
1305 bcn_silence_a, bcn_silence_b, bcn_silence_c,
1306 last_rx_noise);
1307}
1308
1309#ifdef CONFIG_IWLEGACY_DEBUGFS
1310/*
1311 * based on the assumption of all stats counter are in DWORD
1312 * FIXME: This function is for debugging, do not deal with
1313 * the case of counters roll-over.
1314 */
1315static void il4965_accumulative_stats(struct il_priv *il,
1316 __le32 *stats)
1317{
1318 int i, size;
1319 __le32 *prev_stats;
1320 u32 *accum_stats;
1321 u32 *delta, *max_delta;
1322 struct stats_general_common *general, *accum_general;
1323 struct stats_tx *tx, *accum_tx;
1324
1325 prev_stats = (__le32 *)&il->_4965.stats;
1326 accum_stats = (u32 *)&il->_4965.accum_stats;
1327 size = sizeof(struct il_notif_stats);
1328 general = &il->_4965.stats.general.common;
1329 accum_general = &il->_4965.accum_stats.general.common;
1330 tx = &il->_4965.stats.tx;
1331 accum_tx = &il->_4965.accum_stats.tx;
1332 delta = (u32 *)&il->_4965.delta_stats;
1333 max_delta = (u32 *)&il->_4965.max_delta;
1334
1335 for (i = sizeof(__le32); i < size;
1336 i += sizeof(__le32), stats++, prev_stats++, delta++,
1337 max_delta++, accum_stats++) {
1338 if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
1339 *delta = (le32_to_cpu(*stats) -
1340 le32_to_cpu(*prev_stats));
1341 *accum_stats += *delta;
1342 if (*delta > *max_delta)
1343 *max_delta = *delta;
1344 }
1345 }
1346
1347 /* reset accumulative stats for "no-counter" type stats */
1348 accum_general->temperature = general->temperature;
1349 accum_general->ttl_timestamp = general->ttl_timestamp;
1350}
1351#endif
1352
1353#define REG_RECALIB_PERIOD (60)
1354
1355void il4965_rx_stats(struct il_priv *il,
1356 struct il_rx_buf *rxb)
1357{
1358 int change;
1359 struct il_rx_pkt *pkt = rxb_addr(rxb);
1360
1361 D_RX(
1362 "Statistics notification received (%d vs %d).\n",
1363 (int)sizeof(struct il_notif_stats),
1364 le32_to_cpu(pkt->len_n_flags) &
1365 FH_RSCSR_FRAME_SIZE_MSK);
1366
1367 change = ((il->_4965.stats.general.common.temperature !=
1368 pkt->u.stats.general.common.temperature) ||
1369 ((il->_4965.stats.flag &
1370 STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
1371 (pkt->u.stats.flag &
1372 STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
1373#ifdef CONFIG_IWLEGACY_DEBUGFS
1374 il4965_accumulative_stats(il, (__le32 *)&pkt->u.stats);
1375#endif
1376
1377 /* TODO: reading some of stats is unneeded */
1378 memcpy(&il->_4965.stats, &pkt->u.stats,
1379 sizeof(il->_4965.stats));
1380
a6766ccd 1381 set_bit(S_STATISTICS, &il->status);
a1751b22
SG
1382
1383 /* Reschedule the stats timer to occur in
1384 * REG_RECALIB_PERIOD seconds to ensure we get a
1385 * thermal update even if the uCode doesn't give
1386 * us one */
1387 mod_timer(&il->stats_periodic, jiffies +
1388 msecs_to_jiffies(REG_RECALIB_PERIOD * 1000));
1389
a6766ccd 1390 if (unlikely(!test_bit(S_SCANNING, &il->status)) &&
a1751b22
SG
1391 (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
1392 il4965_rx_calc_noise(il);
1393 queue_work(il->workqueue, &il->run_time_calib_work);
1394 }
1395 if (il->cfg->ops->lib->temp_ops.temperature && change)
1396 il->cfg->ops->lib->temp_ops.temperature(il);
1397}
1398
1399void il4965_reply_stats(struct il_priv *il,
1400 struct il_rx_buf *rxb)
1401{
1402 struct il_rx_pkt *pkt = rxb_addr(rxb);
1403
1404 if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) {
1405#ifdef CONFIG_IWLEGACY_DEBUGFS
1406 memset(&il->_4965.accum_stats, 0,
1407 sizeof(struct il_notif_stats));
1408 memset(&il->_4965.delta_stats, 0,
1409 sizeof(struct il_notif_stats));
1410 memset(&il->_4965.max_delta, 0,
1411 sizeof(struct il_notif_stats));
1412#endif
1413 D_RX("Statistics have been cleared\n");
1414 }
1415 il4965_rx_stats(il, rxb);
1416}
1417
8f29b456
SG
1418
1419/*
1420 * mac80211 queues, ACs, hardware queues, FIFOs.
1421 *
1422 * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
1423 *
1424 * Mac80211 uses the following numbers, which we get as from it
1425 * by way of skb_get_queue_mapping(skb):
1426 *
1427 * VO 0
1428 * VI 1
1429 * BE 2
1430 * BK 3
1431 *
1432 *
1433 * Regular (not A-MPDU) frames are put into hardware queues corresponding
1434 * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
1435 * own queue per aggregation session (RA/TID combination), such queues are
1436 * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
1437 * order to map frames to the right queue, we also need an AC->hw queue
1438 * mapping. This is implemented here.
1439 *
1440 * Due to the way hw queues are set up (by the hw specific modules like
af038f40 1441 * 4965.c), the AC->hw queue mapping is the identity
8f29b456
SG
1442 * mapping.
1443 */
1444
a1751b22
SG
1445static const u8 tid_to_ac[] = {
1446 IEEE80211_AC_BE,
1447 IEEE80211_AC_BK,
1448 IEEE80211_AC_BK,
1449 IEEE80211_AC_BE,
1450 IEEE80211_AC_VI,
1451 IEEE80211_AC_VI,
1452 IEEE80211_AC_VO,
1453 IEEE80211_AC_VO
1454};
1455
1456static inline int il4965_get_ac_from_tid(u16 tid)
1457{
1458 if (likely(tid < ARRAY_SIZE(tid_to_ac)))
1459 return tid_to_ac[tid];
1460
1461 /* no support for TIDs 8-15 yet */
1462 return -EINVAL;
1463}
1464
1465static inline int
1466il4965_get_fifo_from_tid(struct il_rxon_context *ctx, u16 tid)
1467{
1468 if (likely(tid < ARRAY_SIZE(tid_to_ac)))
1469 return ctx->ac_to_fifo[tid_to_ac[tid]];
1470
1471 /* no support for TIDs 8-15 yet */
1472 return -EINVAL;
1473}
1474
1475/*
1476 * handle build REPLY_TX command notification.
1477 */
1478static void il4965_tx_cmd_build_basic(struct il_priv *il,
1479 struct sk_buff *skb,
1480 struct il_tx_cmd *tx_cmd,
1481 struct ieee80211_tx_info *info,
1482 struct ieee80211_hdr *hdr,
1483 u8 std_id)
1484{
1485 __le16 fc = hdr->frame_control;
1486 __le32 tx_flags = tx_cmd->tx_flags;
1487
1488 tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
1489 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
1490 tx_flags |= TX_CMD_FLG_ACK_MSK;
1491 if (ieee80211_is_mgmt(fc))
1492 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
1493 if (ieee80211_is_probe_resp(fc) &&
1494 !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
1495 tx_flags |= TX_CMD_FLG_TSF_MSK;
1496 } else {
1497 tx_flags &= (~TX_CMD_FLG_ACK_MSK);
1498 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
1499 }
1500
1501 if (ieee80211_is_back_req(fc))
1502 tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
1503
1504 tx_cmd->sta_id = std_id;
1505 if (ieee80211_has_morefrags(fc))
1506 tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
1507
1508 if (ieee80211_is_data_qos(fc)) {
1509 u8 *qc = ieee80211_get_qos_ctl(hdr);
1510 tx_cmd->tid_tspec = qc[0] & 0xf;
1511 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
1512 } else {
1513 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
1514 }
1515
1516 il_tx_cmd_protection(il, info, fc, &tx_flags);
1517
1518 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
1519 if (ieee80211_is_mgmt(fc)) {
1520 if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
1521 tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
1522 else
1523 tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
1524 } else {
1525 tx_cmd->timeout.pm_frame_timeout = 0;
1526 }
1527
1528 tx_cmd->driver_txop = 0;
1529 tx_cmd->tx_flags = tx_flags;
1530 tx_cmd->next_frame_len = 0;
1531}
1532
1533#define RTS_DFAULT_RETRY_LIMIT 60
1534
1535static void il4965_tx_cmd_build_rate(struct il_priv *il,
1536 struct il_tx_cmd *tx_cmd,
1537 struct ieee80211_tx_info *info,
1538 __le16 fc)
1539{
1540 u32 rate_flags;
1541 int rate_idx;
1542 u8 rts_retry_limit;
1543 u8 data_retry_limit;
1544 u8 rate_plcp;
1545
1546 /* Set retry limit on DATA packets and Probe Responses*/
1547 if (ieee80211_is_probe_resp(fc))
1548 data_retry_limit = 3;
1549 else
1550 data_retry_limit = IL4965_DEFAULT_TX_RETRY;
1551 tx_cmd->data_retry_limit = data_retry_limit;
1552
1553 /* Set retry limit on RTS packets */
1554 rts_retry_limit = RTS_DFAULT_RETRY_LIMIT;
1555 if (data_retry_limit < rts_retry_limit)
1556 rts_retry_limit = data_retry_limit;
1557 tx_cmd->rts_retry_limit = rts_retry_limit;
1558
1559 /* DATA packets will use the uCode station table for rate/antenna
1560 * selection */
1561 if (ieee80211_is_data(fc)) {
1562 tx_cmd->initial_rate_idx = 0;
1563 tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
1564 return;
1565 }
1566
1567 /**
1568 * If the current TX rate stored in mac80211 has the MCS bit set, it's
1569 * not really a TX rate. Thus, we use the lowest supported rate for
1570 * this band. Also use the lowest supported rate if the stored rate
1571 * idx is invalid.
1572 */
1573 rate_idx = info->control.rates[0].idx;
1574 if ((info->control.rates[0].flags & IEEE80211_TX_RC_MCS) ||
1575 rate_idx < 0 || rate_idx > RATE_COUNT_LEGACY)
1576 rate_idx = rate_lowest_index(&il->bands[info->band],
1577 info->control.sta);
1578 /* For 5 GHZ band, remap mac80211 rate indices into driver indices */
1579 if (info->band == IEEE80211_BAND_5GHZ)
1580 rate_idx += IL_FIRST_OFDM_RATE;
1581 /* Get PLCP rate for tx_cmd->rate_n_flags */
1582 rate_plcp = il_rates[rate_idx].plcp;
1583 /* Zero out flags for this packet */
1584 rate_flags = 0;
1585
1586 /* Set CCK flag as needed */
1587 if (rate_idx >= IL_FIRST_CCK_RATE && rate_idx <= IL_LAST_CCK_RATE)
1588 rate_flags |= RATE_MCS_CCK_MSK;
1589
1590 /* Set up antennas */
1591 il->mgmt_tx_ant = il4965_toggle_tx_ant(il, il->mgmt_tx_ant,
1592 il->hw_params.valid_tx_ant);
1593
1594 rate_flags |= il4965_ant_idx_to_flags(il->mgmt_tx_ant);
1595
1596 /* Set the rate in the TX cmd */
1597 tx_cmd->rate_n_flags = il4965_hw_set_rate_n_flags(rate_plcp, rate_flags);
1598}
1599
1600static void il4965_tx_cmd_build_hwcrypto(struct il_priv *il,
1601 struct ieee80211_tx_info *info,
1602 struct il_tx_cmd *tx_cmd,
1603 struct sk_buff *skb_frag,
1604 int sta_id)
1605{
1606 struct ieee80211_key_conf *keyconf = info->control.hw_key;
1607
1608 switch (keyconf->cipher) {
1609 case WLAN_CIPHER_SUITE_CCMP:
1610 tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
1611 memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
1612 if (info->flags & IEEE80211_TX_CTL_AMPDU)
1613 tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
1614 D_TX("tx_cmd with AES hwcrypto\n");
1615 break;
1616
1617 case WLAN_CIPHER_SUITE_TKIP:
1618 tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
1619 ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
1620 D_TX("tx_cmd with tkip hwcrypto\n");
1621 break;
1622
1623 case WLAN_CIPHER_SUITE_WEP104:
1624 tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
1625 /* fall through */
1626 case WLAN_CIPHER_SUITE_WEP40:
1627 tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
1628 (keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);
1629
1630 memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
1631
1632 D_TX("Configuring packet for WEP encryption "
1633 "with key %d\n", keyconf->keyidx);
1634 break;
1635
1636 default:
1637 IL_ERR("Unknown encode cipher %x\n", keyconf->cipher);
1638 break;
1639 }
1640}
1641
1642/*
1643 * start REPLY_TX command process
1644 */
1645int il4965_tx_skb(struct il_priv *il, struct sk_buff *skb)
1646{
1647 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1648 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1649 struct ieee80211_sta *sta = info->control.sta;
1650 struct il_station_priv *sta_priv = NULL;
1651 struct il_tx_queue *txq;
1652 struct il_queue *q;
1653 struct il_device_cmd *out_cmd;
1654 struct il_cmd_meta *out_meta;
1655 struct il_tx_cmd *tx_cmd;
1656 struct il_rxon_context *ctx = &il->ctx;
1657 int txq_id;
1658 dma_addr_t phys_addr;
1659 dma_addr_t txcmd_phys;
1660 dma_addr_t scratch_phys;
1661 u16 len, firstlen, secondlen;
1662 u16 seq_number = 0;
1663 __le16 fc;
1664 u8 hdr_len;
1665 u8 sta_id;
1666 u8 wait_write_ptr = 0;
1667 u8 tid = 0;
1668 u8 *qc = NULL;
1669 unsigned long flags;
1670 bool is_agg = false;
1671
1672 if (info->control.vif)
1673 ctx = il_rxon_ctx_from_vif(info->control.vif);
1674
1675 spin_lock_irqsave(&il->lock, flags);
1676 if (il_is_rfkill(il)) {
1677 D_DROP("Dropping - RF KILL\n");
1678 goto drop_unlock;
1679 }
1680
1681 fc = hdr->frame_control;
1682
1683#ifdef CONFIG_IWLEGACY_DEBUG
1684 if (ieee80211_is_auth(fc))
1685 D_TX("Sending AUTH frame\n");
1686 else if (ieee80211_is_assoc_req(fc))
1687 D_TX("Sending ASSOC frame\n");
1688 else if (ieee80211_is_reassoc_req(fc))
1689 D_TX("Sending REASSOC frame\n");
1690#endif
1691
1692 hdr_len = ieee80211_hdrlen(fc);
1693
1694 /* For management frames use broadcast id to do not break aggregation */
1695 if (!ieee80211_is_data(fc))
1696 sta_id = ctx->bcast_sta_id;
1697 else {
1698 /* Find idx into station table for destination station */
1699 sta_id = il_sta_id_or_broadcast(il, ctx, info->control.sta);
1700
1701 if (sta_id == IL_INVALID_STATION) {
1702 D_DROP("Dropping - INVALID STATION: %pM\n",
1703 hdr->addr1);
1704 goto drop_unlock;
1705 }
1706 }
1707
1708 D_TX("station Id %d\n", sta_id);
1709
1710 if (sta)
1711 sta_priv = (void *)sta->drv_priv;
1712
1713 if (sta_priv && sta_priv->asleep &&
1714 (info->flags & IEEE80211_TX_CTL_POLL_RESPONSE)) {
1715 /*
1716 * This sends an asynchronous command to the device,
1717 * but we can rely on it being processed before the
1718 * next frame is processed -- and the next frame to
1719 * this station is the one that will consume this
1720 * counter.
1721 * For now set the counter to just 1 since we do not
1722 * support uAPSD yet.
1723 */
1724 il4965_sta_modify_sleep_tx_count(il, sta_id, 1);
1725 }
1726
1727 /*
1728 * Send this frame after DTIM -- there's a special queue
1729 * reserved for this for contexts that support AP mode.
1730 */
1731 if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
1732 txq_id = ctx->mcast_queue;
1733 /*
1734 * The microcode will clear the more data
1735 * bit in the last frame it transmits.
1736 */
1737 hdr->frame_control |=
1738 cpu_to_le16(IEEE80211_FCTL_MOREDATA);
1739 } else
1740 txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)];
1741
1742 /* irqs already disabled/saved above when locking il->lock */
1743 spin_lock(&il->sta_lock);
1744
1745 if (ieee80211_is_data_qos(fc)) {
1746 qc = ieee80211_get_qos_ctl(hdr);
1747 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
1748 if (WARN_ON_ONCE(tid >= MAX_TID_COUNT)) {
1749 spin_unlock(&il->sta_lock);
1750 goto drop_unlock;
1751 }
1752 seq_number = il->stations[sta_id].tid[tid].seq_number;
1753 seq_number &= IEEE80211_SCTL_SEQ;
1754 hdr->seq_ctrl = hdr->seq_ctrl &
1755 cpu_to_le16(IEEE80211_SCTL_FRAG);
1756 hdr->seq_ctrl |= cpu_to_le16(seq_number);
1757 seq_number += 0x10;
1758 /* aggregation is on for this <sta,tid> */
1759 if (info->flags & IEEE80211_TX_CTL_AMPDU &&
1760 il->stations[sta_id].tid[tid].agg.state == IL_AGG_ON) {
1761 txq_id = il->stations[sta_id].tid[tid].agg.txq_id;
1762 is_agg = true;
1763 }
1764 }
1765
1766 txq = &il->txq[txq_id];
1767 q = &txq->q;
1768
1769 if (unlikely(il_queue_space(q) < q->high_mark)) {
1770 spin_unlock(&il->sta_lock);
1771 goto drop_unlock;
1772 }
1773
1774 if (ieee80211_is_data_qos(fc)) {
1775 il->stations[sta_id].tid[tid].tfds_in_queue++;
1776 if (!ieee80211_has_morefrags(fc))
1777 il->stations[sta_id].tid[tid].seq_number = seq_number;
1778 }
1779
1780 spin_unlock(&il->sta_lock);
1781
1782 /* Set up driver data for this TFD */
1783 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct il_tx_info));
1784 txq->txb[q->write_ptr].skb = skb;
1785 txq->txb[q->write_ptr].ctx = ctx;
1786
1787 /* Set up first empty entry in queue's array of Tx/cmd buffers */
1788 out_cmd = txq->cmd[q->write_ptr];
1789 out_meta = &txq->meta[q->write_ptr];
1790 tx_cmd = &out_cmd->cmd.tx;
1791 memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
1792 memset(tx_cmd, 0, sizeof(struct il_tx_cmd));
1793
1794 /*
1795 * Set up the Tx-command (not MAC!) header.
1796 * Store the chosen Tx queue and TFD idx within the sequence field;
1797 * after Tx, uCode's Tx response will return this value so driver can
1798 * locate the frame within the tx queue and do post-tx processing.
1799 */
1800 out_cmd->hdr.cmd = REPLY_TX;
1801 out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
1802 IDX_TO_SEQ(q->write_ptr)));
1803
1804 /* Copy MAC header from skb into command buffer */
1805 memcpy(tx_cmd->hdr, hdr, hdr_len);
1806
1807
1808 /* Total # bytes to be transmitted */
1809 len = (u16)skb->len;
1810 tx_cmd->len = cpu_to_le16(len);
1811
1812 if (info->control.hw_key)
1813 il4965_tx_cmd_build_hwcrypto(il, info, tx_cmd, skb, sta_id);
1814
1815 /* TODO need this for burst mode later on */
1816 il4965_tx_cmd_build_basic(il, skb, tx_cmd, info, hdr, sta_id);
1817 il_dbg_log_tx_data_frame(il, len, hdr);
1818
1819 il4965_tx_cmd_build_rate(il, tx_cmd, info, fc);
1820
1821 il_update_stats(il, true, fc, len);
1822 /*
1823 * Use the first empty entry in this queue's command buffer array
1824 * to contain the Tx command and MAC header concatenated together
1825 * (payload data will be in another buffer).
1826 * Size of this varies, due to varying MAC header length.
1827 * If end is not dword aligned, we'll have 2 extra bytes at the end
1828 * of the MAC header (device reads on dword boundaries).
1829 * We'll tell device about this padding later.
1830 */
1831 len = sizeof(struct il_tx_cmd) +
1832 sizeof(struct il_cmd_header) + hdr_len;
1833 firstlen = (len + 3) & ~3;
1834
1835 /* Tell NIC about any 2-byte padding after MAC header */
1836 if (firstlen != len)
1837 tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
1838
1839 /* Physical address of this Tx command's header (not MAC header!),
1840 * within command buffer array. */
1841 txcmd_phys = pci_map_single(il->pci_dev,
1842 &out_cmd->hdr, firstlen,
1843 PCI_DMA_BIDIRECTIONAL);
1844 dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
1845 dma_unmap_len_set(out_meta, len, firstlen);
1846 /* Add buffer containing Tx command and MAC(!) header to TFD's
1847 * first entry */
1848 il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq,
1849 txcmd_phys, firstlen, 1, 0);
1850
1851 if (!ieee80211_has_morefrags(hdr->frame_control)) {
1852 txq->need_update = 1;
1853 } else {
1854 wait_write_ptr = 1;
1855 txq->need_update = 0;
1856 }
1857
1858 /* Set up TFD's 2nd entry to point directly to remainder of skb,
1859 * if any (802.11 null frames have no payload). */
1860 secondlen = skb->len - hdr_len;
1861 if (secondlen > 0) {
1862 phys_addr = pci_map_single(il->pci_dev, skb->data + hdr_len,
1863 secondlen, PCI_DMA_TODEVICE);
1864 il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq,
1865 phys_addr, secondlen,
1866 0, 0);
1867 }
1868
1869 scratch_phys = txcmd_phys + sizeof(struct il_cmd_header) +
1870 offsetof(struct il_tx_cmd, scratch);
1871
1872 /* take back ownership of DMA buffer to enable update */
1873 pci_dma_sync_single_for_cpu(il->pci_dev, txcmd_phys,
1874 firstlen, PCI_DMA_BIDIRECTIONAL);
1875 tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
1876 tx_cmd->dram_msb_ptr = il_get_dma_hi_addr(scratch_phys);
1877
1878 D_TX("sequence nr = 0X%x\n",
1879 le16_to_cpu(out_cmd->hdr.sequence));
1880 D_TX("tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
1881 il_print_hex_dump(il, IL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
1882 il_print_hex_dump(il, IL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
1883
1884 /* Set up entry for this TFD in Tx byte-count array */
1885 if (info->flags & IEEE80211_TX_CTL_AMPDU)
1886 il->cfg->ops->lib->txq_update_byte_cnt_tbl(il, txq,
1887 le16_to_cpu(tx_cmd->len));
1888
1889 pci_dma_sync_single_for_device(il->pci_dev, txcmd_phys,
1890 firstlen, PCI_DMA_BIDIRECTIONAL);
1891
1892 /* Tell device the write idx *just past* this latest filled TFD */
1893 q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
1894 il_txq_update_write_ptr(il, txq);
1895 spin_unlock_irqrestore(&il->lock, flags);
1896
1897 /*
1898 * At this point the frame is "transmitted" successfully
1899 * and we will get a TX status notification eventually,
1900 * regardless of the value of ret. "ret" only indicates
1901 * whether or not we should update the write pointer.
1902 */
1903
1904 /*
1905 * Avoid atomic ops if it isn't an associated client.
1906 * Also, if this is a packet for aggregation, don't
1907 * increase the counter because the ucode will stop
1908 * aggregation queues when their respective station
1909 * goes to sleep.
1910 */
1911 if (sta_priv && sta_priv->client && !is_agg)
1912 atomic_inc(&sta_priv->pending_frames);
1913
1914 if (il_queue_space(q) < q->high_mark && il->mac80211_registered) {
1915 if (wait_write_ptr) {
1916 spin_lock_irqsave(&il->lock, flags);
1917 txq->need_update = 1;
1918 il_txq_update_write_ptr(il, txq);
1919 spin_unlock_irqrestore(&il->lock, flags);
1920 } else {
1921 il_stop_queue(il, txq);
1922 }
1923 }
1924
1925 return 0;
1926
1927drop_unlock:
1928 spin_unlock_irqrestore(&il->lock, flags);
1929 return -1;
1930}
1931
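/*
 * Editor's note: a minimal standalone sketch (not driver code) of the
 * dword-alignment rule used in the Tx path above. The Tx command plus MAC
 * header region is rounded up to the next 4-byte boundary, and
 * TX_CMD_FLG_MH_PAD_MSK is set only when the rounding actually added pad
 * bytes, so the device can skip them when it reads on dword boundaries.
 */
static unsigned int dword_align(unsigned int len, int *padded)
{
	unsigned int firstlen = (len + 3) & ~3u;	/* round up to 4 bytes */

	*padded = (firstlen != len);
	return firstlen;	/* e.g. len 45 -> 48, *padded = 1;
				 *      len 48 -> 48, *padded = 0 */
}
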
1932static inline int il4965_alloc_dma_ptr(struct il_priv *il,
1933 struct il_dma_ptr *ptr, size_t size)
1934{
1935 ptr->addr = dma_alloc_coherent(&il->pci_dev->dev, size, &ptr->dma,
1936 GFP_KERNEL);
1937 if (!ptr->addr)
1938 return -ENOMEM;
1939 ptr->size = size;
1940 return 0;
1941}
1942
1943static inline void il4965_free_dma_ptr(struct il_priv *il,
1944 struct il_dma_ptr *ptr)
1945{
1946 if (unlikely(!ptr->addr))
1947 return;
1948
1949 dma_free_coherent(&il->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
1950 memset(ptr, 0, sizeof(*ptr));
1951}
1952
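/*
 * Editor's note: a usage sketch (assumed context, not driver code) showing
 * how the two DMA helpers above pair up. On partial failure the caller
 * unwinds with il4965_free_dma_ptr(), which is safe because the helper
 * ignores pointers whose addr was never set.
 */
static int example_alloc_pair(struct il_priv *il,
			      struct il_dma_ptr *a, struct il_dma_ptr *b)
{
	if (il4965_alloc_dma_ptr(il, a, 1024))
		return -ENOMEM;
	if (il4965_alloc_dma_ptr(il, b, 4096)) {
		il4965_free_dma_ptr(il, a);	/* undo the first alloc */
		return -ENOMEM;
	}
	return 0;
}
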
1953/**
1954 * il4965_hw_txq_ctx_free - Free TXQ Context
1955 *
1956 * Destroy all TX DMA queues and structures
1957 */
1958void il4965_hw_txq_ctx_free(struct il_priv *il)
1959{
1960 int txq_id;
1961
1962 /* Tx queues */
1963 if (il->txq) {
1964 for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
1965 if (txq_id == il->cmd_queue)
1966 il_cmd_queue_free(il);
1967 else
1968 il_tx_queue_free(il, txq_id);
1969 }
1970 il4965_free_dma_ptr(il, &il->kw);
1971
1972 il4965_free_dma_ptr(il, &il->scd_bc_tbls);
1973
1974 /* free tx queue structure */
1975 il_txq_mem(il);
1976}
1977
1978/**
1979 * il4965_txq_ctx_alloc - allocate TX queue context
1980 * Allocate all Tx DMA structures and initialize them
1981 *
1982 * @param il
1983 * @return error code
1984 */
1985int il4965_txq_ctx_alloc(struct il_priv *il)
1986{
1987 int ret;
1988 int txq_id, slots_num;
1989 unsigned long flags;
1990
1991 /* Free all tx/cmd queues and keep-warm buffer */
1992 il4965_hw_txq_ctx_free(il);
1993
1994 ret = il4965_alloc_dma_ptr(il, &il->scd_bc_tbls,
1995 il->hw_params.scd_bc_tbls_size);
1996 if (ret) {
1997 IL_ERR("Scheduler BC Table allocation failed\n");
1998 goto error_bc_tbls;
1999 }
2000 /* Alloc keep-warm buffer */
2001 ret = il4965_alloc_dma_ptr(il, &il->kw, IL_KW_SIZE);
2002 if (ret) {
2003 IL_ERR("Keep Warm allocation failed\n");
2004 goto error_kw;
2005 }
2006
2007 /* allocate tx queue structure */
2008 ret = il_alloc_txq_mem(il);
2009 if (ret)
2010 goto error;
2011
2012 spin_lock_irqsave(&il->lock, flags);
2013
2014 /* Turn off all Tx DMA fifos */
2015 il4965_txq_set_sched(il, 0);
2016
2017 /* Tell NIC where to find the "keep warm" buffer */
2018 il_wr(il, FH_KW_MEM_ADDR_REG, il->kw.dma >> 4);
2019
2020 spin_unlock_irqrestore(&il->lock, flags);
2021
2022 /* Alloc and init all Tx queues, including the command queue (#4/#9) */
2023 for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) {
2024 slots_num = (txq_id == il->cmd_queue) ?
2025 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
2026 ret = il_tx_queue_init(il,
2027 &il->txq[txq_id], slots_num,
2028 txq_id);
2029 if (ret) {
2030 IL_ERR("Tx %d queue init failed\n", txq_id);
2031 goto error;
2032 }
2033 }
2034
2035 return ret;
2036
2037 error:
2038 il4965_hw_txq_ctx_free(il);
2039 il4965_free_dma_ptr(il, &il->kw);
2040 error_kw:
2041 il4965_free_dma_ptr(il, &il->scd_bc_tbls);
2042 error_bc_tbls:
2043 return ret;
2044}
2045
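/*
 * Editor's note: il4965_txq_ctx_alloc() above uses the kernel's usual
 * "goto ladder" for error unwinding -- each label frees only what was
 * allocated before the failing step, in reverse order. A self-contained
 * sketch of the shape, with hypothetical stand-in helpers:
 */
static int sketch_alloc_a(void) { return 0; }
static int sketch_alloc_b(void) { return -1; }
static void sketch_free_a(void) { }

static int goto_ladder_sketch(void)
{
	if (sketch_alloc_a())
		goto err;
	if (sketch_alloc_b())
		goto err_b;
	return 0;

err_b:
	sketch_free_a();	/* undo step 1; step 2 never succeeded */
err:
	return -ENOMEM;
}
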
2046void il4965_txq_ctx_reset(struct il_priv *il)
2047{
2048 int txq_id, slots_num;
2049 unsigned long flags;
2050
2051 spin_lock_irqsave(&il->lock, flags);
2052
2053 /* Turn off all Tx DMA fifos */
2054 il4965_txq_set_sched(il, 0);
2055
2056 /* Tell NIC where to find the "keep warm" buffer */
2057 il_wr(il, FH_KW_MEM_ADDR_REG, il->kw.dma >> 4);
2058
2059 spin_unlock_irqrestore(&il->lock, flags);
2060
2061 /* Alloc and init all Tx queues, including the command queue (#4) */
2062 for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) {
2063 slots_num = txq_id == il->cmd_queue ?
2064 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
2065 il_tx_queue_reset(il, &il->txq[txq_id],
2066 slots_num, txq_id);
2067 }
2068}
2069
2070/**
2071 * il4965_txq_ctx_stop - Stop all Tx DMA channels
2072 */
2073void il4965_txq_ctx_stop(struct il_priv *il)
2074{
2075 int ch, txq_id;
2076 unsigned long flags;
2077
2078 /* Turn off all Tx DMA fifos */
2079 spin_lock_irqsave(&il->lock, flags);
2080
2081 il4965_txq_set_sched(il, 0);
2082
2083 /* Stop each Tx DMA channel, and wait for it to be idle */
2084 for (ch = 0; ch < il->hw_params.dma_chnl_num; ch++) {
2085 il_wr(il,
2086 FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
2087 if (il_poll_bit(il, FH_TSSR_TX_STATUS_REG,
2088 FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
2089 1000))
2090 IL_ERR("Failing on timeout while stopping"
2091 " DMA channel %d [0x%08x]", ch,
2092 il_rd(il,
2093 FH_TSSR_TX_STATUS_REG));
2094 }
2095 spin_unlock_irqrestore(&il->lock, flags);
2096
2097 if (!il->txq)
2098 return;
2099
2100 /* Unmap DMA from host system and free skb's */
2101 for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
2102 if (txq_id == il->cmd_queue)
2103 il_cmd_queue_unmap(il);
2104 else
2105 il_tx_queue_unmap(il, txq_id);
2106}
2107
2108/*
2109 * Find first available (lowest unused) Tx Queue, mark it "active".
2110 * Called only when finding queue for aggregation.
2111 * Should never return anything < 7, because they should already
2112 * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
2113 */
2114static int il4965_txq_ctx_activate_free(struct il_priv *il)
2115{
2116 int txq_id;
2117
2118 for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
2119 if (!test_and_set_bit(txq_id, &il->txq_ctx_active_msk))
2120 return txq_id;
2121 return -1;
2122}
2123
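/*
 * Editor's note: a non-atomic model (not driver code) of the queue finder
 * above. The real code uses test_and_set_bit() so the scan and the claim
 * are one atomic operation; this sketch only shows the "first clear bit"
 * logic on a plain mask.
 */
static int first_free_queue(unsigned long *msk, int max_q)
{
	int q;

	for (q = 0; q < max_q; q++) {
		if (!(*msk & (1UL << q))) {
			*msk |= 1UL << q;	/* mark queue active */
			return q;
		}
	}
	return -1;			/* all queues in use */
}
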
2124/**
2125 * il4965_tx_queue_stop_scheduler - Stop queue, but keep configuration
2126 */
2127static void il4965_tx_queue_stop_scheduler(struct il_priv *il,
2128 u16 txq_id)
2129{
2130 /* Simply stop the queue, but don't change any configuration;
2131 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
2132 il_wr_prph(il,
2133 IL49_SCD_QUEUE_STATUS_BITS(txq_id),
2134 (0 << IL49_SCD_QUEUE_STTS_REG_POS_ACTIVE)|
2135 (1 << IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
2136}
2137
2138/**
2139 * il4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue
2140 */
2141static int il4965_tx_queue_set_q2ratid(struct il_priv *il, u16 ra_tid,
2142 u16 txq_id)
2143{
2144 u32 tbl_dw_addr;
2145 u32 tbl_dw;
2146 u16 scd_q2ratid;
2147
2148 scd_q2ratid = ra_tid & IL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
2149
2150 tbl_dw_addr = il->scd_base_addr +
2151 IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
2152
2153 tbl_dw = il_read_targ_mem(il, tbl_dw_addr);
2154
2155 if (txq_id & 0x1)
2156 tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
2157 else
2158 tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
2159
2160 il_write_targ_mem(il, tbl_dw_addr, tbl_dw);
2161
2162 return 0;
2163}
2164
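/*
 * Editor's note: the translate table above packs two queues per 32-bit
 * word -- the RA/TID of an even txq_id lives in the low halfword, an odd
 * txq_id in the high halfword. A standalone sketch of that
 * read-modify-write:
 */
static unsigned int pack_q2ratid(unsigned int tbl_dw, int txq_id,
				 unsigned short scd_q2ratid)
{
	if (txq_id & 0x1)	/* odd queue -> high 16 bits */
		return ((unsigned int)scd_q2ratid << 16) |
		       (tbl_dw & 0x0000FFFF);
	/* even queue -> low 16 bits */
	return scd_q2ratid | (tbl_dw & 0xFFFF0000);
}
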
2165/**
2166 * il4965_tx_queue_agg_enable - Set up & enable aggregation for selected queue
2167 *
2168 * NOTE: txq_id must be greater than IL49_FIRST_AMPDU_QUEUE,
2169 * i.e. it must be one of the higher queues used for aggregation
2170 */
2171static int il4965_txq_agg_enable(struct il_priv *il, int txq_id,
2172 int tx_fifo, int sta_id, int tid, u16 ssn_idx)
2173{
2174 unsigned long flags;
2175 u16 ra_tid;
2176 int ret;
2177
2178 if ((IL49_FIRST_AMPDU_QUEUE > txq_id) ||
2179 (IL49_FIRST_AMPDU_QUEUE +
2180 il->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
2181 IL_WARN(
2182 "queue number out of range: %d, must be %d to %d\n",
2183 txq_id, IL49_FIRST_AMPDU_QUEUE,
2184 IL49_FIRST_AMPDU_QUEUE +
2185 il->cfg->base_params->num_of_ampdu_queues - 1);
2186 return -EINVAL;
2187 }
2188
2189 ra_tid = BUILD_RAxTID(sta_id, tid);
2190
2191 /* Modify device's station table to Tx this TID */
2192 ret = il4965_sta_tx_modify_enable_tid(il, sta_id, tid);
2193 if (ret)
2194 return ret;
2195
2196 spin_lock_irqsave(&il->lock, flags);
2197
2198 /* Stop this Tx queue before configuring it */
2199 il4965_tx_queue_stop_scheduler(il, txq_id);
2200
2201 /* Map receiver-address / traffic-ID to this queue */
2202 il4965_tx_queue_set_q2ratid(il, ra_tid, txq_id);
2203
2204 /* Set this queue as a chain-building queue */
2205 il_set_bits_prph(il, IL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
2206
2207 /* Place first TFD at idx corresponding to start sequence number.
2208 * Assumes that ssn_idx is valid (!= 0xFFF) */
2209 il->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
2210 il->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
2211 il4965_set_wr_ptrs(il, txq_id, ssn_idx);
2212
2213 /* Set up Tx win size and frame limit for this queue */
2214 il_write_targ_mem(il,
2215 il->scd_base_addr + IL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id),
2216 (SCD_WIN_SIZE << IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
2217 IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
2218
2219 il_write_targ_mem(il, il->scd_base_addr +
2220 IL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
2221 (SCD_FRAME_LIMIT << IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS)
2222 & IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
2223
2224 il_set_bits_prph(il, IL49_SCD_INTERRUPT_MASK, (1 << txq_id));
2225
2226 /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
2227 il4965_tx_queue_set_status(il, &il->txq[txq_id], tx_fifo, 1);
2228
2229 spin_unlock_irqrestore(&il->lock, flags);
2230
2231 return 0;
2232}
2233
2234
2235int il4965_tx_agg_start(struct il_priv *il, struct ieee80211_vif *vif,
2236 struct ieee80211_sta *sta, u16 tid, u16 *ssn)
2237{
2238 int sta_id;
2239 int tx_fifo;
2240 int txq_id;
2241 int ret;
2242 unsigned long flags;
2243 struct il_tid_data *tid_data;
2244
2245 tx_fifo = il4965_get_fifo_from_tid(il_rxon_ctx_from_vif(vif), tid);
2246 if (unlikely(tx_fifo < 0))
2247 return tx_fifo;
2248
2249 IL_WARN("%s on ra = %pM tid = %d\n",
2250 __func__, sta->addr, tid);
2251
2252 sta_id = il_sta_id(sta);
2253 if (sta_id == IL_INVALID_STATION) {
2254 IL_ERR("Start AGG on invalid station\n");
2255 return -ENXIO;
2256 }
2257 if (unlikely(tid >= MAX_TID_COUNT))
2258 return -EINVAL;
2259
2260 if (il->stations[sta_id].tid[tid].agg.state != IL_AGG_OFF) {
2261 IL_ERR("Start AGG when state is not IL_AGG_OFF !\n");
2262 return -ENXIO;
2263 }
2264
2265 txq_id = il4965_txq_ctx_activate_free(il);
2266 if (txq_id == -1) {
2267 IL_ERR("No free aggregation queue available\n");
2268 return -ENXIO;
2269 }
2270
2271 spin_lock_irqsave(&il->sta_lock, flags);
2272 tid_data = &il->stations[sta_id].tid[tid];
2273 *ssn = SEQ_TO_SN(tid_data->seq_number);
2274 tid_data->agg.txq_id = txq_id;
2275 il_set_swq_id(&il->txq[txq_id],
2276 il4965_get_ac_from_tid(tid), txq_id);
2277 spin_unlock_irqrestore(&il->sta_lock, flags);
2278
2279 ret = il4965_txq_agg_enable(il, txq_id, tx_fifo,
2280 sta_id, tid, *ssn);
2281 if (ret)
2282 return ret;
2283
2284 spin_lock_irqsave(&il->sta_lock, flags);
2285 tid_data = &il->stations[sta_id].tid[tid];
2286 if (tid_data->tfds_in_queue == 0) {
2287 D_HT("HW queue is empty\n");
2288 tid_data->agg.state = IL_AGG_ON;
2289 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
2290 } else {
2291 D_HT(
2292 "HW queue is NOT empty: %d packets in HW queue\n",
2293 tid_data->tfds_in_queue);
2294 tid_data->agg.state = IL_EMPTYING_HW_QUEUE_ADDBA;
2295 }
2296 spin_unlock_irqrestore(&il->sta_lock, flags);
2297 return ret;
2298}
2299
2300/**
2301 * txq_id must be greater than IL49_FIRST_AMPDU_QUEUE
2302 * il->lock must be held by the caller
2303 */
2304static int il4965_txq_agg_disable(struct il_priv *il, u16 txq_id,
2305 u16 ssn_idx, u8 tx_fifo)
2306{
2307 if ((IL49_FIRST_AMPDU_QUEUE > txq_id) ||
2308 (IL49_FIRST_AMPDU_QUEUE +
2309 il->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
2310 IL_WARN(
2311 "queue number out of range: %d, must be %d to %d\n",
2312 txq_id, IL49_FIRST_AMPDU_QUEUE,
2313 IL49_FIRST_AMPDU_QUEUE +
2314 il->cfg->base_params->num_of_ampdu_queues - 1);
2315 return -EINVAL;
2316 }
2317
2318 il4965_tx_queue_stop_scheduler(il, txq_id);
2319
2320 il_clear_bits_prph(il,
2321 IL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
2322
2323 il->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
2324 il->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
2325 /* supposes that ssn_idx is valid (!= 0xFFF) */
2326 il4965_set_wr_ptrs(il, txq_id, ssn_idx);
2327
2328 il_clear_bits_prph(il,
2329 IL49_SCD_INTERRUPT_MASK, (1 << txq_id));
2330 il_txq_ctx_deactivate(il, txq_id);
2331 il4965_tx_queue_set_status(il, &il->txq[txq_id], tx_fifo, 0);
2332
2333 return 0;
2334}
2335
2336int il4965_tx_agg_stop(struct il_priv *il, struct ieee80211_vif *vif,
2337 struct ieee80211_sta *sta, u16 tid)
2338{
2339 int tx_fifo_id, txq_id, sta_id, ssn;
2340 struct il_tid_data *tid_data;
2341 int write_ptr, read_ptr;
2342 unsigned long flags;
2343
2344 tx_fifo_id = il4965_get_fifo_from_tid(il_rxon_ctx_from_vif(vif), tid);
2345 if (unlikely(tx_fifo_id < 0))
2346 return tx_fifo_id;
2347
2348 sta_id = il_sta_id(sta);
2349
2350 if (sta_id == IL_INVALID_STATION) {
2351 IL_ERR("Invalid station for AGG tid %d\n", tid);
2352 return -ENXIO;
2353 }
2354
2355 spin_lock_irqsave(&il->sta_lock, flags);
2356
2357 tid_data = &il->stations[sta_id].tid[tid];
2358 ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
2359 txq_id = tid_data->agg.txq_id;
2360
2361 switch (il->stations[sta_id].tid[tid].agg.state) {
2362 case IL_EMPTYING_HW_QUEUE_ADDBA:
2363 /*
2364 * This can happen if the peer stops aggregation
2365 * again before we've had a chance to drain the
2366 * queue we selected previously, i.e. before the
2367 * session was really started completely.
2368 */
2369 D_HT("AGG stop before setup done\n");
2370 goto turn_off;
2371 case IL_AGG_ON:
2372 break;
2373 default:
2374 IL_WARN("Stopping AGG while state not ON or starting\n");
2375 }
2376
2377 write_ptr = il->txq[txq_id].q.write_ptr;
2378 read_ptr = il->txq[txq_id].q.read_ptr;
2379
2380 /* The queue is not empty */
2381 if (write_ptr != read_ptr) {
2382 D_HT("Stopping a non empty AGG HW QUEUE\n");
2383 il->stations[sta_id].tid[tid].agg.state =
2384 IL_EMPTYING_HW_QUEUE_DELBA;
2385 spin_unlock_irqrestore(&il->sta_lock, flags);
2386 return 0;
2387 }
2388
2389 D_HT("HW queue is empty\n");
2390 turn_off:
2391 il->stations[sta_id].tid[tid].agg.state = IL_AGG_OFF;
2392
2393 /* do not restore/save irqs */
2394 spin_unlock(&il->sta_lock);
2395 spin_lock(&il->lock);
2396
2397 /*
2398 	 * The only reason this call can fail is a queue number out of range,
2399 	 * which can happen if uCode is reloaded and all the station
2400 	 * information is lost. If it is outside the range, there is no need
2401 	 * to deactivate the uCode queue; just return "success" to allow
2402 	 * mac80211 to clean up its own data.
2403 */
2404 il4965_txq_agg_disable(il, txq_id, ssn, tx_fifo_id);
2405 spin_unlock_irqrestore(&il->lock, flags);
2406
2407 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
2408
2409 return 0;
2410}
2411
2412int il4965_txq_check_empty(struct il_priv *il,
2413 int sta_id, u8 tid, int txq_id)
2414{
2415 struct il_queue *q = &il->txq[txq_id].q;
2416 u8 *addr = il->stations[sta_id].sta.sta.addr;
2417 struct il_tid_data *tid_data = &il->stations[sta_id].tid[tid];
2418 struct il_rxon_context *ctx;
2419
2420 ctx = &il->ctx;
2421
2422 lockdep_assert_held(&il->sta_lock);
2423
2424 switch (il->stations[sta_id].tid[tid].agg.state) {
2425 case IL_EMPTYING_HW_QUEUE_DELBA:
2426 /* We are reclaiming the last packet of the */
2427 /* aggregated HW queue */
2428 if (txq_id == tid_data->agg.txq_id &&
2429 q->read_ptr == q->write_ptr) {
2430 u16 ssn = SEQ_TO_SN(tid_data->seq_number);
2431 int tx_fifo = il4965_get_fifo_from_tid(ctx, tid);
2432 D_HT(
2433 "HW queue empty: continue DELBA flow\n");
2434 il4965_txq_agg_disable(il, txq_id, ssn, tx_fifo);
2435 tid_data->agg.state = IL_AGG_OFF;
2436 ieee80211_stop_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
2437 }
2438 break;
2439 case IL_EMPTYING_HW_QUEUE_ADDBA:
2440 /* We are reclaiming the last packet of the queue */
2441 if (tid_data->tfds_in_queue == 0) {
2442 D_HT(
2443 "HW queue empty: continue ADDBA flow\n");
2444 tid_data->agg.state = IL_AGG_ON;
2445 ieee80211_start_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
2446 }
2447 break;
2448 }
2449
2450 return 0;
2451}
2452
2453static void il4965_non_agg_tx_status(struct il_priv *il,
2454 struct il_rxon_context *ctx,
2455 const u8 *addr1)
2456{
2457 struct ieee80211_sta *sta;
2458 struct il_station_priv *sta_priv;
2459
2460 rcu_read_lock();
2461 sta = ieee80211_find_sta(ctx->vif, addr1);
2462 if (sta) {
2463 sta_priv = (void *)sta->drv_priv;
2464 /* avoid atomic ops if this isn't a client */
2465 if (sta_priv->client &&
2466 atomic_dec_return(&sta_priv->pending_frames) == 0)
2467 ieee80211_sta_block_awake(il->hw, sta, false);
2468 }
2469 rcu_read_unlock();
2470}
2471
2472static void
2473il4965_tx_status(struct il_priv *il, struct il_tx_info *tx_info,
2474 bool is_agg)
2475{
2476 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx_info->skb->data;
2477
2478 if (!is_agg)
2479 il4965_non_agg_tx_status(il, tx_info->ctx, hdr->addr1);
2480
2481 ieee80211_tx_status_irqsafe(il->hw, tx_info->skb);
2482}
2483
2484int il4965_tx_queue_reclaim(struct il_priv *il, int txq_id, int idx)
2485{
2486 struct il_tx_queue *txq = &il->txq[txq_id];
2487 struct il_queue *q = &txq->q;
2488 struct il_tx_info *tx_info;
2489 int nfreed = 0;
2490 struct ieee80211_hdr *hdr;
2491
2492 if (idx >= q->n_bd || il_queue_used(q, idx) == 0) {
2493 IL_ERR("Read idx for DMA queue txq id (%d), idx %d, "
2494 "is out of range [0-%d] %d %d.\n", txq_id,
2495 idx, q->n_bd, q->write_ptr, q->read_ptr);
2496 return 0;
2497 }
2498
2499 for (idx = il_queue_inc_wrap(idx, q->n_bd);
2500 q->read_ptr != idx;
2501 q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) {
2502
2503 tx_info = &txq->txb[txq->q.read_ptr];
2504
2505 if (WARN_ON_ONCE(tx_info->skb == NULL))
2506 continue;
2507
2508 hdr = (struct ieee80211_hdr *)tx_info->skb->data;
2509 if (ieee80211_is_data_qos(hdr->frame_control))
2510 nfreed++;
2511
2512 il4965_tx_status(il, tx_info,
2513 txq_id >= IL4965_FIRST_AMPDU_QUEUE);
2514 tx_info->skb = NULL;
2515
2516 il->cfg->ops->lib->txq_free_tfd(il, txq);
2517 }
2518 return nfreed;
2519}
2520
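/*
 * Editor's note: a minimal model (not driver code) of the reclaim walk
 * above, assuming n_bd is a power of two as in the driver's circular
 * queues. read_ptr chases one slot past the reported idx, wrapping at the
 * end of the buffer, and every step frees one TFD.
 */
static int count_reclaimed(int read_ptr, int idx, int n_bd)
{
	int freed = 0;

	idx = (idx + 1) & (n_bd - 1);	/* one past the reported TFD */
	while (read_ptr != idx) {
		read_ptr = (read_ptr + 1) & (n_bd - 1);
		freed++;
	}
	return freed;
}
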
2521/**
2522 * il4965_tx_status_reply_compressed_ba - Update tx status from block-ack
2523 *
2524 * Go through block-ack's bitmap of ACK'd frames, update driver's record of
2525 * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
2526 */
2527static int il4965_tx_status_reply_compressed_ba(struct il_priv *il,
2528 struct il_ht_agg *agg,
2529 struct il_compressed_ba_resp *ba_resp)
2530
2531{
2532 int i, sh, ack;
2533 u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
2534 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
2535 int successes = 0;
2536 struct ieee80211_tx_info *info;
2537 u64 bitmap, sent_bitmap;
2538
2539 if (unlikely(!agg->wait_for_ba)) {
2540 if (unlikely(ba_resp->bitmap))
2541 IL_ERR("Received BA when not expected\n");
2542 return -EINVAL;
2543 }
2544
2545 /* Mark that the expected block-ack response arrived */
2546 agg->wait_for_ba = 0;
2547 D_TX_REPLY("BA %d %d\n", agg->start_idx,
2548 ba_resp->seq_ctl);
2549
2550 /* Calculate shift to align block-ack bits with our Tx win bits */
2551 sh = agg->start_idx - SEQ_TO_IDX(seq_ctl >> 4);
2552 	if (sh < 0) /* btw: something is wrong with indices */
2553 sh += 0x100;
2554
2555 if (agg->frame_count > (64 - sh)) {
2556 D_TX_REPLY("more frames than bitmap size");
2557 return -1;
2558 }
2559
2560 /* don't use 64-bit values for now */
2561 bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
2562
2563 /* check for success or failure according to the
2564 * transmitted bitmap and block-ack bitmap */
2565 sent_bitmap = bitmap & agg->bitmap;
2566
2567 /* For each frame attempted in aggregation,
2568 * update driver's record of tx frame's status. */
2569 i = 0;
2570 while (sent_bitmap) {
2571 ack = sent_bitmap & 1ULL;
2572 successes += ack;
2573 D_TX_REPLY("%s ON i=%d idx=%d raw=%d\n",
2574 ack ? "ACK" : "NACK", i,
2575 (agg->start_idx + i) & 0xff,
2576 agg->start_idx + i);
2577 sent_bitmap >>= 1;
2578 ++i;
2579 }
2580
2581 D_TX_REPLY("Bitmap %llx\n",
2582 (unsigned long long)bitmap);
2583
2584 info = IEEE80211_SKB_CB(il->txq[scd_flow].txb[agg->start_idx].skb);
2585 memset(&info->status, 0, sizeof(info->status));
2586 info->flags |= IEEE80211_TX_STAT_ACK;
2587 info->flags |= IEEE80211_TX_STAT_AMPDU;
2588 info->status.ampdu_ack_len = successes;
2589 info->status.ampdu_len = agg->frame_count;
2590 il4965_hwrate_to_tx_control(il, agg->rate_n_flags, info);
2591
2592 return 0;
2593}
2594
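/*
 * Editor's note: a worked example of the alignment above, with assumed
 * numbers. If the driver's agg window starts at idx 10 while the BA's
 * seq_ctl maps to idx 8, then sh = 2, and shifting the 64-bit BA bitmap
 * right by 2 lines bit 0 up with the first frame we sent; ANDing with
 * agg->bitmap keeps only frames actually transmitted. The ACK tally then
 * reduces to a population count:
 */
static int count_acks(unsigned long long sent_bitmap)
{
	int successes = 0;

	while (sent_bitmap) {
		successes += (int)(sent_bitmap & 1ULL);
		sent_bitmap >>= 1;
	}
	return successes;	/* e.g. 0x2D (0b101101) -> 4 ACKs */
}
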
2595/**
2596 * translate ucode response to mac80211 tx status control values
2597 */
2598void il4965_hwrate_to_tx_control(struct il_priv *il, u32 rate_n_flags,
2599 struct ieee80211_tx_info *info)
2600{
2601 struct ieee80211_tx_rate *r = &info->control.rates[0];
2602
2603 info->antenna_sel_tx =
2604 ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
2605 if (rate_n_flags & RATE_MCS_HT_MSK)
2606 r->flags |= IEEE80211_TX_RC_MCS;
2607 if (rate_n_flags & RATE_MCS_GF_MSK)
2608 r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
2609 if (rate_n_flags & RATE_MCS_HT40_MSK)
2610 r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
2611 if (rate_n_flags & RATE_MCS_DUP_MSK)
2612 r->flags |= IEEE80211_TX_RC_DUP_DATA;
2613 if (rate_n_flags & RATE_MCS_SGI_MSK)
2614 r->flags |= IEEE80211_TX_RC_SHORT_GI;
2615 r->idx = il4965_hwrate_to_mac80211_idx(rate_n_flags, info->band);
2616}
2617
2618/**
2619 * il4965_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
2620 *
2621 * Handles block-acknowledge notification from device, which reports success
2622 * of frames sent via aggregation.
2623 */
2624void il4965_rx_reply_compressed_ba(struct il_priv *il,
2625 struct il_rx_buf *rxb)
2626{
2627 struct il_rx_pkt *pkt = rxb_addr(rxb);
2628 struct il_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
2629 struct il_tx_queue *txq = NULL;
2630 struct il_ht_agg *agg;
2631 int idx;
2632 int sta_id;
2633 int tid;
2634 unsigned long flags;
2635
2636 /* "flow" corresponds to Tx queue */
2637 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
2638
2639 /* "ssn" is start of block-ack Tx win, corresponds to idx
2640 * (in Tx queue's circular buffer) of first TFD/frame in win */
2641 u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
2642
2643 if (scd_flow >= il->hw_params.max_txq_num) {
2644 IL_ERR(
2645 "BUG_ON scd_flow is bigger than number of queues\n");
2646 return;
2647 }
2648
2649 txq = &il->txq[scd_flow];
2650 sta_id = ba_resp->sta_id;
2651 tid = ba_resp->tid;
2652 agg = &il->stations[sta_id].tid[tid].agg;
2653 if (unlikely(agg->txq_id != scd_flow)) {
2654 /*
2655 	 * FIXME: this is a uCode bug which needs to be addressed;
2656 	 * log the information and return for now.
2657 	 * Since it can happen very often, and in order
2658 	 * not to fill the syslog, don't enable the logging by default.
2659 */
2660 D_TX_REPLY(
2661 "BA scd_flow %d does not match txq_id %d\n",
2662 scd_flow, agg->txq_id);
2663 return;
2664 }
2665
2666 /* Find idx just before block-ack win */
2667 idx = il_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
2668
2669 spin_lock_irqsave(&il->sta_lock, flags);
2670
2671 D_TX_REPLY("REPLY_COMPRESSED_BA [%d] Received from %pM, "
2672 "sta_id = %d\n",
2673 agg->wait_for_ba,
2674 (u8 *) &ba_resp->sta_addr_lo32,
2675 ba_resp->sta_id);
2676 D_TX_REPLY("TID = %d, SeqCtl = %d, bitmap = 0x%llx,"
2677 "scd_flow = "
2678 "%d, scd_ssn = %d\n",
2679 ba_resp->tid,
2680 ba_resp->seq_ctl,
2681 (unsigned long long)le64_to_cpu(ba_resp->bitmap),
2682 ba_resp->scd_flow,
2683 ba_resp->scd_ssn);
2684 D_TX_REPLY("DAT start_idx = %d, bitmap = 0x%llx\n",
2685 agg->start_idx,
2686 (unsigned long long)agg->bitmap);
2687
2688 /* Update driver's record of ACK vs. not for each frame in win */
2689 il4965_tx_status_reply_compressed_ba(il, agg, ba_resp);
2690
2691 /* Release all TFDs before the SSN, i.e. all TFDs in front of
2692 * block-ack win (we assume that they've been successfully
2693 * transmitted ... if not, it's too late anyway). */
2694 if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
2695 /* calculate mac80211 ampdu sw queue to wake */
2696 int freed = il4965_tx_queue_reclaim(il, scd_flow, idx);
2697 il4965_free_tfds_in_queue(il, sta_id, tid, freed);
2698
2699 if (il_queue_space(&txq->q) > txq->q.low_mark &&
2700 il->mac80211_registered &&
2701 agg->state != IL_EMPTYING_HW_QUEUE_DELBA)
2702 il_wake_queue(il, txq);
2703
2704 il4965_txq_check_empty(il, sta_id, tid, scd_flow);
2705 }
2706
2707 spin_unlock_irqrestore(&il->sta_lock, flags);
2708}
2709
2710#ifdef CONFIG_IWLEGACY_DEBUG
2711const char *il4965_get_tx_fail_reason(u32 status)
2712{
2713#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
2714#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x
2715
2716 switch (status & TX_STATUS_MSK) {
2717 case TX_STATUS_SUCCESS:
2718 return "SUCCESS";
2719 TX_STATUS_POSTPONE(DELAY);
2720 TX_STATUS_POSTPONE(FEW_BYTES);
2721 TX_STATUS_POSTPONE(QUIET_PERIOD);
2722 TX_STATUS_POSTPONE(CALC_TTAK);
2723 TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
2724 TX_STATUS_FAIL(SHORT_LIMIT);
2725 TX_STATUS_FAIL(LONG_LIMIT);
2726 TX_STATUS_FAIL(FIFO_UNDERRUN);
2727 TX_STATUS_FAIL(DRAIN_FLOW);
2728 TX_STATUS_FAIL(RFKILL_FLUSH);
2729 TX_STATUS_FAIL(LIFE_EXPIRE);
2730 TX_STATUS_FAIL(DEST_PS);
2731 TX_STATUS_FAIL(HOST_ABORTED);
2732 TX_STATUS_FAIL(BT_RETRY);
2733 TX_STATUS_FAIL(STA_INVALID);
2734 TX_STATUS_FAIL(FRAG_DROPPED);
2735 TX_STATUS_FAIL(TID_DISABLE);
2736 TX_STATUS_FAIL(FIFO_FLUSHED);
2737 TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
2738 TX_STATUS_FAIL(PASSIVE_NO_RX);
2739 TX_STATUS_FAIL(NO_BEACON_ON_RADAR);
2740 }
2741
2742 return "UNKNOWN";
2743
2744#undef TX_STATUS_FAIL
2745#undef TX_STATUS_POSTPONE
2746}
2747#endif /* CONFIG_IWLEGACY_DEBUG */
2748
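/*
 * Editor's note: the decoder above leans on the preprocessor's stringify
 * operator -- TX_STATUS_FAIL(x) expands to both the case label and the
 * returned name, so the table cannot drift out of sync. A self-contained
 * sketch of the same X-macro pattern:
 */
#define ERR_CASE(x) case ERR_ ## x: return #x

enum { ERR_TIMEOUT = 1, ERR_OVERRUN };

static const char *err_name(int e)
{
	switch (e) {
	ERR_CASE(TIMEOUT);	/* case ERR_TIMEOUT: return "TIMEOUT"; */
	ERR_CASE(OVERRUN);
	}
	return "UNKNOWN";
}
#undef ERR_CASE
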
2749static struct il_link_quality_cmd *
2750il4965_sta_alloc_lq(struct il_priv *il, u8 sta_id)
2751{
2752 int i, r;
2753 struct il_link_quality_cmd *link_cmd;
2754 u32 rate_flags = 0;
2755 __le32 rate_n_flags;
2756
2757 link_cmd = kzalloc(sizeof(struct il_link_quality_cmd), GFP_KERNEL);
2758 if (!link_cmd) {
2759 IL_ERR("Unable to allocate memory for LQ cmd.\n");
2760 return NULL;
2761 }
2762 /* Set up the rate scaling to start at selected rate, fall back
2763 * all the way down to 1M in IEEE order, and then spin on 1M */
2764 if (il->band == IEEE80211_BAND_5GHZ)
2765 r = RATE_6M_IDX;
2766 else
2767 r = RATE_1M_IDX;
2768
2769 if (r >= IL_FIRST_CCK_RATE && r <= IL_LAST_CCK_RATE)
2770 rate_flags |= RATE_MCS_CCK_MSK;
2771
2772 rate_flags |= il4965_first_antenna(il->hw_params.valid_tx_ant) <<
2773 RATE_MCS_ANT_POS;
2774 rate_n_flags = il4965_hw_set_rate_n_flags(il_rates[r].plcp,
2775 rate_flags);
2776 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
2777 link_cmd->rs_table[i].rate_n_flags = rate_n_flags;
2778
2779 link_cmd->general_params.single_stream_ant_msk =
2780 il4965_first_antenna(il->hw_params.valid_tx_ant);
2781
2782 link_cmd->general_params.dual_stream_ant_msk =
2783 il->hw_params.valid_tx_ant &
2784 ~il4965_first_antenna(il->hw_params.valid_tx_ant);
2785 if (!link_cmd->general_params.dual_stream_ant_msk) {
2786 link_cmd->general_params.dual_stream_ant_msk = ANT_AB;
2787 } else if (il4965_num_of_ant(il->hw_params.valid_tx_ant) == 2) {
2788 link_cmd->general_params.dual_stream_ant_msk =
2789 il->hw_params.valid_tx_ant;
2790 }
2791
2792 link_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
2793 link_cmd->agg_params.agg_time_limit =
2794 cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
2795
2796 link_cmd->sta_id = sta_id;
2797
2798 return link_cmd;
2799}
2800
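/*
 * Editor's note: the LQ allocation above seeds every retry slot with the
 * same starting rate; the rate-scaling algorithm later rewrites the table
 * into a real fallback ladder. A sketch of the seeding loop, with a
 * hypothetical slot count standing in for LINK_QUAL_MAX_RETRY_NUM:
 */
#define DEMO_RETRY_SLOTS 16

static void seed_rs_table(unsigned int *rs_table, unsigned int rate_n_flags)
{
	int i;

	for (i = 0; i < DEMO_RETRY_SLOTS; i++)
		rs_table[i] = rate_n_flags;	/* same rate in every slot */
}
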
2801/*
2802 * il4965_add_bssid_station - Add the special IBSS BSSID station
2803 *
2804 * Function sleeps.
2805 */
2806int
2807il4965_add_bssid_station(struct il_priv *il, struct il_rxon_context *ctx,
2808 const u8 *addr, u8 *sta_id_r)
2809{
2810 int ret;
2811 u8 sta_id;
2812 struct il_link_quality_cmd *link_cmd;
2813 unsigned long flags;
2814
2815 if (sta_id_r)
2816 *sta_id_r = IL_INVALID_STATION;
2817
2818 ret = il_add_station_common(il, ctx, addr, 0, NULL, &sta_id);
2819 if (ret) {
2820 IL_ERR("Unable to add station %pM\n", addr);
2821 return ret;
2822 }
2823
2824 if (sta_id_r)
2825 *sta_id_r = sta_id;
2826
2827 spin_lock_irqsave(&il->sta_lock, flags);
2828 il->stations[sta_id].used |= IL_STA_LOCAL;
2829 spin_unlock_irqrestore(&il->sta_lock, flags);
2830
2831 /* Set up default rate scaling table in device's station table */
2832 link_cmd = il4965_sta_alloc_lq(il, sta_id);
2833 if (!link_cmd) {
2834 IL_ERR(
2835 "Unable to initialize rate scaling for station %pM.\n",
2836 addr);
2837 return -ENOMEM;
2838 }
2839
2840 ret = il_send_lq_cmd(il, ctx, link_cmd, CMD_SYNC, true);
2841 if (ret)
2842 IL_ERR("Link quality command failed (%d)\n", ret);
2843
2844 spin_lock_irqsave(&il->sta_lock, flags);
2845 il->stations[sta_id].lq = link_cmd;
2846 spin_unlock_irqrestore(&il->sta_lock, flags);
2847
2848 return 0;
2849}
2850
2851static int il4965_static_wepkey_cmd(struct il_priv *il,
2852 struct il_rxon_context *ctx,
2853 bool send_if_empty)
2854{
2855 int i, not_empty = 0;
2856 u8 buff[sizeof(struct il_wep_cmd) +
2857 sizeof(struct il_wep_key) * WEP_KEYS_MAX];
2858 struct il_wep_cmd *wep_cmd = (struct il_wep_cmd *)buff;
2859 size_t cmd_size = sizeof(struct il_wep_cmd);
2860 struct il_host_cmd cmd = {
2861 .id = ctx->wep_key_cmd,
2862 .data = wep_cmd,
2863 .flags = CMD_SYNC,
2864 };
2865
2866 might_sleep();
2867
2868 memset(wep_cmd, 0, cmd_size +
2869 (sizeof(struct il_wep_key) * WEP_KEYS_MAX));
2870
2871 for (i = 0; i < WEP_KEYS_MAX ; i++) {
2872 wep_cmd->key[i].key_idx = i;
2873 if (ctx->wep_keys[i].key_size) {
2874 wep_cmd->key[i].key_offset = i;
2875 not_empty = 1;
2876 } else {
2877 wep_cmd->key[i].key_offset = WEP_INVALID_OFFSET;
2878 }
2879
2880 wep_cmd->key[i].key_size = ctx->wep_keys[i].key_size;
2881 memcpy(&wep_cmd->key[i].key[3], ctx->wep_keys[i].key,
2882 ctx->wep_keys[i].key_size);
2883 }
2884
2885 wep_cmd->global_key_type = WEP_KEY_WEP_TYPE;
2886 wep_cmd->num_keys = WEP_KEYS_MAX;
2887
2888 cmd_size += sizeof(struct il_wep_key) * WEP_KEYS_MAX;
2889
2890 cmd.len = cmd_size;
2891
2892 if (not_empty || send_if_empty)
2893 return il_send_cmd(il, &cmd);
2894 else
2895 return 0;
2896}
2897
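/*
 * Editor's note: the command above is assembled in a stack buffer sized for
 * the worst case -- a fixed header immediately followed by WEP_KEYS_MAX key
 * slots. A standalone sketch of that sizing pattern, using hypothetical
 * stand-in types:
 */
struct demo_wep_hdr { unsigned char num_keys; };
struct demo_wep_key { unsigned char data[16]; };
#define DEMO_WEP_KEYS_MAX 4

static unsigned int demo_wep_cmd_size(void)
{
	return sizeof(struct demo_wep_hdr) +
	       sizeof(struct demo_wep_key) * DEMO_WEP_KEYS_MAX;
}
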
2898int il4965_restore_default_wep_keys(struct il_priv *il,
2899 struct il_rxon_context *ctx)
2900{
2901 lockdep_assert_held(&il->mutex);
2902
2903 return il4965_static_wepkey_cmd(il, ctx, false);
2904}
2905
2906int il4965_remove_default_wep_key(struct il_priv *il,
2907 struct il_rxon_context *ctx,
2908 struct ieee80211_key_conf *keyconf)
2909{
2910 int ret;
2911
2912 lockdep_assert_held(&il->mutex);
2913
2914 D_WEP("Removing default WEP key: idx=%d\n",
2915 keyconf->keyidx);
2916
2917 memset(&ctx->wep_keys[keyconf->keyidx], 0, sizeof(ctx->wep_keys[0]));
2918 if (il_is_rfkill(il)) {
2919 D_WEP(
2920 "Not sending REPLY_WEPKEY command due to RFKILL.\n");
2921 /* but keys in device are clear anyway so return success */
2922 return 0;
2923 }
2924 ret = il4965_static_wepkey_cmd(il, ctx, 1);
2925 D_WEP("Remove default WEP key: idx=%d ret=%d\n",
2926 keyconf->keyidx, ret);
2927
2928 return ret;
2929}
2930
2931int il4965_set_default_wep_key(struct il_priv *il,
2932 struct il_rxon_context *ctx,
2933 struct ieee80211_key_conf *keyconf)
2934{
2935 int ret;
2936
2937 lockdep_assert_held(&il->mutex);
2938
2939 if (keyconf->keylen != WEP_KEY_LEN_128 &&
2940 keyconf->keylen != WEP_KEY_LEN_64) {
2941 D_WEP("Bad WEP key length %d\n", keyconf->keylen);
2942 return -EINVAL;
2943 }
2944
2945 keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
2946 keyconf->hw_key_idx = HW_KEY_DEFAULT;
2947 il->stations[ctx->ap_sta_id].keyinfo.cipher = keyconf->cipher;
2948
2949 ctx->wep_keys[keyconf->keyidx].key_size = keyconf->keylen;
2950 memcpy(&ctx->wep_keys[keyconf->keyidx].key, &keyconf->key,
2951 keyconf->keylen);
2952
2953 ret = il4965_static_wepkey_cmd(il, ctx, false);
2954 D_WEP("Set default WEP key: len=%d idx=%d ret=%d\n",
2955 keyconf->keylen, keyconf->keyidx, ret);
2956
2957 return ret;
2958}
2959
2960static int il4965_set_wep_dynamic_key_info(struct il_priv *il,
2961 struct il_rxon_context *ctx,
2962 struct ieee80211_key_conf *keyconf,
2963 u8 sta_id)
2964{
2965 unsigned long flags;
2966 __le16 key_flags = 0;
2967 struct il_addsta_cmd sta_cmd;
2968
2969 lockdep_assert_held(&il->mutex);
2970
2971 keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
2972
2973 key_flags |= (STA_KEY_FLG_WEP | STA_KEY_FLG_MAP_KEY_MSK);
2974 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
2975 key_flags &= ~STA_KEY_FLG_INVALID;
2976
2977 if (keyconf->keylen == WEP_KEY_LEN_128)
2978 key_flags |= STA_KEY_FLG_KEY_SIZE_MSK;
2979
2980 if (sta_id == ctx->bcast_sta_id)
2981 key_flags |= STA_KEY_MULTICAST_MSK;
2982
2983 spin_lock_irqsave(&il->sta_lock, flags);
2984
2985 il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
2986 il->stations[sta_id].keyinfo.keylen = keyconf->keylen;
2987 il->stations[sta_id].keyinfo.keyidx = keyconf->keyidx;
2988
2989 memcpy(il->stations[sta_id].keyinfo.key,
2990 keyconf->key, keyconf->keylen);
2991
2992 memcpy(&il->stations[sta_id].sta.key.key[3],
2993 keyconf->key, keyconf->keylen);
2994
2995 if ((il->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
2996 == STA_KEY_FLG_NO_ENC)
2997 il->stations[sta_id].sta.key.key_offset =
2998 il_get_free_ucode_key_idx(il);
2999 	/* else, we are overriding an existing key => no need to allocate room
3000 * in uCode. */
3001
3002 WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
3003 "no space for a new key");
3004
3005 il->stations[sta_id].sta.key.key_flags = key_flags;
3006 il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
3007 il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
3008
3009 memcpy(&sta_cmd, &il->stations[sta_id].sta,
3010 sizeof(struct il_addsta_cmd));
3011 spin_unlock_irqrestore(&il->sta_lock, flags);
3012
3013 return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
3014}
3015
3016static int il4965_set_ccmp_dynamic_key_info(struct il_priv *il,
3017 struct il_rxon_context *ctx,
3018 struct ieee80211_key_conf *keyconf,
3019 u8 sta_id)
3020{
3021 unsigned long flags;
3022 __le16 key_flags = 0;
3023 struct il_addsta_cmd sta_cmd;
3024
3025 lockdep_assert_held(&il->mutex);
3026
3027 key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
3028 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
3029 key_flags &= ~STA_KEY_FLG_INVALID;
3030
3031 if (sta_id == ctx->bcast_sta_id)
3032 key_flags |= STA_KEY_MULTICAST_MSK;
3033
3034 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
3035
3036 spin_lock_irqsave(&il->sta_lock, flags);
3037 il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
3038 il->stations[sta_id].keyinfo.keylen = keyconf->keylen;
3039
3040 memcpy(il->stations[sta_id].keyinfo.key, keyconf->key,
3041 keyconf->keylen);
3042
3043 memcpy(il->stations[sta_id].sta.key.key, keyconf->key,
3044 keyconf->keylen);
3045
3046 if ((il->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
3047 == STA_KEY_FLG_NO_ENC)
3048 il->stations[sta_id].sta.key.key_offset =
3049 il_get_free_ucode_key_idx(il);
3050 	/* else, we are overriding an existing key => no need to allocate room
3051 * in uCode. */
3052
3053 WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
3054 "no space for a new key");
3055
3056 il->stations[sta_id].sta.key.key_flags = key_flags;
3057 il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
3058 il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
3059
3060 memcpy(&sta_cmd, &il->stations[sta_id].sta,
3061 sizeof(struct il_addsta_cmd));
3062 spin_unlock_irqrestore(&il->sta_lock, flags);
3063
3064 return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
3065}
3066
3067static int il4965_set_tkip_dynamic_key_info(struct il_priv *il,
3068 struct il_rxon_context *ctx,
3069 struct ieee80211_key_conf *keyconf,
3070 u8 sta_id)
3071{
3072 unsigned long flags;
3073 int ret = 0;
3074 __le16 key_flags = 0;
3075
3076 key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK);
3077 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
3078 key_flags &= ~STA_KEY_FLG_INVALID;
3079
3080 if (sta_id == ctx->bcast_sta_id)
3081 key_flags |= STA_KEY_MULTICAST_MSK;
3082
3083 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
3084 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
3085
3086 spin_lock_irqsave(&il->sta_lock, flags);
3087
3088 il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
3089 il->stations[sta_id].keyinfo.keylen = 16;
3090
3091 if ((il->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
3092 == STA_KEY_FLG_NO_ENC)
3093 il->stations[sta_id].sta.key.key_offset =
3094 il_get_free_ucode_key_idx(il);
3095 	/* else, we are overriding an existing key => no need to allocate room
3096 * in uCode. */
3097
3098 WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
3099 "no space for a new key");
3100
3101 il->stations[sta_id].sta.key.key_flags = key_flags;
3102
3103
3104 	/* This copy is actually not needed: we get the key with each TX */
3105 memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, 16);
3106
3107 memcpy(il->stations[sta_id].sta.key.key, keyconf->key, 16);
3108
3109 spin_unlock_irqrestore(&il->sta_lock, flags);
3110
3111 return ret;
3112}
3113
3114void il4965_update_tkip_key(struct il_priv *il,
3115 struct il_rxon_context *ctx,
3116 struct ieee80211_key_conf *keyconf,
3117 struct ieee80211_sta *sta, u32 iv32, u16 *phase1key)
3118{
3119 u8 sta_id;
3120 unsigned long flags;
3121 int i;
3122
3123 if (il_scan_cancel(il)) {
3124 /* cancel scan failed, just live w/ bad key and rely
3125 briefly on SW decryption */
3126 return;
3127 }
3128
3129 sta_id = il_sta_id_or_broadcast(il, ctx, sta);
3130 if (sta_id == IL_INVALID_STATION)
3131 return;
3132
3133 spin_lock_irqsave(&il->sta_lock, flags);
3134
3135 il->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32;
3136
3137 for (i = 0; i < 5; i++)
3138 il->stations[sta_id].sta.key.tkip_rx_ttak[i] =
3139 cpu_to_le16(phase1key[i]);
3140
3141 il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
3142 il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
3143
3144 il_send_add_sta(il, &il->stations[sta_id].sta, CMD_ASYNC);
3145
3146 spin_unlock_irqrestore(&il->sta_lock, flags);
3147
3148}
3149
3150int il4965_remove_dynamic_key(struct il_priv *il,
3151 struct il_rxon_context *ctx,
3152 struct ieee80211_key_conf *keyconf,
3153 u8 sta_id)
3154{
3155 unsigned long flags;
3156 u16 key_flags;
3157 u8 keyidx;
3158 struct il_addsta_cmd sta_cmd;
3159
3160 lockdep_assert_held(&il->mutex);
3161
3162 ctx->key_mapping_keys--;
3163
3164 spin_lock_irqsave(&il->sta_lock, flags);
3165 key_flags = le16_to_cpu(il->stations[sta_id].sta.key.key_flags);
3166 keyidx = (key_flags >> STA_KEY_FLG_KEYID_POS) & 0x3;
3167
3168 D_WEP("Remove dynamic key: idx=%d sta=%d\n",
3169 keyconf->keyidx, sta_id);
3170
3171 if (keyconf->keyidx != keyidx) {
3172 		/* We need to remove a key with an idx different from the one
3173 		 * in the uCode. This means that the key we need to remove has
3174 		 * been replaced by another one with a different idx.
3175 		 * Don't do anything and return ok.
3176 */
3177 spin_unlock_irqrestore(&il->sta_lock, flags);
3178 return 0;
3179 }
3180
3181 if (il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) {
3182 IL_WARN("Removing wrong key %d 0x%x\n",
3183 keyconf->keyidx, key_flags);
3184 spin_unlock_irqrestore(&il->sta_lock, flags);
3185 return 0;
3186 }
3187
3188 if (!test_and_clear_bit(il->stations[sta_id].sta.key.key_offset,
3189 &il->ucode_key_table))
3190 IL_ERR("idx %d not used in uCode key table.\n",
3191 il->stations[sta_id].sta.key.key_offset);
3192 memset(&il->stations[sta_id].keyinfo, 0,
3193 sizeof(struct il_hw_key));
3194 memset(&il->stations[sta_id].sta.key, 0,
3195 sizeof(struct il4965_keyinfo));
3196 il->stations[sta_id].sta.key.key_flags =
3197 STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
3198 il->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET;
3199 il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
3200 il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
3201
3202 if (il_is_rfkill(il)) {
3203 D_WEP(
3204 "Not sending REPLY_ADD_STA command because RFKILL enabled.\n");
3205 spin_unlock_irqrestore(&il->sta_lock, flags);
3206 return 0;
3207 }
3208 memcpy(&sta_cmd, &il->stations[sta_id].sta,
3209 sizeof(struct il_addsta_cmd));
3210 spin_unlock_irqrestore(&il->sta_lock, flags);
3211
3212 return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
3213}
3214
3215int il4965_set_dynamic_key(struct il_priv *il, struct il_rxon_context *ctx,
3216 struct ieee80211_key_conf *keyconf, u8 sta_id)
3217{
3218 int ret;
3219
3220 lockdep_assert_held(&il->mutex);
3221
3222 ctx->key_mapping_keys++;
3223 keyconf->hw_key_idx = HW_KEY_DYNAMIC;
3224
3225 switch (keyconf->cipher) {
3226 case WLAN_CIPHER_SUITE_CCMP:
3227 ret = il4965_set_ccmp_dynamic_key_info(il, ctx,
3228 keyconf, sta_id);
3229 break;
3230 case WLAN_CIPHER_SUITE_TKIP:
3231 ret = il4965_set_tkip_dynamic_key_info(il, ctx,
3232 keyconf, sta_id);
3233 break;
3234 case WLAN_CIPHER_SUITE_WEP40:
3235 case WLAN_CIPHER_SUITE_WEP104:
3236 ret = il4965_set_wep_dynamic_key_info(il, ctx,
3237 keyconf, sta_id);
3238 break;
3239 default:
3240 IL_ERR(
3241 "Unknown alg: %s cipher = %x\n", __func__,
3242 keyconf->cipher);
3243 ret = -EINVAL;
3244 }
3245
3246 D_WEP(
3247 "Set dynamic key: cipher=%x len=%d idx=%d sta=%d ret=%d\n",
3248 keyconf->cipher, keyconf->keylen, keyconf->keyidx,
3249 sta_id, ret);
3250
3251 return ret;
3252}
3253
3254/**
3255 * il4965_alloc_bcast_station - add broadcast station into driver's station table.
3256 *
3257 * This adds the broadcast station into the driver's station table
3258 * and marks it driver active, so that it will be restored to the
3259 * device at the next best time.
3260 */
3261int il4965_alloc_bcast_station(struct il_priv *il,
3262 struct il_rxon_context *ctx)
3263{
3264 struct il_link_quality_cmd *link_cmd;
3265 unsigned long flags;
3266 u8 sta_id;
3267
3268 spin_lock_irqsave(&il->sta_lock, flags);
3269 sta_id = il_prep_station(il, ctx, il_bcast_addr,
3270 false, NULL);
3271 if (sta_id == IL_INVALID_STATION) {
3272 IL_ERR("Unable to prepare broadcast station\n");
3273 spin_unlock_irqrestore(&il->sta_lock, flags);
3274
3275 return -EINVAL;
3276 }
3277
3278 il->stations[sta_id].used |= IL_STA_DRIVER_ACTIVE;
3279 il->stations[sta_id].used |= IL_STA_BCAST;
3280 spin_unlock_irqrestore(&il->sta_lock, flags);
3281
3282 link_cmd = il4965_sta_alloc_lq(il, sta_id);
3283 if (!link_cmd) {
3284 IL_ERR(
3285 "Unable to initialize rate scaling for bcast station.\n");
3286 return -ENOMEM;
3287 }
3288
3289 spin_lock_irqsave(&il->sta_lock, flags);
3290 il->stations[sta_id].lq = link_cmd;
3291 spin_unlock_irqrestore(&il->sta_lock, flags);
3292
3293 return 0;
3294}
3295
3296/**
3297 * il4965_update_bcast_station - update broadcast station's LQ command
3298 *
3299 * Only used by iwl4965. Placed here to have all bcast station management
3300 * code together.
3301 */
3302static int il4965_update_bcast_station(struct il_priv *il,
3303 struct il_rxon_context *ctx)
3304{
3305 unsigned long flags;
3306 struct il_link_quality_cmd *link_cmd;
3307 u8 sta_id = ctx->bcast_sta_id;
3308
3309 link_cmd = il4965_sta_alloc_lq(il, sta_id);
3310 if (!link_cmd) {
3311 IL_ERR(
3312 "Unable to initialize rate scaling for bcast station.\n");
3313 return -ENOMEM;
3314 }
3315
3316 spin_lock_irqsave(&il->sta_lock, flags);
3317 if (il->stations[sta_id].lq)
3318 kfree(il->stations[sta_id].lq);
3319 else
3320 D_INFO(
3321 "Bcast station rate scaling has not been initialized yet.\n");
3322 il->stations[sta_id].lq = link_cmd;
3323 spin_unlock_irqrestore(&il->sta_lock, flags);
3324
3325 return 0;
3326}
3327
3328int il4965_update_bcast_stations(struct il_priv *il)
3329{
3330 return il4965_update_bcast_station(il, &il->ctx);
3331}
3332
3333/**
3334 * il4965_sta_tx_modify_enable_tid - Enable Tx for this TID in station table
3335 */
3336int il4965_sta_tx_modify_enable_tid(struct il_priv *il, int sta_id, int tid)
3337{
3338 unsigned long flags;
3339 struct il_addsta_cmd sta_cmd;
3340
3341 lockdep_assert_held(&il->mutex);
3342
3343 /* Remove "disable" flag, to enable Tx for this TID */
3344 spin_lock_irqsave(&il->sta_lock, flags);
3345 il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
3346 il->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
3347 il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
3348 memcpy(&sta_cmd, &il->stations[sta_id].sta,
3349 sizeof(struct il_addsta_cmd));
3350 spin_unlock_irqrestore(&il->sta_lock, flags);
3351
3352 return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
3353}
3354
3355int il4965_sta_rx_agg_start(struct il_priv *il, struct ieee80211_sta *sta,
3356 int tid, u16 ssn)
3357{
3358 unsigned long flags;
3359 int sta_id;
3360 struct il_addsta_cmd sta_cmd;
3361
3362 lockdep_assert_held(&il->mutex);
3363
3364 sta_id = il_sta_id(sta);
3365 if (sta_id == IL_INVALID_STATION)
3366 return -ENXIO;
3367
3368 spin_lock_irqsave(&il->sta_lock, flags);
3369 il->stations[sta_id].sta.station_flags_msk = 0;
3370 il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
3371 il->stations[sta_id].sta.add_immediate_ba_tid = (u8)tid;
3372 il->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
3373 il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
3374 memcpy(&sta_cmd, &il->stations[sta_id].sta,
3375 sizeof(struct il_addsta_cmd));
3376 spin_unlock_irqrestore(&il->sta_lock, flags);
3377
3378 return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
3379}
3380
3381int il4965_sta_rx_agg_stop(struct il_priv *il, struct ieee80211_sta *sta,
3382 int tid)
3383{
3384 unsigned long flags;
3385 int sta_id;
3386 struct il_addsta_cmd sta_cmd;
3387
3388 lockdep_assert_held(&il->mutex);
3389
3390 sta_id = il_sta_id(sta);
3391 if (sta_id == IL_INVALID_STATION) {
3392 IL_ERR("Invalid station for AGG tid %d\n", tid);
3393 return -ENXIO;
3394 }
3395
3396 spin_lock_irqsave(&il->sta_lock, flags);
3397 il->stations[sta_id].sta.station_flags_msk = 0;
3398 il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
3399 il->stations[sta_id].sta.remove_immediate_ba_tid = (u8)tid;
3400 il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
3401 memcpy(&sta_cmd, &il->stations[sta_id].sta,
3402 sizeof(struct il_addsta_cmd));
3403 spin_unlock_irqrestore(&il->sta_lock, flags);
3404
3405 return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
3406}
3407
3408void
3409il4965_sta_modify_sleep_tx_count(struct il_priv *il, int sta_id, int cnt)
3410{
3411 unsigned long flags;
3412
3413 spin_lock_irqsave(&il->sta_lock, flags);
3414 il->stations[sta_id].sta.station_flags |= STA_FLG_PWR_SAVE_MSK;
3415 il->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
3416 il->stations[sta_id].sta.sta.modify_mask =
3417 STA_MODIFY_SLEEP_TX_COUNT_MSK;
3418 il->stations[sta_id].sta.sleep_tx_count = cpu_to_le16(cnt);
3419 il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
3420 il_send_add_sta(il,
3421 &il->stations[sta_id].sta, CMD_ASYNC);
3422 spin_unlock_irqrestore(&il->sta_lock, flags);
3423
3424}
3425
3426 void il4965_update_chain_flags(struct il_priv *il)
3427 {
3428 	if (il->cfg->ops->hcmd->set_rxon_chain) {
3429 		il->cfg->ops->hcmd->set_rxon_chain(il, &il->ctx);
3430 		if (il->ctx.active.rx_chain != il->ctx.staging.rx_chain)
3431 			il_commit_rxon(il, &il->ctx);
3432 }
3433}
3434
3435 static void il4965_clear_free_frames(struct il_priv *il)
3436 {
3437 	struct list_head *element;
3438 
3439 	D_INFO("%d frames on pre-allocated heap on clear.\n",
3440 		il->frames_count);
3441 
3442 	while (!list_empty(&il->free_frames)) {
3443 		element = il->free_frames.next;
3444 		list_del(element);
3445 		kfree(list_entry(element, struct il_frame, list));
3446 		il->frames_count--;
3447 	}
3448 
3449 	if (il->frames_count) {
3450 		IL_WARN("%d frames still in use. Did we lose one?\n",
3451 			il->frames_count);
3452 		il->frames_count = 0;
3453 }
3454}
3455
3456 static struct il_frame *il4965_get_free_frame(struct il_priv *il)
3457 {
3458 	struct il_frame *frame;
3459 	struct list_head *element;
3460 	if (list_empty(&il->free_frames)) {
3461 		frame = kzalloc(sizeof(*frame), GFP_KERNEL);
3462 		if (!frame) {
3463 			IL_ERR("Could not allocate frame!\n");
3464 			return NULL;
3465 		}
3466 
3467 		il->frames_count++;
3468 		return frame;
3469 	}
3470 
3471 	element = il->free_frames.next;
3472 	list_del(element);
3473 	return list_entry(element, struct il_frame, list);
3474}
3475
3476 static void il4965_free_frame(struct il_priv *il, struct il_frame *frame)
3477 {
3478 	memset(frame, 0, sizeof(*frame));
3479 	list_add(&frame->list, &il->free_frames);
3480}
3481
3482 static u32 il4965_fill_beacon_frame(struct il_priv *il,
3483 				struct ieee80211_hdr *hdr,
3484 				int left)
3485 {
3486 	lockdep_assert_held(&il->mutex);
3487 
3488 	if (!il->beacon_skb)
3489 		return 0;
3490 
3491 	if (il->beacon_skb->len > left)
3492 		return 0;
3493 
3494 	memcpy(hdr, il->beacon_skb->data, il->beacon_skb->len);
3495 
3496 	return il->beacon_skb->len;
3497}
3498
3499/* Parse the beacon frame to find the TIM element and set tim_idx & tim_size */
3500 static void il4965_set_beacon_tim(struct il_priv *il,
3501 				  struct il_tx_beacon_cmd *tx_beacon_cmd,
3502 u8 *beacon, u32 frame_size)
3503{
3504 u16 tim_idx;
3505 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon;
3506
3507 /*
3508 	 * The idx is relative to frame start but we start looking at the
3509 * variable-length part of the beacon.
3510 */
3511 tim_idx = mgmt->u.beacon.variable - beacon;
3512
3513 /* Parse variable-length elements of beacon to find WLAN_EID_TIM */
3514 while ((tim_idx < (frame_size - 2)) &&
3515 (beacon[tim_idx] != WLAN_EID_TIM))
3516 tim_idx += beacon[tim_idx+1] + 2;
3517
3518 /* If TIM field was found, set variables */
3519 if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
3520 tx_beacon_cmd->tim_idx = cpu_to_le16(tim_idx);
3521 tx_beacon_cmd->tim_size = beacon[tim_idx+1];
3522 } else
3523 		IL_WARN("Unable to find TIM Element in beacon\n");
3524}
3525
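/*
 * Editor's note: the TIM search above is the standard 802.11 information-
 * element walk -- each element is [id, len, payload...], so the cursor
 * advances by len + 2 until the wanted id appears. A standalone sketch:
 */
static int find_ie(const unsigned char *ies, unsigned int ies_len,
		   unsigned char eid)
{
	unsigned int i = 0;

	while (i + 2 <= ies_len && ies[i] != eid)
		i += ies[i + 1] + 2;	/* skip id, len and payload */

	return (i + 2 <= ies_len) ? (int)i : -1;	/* offset or not found */
}
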
3526 static unsigned int il4965_hw_get_beacon_cmd(struct il_priv *il,
3527 					     struct il_frame *frame)
3528 {
3529 	struct il_tx_beacon_cmd *tx_beacon_cmd;
3530 u32 frame_size;
3531 u32 rate_flags;
3532 u32 rate;
3533 /*
3534 * We have to set up the TX command, the TX Beacon command, and the
3535 * beacon contents.
3536 */
3537
3538 	lockdep_assert_held(&il->mutex);
3539 
3540 	if (!il->beacon_ctx) {
3541 		IL_ERR("trying to build beacon w/o beacon context!\n");
3542 return 0;
3543 }
3544
3545 /* Initialize memory */
3546 tx_beacon_cmd = &frame->u.beacon;
3547 memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
3548
3549 /* Set up TX beacon contents */
3550 	frame_size = il4965_fill_beacon_frame(il, tx_beacon_cmd->frame,
3551 sizeof(frame->u) - sizeof(*tx_beacon_cmd));
3552 if (WARN_ON_ONCE(frame_size > MAX_MPDU_SIZE))
3553 return 0;
3554 if (!frame_size)
3555 return 0;
3556
3557 /* Set up TX command fields */
3558 tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);
3559 	tx_beacon_cmd->tx.sta_id = il->beacon_ctx->bcast_sta_id;
3560 tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
3561 tx_beacon_cmd->tx.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK |
3562 TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK;
3563
3564 /* Set up TX beacon command fields */
3565 	il4965_set_beacon_tim(il, tx_beacon_cmd, (u8 *)tx_beacon_cmd->frame,
3566 frame_size);
3567
3568 /* Set up packet rate and flags */
3569 rate = il_get_lowest_plcp(il, il->beacon_ctx);
3570 il->mgmt_tx_ant = il4965_toggle_tx_ant(il, il->mgmt_tx_ant,
3571 il->hw_params.valid_tx_ant);
3572 rate_flags = il4965_ant_idx_to_flags(il->mgmt_tx_ant);
3573 	if ((rate >= IL_FIRST_CCK_RATE) && (rate <= IL_LAST_CCK_RATE))
3574 		rate_flags |= RATE_MCS_CCK_MSK;
3575 	tx_beacon_cmd->tx.rate_n_flags = il4965_hw_set_rate_n_flags(rate,
3576 rate_flags);
3577
3578 return sizeof(*tx_beacon_cmd) + frame_size;
3579}
3580
3581 int il4965_send_beacon_cmd(struct il_priv *il)
3582 {
3583 	struct il_frame *frame;
3584 unsigned int frame_size;
3585 int rc;
3586
3587 	frame = il4965_get_free_frame(il);
3588 	if (!frame) {
3589 		IL_ERR("Could not obtain free frame buffer for beacon "
3590 "command.\n");
3591 return -ENOMEM;
3592 }
3593
3594 	frame_size = il4965_hw_get_beacon_cmd(il, frame);
3595 	if (!frame_size) {
3596 		IL_ERR("Error configuring the beacon command\n");
3597 		il4965_free_frame(il, frame);
3598 return -EINVAL;
3599 }
3600
46bc8d4b 3601 rc = il_send_cmd_pdu(il, REPLY_TX_BEACON, frame_size,
be663ab6
WYG
3602 &frame->u.cmd[0]);
3603
46bc8d4b 3604 il4965_free_frame(il, frame);
be663ab6
WYG
3605
3606 return rc;
3607}
3608
e2ebc833 3609static inline dma_addr_t il4965_tfd_tb_get_addr(struct il_tfd *tfd, u8 idx)
be663ab6 3610{
e2ebc833 3611 struct il_tfd_tb *tb = &tfd->tbs[idx];
be663ab6
WYG
3612
3613 dma_addr_t addr = get_unaligned_le32(&tb->lo);
3614 if (sizeof(dma_addr_t) > sizeof(u32))
3615 addr |=
3616 ((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;
3617
3618 return addr;
3619}
3620
e2ebc833 3621static inline u16 il4965_tfd_tb_get_len(struct il_tfd *tfd, u8 idx)
be663ab6 3622{
e2ebc833 3623 struct il_tfd_tb *tb = &tfd->tbs[idx];
be663ab6
WYG
3624
3625 return le16_to_cpu(tb->hi_n_len) >> 4;
3626}
3627
e2ebc833 3628static inline void il4965_tfd_set_tb(struct il_tfd *tfd, u8 idx,
be663ab6
WYG
3629 dma_addr_t addr, u16 len)
3630{
e2ebc833 3631 struct il_tfd_tb *tb = &tfd->tbs[idx];
be663ab6
WYG
3632 u16 hi_n_len = len << 4;
3633
3634 put_unaligned_le32(addr, &tb->lo);
3635 if (sizeof(dma_addr_t) > sizeof(u32))
3636 hi_n_len |= ((addr >> 16) >> 16) & 0xF;
3637
3638 tb->hi_n_len = cpu_to_le16(hi_n_len);
3639
3640 tfd->num_tbs = idx + 1;
3641}
3642
e2ebc833 3643static inline u8 il4965_tfd_get_num_tbs(struct il_tfd *tfd)
be663ab6
WYG
3644{
3645 return tfd->num_tbs & 0x1f;
3646}
3647
3648/**
e2ebc833 3649 * il4965_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
46bc8d4b 3650 * @il - driver ilate data
be663ab6
WYG
3651 * @txq - tx queue
3652 *
0c2c8852 3653 * Does NOT advance any TFD circular buffer read/write idxes
be663ab6
WYG
3654 * Does NOT free the TFD itself (which is within circular buffer)
3655 */
46bc8d4b 3656void il4965_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq)
be663ab6 3657{
e2ebc833
SG
3658 struct il_tfd *tfd_tmp = (struct il_tfd *)txq->tfds;
3659 struct il_tfd *tfd;
46bc8d4b 3660 struct pci_dev *dev = il->pci_dev;
0c2c8852 3661 int idx = txq->q.read_ptr;
be663ab6
WYG
3662 int i;
3663 int num_tbs;
3664
0c2c8852 3665 tfd = &tfd_tmp[idx];
be663ab6
WYG
3666
3667 /* Sanity check on number of chunks */
e2ebc833 3668 num_tbs = il4965_tfd_get_num_tbs(tfd);
be663ab6 3669
e2ebc833 3670 if (num_tbs >= IL_NUM_OF_TBS) {
9406f797 3671 IL_ERR("Too many chunks: %i\n", num_tbs);
be663ab6
WYG
3672 /* @todo issue fatal error, it is quite serious situation */
3673 return;
3674 }
3675
3676 /* Unmap tx_cmd */
3677 if (num_tbs)
3678 pci_unmap_single(dev,
0c2c8852
SG
3679 dma_unmap_addr(&txq->meta[idx], mapping),
3680 dma_unmap_len(&txq->meta[idx], len),
be663ab6
WYG
3681 PCI_DMA_BIDIRECTIONAL);
3682
3683 /* Unmap chunks, if any. */
3684 for (i = 1; i < num_tbs; i++)
e2ebc833
SG
3685 pci_unmap_single(dev, il4965_tfd_tb_get_addr(tfd, i),
3686 il4965_tfd_tb_get_len(tfd, i),
be663ab6
WYG
3687 PCI_DMA_TODEVICE);
3688
3689 /* free SKB */
3690 if (txq->txb) {
3691 struct sk_buff *skb;
3692
3693 skb = txq->txb[txq->q.read_ptr].skb;
3694
3695 /* can be called from irqs-disabled context */
3696 if (skb) {
3697 dev_kfree_skb_any(skb);
3698 txq->txb[txq->q.read_ptr].skb = NULL;
3699 }
3700 }
3701}
3702
46bc8d4b 3703int il4965_hw_txq_attach_buf_to_tfd(struct il_priv *il,
e2ebc833 3704 struct il_tx_queue *txq,
be663ab6
WYG
3705 dma_addr_t addr, u16 len,
3706 u8 reset, u8 pad)
3707{
e2ebc833
SG
3708 struct il_queue *q;
3709 struct il_tfd *tfd, *tfd_tmp;
be663ab6
WYG
3710 u32 num_tbs;
3711
3712 q = &txq->q;
e2ebc833 3713 tfd_tmp = (struct il_tfd *)txq->tfds;
be663ab6
WYG
3714 tfd = &tfd_tmp[q->write_ptr];
3715
3716 if (reset)
3717 memset(tfd, 0, sizeof(*tfd));
3718
e2ebc833 3719 num_tbs = il4965_tfd_get_num_tbs(tfd);
be663ab6
WYG
3720
3721 /* Each TFD can point to a maximum 20 Tx buffers */
e2ebc833 3722 if (num_tbs >= IL_NUM_OF_TBS) {
9406f797 3723 IL_ERR("Error can not send more than %d chunks\n",
e2ebc833 3724 IL_NUM_OF_TBS);
be663ab6
WYG
3725 return -EINVAL;
3726 }
3727
3728 BUG_ON(addr & ~DMA_BIT_MASK(36));
e2ebc833 3729 if (unlikely(addr & ~IL_TX_DMA_MASK))
9406f797 3730 IL_ERR("Unaligned address = %llx\n",
be663ab6
WYG
3731 (unsigned long long)addr);
3732
e2ebc833 3733 il4965_tfd_set_tb(tfd, num_tbs, addr, len);
be663ab6
WYG
3734
3735 return 0;
3736}
3737
3738/*
3739 * Tell nic where to find circular buffer of Tx Frame Descriptors for
3740 * given Tx queue, and enable the DMA channel used for that queue.
3741 *
3742 * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
3743 * channels supported in hardware.
3744 */
46bc8d4b 3745int il4965_hw_tx_queue_init(struct il_priv *il,
e2ebc833 3746 struct il_tx_queue *txq)
be663ab6
WYG
3747{
3748 int txq_id = txq->q.id;
3749
3750 /* Circular buffer (TFD queue in DRAM) physical base address */
0c1a94e2 3751 il_wr(il, FH_MEM_CBBC_QUEUE(txq_id),
be663ab6
WYG
3752 txq->q.dma_addr >> 8);
3753
3754 return 0;
3755}
3756
3757/******************************************************************************
3758 *
3759 * Generic RX handler implementations
3760 *
3761 ******************************************************************************/
46bc8d4b 3762static void il4965_rx_reply_alive(struct il_priv *il,
b73bb5f1 3763 struct il_rx_buf *rxb)
be663ab6 3764{
dcae1c64 3765 struct il_rx_pkt *pkt = rxb_addr(rxb);
e2ebc833 3766 struct il_alive_resp *palive;
be663ab6
WYG
3767 struct delayed_work *pwork;
3768
3769 palive = &pkt->u.alive_frame;
3770
58de00a4 3771 D_INFO("Alive ucode status 0x%08X revision "
be663ab6
WYG
3772 "0x%01X 0x%01X\n",
3773 palive->is_valid, palive->ver_type,
3774 palive->ver_subtype);
3775
3776 if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
58de00a4 3777 D_INFO("Initialization Alive received.\n");
46bc8d4b 3778 memcpy(&il->card_alive_init,
be663ab6 3779 &pkt->u.alive_frame,
e2ebc833 3780 sizeof(struct il_init_alive_resp));
46bc8d4b 3781 pwork = &il->init_alive_start;
be663ab6 3782 } else {
58de00a4 3783 D_INFO("Runtime Alive received.\n");
46bc8d4b 3784 memcpy(&il->card_alive, &pkt->u.alive_frame,
e2ebc833 3785 sizeof(struct il_alive_resp));
46bc8d4b 3786 pwork = &il->alive_start;
be663ab6
WYG
3787 }
3788
3789 /* We delay the ALIVE response by 5ms to
3790 * give the HW RF Kill time to activate... */
3791 if (palive->is_valid == UCODE_VALID_OK)
46bc8d4b 3792 queue_delayed_work(il->workqueue, pwork,
be663ab6
WYG
3793 msecs_to_jiffies(5));
3794 else
9406f797 3795 IL_WARN("uCode did not respond OK.\n");
be663ab6
WYG
3796}
3797
3798/**
ebf0d90d 3799 * il4965_bg_stats_periodic - Timer callback to queue stats
be663ab6 3800 *
ebf0d90d 3801 * This callback is provided in order to send a stats request.
be663ab6
WYG
3802 *
3803 * This timer function is continually reset to execute within
3804 * REG_RECALIB_PERIOD seconds since the last STATISTICS_NOTIFICATION
ebf0d90d 3805 * was received. We need to ensure we receive the stats in order
be663ab6
WYG
3806 * to update the temperature used for calibrating the TXPOWER.
3807 */
ebf0d90d 3808static void il4965_bg_stats_periodic(unsigned long data)
be663ab6 3809{
46bc8d4b 3810 struct il_priv *il = (struct il_priv *)data;
be663ab6 3811
a6766ccd 3812 if (test_bit(S_EXIT_PENDING, &il->status))
be663ab6
WYG
3813 return;
3814
3815 /* dont send host command if rf-kill is on */
46bc8d4b 3816 if (!il_is_ready_rf(il))
be663ab6
WYG
3817 return;
3818
ebf0d90d 3819 il_send_stats_request(il, CMD_ASYNC, false);
be663ab6
WYG
3820}
3821
46bc8d4b 3822static void il4965_rx_beacon_notif(struct il_priv *il,
b73bb5f1 3823 struct il_rx_buf *rxb)
be663ab6 3824{
dcae1c64 3825 struct il_rx_pkt *pkt = rxb_addr(rxb);
e2ebc833
SG
3826 struct il4965_beacon_notif *beacon =
3827 (struct il4965_beacon_notif *)pkt->u.raw;
d3175167 3828#ifdef CONFIG_IWLEGACY_DEBUG
e2ebc833 3829 u8 rate = il4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
be663ab6 3830
58de00a4 3831 D_RX("beacon status %x retries %d iss %d "
be663ab6
WYG
3832 "tsf %d %d rate %d\n",
3833 le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
3834 beacon->beacon_notify_hdr.failure_frame,
3835 le32_to_cpu(beacon->ibss_mgr_status),
3836 le32_to_cpu(beacon->high_tsf),
3837 le32_to_cpu(beacon->low_tsf), rate);
3838#endif
3839
46bc8d4b 3840 il->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
be663ab6
WYG
3841}
3842
46bc8d4b 3843static void il4965_perform_ct_kill_task(struct il_priv *il)
be663ab6
WYG
3844{
3845 unsigned long flags;
3846
58de00a4 3847 D_POWER("Stop all queues\n");
be663ab6 3848
46bc8d4b
SG
3849 if (il->mac80211_registered)
3850 ieee80211_stop_queues(il->hw);
be663ab6 3851
841b2cca 3852 _il_wr(il, CSR_UCODE_DRV_GP1_SET,
be663ab6 3853 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
841b2cca 3854 _il_rd(il, CSR_UCODE_DRV_GP1);
be663ab6 3855
46bc8d4b 3856 spin_lock_irqsave(&il->reg_lock, flags);
13882269
SG
3857 if (!_il_grab_nic_access(il))
3858 _il_release_nic_access(il);
46bc8d4b 3859 spin_unlock_irqrestore(&il->reg_lock, flags);
be663ab6
WYG
3860}
3861
3862/* Handle notification from uCode that card's power state is changing
3863 * due to software, hardware, or critical temperature RFKILL */
46bc8d4b 3864static void il4965_rx_card_state_notif(struct il_priv *il,
b73bb5f1 3865 struct il_rx_buf *rxb)
be663ab6 3866{
dcae1c64 3867 struct il_rx_pkt *pkt = rxb_addr(rxb);
be663ab6 3868 u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
46bc8d4b 3869 unsigned long status = il->status;
be663ab6 3870
58de00a4 3871 D_RF_KILL("Card state received: HW:%s SW:%s CT:%s\n",
be663ab6
WYG
3872 (flags & HW_CARD_DISABLED) ? "Kill" : "On",
3873 (flags & SW_CARD_DISABLED) ? "Kill" : "On",
3874 (flags & CT_CARD_DISABLED) ?
3875 "Reached" : "Not reached");
3876
3877 if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
3878 CT_CARD_DISABLED)) {
3879
841b2cca 3880 _il_wr(il, CSR_UCODE_DRV_GP1_SET,
be663ab6
WYG
3881 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
3882
0c1a94e2 3883 il_wr(il, HBUS_TARG_MBX_C,
be663ab6
WYG
3884 HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
3885
3886 if (!(flags & RXON_CARD_DISABLED)) {
841b2cca 3887 _il_wr(il, CSR_UCODE_DRV_GP1_CLR,
be663ab6 3888 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
0c1a94e2 3889 il_wr(il, HBUS_TARG_MBX_C,
be663ab6
WYG
3890 HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
3891 }
3892 }
3893
3894 if (flags & CT_CARD_DISABLED)
46bc8d4b 3895 il4965_perform_ct_kill_task(il);
be663ab6
WYG
3896
3897 if (flags & HW_CARD_DISABLED)
a6766ccd 3898 set_bit(S_RF_KILL_HW, &il->status);
be663ab6 3899 else
a6766ccd 3900 clear_bit(S_RF_KILL_HW, &il->status);
be663ab6
WYG
3901
3902 if (!(flags & RXON_CARD_DISABLED))
46bc8d4b 3903 il_scan_cancel(il);
be663ab6 3904
a6766ccd
SG
3905 if ((test_bit(S_RF_KILL_HW, &status) !=
3906 test_bit(S_RF_KILL_HW, &il->status)))
46bc8d4b 3907 wiphy_rfkill_set_hw_state(il->hw->wiphy,
a6766ccd 3908 test_bit(S_RF_KILL_HW, &il->status));
be663ab6 3909 else
46bc8d4b 3910 wake_up(&il->wait_command_queue);
be663ab6
WYG
3911}
3912
3913/**
e2ebc833 3914 * il4965_setup_rx_handlers - Initialize Rx handler callbacks
be663ab6
WYG
3915 *
3916 * Setup the RX handlers for each of the reply types sent from the uCode
3917 * to the host.
3918 *
3919 * This function chains into the hardware specific files for them to setup
3920 * any hardware specific handlers as well.
3921 */
46bc8d4b 3922static void il4965_setup_rx_handlers(struct il_priv *il)
be663ab6 3923{
46bc8d4b
SG
3924 il->rx_handlers[REPLY_ALIVE] = il4965_rx_reply_alive;
3925 il->rx_handlers[REPLY_ERROR] = il_rx_reply_error;
3926 il->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = il_rx_csa;
3927 il->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
e2ebc833 3928 il_rx_spectrum_measure_notif;
46bc8d4b
SG
3929 il->rx_handlers[PM_SLEEP_NOTIFICATION] = il_rx_pm_sleep_notif;
3930 il->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
ebf0d90d 3931 il_rx_pm_debug_stats_notif;
46bc8d4b 3932 il->rx_handlers[BEACON_NOTIFICATION] = il4965_rx_beacon_notif;
be663ab6
WYG
3933
3934 /*
3935 * The same handler is used for both the REPLY to a discrete
ebf0d90d
SG
3936 * stats request from the host as well as for the periodic
3937 * stats notifications (after received beacons) from the uCode.
be663ab6 3938 */
ebf0d90d
SG
3939 il->rx_handlers[REPLY_STATISTICS_CMD] = il4965_reply_stats;
3940 il->rx_handlers[STATISTICS_NOTIFICATION] = il4965_rx_stats;
be663ab6 3941
46bc8d4b 3942 il_setup_rx_scan_handlers(il);
be663ab6
WYG
3943
3944 /* status change handler */
46bc8d4b 3945 il->rx_handlers[CARD_STATE_NOTIFICATION] =
e2ebc833 3946 il4965_rx_card_state_notif;
be663ab6 3947
46bc8d4b 3948 il->rx_handlers[MISSED_BEACONS_NOTIFICATION] =
e2ebc833 3949 il4965_rx_missed_beacon_notif;
be663ab6 3950 /* Rx handlers */
46bc8d4b
SG
3951 il->rx_handlers[REPLY_RX_PHY_CMD] = il4965_rx_reply_rx_phy;
3952 il->rx_handlers[REPLY_RX_MPDU_CMD] = il4965_rx_reply_rx;
be663ab6 3953 /* block ack */
46bc8d4b 3954 il->rx_handlers[REPLY_COMPRESSED_BA] = il4965_rx_reply_compressed_ba;
be663ab6 3955 /* Set up hardware specific Rx handlers */
46bc8d4b 3956 il->cfg->ops->lib->rx_handler_setup(il);
be663ab6
WYG
3957}
3958
3959/**
e2ebc833 3960 * il4965_rx_handle - Main entry function for receiving responses from uCode
be663ab6 3961 *
46bc8d4b 3962 * Uses the il->rx_handlers callback function array to invoke
be663ab6
WYG
3963 * the appropriate handlers, including command responses,
3964 * frame-received notifications, and other notifications.
3965 */
46bc8d4b 3966void il4965_rx_handle(struct il_priv *il)
be663ab6 3967{
b73bb5f1 3968 struct il_rx_buf *rxb;
dcae1c64 3969 struct il_rx_pkt *pkt;
46bc8d4b 3970 struct il_rx_queue *rxq = &il->rxq;
be663ab6
WYG
3971 u32 r, i;
3972 int reclaim;
3973 unsigned long flags;
3974 u8 fill_rx = 0;
3975 u32 count = 8;
3976 int total_empty;
3977
0c2c8852 3978 /* uCode's read idx (stored in shared DRAM) indicates the last Rx
be663ab6
WYG
3979 * buffer that the driver may process (last buffer filled by ucode). */
3980 r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
3981 i = rxq->read;
3982
3983 /* Rx interrupt, but nothing sent from uCode */
3984 if (i == r)
58de00a4 3985 D_RX("r = %d, i = %d\n", r, i);
be663ab6
WYG
3986
3987 /* calculate total frames need to be restock after handling RX */
3988 total_empty = r - rxq->write_actual;
3989 if (total_empty < 0)
3990 total_empty += RX_QUEUE_SIZE;
3991
3992 if (total_empty > (RX_QUEUE_SIZE / 2))
3993 fill_rx = 1;
3994
3995 while (i != r) {
3996 int len;
3997
3998 rxb = rxq->queue[i];
3999
4000 /* If an RXB doesn't have a Rx queue slot associated with it,
4001 * then a bug has been introduced in the queue refilling
4002 * routines -- catch it here */
4003 BUG_ON(rxb == NULL);
4004
4005 rxq->queue[i] = NULL;
4006
46bc8d4b
SG
4007 pci_unmap_page(il->pci_dev, rxb->page_dma,
4008 PAGE_SIZE << il->hw_params.rx_page_order,
be663ab6
WYG
4009 PCI_DMA_FROMDEVICE);
4010 pkt = rxb_addr(rxb);
4011
4012 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
4013 len += sizeof(u32); /* account for status word */
be663ab6
WYG
4014
4015 /* Reclaim a command buffer only if this packet is a response
4016 * to a (driver-originated) command.
4017 * If the packet (e.g. Rx frame) originated from uCode,
4018 * there is no command buffer to reclaim.
4019 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
4020 * but apparently a few don't get set; catch them here. */
4021 reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
4022 (pkt->hdr.cmd != REPLY_RX_PHY_CMD) &&
4023 (pkt->hdr.cmd != REPLY_RX) &&
4024 (pkt->hdr.cmd != REPLY_RX_MPDU_CMD) &&
4025 (pkt->hdr.cmd != REPLY_COMPRESSED_BA) &&
4026 (pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
4027 (pkt->hdr.cmd != REPLY_TX);
4028
4029 /* Based on type of command response or notification,
4030 * handle those that need handling via function in
e2ebc833 4031 * rx_handlers table. See il4965_setup_rx_handlers() */
46bc8d4b 4032 if (il->rx_handlers[pkt->hdr.cmd]) {
58de00a4 4033 D_RX("r = %d, i = %d, %s, 0x%02x\n", r,
e2ebc833 4034 i, il_get_cmd_string(pkt->hdr.cmd),
be663ab6 4035 pkt->hdr.cmd);
46bc8d4b
SG
4036 il->isr_stats.rx_handlers[pkt->hdr.cmd]++;
4037 il->rx_handlers[pkt->hdr.cmd] (il, rxb);
be663ab6
WYG
4038 } else {
4039 /* No handling needed */
58de00a4 4040 D_RX(
be663ab6 4041 "r %d i %d No handler needed for %s, 0x%02x\n",
e2ebc833 4042 r, i, il_get_cmd_string(pkt->hdr.cmd),
be663ab6
WYG
4043 pkt->hdr.cmd);
4044 }
4045
4046 /*
4047 * XXX: After here, we should always check rxb->page
4048 * against NULL before touching it or its virtual
4049 * memory (pkt). Because some rx_handler might have
4050 * already taken or freed the pages.
4051 */
4052
4053 if (reclaim) {
4054 /* Invoke any callbacks, transfer the buffer to caller,
e2ebc833 4055 * and fire off the (possibly) blocking il_send_cmd()
be663ab6
WYG
4056 * as we reclaim the driver command queue */
4057 if (rxb->page)
46bc8d4b 4058 il_tx_cmd_complete(il, rxb);
be663ab6 4059 else
9406f797 4060 IL_WARN("Claim null rxb?\n");
be663ab6
WYG
4061 }
4062
4063 /* Reuse the page if possible. For notification packets and
4064 * SKBs that fail to Rx correctly, add them back into the
4065 * rx_free list for reuse later. */
4066 spin_lock_irqsave(&rxq->lock, flags);
4067 if (rxb->page != NULL) {
46bc8d4b
SG
4068 rxb->page_dma = pci_map_page(il->pci_dev, rxb->page,
4069 0, PAGE_SIZE << il->hw_params.rx_page_order,
be663ab6
WYG
4070 PCI_DMA_FROMDEVICE);
4071 list_add_tail(&rxb->list, &rxq->rx_free);
4072 rxq->free_count++;
4073 } else
4074 list_add_tail(&rxb->list, &rxq->rx_used);
4075
4076 spin_unlock_irqrestore(&rxq->lock, flags);
4077
4078 i = (i + 1) & RX_QUEUE_MASK;
4079 /* If there are a lot of unused frames,
4080 * restock the Rx queue so ucode wont assert. */
4081 if (fill_rx) {
4082 count++;
4083 if (count >= 8) {
4084 rxq->read = i;
46bc8d4b 4085 il4965_rx_replenish_now(il);
be663ab6
WYG
4086 count = 0;
4087 }
4088 }
4089 }
4090
4091 /* Backtrack one entry */
4092 rxq->read = i;
4093 if (fill_rx)
46bc8d4b 4094 il4965_rx_replenish_now(il);
be663ab6 4095 else
46bc8d4b 4096 il4965_rx_queue_restock(il);
be663ab6
WYG
4097}
4098
4099/* call this function to flush any scheduled tasklet */
46bc8d4b 4100static inline void il4965_synchronize_irq(struct il_priv *il)
be663ab6
WYG
4101{
4102 /* wait to make sure we flush pending tasklet*/
46bc8d4b
SG
4103 synchronize_irq(il->pci_dev->irq);
4104 tasklet_kill(&il->irq_tasklet);
be663ab6
WYG
4105}
4106
46bc8d4b 4107static void il4965_irq_tasklet(struct il_priv *il)
be663ab6
WYG
4108{
4109 u32 inta, handled = 0;
4110 u32 inta_fh;
4111 unsigned long flags;
4112 u32 i;
d3175167 4113#ifdef CONFIG_IWLEGACY_DEBUG
be663ab6
WYG
4114 u32 inta_mask;
4115#endif
4116
46bc8d4b 4117 spin_lock_irqsave(&il->lock, flags);
be663ab6
WYG
4118
4119 /* Ack/clear/reset pending uCode interrupts.
4120 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
4121 * and will clear only when CSR_FH_INT_STATUS gets cleared. */
841b2cca
SG
4122 inta = _il_rd(il, CSR_INT);
4123 _il_wr(il, CSR_INT, inta);
be663ab6
WYG
4124
4125 /* Ack/clear/reset pending flow-handler (DMA) interrupts.
4126 * Any new interrupts that happen after this, either while we're
4127 * in this tasklet, or later, will show up in next ISR/tasklet. */
841b2cca
SG
4128 inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
4129 _il_wr(il, CSR_FH_INT_STATUS, inta_fh);
be663ab6 4130
d3175167 4131#ifdef CONFIG_IWLEGACY_DEBUG
46bc8d4b 4132 if (il_get_debug_level(il) & IL_DL_ISR) {
be663ab6 4133 /* just for debug */
841b2cca 4134 inta_mask = _il_rd(il, CSR_INT_MASK);
58de00a4 4135 D_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
be663ab6
WYG
4136 inta, inta_mask, inta_fh);
4137 }
4138#endif
4139
46bc8d4b 4140 spin_unlock_irqrestore(&il->lock, flags);
be663ab6
WYG
4141
4142 /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
4143 * atomic, make sure that inta covers all the interrupts that
4144 * we've discovered, even if FH interrupt came in just after
4145 * reading CSR_INT. */
4146 if (inta_fh & CSR49_FH_INT_RX_MASK)
4147 inta |= CSR_INT_BIT_FH_RX;
4148 if (inta_fh & CSR49_FH_INT_TX_MASK)
4149 inta |= CSR_INT_BIT_FH_TX;
4150
4151 /* Now service all interrupt bits discovered above. */
4152 if (inta & CSR_INT_BIT_HW_ERR) {
9406f797 4153 IL_ERR("Hardware error detected. Restarting.\n");
be663ab6
WYG
4154
4155 /* Tell the device to stop sending interrupts */
46bc8d4b 4156 il_disable_interrupts(il);
be663ab6 4157
46bc8d4b
SG
4158 il->isr_stats.hw++;
4159 il_irq_handle_error(il);
be663ab6
WYG
4160
4161 handled |= CSR_INT_BIT_HW_ERR;
4162
4163 return;
4164 }
4165
d3175167 4166#ifdef CONFIG_IWLEGACY_DEBUG
46bc8d4b 4167 if (il_get_debug_level(il) & (IL_DL_ISR)) {
be663ab6
WYG
4168 /* NIC fires this, but we don't use it, redundant with WAKEUP */
4169 if (inta & CSR_INT_BIT_SCD) {
58de00a4 4170 D_ISR("Scheduler finished to transmit "
be663ab6 4171 "the frame/frames.\n");
46bc8d4b 4172 il->isr_stats.sch++;
be663ab6
WYG
4173 }
4174
4175 /* Alive notification via Rx interrupt will do the real work */
4176 if (inta & CSR_INT_BIT_ALIVE) {
58de00a4 4177 D_ISR("Alive interrupt\n");
46bc8d4b 4178 il->isr_stats.alive++;
be663ab6
WYG
4179 }
4180 }
4181#endif
4182 /* Safely ignore these bits for debug checks below */
4183 inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
4184
4185 /* HW RF KILL switch toggled */
4186 if (inta & CSR_INT_BIT_RF_KILL) {
4187 int hw_rf_kill = 0;
841b2cca 4188 if (!(_il_rd(il, CSR_GP_CNTRL) &
be663ab6
WYG
4189 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
4190 hw_rf_kill = 1;
4191
9406f797 4192 IL_WARN("RF_KILL bit toggled to %s.\n",
be663ab6
WYG
4193 hw_rf_kill ? "disable radio" : "enable radio");
4194
46bc8d4b 4195 il->isr_stats.rfkill++;
be663ab6
WYG
4196
4197 /* driver only loads ucode once setting the interface up.
4198 * the driver allows loading the ucode even if the radio
4199 * is killed. Hence update the killswitch state here. The
4200 * rfkill handler will care about restarting if needed.
4201 */
a6766ccd 4202 if (!test_bit(S_ALIVE, &il->status)) {
be663ab6 4203 if (hw_rf_kill)
a6766ccd 4204 set_bit(S_RF_KILL_HW, &il->status);
be663ab6 4205 else
a6766ccd 4206 clear_bit(S_RF_KILL_HW, &il->status);
46bc8d4b 4207 wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rf_kill);
be663ab6
WYG
4208 }
4209
4210 handled |= CSR_INT_BIT_RF_KILL;
4211 }
4212
4213 /* Chip got too hot and stopped itself */
4214 if (inta & CSR_INT_BIT_CT_KILL) {
9406f797 4215 IL_ERR("Microcode CT kill error detected.\n");
46bc8d4b 4216 il->isr_stats.ctkill++;
be663ab6
WYG
4217 handled |= CSR_INT_BIT_CT_KILL;
4218 }
4219
4220 /* Error detected by uCode */
4221 if (inta & CSR_INT_BIT_SW_ERR) {
9406f797 4222 IL_ERR("Microcode SW error detected. "
be663ab6 4223 " Restarting 0x%X.\n", inta);
46bc8d4b
SG
4224 il->isr_stats.sw++;
4225 il_irq_handle_error(il);
be663ab6
WYG
4226 handled |= CSR_INT_BIT_SW_ERR;
4227 }
4228
4229 /*
4230 * uCode wakes up after power-down sleep.
4231 * Tell device about any new tx or host commands enqueued,
4232 * and about any Rx buffers made available while asleep.
4233 */
4234 if (inta & CSR_INT_BIT_WAKEUP) {
58de00a4 4235 D_ISR("Wakeup interrupt\n");
46bc8d4b
SG
4236 il_rx_queue_update_write_ptr(il, &il->rxq);
4237 for (i = 0; i < il->hw_params.max_txq_num; i++)
4238 il_txq_update_write_ptr(il, &il->txq[i]);
4239 il->isr_stats.wakeup++;
be663ab6
WYG
4240 handled |= CSR_INT_BIT_WAKEUP;
4241 }
4242
4243 /* All uCode command responses, including Tx command responses,
4244 * Rx "responses" (frame-received notification), and other
4245 * notifications from uCode come through here*/
4246 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
46bc8d4b
SG
4247 il4965_rx_handle(il);
4248 il->isr_stats.rx++;
be663ab6
WYG
4249 handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
4250 }
4251
4252 /* This "Tx" DMA channel is used only for loading uCode */
4253 if (inta & CSR_INT_BIT_FH_TX) {
58de00a4 4254 D_ISR("uCode load interrupt\n");
46bc8d4b 4255 il->isr_stats.tx++;
be663ab6
WYG
4256 handled |= CSR_INT_BIT_FH_TX;
4257 /* Wake up uCode load routine, now that load is complete */
46bc8d4b
SG
4258 il->ucode_write_complete = 1;
4259 wake_up(&il->wait_command_queue);
be663ab6
WYG
4260 }
4261
4262 if (inta & ~handled) {
9406f797 4263 IL_ERR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
46bc8d4b 4264 il->isr_stats.unhandled++;
be663ab6
WYG
4265 }
4266
46bc8d4b 4267 if (inta & ~(il->inta_mask)) {
9406f797 4268 IL_WARN("Disabled INTA bits 0x%08x were pending\n",
46bc8d4b 4269 inta & ~il->inta_mask);
9406f797 4270 IL_WARN(" with FH_INT = 0x%08x\n", inta_fh);
be663ab6
WYG
4271 }
4272
4273 /* Re-enable all interrupts */
93fd74e3 4274 /* only Re-enable if disabled by irq */
a6766ccd 4275 if (test_bit(S_INT_ENABLED, &il->status))
46bc8d4b 4276 il_enable_interrupts(il);
a078a1fd
SG
4277 /* Re-enable RF_KILL if it occurred */
4278 else if (handled & CSR_INT_BIT_RF_KILL)
46bc8d4b 4279 il_enable_rfkill_int(il);
be663ab6 4280
d3175167 4281#ifdef CONFIG_IWLEGACY_DEBUG
46bc8d4b 4282 if (il_get_debug_level(il) & (IL_DL_ISR)) {
841b2cca
SG
4283 inta = _il_rd(il, CSR_INT);
4284 inta_mask = _il_rd(il, CSR_INT_MASK);
4285 inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
58de00a4 4286 D_ISR(
be663ab6
WYG
4287 "End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
4288 "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
4289 }
4290#endif
4291}
4292
4293/*****************************************************************************
4294 *
4295 * sysfs attributes
4296 *
4297 *****************************************************************************/
4298
d3175167 4299#ifdef CONFIG_IWLEGACY_DEBUG
be663ab6
WYG
4300
4301/*
4302 * The following adds a new attribute to the sysfs representation
4303 * of this device driver (i.e. a new file in /sys/class/net/wlan0/device/)
4304 * used for controlling the debug level.
4305 *
4306 * See the level definitions in iwl for details.
4307 *
4308 * The debug_level being managed using sysfs below is a per device debug
4309 * level that is used instead of the global debug level if it (the per
4310 * device debug level) is set.
4311 */
e2ebc833 4312static ssize_t il4965_show_debug_level(struct device *d,
be663ab6
WYG
4313 struct device_attribute *attr, char *buf)
4314{
46bc8d4b
SG
4315 struct il_priv *il = dev_get_drvdata(d);
4316 return sprintf(buf, "0x%08X\n", il_get_debug_level(il));
be663ab6 4317}
e2ebc833 4318static ssize_t il4965_store_debug_level(struct device *d,
be663ab6
WYG
4319 struct device_attribute *attr,
4320 const char *buf, size_t count)
4321{
46bc8d4b 4322 struct il_priv *il = dev_get_drvdata(d);
be663ab6
WYG
4323 unsigned long val;
4324 int ret;
4325
4326 ret = strict_strtoul(buf, 0, &val);
4327 if (ret)
9406f797 4328 IL_ERR("%s is not in hex or decimal form.\n", buf);
be663ab6 4329 else {
46bc8d4b
SG
4330 il->debug_level = val;
4331 if (il_alloc_traffic_mem(il))
9406f797 4332 IL_ERR(
be663ab6
WYG
4333 "Not enough memory to generate traffic log\n");
4334 }
4335 return strnlen(buf, count);
4336}
4337
4338static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
e2ebc833 4339 il4965_show_debug_level, il4965_store_debug_level);
be663ab6
WYG
4340
4341
d3175167 4342#endif /* CONFIG_IWLEGACY_DEBUG */
be663ab6
WYG
4343
4344
e2ebc833 4345static ssize_t il4965_show_temperature(struct device *d,
be663ab6
WYG
4346 struct device_attribute *attr, char *buf)
4347{
46bc8d4b 4348 struct il_priv *il = dev_get_drvdata(d);
be663ab6 4349
46bc8d4b 4350 if (!il_is_alive(il))
be663ab6
WYG
4351 return -EAGAIN;
4352
46bc8d4b 4353 return sprintf(buf, "%d\n", il->temperature);
be663ab6
WYG
4354}
4355
e2ebc833 4356static DEVICE_ATTR(temperature, S_IRUGO, il4965_show_temperature, NULL);
be663ab6 4357
e2ebc833 4358static ssize_t il4965_show_tx_power(struct device *d,
be663ab6
WYG
4359 struct device_attribute *attr, char *buf)
4360{
46bc8d4b 4361 struct il_priv *il = dev_get_drvdata(d);
be663ab6 4362
46bc8d4b 4363 if (!il_is_ready_rf(il))
be663ab6
WYG
4364 return sprintf(buf, "off\n");
4365 else
46bc8d4b 4366 return sprintf(buf, "%d\n", il->tx_power_user_lmt);
be663ab6
WYG
4367}
4368
e2ebc833 4369static ssize_t il4965_store_tx_power(struct device *d,
be663ab6
WYG
4370 struct device_attribute *attr,
4371 const char *buf, size_t count)
4372{
46bc8d4b 4373 struct il_priv *il = dev_get_drvdata(d);
be663ab6
WYG
4374 unsigned long val;
4375 int ret;
4376
4377 ret = strict_strtoul(buf, 10, &val);
4378 if (ret)
9406f797 4379 IL_INFO("%s is not in decimal form.\n", buf);
be663ab6 4380 else {
46bc8d4b 4381 ret = il_set_tx_power(il, val, false);
be663ab6 4382 if (ret)
9406f797 4383 IL_ERR("failed setting tx power (0x%d).\n",
be663ab6
WYG
4384 ret);
4385 else
4386 ret = count;
4387 }
4388 return ret;
4389}
4390
4391static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO,
e2ebc833 4392 il4965_show_tx_power, il4965_store_tx_power);
be663ab6 4393
e2ebc833 4394static struct attribute *il_sysfs_entries[] = {
be663ab6
WYG
4395 &dev_attr_temperature.attr,
4396 &dev_attr_tx_power.attr,
d3175167 4397#ifdef CONFIG_IWLEGACY_DEBUG
be663ab6
WYG
4398 &dev_attr_debug_level.attr,
4399#endif
4400 NULL
4401};
4402
e2ebc833 4403static struct attribute_group il_attribute_group = {
be663ab6 4404 .name = NULL, /* put in device directory */
e2ebc833 4405 .attrs = il_sysfs_entries,
be663ab6
WYG
4406};
4407
4408/******************************************************************************
4409 *
4410 * uCode download functions
4411 *
4412 ******************************************************************************/
4413
46bc8d4b 4414static void il4965_dealloc_ucode_pci(struct il_priv *il)
be663ab6 4415{
46bc8d4b
SG
4416 il_free_fw_desc(il->pci_dev, &il->ucode_code);
4417 il_free_fw_desc(il->pci_dev, &il->ucode_data);
4418 il_free_fw_desc(il->pci_dev, &il->ucode_data_backup);
4419 il_free_fw_desc(il->pci_dev, &il->ucode_init);
4420 il_free_fw_desc(il->pci_dev, &il->ucode_init_data);
4421 il_free_fw_desc(il->pci_dev, &il->ucode_boot);
be663ab6
WYG
4422}
4423
46bc8d4b 4424static void il4965_nic_start(struct il_priv *il)
be663ab6
WYG
4425{
4426 /* Remove all resets to allow NIC to operate */
841b2cca 4427 _il_wr(il, CSR_RESET, 0);
be663ab6
WYG
4428}
4429
e2ebc833 4430static void il4965_ucode_callback(const struct firmware *ucode_raw,
be663ab6 4431 void *context);
46bc8d4b 4432static int il4965_mac_setup_register(struct il_priv *il,
be663ab6
WYG
4433 u32 max_probe_length);
4434
46bc8d4b 4435static int __must_check il4965_request_firmware(struct il_priv *il, bool first)
be663ab6 4436{
46bc8d4b 4437 const char *name_pre = il->cfg->fw_name_pre;
be663ab6
WYG
4438 char tag[8];
4439
4440 if (first) {
0c2c8852
SG
4441 il->fw_idx = il->cfg->ucode_api_max;
4442 sprintf(tag, "%d", il->fw_idx);
be663ab6 4443 } else {
0c2c8852
SG
4444 il->fw_idx--;
4445 sprintf(tag, "%d", il->fw_idx);
be663ab6
WYG
4446 }
4447
0c2c8852 4448 if (il->fw_idx < il->cfg->ucode_api_min) {
9406f797 4449 IL_ERR("no suitable firmware found!\n");
be663ab6
WYG
4450 return -ENOENT;
4451 }
4452
46bc8d4b 4453 sprintf(il->firmware_name, "%s%s%s", name_pre, tag, ".ucode");
be663ab6 4454
58de00a4 4455 D_INFO("attempting to load firmware '%s'\n",
46bc8d4b 4456 il->firmware_name);
be663ab6 4457
46bc8d4b
SG
4458 return request_firmware_nowait(THIS_MODULE, 1, il->firmware_name,
4459 &il->pci_dev->dev, GFP_KERNEL, il,
e2ebc833 4460 il4965_ucode_callback);
be663ab6
WYG
4461}
4462
e2ebc833 4463struct il4965_firmware_pieces {
be663ab6
WYG
4464 const void *inst, *data, *init, *init_data, *boot;
4465 size_t inst_size, data_size, init_size, init_data_size, boot_size;
4466};
4467
46bc8d4b 4468static int il4965_load_firmware(struct il_priv *il,
be663ab6 4469 const struct firmware *ucode_raw,
e2ebc833 4470 struct il4965_firmware_pieces *pieces)
be663ab6 4471{
e2ebc833 4472 struct il_ucode_header *ucode = (void *)ucode_raw->data;
be663ab6
WYG
4473 u32 api_ver, hdr_size;
4474 const u8 *src;
4475
46bc8d4b
SG
4476 il->ucode_ver = le32_to_cpu(ucode->ver);
4477 api_ver = IL_UCODE_API(il->ucode_ver);
be663ab6
WYG
4478
4479 switch (api_ver) {
4480 default:
4481 case 0:
4482 case 1:
4483 case 2:
4484 hdr_size = 24;
4485 if (ucode_raw->size < hdr_size) {
9406f797 4486 IL_ERR("File size too small!\n");
be663ab6
WYG
4487 return -EINVAL;
4488 }
4489 pieces->inst_size = le32_to_cpu(ucode->v1.inst_size);
4490 pieces->data_size = le32_to_cpu(ucode->v1.data_size);
4491 pieces->init_size = le32_to_cpu(ucode->v1.init_size);
4492 pieces->init_data_size =
4493 le32_to_cpu(ucode->v1.init_data_size);
4494 pieces->boot_size = le32_to_cpu(ucode->v1.boot_size);
4495 src = ucode->v1.data;
4496 break;
4497 }
4498
4499 /* Verify size of file vs. image size info in file's header */
4500 if (ucode_raw->size != hdr_size + pieces->inst_size +
4501 pieces->data_size + pieces->init_size +
4502 pieces->init_data_size + pieces->boot_size) {
4503
9406f797 4504 IL_ERR(
be663ab6
WYG
4505 "uCode file size %d does not match expected size\n",
4506 (int)ucode_raw->size);
4507 return -EINVAL;
4508 }
4509
4510 pieces->inst = src;
4511 src += pieces->inst_size;
4512 pieces->data = src;
4513 src += pieces->data_size;
4514 pieces->init = src;
4515 src += pieces->init_size;
4516 pieces->init_data = src;
4517 src += pieces->init_data_size;
4518 pieces->boot = src;
4519 src += pieces->boot_size;
4520
4521 return 0;
4522}
4523
4524/**
e2ebc833 4525 * il4965_ucode_callback - callback when firmware was loaded
be663ab6
WYG
4526 *
4527 * If loaded successfully, copies the firmware into buffers
4528 * for the card to fetch (via DMA).
4529 */
4530static void
e2ebc833 4531il4965_ucode_callback(const struct firmware *ucode_raw, void *context)
be663ab6 4532{
46bc8d4b 4533 struct il_priv *il = context;
e2ebc833 4534 struct il_ucode_header *ucode;
be663ab6 4535 int err;
e2ebc833 4536 struct il4965_firmware_pieces pieces;
46bc8d4b
SG
4537 const unsigned int api_max = il->cfg->ucode_api_max;
4538 const unsigned int api_min = il->cfg->ucode_api_min;
be663ab6
WYG
4539 u32 api_ver;
4540
4541 u32 max_probe_length = 200;
4542 u32 standard_phy_calibration_size =
e2ebc833 4543 IL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
be663ab6
WYG
4544
4545 memset(&pieces, 0, sizeof(pieces));
4546
4547 if (!ucode_raw) {
0c2c8852 4548 if (il->fw_idx <= il->cfg->ucode_api_max)
9406f797 4549 IL_ERR(
be663ab6 4550 "request for firmware file '%s' failed.\n",
46bc8d4b 4551 il->firmware_name);
be663ab6
WYG
4552 goto try_again;
4553 }
4554
58de00a4 4555 D_INFO("Loaded firmware file '%s' (%zd bytes).\n",
46bc8d4b 4556 il->firmware_name, ucode_raw->size);
be663ab6
WYG
4557
4558 /* Make sure that we got at least the API version number */
4559 if (ucode_raw->size < 4) {
9406f797 4560 IL_ERR("File size way too small!\n");
be663ab6
WYG
4561 goto try_again;
4562 }
4563
4564 /* Data from ucode file: header followed by uCode images */
e2ebc833 4565 ucode = (struct il_ucode_header *)ucode_raw->data;
be663ab6 4566
46bc8d4b 4567 err = il4965_load_firmware(il, ucode_raw, &pieces);
be663ab6
WYG
4568
4569 if (err)
4570 goto try_again;
4571
46bc8d4b 4572 api_ver = IL_UCODE_API(il->ucode_ver);
be663ab6
WYG
4573
4574 /*
4575 * api_ver should match the api version forming part of the
4576 * firmware filename ... but we don't check for that and only rely
4577 * on the API version read from firmware header from here on forward
4578 */
4579 if (api_ver < api_min || api_ver > api_max) {
9406f797 4580 IL_ERR(
be663ab6
WYG
4581 "Driver unable to support your firmware API. "
4582 "Driver supports v%u, firmware is v%u.\n",
4583 api_max, api_ver);
4584 goto try_again;
4585 }
4586
4587 if (api_ver != api_max)
9406f797 4588 IL_ERR(
be663ab6
WYG
4589 "Firmware has old API version. Expected v%u, "
4590 "got v%u. New firmware can be obtained "
4591 "from http://www.intellinuxwireless.org.\n",
4592 api_max, api_ver);
4593
9406f797 4594 IL_INFO("loaded firmware version %u.%u.%u.%u\n",
46bc8d4b
SG
4595 IL_UCODE_MAJOR(il->ucode_ver),
4596 IL_UCODE_MINOR(il->ucode_ver),
4597 IL_UCODE_API(il->ucode_ver),
4598 IL_UCODE_SERIAL(il->ucode_ver));
be663ab6 4599
46bc8d4b
SG
4600 snprintf(il->hw->wiphy->fw_version,
4601 sizeof(il->hw->wiphy->fw_version),
be663ab6 4602 "%u.%u.%u.%u",
46bc8d4b
SG
4603 IL_UCODE_MAJOR(il->ucode_ver),
4604 IL_UCODE_MINOR(il->ucode_ver),
4605 IL_UCODE_API(il->ucode_ver),
4606 IL_UCODE_SERIAL(il->ucode_ver));
be663ab6
WYG
4607
4608 /*
4609 * For any of the failures below (before allocating pci memory)
4610 * we will try to load a version with a smaller API -- maybe the
4611 * user just got a corrupted version of the latest API.
4612 */
4613
58de00a4 4614 D_INFO("f/w package hdr ucode version raw = 0x%x\n",
46bc8d4b 4615 il->ucode_ver);
58de00a4 4616 D_INFO("f/w package hdr runtime inst size = %Zd\n",
be663ab6 4617 pieces.inst_size);
58de00a4 4618 D_INFO("f/w package hdr runtime data size = %Zd\n",
be663ab6 4619 pieces.data_size);
58de00a4 4620 D_INFO("f/w package hdr init inst size = %Zd\n",
be663ab6 4621 pieces.init_size);
58de00a4 4622 D_INFO("f/w package hdr init data size = %Zd\n",
be663ab6 4623 pieces.init_data_size);
58de00a4 4624 D_INFO("f/w package hdr boot inst size = %Zd\n",
be663ab6
WYG
4625 pieces.boot_size);
4626
4627 /* Verify that uCode images will fit in card's SRAM */
46bc8d4b 4628 if (pieces.inst_size > il->hw_params.max_inst_size) {
9406f797 4629 IL_ERR("uCode instr len %Zd too large to fit in\n",
be663ab6
WYG
4630 pieces.inst_size);
4631 goto try_again;
4632 }
4633
46bc8d4b 4634 if (pieces.data_size > il->hw_params.max_data_size) {
9406f797 4635 IL_ERR("uCode data len %Zd too large to fit in\n",
be663ab6
WYG
4636 pieces.data_size);
4637 goto try_again;
4638 }
4639
46bc8d4b 4640 if (pieces.init_size > il->hw_params.max_inst_size) {
9406f797 4641 IL_ERR("uCode init instr len %Zd too large to fit in\n",
be663ab6
WYG
4642 pieces.init_size);
4643 goto try_again;
4644 }
4645
46bc8d4b 4646 if (pieces.init_data_size > il->hw_params.max_data_size) {
9406f797 4647 IL_ERR("uCode init data len %Zd too large to fit in\n",
be663ab6
WYG
4648 pieces.init_data_size);
4649 goto try_again;
4650 }
4651
46bc8d4b 4652 if (pieces.boot_size > il->hw_params.max_bsm_size) {
9406f797 4653 IL_ERR("uCode boot instr len %Zd too large to fit in\n",
be663ab6
WYG
4654 pieces.boot_size);
4655 goto try_again;
4656 }
4657
4658 /* Allocate ucode buffers for card's bus-master loading ... */
4659
4660 /* Runtime instructions and 2 copies of data:
4661 * 1) unmodified from disk
4662 * 2) backup cache for save/restore during power-downs */
46bc8d4b
SG
4663 il->ucode_code.len = pieces.inst_size;
4664 il_alloc_fw_desc(il->pci_dev, &il->ucode_code);
be663ab6 4665
46bc8d4b
SG
4666 il->ucode_data.len = pieces.data_size;
4667 il_alloc_fw_desc(il->pci_dev, &il->ucode_data);
be663ab6 4668
46bc8d4b
SG
4669 il->ucode_data_backup.len = pieces.data_size;
4670 il_alloc_fw_desc(il->pci_dev, &il->ucode_data_backup);
be663ab6 4671
46bc8d4b
SG
4672 if (!il->ucode_code.v_addr || !il->ucode_data.v_addr ||
4673 !il->ucode_data_backup.v_addr)
be663ab6
WYG
4674 goto err_pci_alloc;
4675
4676 /* Initialization instructions and data */
4677 if (pieces.init_size && pieces.init_data_size) {
46bc8d4b
SG
4678 il->ucode_init.len = pieces.init_size;
4679 il_alloc_fw_desc(il->pci_dev, &il->ucode_init);
be663ab6 4680
46bc8d4b
SG
4681 il->ucode_init_data.len = pieces.init_data_size;
4682 il_alloc_fw_desc(il->pci_dev, &il->ucode_init_data);
be663ab6 4683
46bc8d4b 4684 if (!il->ucode_init.v_addr || !il->ucode_init_data.v_addr)
be663ab6
WYG
4685 goto err_pci_alloc;
4686 }
4687
4688 /* Bootstrap (instructions only, no data) */
4689 if (pieces.boot_size) {
46bc8d4b
SG
4690 il->ucode_boot.len = pieces.boot_size;
4691 il_alloc_fw_desc(il->pci_dev, &il->ucode_boot);
be663ab6 4692
46bc8d4b 4693 if (!il->ucode_boot.v_addr)
be663ab6
WYG
4694 goto err_pci_alloc;
4695 }
4696
4697 /* Now that we can no longer fail, copy information */
4698
46bc8d4b 4699 il->sta_key_max_num = STA_KEY_MAX_NUM;
be663ab6
WYG
4700
4701 /* Copy images into buffers for card's bus-master reads ... */
4702
4703 /* Runtime instructions (first block of data in file) */
58de00a4 4704 D_INFO("Copying (but not loading) uCode instr len %Zd\n",
be663ab6 4705 pieces.inst_size);
46bc8d4b 4706 memcpy(il->ucode_code.v_addr, pieces.inst, pieces.inst_size);
be663ab6 4707
58de00a4 4708 D_INFO("uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
46bc8d4b 4709 il->ucode_code.v_addr, (u32)il->ucode_code.p_addr);
be663ab6
WYG
4710
4711 /*
4712 * Runtime data
e2ebc833 4713 * NOTE: Copy into backup buffer will be done in il_up()
be663ab6 4714 */
58de00a4 4715 D_INFO("Copying (but not loading) uCode data len %Zd\n",
be663ab6 4716 pieces.data_size);
46bc8d4b
SG
4717 memcpy(il->ucode_data.v_addr, pieces.data, pieces.data_size);
4718 memcpy(il->ucode_data_backup.v_addr, pieces.data, pieces.data_size);
be663ab6
WYG
4719
4720 /* Initialization instructions */
4721 if (pieces.init_size) {
58de00a4 4722 D_INFO(
be663ab6
WYG
4723 "Copying (but not loading) init instr len %Zd\n",
4724 pieces.init_size);
46bc8d4b 4725 memcpy(il->ucode_init.v_addr, pieces.init, pieces.init_size);
be663ab6
WYG
4726 }
4727
4728 /* Initialization data */
4729 if (pieces.init_data_size) {
58de00a4 4730 D_INFO(
be663ab6
WYG
4731 "Copying (but not loading) init data len %Zd\n",
4732 pieces.init_data_size);
46bc8d4b 4733 memcpy(il->ucode_init_data.v_addr, pieces.init_data,
be663ab6
WYG
4734 pieces.init_data_size);
4735 }
4736
4737 /* Bootstrap instructions */
58de00a4 4738 D_INFO("Copying (but not loading) boot instr len %Zd\n",
be663ab6 4739 pieces.boot_size);
46bc8d4b 4740 memcpy(il->ucode_boot.v_addr, pieces.boot, pieces.boot_size);
be663ab6
WYG
4741
4742 /*
4743 * figure out the offset of chain noise reset and gain commands
4744 * base on the size of standard phy calibration commands table size
4745 */
46bc8d4b 4746 il->_4965.phy_calib_chain_noise_reset_cmd =
be663ab6 4747 standard_phy_calibration_size;
46bc8d4b 4748 il->_4965.phy_calib_chain_noise_gain_cmd =
be663ab6
WYG
4749 standard_phy_calibration_size + 1;
4750
4751 /**************************************************
4752 * This is still part of probe() in a sense...
4753 *
4754 * 9. Setup and register with mac80211 and debugfs
4755 **************************************************/
46bc8d4b 4756 err = il4965_mac_setup_register(il, max_probe_length);
be663ab6
WYG
4757 if (err)
4758 goto out_unbind;
4759
46bc8d4b 4760 err = il_dbgfs_register(il, DRV_NAME);
be663ab6 4761 if (err)
9406f797 4762 IL_ERR(
be663ab6
WYG
4763 "failed to create debugfs files. Ignoring error: %d\n", err);
4764
46bc8d4b 4765 err = sysfs_create_group(&il->pci_dev->dev.kobj,
e2ebc833 4766 &il_attribute_group);
be663ab6 4767 if (err) {
9406f797 4768 IL_ERR("failed to create sysfs device attributes\n");
be663ab6
WYG
4769 goto out_unbind;
4770 }
4771
4772 /* We have our copies now, allow OS release its copies */
4773 release_firmware(ucode_raw);
46bc8d4b 4774 complete(&il->_4965.firmware_loading_complete);
be663ab6
WYG
4775 return;
4776
4777 try_again:
4778 /* try next, if any */
46bc8d4b 4779 if (il4965_request_firmware(il, false))
be663ab6
WYG
4780 goto out_unbind;
4781 release_firmware(ucode_raw);
4782 return;
4783
4784 err_pci_alloc:
9406f797 4785 IL_ERR("failed to allocate pci memory\n");
46bc8d4b 4786 il4965_dealloc_ucode_pci(il);
be663ab6 4787 out_unbind:
46bc8d4b
SG
4788 complete(&il->_4965.firmware_loading_complete);
4789 device_release_driver(&il->pci_dev->dev);
be663ab6
WYG
4790 release_firmware(ucode_raw);
4791}
4792
4793static const char * const desc_lookup_text[] = {
4794 "OK",
4795 "FAIL",
4796 "BAD_PARAM",
4797 "BAD_CHECKSUM",
4798 "NMI_INTERRUPT_WDG",
4799 "SYSASSERT",
4800 "FATAL_ERROR",
4801 "BAD_COMMAND",
4802 "HW_ERROR_TUNE_LOCK",
4803 "HW_ERROR_TEMPERATURE",
4804 "ILLEGAL_CHAN_FREQ",
3b98c7f4 4805 "VCC_NOT_STBL",
be663ab6
WYG
4806 "FH_ERROR",
4807 "NMI_INTERRUPT_HOST",
4808 "NMI_INTERRUPT_ACTION_PT",
4809 "NMI_INTERRUPT_UNKNOWN",
4810 "UCODE_VERSION_MISMATCH",
4811 "HW_ERROR_ABS_LOCK",
4812 "HW_ERROR_CAL_LOCK_FAIL",
4813 "NMI_INTERRUPT_INST_ACTION_PT",
4814 "NMI_INTERRUPT_DATA_ACTION_PT",
4815 "NMI_TRM_HW_ER",
4816 "NMI_INTERRUPT_TRM",
861d9c3f 4817 "NMI_INTERRUPT_BREAK_POINT",
be663ab6
WYG
4818 "DEBUG_0",
4819 "DEBUG_1",
4820 "DEBUG_2",
4821 "DEBUG_3",
4822};
4823
4824static struct { char *name; u8 num; } advanced_lookup[] = {
4825 { "NMI_INTERRUPT_WDG", 0x34 },
4826 { "SYSASSERT", 0x35 },
4827 { "UCODE_VERSION_MISMATCH", 0x37 },
4828 { "BAD_COMMAND", 0x38 },
4829 { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
4830 { "FATAL_ERROR", 0x3D },
4831 { "NMI_TRM_HW_ERR", 0x46 },
4832 { "NMI_INTERRUPT_TRM", 0x4C },
4833 { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
4834 { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
4835 { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
4836 { "NMI_INTERRUPT_HOST", 0x66 },
4837 { "NMI_INTERRUPT_ACTION_PT", 0x7C },
4838 { "NMI_INTERRUPT_UNKNOWN", 0x84 },
4839 { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
4840 { "ADVANCED_SYSASSERT", 0 },
4841};
4842
e2ebc833 4843static const char *il4965_desc_lookup(u32 num)
be663ab6
WYG
4844{
4845 int i;
4846 int max = ARRAY_SIZE(desc_lookup_text);
4847
4848 if (num < max)
4849 return desc_lookup_text[num];
4850
4851 max = ARRAY_SIZE(advanced_lookup) - 1;
4852 for (i = 0; i < max; i++) {
4853 if (advanced_lookup[i].num == num)
4854 break;
4855 }
4856 return advanced_lookup[i].name;
4857}
4858
4859#define ERROR_START_OFFSET (1 * sizeof(u32))
4860#define ERROR_ELEM_SIZE (7 * sizeof(u32))
4861
46bc8d4b 4862void il4965_dump_nic_error_log(struct il_priv *il)
be663ab6
WYG
4863{
4864 u32 data2, line;
4865 u32 desc, time, count, base, data1;
4866 u32 blink1, blink2, ilink1, ilink2;
4867 u32 pc, hcmd;
4868
46bc8d4b
SG
4869 if (il->ucode_type == UCODE_INIT) {
4870 base = le32_to_cpu(il->card_alive_init.error_event_table_ptr);
be663ab6 4871 } else {
46bc8d4b 4872 base = le32_to_cpu(il->card_alive.error_event_table_ptr);
be663ab6
WYG
4873 }
4874
46bc8d4b 4875 if (!il->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
9406f797 4876 IL_ERR(
be663ab6 4877 "Not valid error log pointer 0x%08X for %s uCode\n",
46bc8d4b 4878 base, (il->ucode_type == UCODE_INIT) ? "Init" : "RT");
be663ab6
WYG
4879 return;
4880 }
4881
46bc8d4b 4882 count = il_read_targ_mem(il, base);
be663ab6
WYG
4883
4884 if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
9406f797
SG
4885 IL_ERR("Start IWL Error Log Dump:\n");
4886 IL_ERR("Status: 0x%08lX, count: %d\n",
46bc8d4b
SG
4887 il->status, count);
4888 }
4889
4890 desc = il_read_targ_mem(il, base + 1 * sizeof(u32));
4891 il->isr_stats.err_code = desc;
4892 pc = il_read_targ_mem(il, base + 2 * sizeof(u32));
4893 blink1 = il_read_targ_mem(il, base + 3 * sizeof(u32));
4894 blink2 = il_read_targ_mem(il, base + 4 * sizeof(u32));
4895 ilink1 = il_read_targ_mem(il, base + 5 * sizeof(u32));
4896 ilink2 = il_read_targ_mem(il, base + 6 * sizeof(u32));
4897 data1 = il_read_targ_mem(il, base + 7 * sizeof(u32));
4898 data2 = il_read_targ_mem(il, base + 8 * sizeof(u32));
4899 line = il_read_targ_mem(il, base + 9 * sizeof(u32));
4900 time = il_read_targ_mem(il, base + 11 * sizeof(u32));
4901 hcmd = il_read_targ_mem(il, base + 22 * sizeof(u32));
4902
9406f797 4903 IL_ERR("Desc Time "
be663ab6 4904 "data1 data2 line\n");
9406f797 4905 IL_ERR("%-28s (0x%04X) %010u 0x%08X 0x%08X %u\n",
e2ebc833 4906 il4965_desc_lookup(desc), desc, time, data1, data2, line);
9406f797
SG
4907 IL_ERR("pc blink1 blink2 ilink1 ilink2 hcmd\n");
4908 IL_ERR("0x%05X 0x%05X 0x%05X 0x%05X 0x%05X 0x%05X\n",
be663ab6
WYG
4909 pc, blink1, blink2, ilink1, ilink2, hcmd);
4910}
4911
46bc8d4b 4912static void il4965_rf_kill_ct_config(struct il_priv *il)
be663ab6 4913{
e2ebc833 4914 struct il_ct_kill_config cmd;
be663ab6
WYG
4915 unsigned long flags;
4916 int ret = 0;
4917
46bc8d4b 4918 spin_lock_irqsave(&il->lock, flags);
841b2cca 4919 _il_wr(il, CSR_UCODE_DRV_GP1_CLR,
be663ab6 4920 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
46bc8d4b 4921 spin_unlock_irqrestore(&il->lock, flags);
be663ab6
WYG
4922
4923 cmd.critical_temperature_R =
46bc8d4b 4924 cpu_to_le32(il->hw_params.ct_kill_threshold);
be663ab6 4925
46bc8d4b 4926 ret = il_send_cmd_pdu(il, REPLY_CT_KILL_CONFIG_CMD,
be663ab6
WYG
4927 sizeof(cmd), &cmd);
4928 if (ret)
9406f797 4929 IL_ERR("REPLY_CT_KILL_CONFIG_CMD failed\n");
be663ab6 4930 else
58de00a4 4931 D_INFO("REPLY_CT_KILL_CONFIG_CMD "
be663ab6
WYG
4932 "succeeded, "
4933 "critical temperature is %d\n",
46bc8d4b 4934 il->hw_params.ct_kill_threshold);
be663ab6
WYG
4935}
4936
4937static const s8 default_queue_to_tx_fifo[] = {
e2ebc833
SG
4938 IL_TX_FIFO_VO,
4939 IL_TX_FIFO_VI,
4940 IL_TX_FIFO_BE,
4941 IL_TX_FIFO_BK,
d3175167 4942 IL49_CMD_FIFO_NUM,
e2ebc833
SG
4943 IL_TX_FIFO_UNUSED,
4944 IL_TX_FIFO_UNUSED,
be663ab6
WYG
4945};
4946
46bc8d4b 4947static int il4965_alive_notify(struct il_priv *il)
be663ab6
WYG
4948{
4949 u32 a;
4950 unsigned long flags;
4951 int i, chan;
4952 u32 reg_val;
4953
46bc8d4b 4954 spin_lock_irqsave(&il->lock, flags);
be663ab6
WYG
4955
4956 /* Clear 4965's internal Tx Scheduler data base */
db54eb57 4957 il->scd_base_addr = il_rd_prph(il,
d3175167
SG
4958 IL49_SCD_SRAM_BASE_ADDR);
4959 a = il->scd_base_addr + IL49_SCD_CONTEXT_DATA_OFFSET;
4960 for (; a < il->scd_base_addr + IL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4)
46bc8d4b 4961 il_write_targ_mem(il, a, 0);
d3175167 4962 for (; a < il->scd_base_addr + IL49_SCD_TRANSLATE_TBL_OFFSET; a += 4)
46bc8d4b
SG
4963 il_write_targ_mem(il, a, 0);
4964 for (; a < il->scd_base_addr +
d3175167 4965 IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(il->hw_params.max_txq_num); a += 4)
46bc8d4b 4966 il_write_targ_mem(il, a, 0);
be663ab6
WYG
4967
4968 /* Tel 4965 where to find Tx byte count tables */
d3175167 4969 il_wr_prph(il, IL49_SCD_DRAM_BASE_ADDR,
46bc8d4b 4970 il->scd_bc_tbls.dma >> 10);
be663ab6
WYG
4971
4972 /* Enable DMA channel */
4973 for (chan = 0; chan < FH49_TCSR_CHNL_NUM ; chan++)
0c1a94e2 4974 il_wr(il,
be663ab6
WYG
4975 FH_TCSR_CHNL_TX_CONFIG_REG(chan),
4976 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
4977 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
4978
4979 /* Update FH chicken bits */
0c1a94e2
SG
4980 reg_val = il_rd(il, FH_TX_CHICKEN_BITS_REG);
4981 il_wr(il, FH_TX_CHICKEN_BITS_REG,
be663ab6
WYG
4982 reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
4983
4984 /* Disable chain mode for all queues */
d3175167 4985 il_wr_prph(il, IL49_SCD_QUEUECHAIN_SEL, 0);
be663ab6
WYG
4986
4987 /* Initialize each Tx queue (including the command queue) */
46bc8d4b 4988 for (i = 0; i < il->hw_params.max_txq_num; i++) {
be663ab6 4989
0c2c8852 4990 /* TFD circular buffer read/write idxes */
d3175167 4991 il_wr_prph(il, IL49_SCD_QUEUE_RDPTR(i), 0);
0c1a94e2 4992 il_wr(il, HBUS_TARG_WRPTR, 0 | (i << 8));
be663ab6
WYG
4993
4994 /* Max Tx Window size for Scheduler-ACK mode */
46bc8d4b 4995 il_write_targ_mem(il, il->scd_base_addr +
d3175167 4996 IL49_SCD_CONTEXT_QUEUE_OFFSET(i),
be663ab6 4997 (SCD_WIN_SIZE <<
d3175167
SG
4998 IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
4999 IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
be663ab6
WYG
5000
5001 /* Frame limit */
46bc8d4b 5002 il_write_targ_mem(il, il->scd_base_addr +
d3175167 5003 IL49_SCD_CONTEXT_QUEUE_OFFSET(i) +
be663ab6
WYG
5004 sizeof(u32),
5005 (SCD_FRAME_LIMIT <<
d3175167
SG
5006 IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
5007 IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
be663ab6
WYG
5008
5009 }
d3175167 5010 il_wr_prph(il, IL49_SCD_INTERRUPT_MASK,
46bc8d4b 5011 (1 << il->hw_params.max_txq_num) - 1);
be663ab6
WYG
5012
5013 /* Activate all Tx DMA/FIFO channels */
46bc8d4b 5014 il4965_txq_set_sched(il, IL_MASK(0, 6));
be663ab6 5015
46bc8d4b 5016 il4965_set_wr_ptrs(il, IL_DEFAULT_CMD_QUEUE_NUM, 0);
be663ab6
WYG
5017
5018 /* make sure all queue are not stopped */
46bc8d4b 5019 memset(&il->queue_stopped[0], 0, sizeof(il->queue_stopped));
be663ab6 5020 for (i = 0; i < 4; i++)
46bc8d4b 5021 atomic_set(&il->queue_stop_count[i], 0);
be663ab6
WYG
5022
5023 /* reset to 0 to enable all the queue first */
46bc8d4b 5024 il->txq_ctx_active_msk = 0;
be663ab6
WYG
5025 /* Map each Tx/cmd queue to its corresponding fifo */
5026 BUILD_BUG_ON(ARRAY_SIZE(default_queue_to_tx_fifo) != 7);
5027
5028 for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
5029 int ac = default_queue_to_tx_fifo[i];
5030
46bc8d4b 5031 il_txq_ctx_activate(il, i);
be663ab6 5032
e2ebc833 5033 if (ac == IL_TX_FIFO_UNUSED)
be663ab6
WYG
5034 continue;
5035
46bc8d4b 5036 il4965_tx_queue_set_status(il, &il->txq[i], ac, 0);
be663ab6
WYG
5037 }
5038
46bc8d4b 5039 spin_unlock_irqrestore(&il->lock, flags);
be663ab6
WYG
5040
5041 return 0;
5042}
5043
5044/**
e2ebc833 5045 * il4965_alive_start - called after REPLY_ALIVE notification received
be663ab6 5046 * from protocol/runtime uCode (initialization uCode's
e2ebc833 5047 * Alive gets handled by il_init_alive_start()).
be663ab6 5048 */
46bc8d4b 5049static void il4965_alive_start(struct il_priv *il)
be663ab6
WYG
5050{
5051 int ret = 0;
7c2cde2e 5052 struct il_rxon_context *ctx = &il->ctx;
be663ab6 5053
58de00a4 5054 D_INFO("Runtime Alive received.\n");
be663ab6 5055
46bc8d4b 5056 if (il->card_alive.is_valid != UCODE_VALID_OK) {
be663ab6
WYG
5057 /* We had an error bringing up the hardware, so take it
5058 * all the way back down so we can try again */
58de00a4 5059 D_INFO("Alive failed.\n");
be663ab6
WYG
5060 goto restart;
5061 }
5062
5063 /* Initialize uCode has loaded Runtime uCode ... verify inst image.
5064 * This is a paranoid check, because we would not have gotten the
5065 * "runtime" alive if code weren't properly loaded. */
46bc8d4b 5066 if (il4965_verify_ucode(il)) {
be663ab6
WYG
5067 /* Runtime instruction load was bad;
5068 * take it all the way back down so we can try again */
58de00a4 5069 D_INFO("Bad runtime uCode load.\n");
be663ab6
WYG
5070 goto restart;
5071 }
5072
46bc8d4b 5073 ret = il4965_alive_notify(il);
be663ab6 5074 if (ret) {
9406f797 5075 IL_WARN(
be663ab6
WYG
5076 "Could not complete ALIVE transition [ntf]: %d\n", ret);
5077 goto restart;
5078 }
5079
5080
5081 /* After the ALIVE response, we can send host commands to the uCode */
a6766ccd 5082 set_bit(S_ALIVE, &il->status);
be663ab6
WYG
5083
5084 /* Enable watchdog to monitor the driver tx queues */
46bc8d4b 5085 il_setup_watchdog(il);
be663ab6 5086
46bc8d4b 5087 if (il_is_rfkill(il))
be663ab6
WYG
5088 return;
5089
46bc8d4b 5090 ieee80211_wake_queues(il->hw);
be663ab6 5091
2eb05816 5092 il->active_rate = RATES_MASK;
be663ab6 5093
e2ebc833
SG
5094 if (il_is_associated_ctx(ctx)) {
5095 struct il_rxon_cmd *active_rxon =
5096 (struct il_rxon_cmd *)&ctx->active;
be663ab6
WYG
5097 /* apply any changes in staging */
5098 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
5099 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
5100 } else {
be663ab6 5101 /* Initialize our rx_config data */
17d6e557 5102 il_connection_init_rx_config(il, &il->ctx);
be663ab6 5103
46bc8d4b
SG
5104 if (il->cfg->ops->hcmd->set_rxon_chain)
5105 il->cfg->ops->hcmd->set_rxon_chain(il, ctx);
be663ab6
WYG
5106 }
5107
5108 /* Configure bluetooth coexistence if enabled */
46bc8d4b 5109 il_send_bt_config(il);
be663ab6 5110
46bc8d4b 5111 il4965_reset_run_time_calib(il);
be663ab6 5112
a6766ccd 5113 set_bit(S_READY, &il->status);
be663ab6
WYG
5114
5115 /* Configure the adapter for unassociated operation */
46bc8d4b 5116 il_commit_rxon(il, ctx);
be663ab6
WYG
5117
5118 /* At this point, the NIC is initialized and operational */
46bc8d4b 5119 il4965_rf_kill_ct_config(il);
be663ab6 5120
58de00a4 5121 D_INFO("ALIVE processing complete.\n");
46bc8d4b 5122 wake_up(&il->wait_command_queue);
be663ab6 5123
46bc8d4b 5124 il_power_update_mode(il, true);
58de00a4 5125 D_INFO("Updated power mode\n");
be663ab6
WYG
5126
5127 return;
5128
5129 restart:
46bc8d4b 5130 queue_work(il->workqueue, &il->restart);
be663ab6
WYG
5131}
5132
46bc8d4b 5133static void il4965_cancel_deferred_work(struct il_priv *il);
be663ab6 5134
46bc8d4b 5135static void __il4965_down(struct il_priv *il)
be663ab6
WYG
5136{
5137 unsigned long flags;
ab42b404 5138 int exit_pending;
be663ab6 5139
58de00a4 5140 D_INFO(DRV_NAME " is going down\n");
be663ab6 5141
46bc8d4b 5142 il_scan_cancel_timeout(il, 200);
be663ab6 5143
a6766ccd 5144 exit_pending = test_and_set_bit(S_EXIT_PENDING, &il->status);
be663ab6 5145
a6766ccd 5146 /* Stop TX queues watchdog. We need to have S_EXIT_PENDING bit set
be663ab6 5147 * to prevent rearm timer */
46bc8d4b 5148 del_timer_sync(&il->watchdog);
be663ab6 5149
46bc8d4b
SG
5150 il_clear_ucode_stations(il, NULL);
5151 il_dealloc_bcast_stations(il);
5152 il_clear_driver_stations(il);
be663ab6
WYG
5153
5154 /* Unblock any waiting calls */
46bc8d4b 5155 wake_up_all(&il->wait_command_queue);
be663ab6
WYG
5156
5157 /* Wipe out the EXIT_PENDING status bit if we are not actually
5158 * exiting the module */
5159 if (!exit_pending)
a6766ccd 5160 clear_bit(S_EXIT_PENDING, &il->status);
be663ab6
WYG
5161
5162 /* stop and reset the on-board processor */
841b2cca 5163 _il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
be663ab6
WYG
5164
5165 /* tell the device to stop sending interrupts */
46bc8d4b
SG
5166 spin_lock_irqsave(&il->lock, flags);
5167 il_disable_interrupts(il);
5168 spin_unlock_irqrestore(&il->lock, flags);
5169 il4965_synchronize_irq(il);
be663ab6 5170
46bc8d4b
SG
5171 if (il->mac80211_registered)
5172 ieee80211_stop_queues(il->hw);
be663ab6 5173
e2ebc833 5174 /* If we have not previously called il_init() then
be663ab6 5175 * clear all bits but the RF Kill bit and return */
46bc8d4b 5176 if (!il_is_init(il)) {
a6766ccd
SG
5177 il->status = test_bit(S_RF_KILL_HW, &il->status) <<
5178 S_RF_KILL_HW |
5179 test_bit(S_GEO_CONFIGURED, &il->status) <<
5180 S_GEO_CONFIGURED |
5181 test_bit(S_EXIT_PENDING, &il->status) <<
5182 S_EXIT_PENDING;
be663ab6
WYG
5183 goto exit;
5184 }
5185
5186 /* ...otherwise clear out all the status bits but the RF Kill
5187 * bit and continue taking the NIC down. */
a6766ccd
SG
5188 il->status &= test_bit(S_RF_KILL_HW, &il->status) <<
5189 S_RF_KILL_HW |
5190 test_bit(S_GEO_CONFIGURED, &il->status) <<
5191 S_GEO_CONFIGURED |
5192 test_bit(S_FW_ERROR, &il->status) <<
5193 S_FW_ERROR |
5194 test_bit(S_EXIT_PENDING, &il->status) <<
5195 S_EXIT_PENDING;
be663ab6 5196
46bc8d4b
SG
5197 il4965_txq_ctx_stop(il);
5198 il4965_rxq_stop(il);
be663ab6
WYG
5199
5200 /* Power-down device's busmaster DMA clocks */
db54eb57 5201 il_wr_prph(il, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
be663ab6
WYG
5202 udelay(5);
5203
5204 /* Make sure (redundant) we've released our request to stay awake */
46bc8d4b 5205 il_clear_bit(il, CSR_GP_CNTRL,
be663ab6
WYG
5206 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
5207
5208 /* Stop the device, and put it in low power state */
46bc8d4b 5209 il_apm_stop(il);
be663ab6
WYG
5210
5211 exit:
46bc8d4b 5212 memset(&il->card_alive, 0, sizeof(struct il_alive_resp));
be663ab6 5213
46bc8d4b
SG
5214 dev_kfree_skb(il->beacon_skb);
5215 il->beacon_skb = NULL;
be663ab6
WYG
5216
5217 /* clear out any free frames */
46bc8d4b 5218 il4965_clear_free_frames(il);
be663ab6
WYG
5219}
5220
46bc8d4b 5221static void il4965_down(struct il_priv *il)
be663ab6 5222{
46bc8d4b
SG
5223 mutex_lock(&il->mutex);
5224 __il4965_down(il);
5225 mutex_unlock(&il->mutex);
be663ab6 5226
46bc8d4b 5227 il4965_cancel_deferred_work(il);
be663ab6
WYG
5228}
5229
5230#define HW_READY_TIMEOUT (50)
5231
46bc8d4b 5232static int il4965_set_hw_ready(struct il_priv *il)
be663ab6
WYG
5233{
5234 int ret = 0;
5235
46bc8d4b 5236 il_set_bit(il, CSR_HW_IF_CONFIG_REG,
be663ab6
WYG
5237 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
5238
5239 /* See if we got it */
142b343f 5240 ret = _il_poll_bit(il, CSR_HW_IF_CONFIG_REG,
be663ab6
WYG
5241 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
5242 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
5243 HW_READY_TIMEOUT);
5244 if (ret != -ETIMEDOUT)
46bc8d4b 5245 il->hw_ready = true;
be663ab6 5246 else
46bc8d4b 5247 il->hw_ready = false;
be663ab6 5248
58de00a4 5249 D_INFO("hardware %s\n",
46bc8d4b 5250 (il->hw_ready == 1) ? "ready" : "not ready");
be663ab6
WYG
5251 return ret;
5252}
5253
46bc8d4b 5254static int il4965_prepare_card_hw(struct il_priv *il)
be663ab6
WYG
5255{
5256 int ret = 0;
5257
58de00a4 5258 D_INFO("il4965_prepare_card_hw enter\n");
be663ab6 5259
46bc8d4b
SG
5260 ret = il4965_set_hw_ready(il);
5261 if (il->hw_ready)
be663ab6
WYG
5262 return ret;
5263
5264 /* If HW is not ready, prepare the conditions to check again */
46bc8d4b 5265 il_set_bit(il, CSR_HW_IF_CONFIG_REG,
be663ab6
WYG
5266 CSR_HW_IF_CONFIG_REG_PREPARE);
5267
142b343f 5268 ret = _il_poll_bit(il, CSR_HW_IF_CONFIG_REG,
be663ab6
WYG
5269 ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
5270 CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
5271
5272 /* HW should be ready by now, check again. */
5273 if (ret != -ETIMEDOUT)
46bc8d4b 5274 il4965_set_hw_ready(il);
be663ab6
WYG
5275
5276 return ret;
5277}
5278
5279#define MAX_HW_RESTARTS 5
5280
46bc8d4b 5281static int __il4965_up(struct il_priv *il)
be663ab6 5282{
be663ab6
WYG
5283 int i;
5284 int ret;
5285
a6766ccd 5286 if (test_bit(S_EXIT_PENDING, &il->status)) {
9406f797 5287 IL_WARN("Exit pending; will not bring the NIC up\n");
be663ab6
WYG
5288 return -EIO;
5289 }
5290
46bc8d4b 5291 if (!il->ucode_data_backup.v_addr || !il->ucode_data.v_addr) {
9406f797 5292 IL_ERR("ucode not available for device bringup\n");
be663ab6
WYG
5293 return -EIO;
5294 }
5295
17d6e557
SG
5296 ret = il4965_alloc_bcast_station(il, &il->ctx);
5297 if (ret) {
5298 il_dealloc_bcast_stations(il);
5299 return ret;
be663ab6
WYG
5300 }
5301
46bc8d4b 5302 il4965_prepare_card_hw(il);
be663ab6 5303
46bc8d4b 5304 if (!il->hw_ready) {
9406f797 5305 IL_WARN("Exit HW not ready\n");
be663ab6
WYG
5306 return -EIO;
5307 }
5308
5309 /* If platform's RF_KILL switch is NOT set to KILL */
841b2cca 5310 if (_il_rd(il,
be663ab6 5311 CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
a6766ccd 5312 clear_bit(S_RF_KILL_HW, &il->status);
be663ab6 5313 else
a6766ccd 5314 set_bit(S_RF_KILL_HW, &il->status);
be663ab6 5315
46bc8d4b
SG
5316 if (il_is_rfkill(il)) {
5317 wiphy_rfkill_set_hw_state(il->hw->wiphy, true);
be663ab6 5318
46bc8d4b 5319 il_enable_interrupts(il);
9406f797 5320 IL_WARN("Radio disabled by HW RF Kill switch\n");
be663ab6
WYG
5321 return 0;
5322 }
5323
841b2cca 5324 _il_wr(il, CSR_INT, 0xFFFFFFFF);
be663ab6 5325
e2ebc833 5326 /* must be initialised before il_hw_nic_init */
46bc8d4b 5327 il->cmd_queue = IL_DEFAULT_CMD_QUEUE_NUM;
be663ab6 5328
46bc8d4b 5329 ret = il4965_hw_nic_init(il);
be663ab6 5330 if (ret) {
9406f797 5331 IL_ERR("Unable to init nic\n");
be663ab6
WYG
5332 return ret;
5333 }
5334
5335 /* make sure rfkill handshake bits are cleared */
841b2cca
SG
5336 _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
5337 _il_wr(il, CSR_UCODE_DRV_GP1_CLR,
be663ab6
WYG
5338 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
5339
5340 /* clear (again), then enable host interrupts */
841b2cca 5341 _il_wr(il, CSR_INT, 0xFFFFFFFF);
46bc8d4b 5342 il_enable_interrupts(il);
be663ab6
WYG
5343
5344 /* really make sure rfkill handshake bits are cleared */
841b2cca
SG
5345 _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
5346 _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
be663ab6
WYG
5347
5348 /* Copy original ucode data image from disk into backup cache.
5349 * This will be used to initialize the on-board processor's
5350 * data SRAM for a clean start when the runtime program first loads. */
46bc8d4b
SG
5351 memcpy(il->ucode_data_backup.v_addr, il->ucode_data.v_addr,
5352 il->ucode_data.len);
be663ab6
WYG
5353
5354 for (i = 0; i < MAX_HW_RESTARTS; i++) {
5355
5356 /* load bootstrap state machine,
5357 * load bootstrap program into processor's memory,
5358 * prepare to load the "initialize" uCode */
46bc8d4b 5359 ret = il->cfg->ops->lib->load_ucode(il);
be663ab6
WYG
5360
5361 if (ret) {
9406f797 5362 IL_ERR("Unable to set up bootstrap uCode: %d\n",
be663ab6
WYG
5363 ret);
5364 continue;
5365 }
5366
5367 /* start card; "initialize" will load runtime ucode */
46bc8d4b 5368 il4965_nic_start(il);
be663ab6 5369
58de00a4 5370 D_INFO(DRV_NAME " is coming up\n");
be663ab6
WYG
5371
5372 return 0;
5373 }
5374
a6766ccd 5375 set_bit(S_EXIT_PENDING, &il->status);
46bc8d4b 5376 __il4965_down(il);
a6766ccd 5377 clear_bit(S_EXIT_PENDING, &il->status);
be663ab6
WYG
5378
5379 /* tried to restart and config the device for as long as our
5380 * patience could withstand */
9406f797 5381 IL_ERR("Unable to initialize device after %d attempts.\n", i);
be663ab6
WYG
5382 return -EIO;
5383}
5384
5385
5386/*****************************************************************************
5387 *
5388 * Workqueue callbacks
5389 *
5390 *****************************************************************************/
5391
e2ebc833 5392static void il4965_bg_init_alive_start(struct work_struct *data)
be663ab6 5393{
46bc8d4b 5394 struct il_priv *il =
e2ebc833 5395 container_of(data, struct il_priv, init_alive_start.work);
be663ab6 5396
46bc8d4b 5397 mutex_lock(&il->mutex);
a6766ccd 5398 if (test_bit(S_EXIT_PENDING, &il->status))
28a6e577 5399 goto out;
be663ab6 5400
46bc8d4b 5401 il->cfg->ops->lib->init_alive_start(il);
28a6e577 5402out:
46bc8d4b 5403 mutex_unlock(&il->mutex);
be663ab6
WYG
5404}
5405
e2ebc833 5406static void il4965_bg_alive_start(struct work_struct *data)
be663ab6 5407{
46bc8d4b 5408 struct il_priv *il =
e2ebc833 5409 container_of(data, struct il_priv, alive_start.work);
be663ab6 5410
46bc8d4b 5411 mutex_lock(&il->mutex);
a6766ccd 5412 if (test_bit(S_EXIT_PENDING, &il->status))
28a6e577 5413 goto out;
be663ab6 5414
46bc8d4b 5415 il4965_alive_start(il);
28a6e577 5416out:
46bc8d4b 5417 mutex_unlock(&il->mutex);
be663ab6
WYG
5418}
5419
e2ebc833 5420static void il4965_bg_run_time_calib_work(struct work_struct *work)
be663ab6 5421{
46bc8d4b 5422 struct il_priv *il = container_of(work, struct il_priv,
be663ab6
WYG
5423 run_time_calib_work);
5424
46bc8d4b 5425 mutex_lock(&il->mutex);
be663ab6 5426
a6766ccd
SG
5427 if (test_bit(S_EXIT_PENDING, &il->status) ||
5428 test_bit(S_SCANNING, &il->status)) {
46bc8d4b 5429 mutex_unlock(&il->mutex);
be663ab6
WYG
5430 return;
5431 }
5432
46bc8d4b
SG
5433 if (il->start_calib) {
5434 il4965_chain_noise_calibration(il,
ebf0d90d 5435 (void *)&il->_4965.stats);
46bc8d4b 5436 il4965_sensitivity_calibration(il,
ebf0d90d 5437 (void *)&il->_4965.stats);
be663ab6
WYG
5438 }
5439
46bc8d4b 5440 mutex_unlock(&il->mutex);
be663ab6
WYG
5441}
5442
e2ebc833 5443static void il4965_bg_restart(struct work_struct *data)
be663ab6 5444{
46bc8d4b 5445 struct il_priv *il = container_of(data, struct il_priv, restart);
be663ab6 5446
a6766ccd 5447 if (test_bit(S_EXIT_PENDING, &il->status))
be663ab6
WYG
5448 return;
5449
a6766ccd 5450 if (test_and_clear_bit(S_FW_ERROR, &il->status)) {
46bc8d4b 5451 mutex_lock(&il->mutex);
17d6e557 5452 il->ctx.vif = NULL;
46bc8d4b 5453 il->is_open = 0;
be663ab6 5454
46bc8d4b 5455 __il4965_down(il);
be663ab6 5456
46bc8d4b
SG
5457 mutex_unlock(&il->mutex);
5458 il4965_cancel_deferred_work(il);
5459 ieee80211_restart_hw(il->hw);
be663ab6 5460 } else {
46bc8d4b 5461 il4965_down(il);
be663ab6 5462
46bc8d4b 5463 mutex_lock(&il->mutex);
a6766ccd 5464 if (test_bit(S_EXIT_PENDING, &il->status)) {
46bc8d4b 5465 mutex_unlock(&il->mutex);
be663ab6 5466 return;
28a6e577 5467 }
be663ab6 5468
46bc8d4b
SG
5469 __il4965_up(il);
5470 mutex_unlock(&il->mutex);
be663ab6
WYG
5471 }
5472}
5473
e2ebc833 5474static void il4965_bg_rx_replenish(struct work_struct *data)
be663ab6 5475{
46bc8d4b 5476 struct il_priv *il =
e2ebc833 5477 container_of(data, struct il_priv, rx_replenish);
be663ab6 5478
a6766ccd 5479 if (test_bit(S_EXIT_PENDING, &il->status))
be663ab6
WYG
5480 return;
5481
46bc8d4b
SG
5482 mutex_lock(&il->mutex);
5483 il4965_rx_replenish(il);
5484 mutex_unlock(&il->mutex);
be663ab6
WYG
5485}
5486
5487/*****************************************************************************
5488 *
5489 * mac80211 entry point functions
5490 *
5491 *****************************************************************************/
5492
5493#define UCODE_READY_TIMEOUT (4 * HZ)
5494
5495/*
5496 * Not a mac80211 entry point function, but it fits in with all the
5497 * other mac80211 functions grouped here.
5498 */
46bc8d4b 5499static int il4965_mac_setup_register(struct il_priv *il,
be663ab6
WYG
5500 u32 max_probe_length)
5501{
5502 int ret;
46bc8d4b 5503 struct ieee80211_hw *hw = il->hw;
be663ab6
WYG
5504
5505 hw->rate_control_algorithm = "iwl-4965-rs";
5506
5507 /* Tell mac80211 our characteristics */
5508 hw->flags = IEEE80211_HW_SIGNAL_DBM |
5509 IEEE80211_HW_AMPDU_AGGREGATION |
5510 IEEE80211_HW_NEED_DTIM_PERIOD |
5511 IEEE80211_HW_SPECTRUM_MGMT |
5512 IEEE80211_HW_REPORTS_TX_ACK_STATUS;
5513
46bc8d4b 5514 if (il->cfg->sku & IL_SKU_N)
be663ab6
WYG
5515 hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
5516 IEEE80211_HW_SUPPORTS_STATIC_SMPS;
5517
e2ebc833
SG
5518 hw->sta_data_size = sizeof(struct il_station_priv);
5519 hw->vif_data_size = sizeof(struct il_vif_priv);
be663ab6 5520
17d6e557
SG
5521 hw->wiphy->interface_modes |= il->ctx.interface_modes;
5522 hw->wiphy->interface_modes |= il->ctx.exclusive_interface_modes;
be663ab6
WYG
5523
5524 hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
5525 WIPHY_FLAG_DISABLE_BEACON_HINTS;
5526
5527 /*
5528 * For now, disable PS by default because it affects
5529 * RX performance significantly.
5530 */
5531 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
5532
5533 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
5534 /* we create the 802.11 header and a zero-length SSID element */
5535 hw->wiphy->max_scan_ie_len = max_probe_length - 24 - 2;
5536
5537 /* Default value; 4 EDCA QOS priorities */
5538 hw->queues = 4;
5539
e2ebc833 5540 hw->max_listen_interval = IL_CONN_MAX_LISTEN_INTERVAL;
be663ab6 5541
46bc8d4b
SG
5542 if (il->bands[IEEE80211_BAND_2GHZ].n_channels)
5543 il->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
5544 &il->bands[IEEE80211_BAND_2GHZ];
5545 if (il->bands[IEEE80211_BAND_5GHZ].n_channels)
5546 il->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
5547 &il->bands[IEEE80211_BAND_5GHZ];
be663ab6 5548
46bc8d4b 5549 il_leds_init(il);
be663ab6 5550
46bc8d4b 5551 ret = ieee80211_register_hw(il->hw);
be663ab6 5552 if (ret) {
9406f797 5553 IL_ERR("Failed to register hw (error %d)\n", ret);
be663ab6
WYG
5554 return ret;
5555 }
46bc8d4b 5556 il->mac80211_registered = 1;
be663ab6
WYG
5557
5558 return 0;
5559}
5560
5561
e2ebc833 5562int il4965_mac_start(struct ieee80211_hw *hw)
be663ab6 5563{
46bc8d4b 5564 struct il_priv *il = hw->priv;
be663ab6
WYG
5565 int ret;
5566
58de00a4 5567 D_MAC80211("enter\n");
be663ab6
WYG
5568
5569 /* we should be verifying the device is ready to be opened */
46bc8d4b
SG
5570 mutex_lock(&il->mutex);
5571 ret = __il4965_up(il);
5572 mutex_unlock(&il->mutex);
be663ab6
WYG
5573
5574 if (ret)
5575 return ret;
5576
46bc8d4b 5577 if (il_is_rfkill(il))
be663ab6
WYG
5578 goto out;
5579
58de00a4 5580 D_INFO("Start UP work done.\n");
be663ab6
WYG
5581
5582 /* Wait for START_ALIVE from Run Time ucode. Otherwise callbacks from
5583 * mac80211 will not be run successfully. */
46bc8d4b 5584 ret = wait_event_timeout(il->wait_command_queue,
a6766ccd 5585 test_bit(S_READY, &il->status),
be663ab6
WYG
5586 UCODE_READY_TIMEOUT);
5587 if (!ret) {
a6766ccd 5588 if (!test_bit(S_READY, &il->status)) {
9406f797 5589 IL_ERR("START_ALIVE timeout after %dms.\n",
be663ab6
WYG
5590 jiffies_to_msecs(UCODE_READY_TIMEOUT));
5591 return -ETIMEDOUT;
5592 }
5593 }
5594
46bc8d4b 5595 il4965_led_enable(il);
be663ab6
WYG
5596
5597out:
46bc8d4b 5598 il->is_open = 1;
58de00a4 5599 D_MAC80211("leave\n");
be663ab6
WYG
5600 return 0;
5601}
5602
e2ebc833 5603void il4965_mac_stop(struct ieee80211_hw *hw)
be663ab6 5604{
46bc8d4b 5605 struct il_priv *il = hw->priv;
be663ab6 5606
58de00a4 5607 D_MAC80211("enter\n");
be663ab6 5608
46bc8d4b 5609 if (!il->is_open)
be663ab6
WYG
5610 return;
5611
46bc8d4b 5612 il->is_open = 0;
be663ab6 5613
46bc8d4b 5614 il4965_down(il);
be663ab6 5615
46bc8d4b 5616 flush_workqueue(il->workqueue);
be663ab6 5617
a078a1fd
SG
5618 /* User space software may expect getting rfkill changes
5619 * even if interface is down */
841b2cca 5620 _il_wr(il, CSR_INT, 0xFFFFFFFF);
46bc8d4b 5621 il_enable_rfkill_int(il);
be663ab6 5622
58de00a4 5623 D_MAC80211("leave\n");
be663ab6
WYG
5624}
5625
e2ebc833 5626void il4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
be663ab6 5627{
46bc8d4b 5628 struct il_priv *il = hw->priv;
be663ab6 5629
58de00a4 5630 D_MACDUMP("enter\n");
be663ab6 5631
58de00a4 5632 D_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
be663ab6
WYG
5633 ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
5634
46bc8d4b 5635 if (il4965_tx_skb(il, skb))
be663ab6
WYG
5636 dev_kfree_skb_any(skb);
5637
58de00a4 5638 D_MACDUMP("leave\n");
be663ab6
WYG
5639}
5640
e2ebc833 5641void il4965_mac_update_tkip_key(struct ieee80211_hw *hw,
be663ab6
WYG
5642 struct ieee80211_vif *vif,
5643 struct ieee80211_key_conf *keyconf,
5644 struct ieee80211_sta *sta,
5645 u32 iv32, u16 *phase1key)
5646{
46bc8d4b 5647 struct il_priv *il = hw->priv;
e2ebc833 5648 struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
be663ab6 5649
58de00a4 5650 D_MAC80211("enter\n");
be663ab6 5651
46bc8d4b 5652 il4965_update_tkip_key(il, vif_priv->ctx, keyconf, sta,
be663ab6
WYG
5653 iv32, phase1key);
5654
58de00a4 5655 D_MAC80211("leave\n");
be663ab6
WYG
5656}
5657
e2ebc833 5658int il4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
be663ab6
WYG
5659 struct ieee80211_vif *vif, struct ieee80211_sta *sta,
5660 struct ieee80211_key_conf *key)
5661{
46bc8d4b 5662 struct il_priv *il = hw->priv;
e2ebc833
SG
5663 struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
5664 struct il_rxon_context *ctx = vif_priv->ctx;
be663ab6
WYG
5665 int ret;
5666 u8 sta_id;
5667 bool is_default_wep_key = false;
5668
58de00a4 5669 D_MAC80211("enter\n");
be663ab6 5670
46bc8d4b 5671 if (il->cfg->mod_params->sw_crypto) {
58de00a4 5672 D_MAC80211("leave - hwcrypto disabled\n");
be663ab6
WYG
5673 return -EOPNOTSUPP;
5674 }
5675
46bc8d4b 5676 sta_id = il_sta_id_or_broadcast(il, vif_priv->ctx, sta);
e2ebc833 5677 if (sta_id == IL_INVALID_STATION)
be663ab6
WYG
5678 return -EINVAL;
5679
46bc8d4b
SG
5680 mutex_lock(&il->mutex);
5681 il_scan_cancel_timeout(il, 100);
be663ab6
WYG
5682
5683 /*
5684 * If we are getting WEP group key and we didn't receive any key mapping
5685 * so far, we are in legacy wep mode (group key only), otherwise we are
5686 * in 1X mode.
5687 * In legacy wep mode, we use another host command to the uCode.
5688 */
5689 if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
5690 key->cipher == WLAN_CIPHER_SUITE_WEP104) &&
5691 !sta) {
5692 if (cmd == SET_KEY)
5693 is_default_wep_key = !ctx->key_mapping_keys;
5694 else
5695 is_default_wep_key =
5696 (key->hw_key_idx == HW_KEY_DEFAULT);
5697 }
5698
5699 switch (cmd) {
5700 case SET_KEY:
5701 if (is_default_wep_key)
46bc8d4b 5702 ret = il4965_set_default_wep_key(il,
be663ab6
WYG
5703 vif_priv->ctx, key);
5704 else
46bc8d4b 5705 ret = il4965_set_dynamic_key(il, vif_priv->ctx,
be663ab6
WYG
5706 key, sta_id);
5707
58de00a4 5708 D_MAC80211("enable hwcrypto key\n");
be663ab6
WYG
5709 break;
5710 case DISABLE_KEY:
5711 if (is_default_wep_key)
46bc8d4b 5712 ret = il4965_remove_default_wep_key(il, ctx, key);
be663ab6 5713 else
46bc8d4b 5714 ret = il4965_remove_dynamic_key(il, ctx,
be663ab6
WYG
5715 key, sta_id);
5716
58de00a4 5717 D_MAC80211("disable hwcrypto key\n");
be663ab6
WYG
5718 break;
5719 default:
5720 ret = -EINVAL;
5721 }
5722
46bc8d4b 5723 mutex_unlock(&il->mutex);
58de00a4 5724 D_MAC80211("leave\n");
be663ab6
WYG
5725
5726 return ret;
5727}
5728
e2ebc833 5729int il4965_mac_ampdu_action(struct ieee80211_hw *hw,
be663ab6
WYG
5730 struct ieee80211_vif *vif,
5731 enum ieee80211_ampdu_mlme_action action,
5732 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
5733 u8 buf_size)
5734{
46bc8d4b 5735 struct il_priv *il = hw->priv;
be663ab6
WYG
5736 int ret = -EINVAL;
5737
58de00a4 5738 D_HT("A-MPDU action on addr %pM tid %d\n",
be663ab6
WYG
5739 sta->addr, tid);
5740
46bc8d4b 5741 if (!(il->cfg->sku & IL_SKU_N))
be663ab6
WYG
5742 return -EACCES;
5743
46bc8d4b 5744 mutex_lock(&il->mutex);
be663ab6
WYG
5745
5746 switch (action) {
5747 case IEEE80211_AMPDU_RX_START:
58de00a4 5748 D_HT("start Rx\n");
46bc8d4b 5749 ret = il4965_sta_rx_agg_start(il, sta, tid, *ssn);
be663ab6
WYG
5750 break;
5751 case IEEE80211_AMPDU_RX_STOP:
58de00a4 5752 D_HT("stop Rx\n");
46bc8d4b 5753 ret = il4965_sta_rx_agg_stop(il, sta, tid);
a6766ccd 5754 if (test_bit(S_EXIT_PENDING, &il->status))
be663ab6
WYG
5755 ret = 0;
5756 break;
5757 case IEEE80211_AMPDU_TX_START:
58de00a4 5758 D_HT("start Tx\n");
46bc8d4b 5759 ret = il4965_tx_agg_start(il, vif, sta, tid, ssn);
be663ab6
WYG
5760 break;
5761 case IEEE80211_AMPDU_TX_STOP:
58de00a4 5762 D_HT("stop Tx\n");
46bc8d4b 5763 ret = il4965_tx_agg_stop(il, vif, sta, tid);
a6766ccd 5764 if (test_bit(S_EXIT_PENDING, &il->status))
be663ab6
WYG
5765 ret = 0;
5766 break;
5767 case IEEE80211_AMPDU_TX_OPERATIONAL:
5768 ret = 0;
5769 break;
5770 }
46bc8d4b 5771 mutex_unlock(&il->mutex);
be663ab6
WYG
5772
5773 return ret;
5774}
5775
e2ebc833 5776int il4965_mac_sta_add(struct ieee80211_hw *hw,
be663ab6
WYG
5777 struct ieee80211_vif *vif,
5778 struct ieee80211_sta *sta)
5779{
46bc8d4b 5780 struct il_priv *il = hw->priv;
e2ebc833
SG
5781 struct il_station_priv *sta_priv = (void *)sta->drv_priv;
5782 struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
be663ab6
WYG
5783 bool is_ap = vif->type == NL80211_IFTYPE_STATION;
5784 int ret;
5785 u8 sta_id;
5786
58de00a4 5787 D_INFO("received request to add station %pM\n",
be663ab6 5788 sta->addr);
46bc8d4b 5789 mutex_lock(&il->mutex);
58de00a4 5790 D_INFO("proceeding to add station %pM\n",
be663ab6 5791 sta->addr);
e2ebc833 5792 sta_priv->common.sta_id = IL_INVALID_STATION;
be663ab6
WYG
5793
5794 atomic_set(&sta_priv->pending_frames, 0);
5795
46bc8d4b 5796 ret = il_add_station_common(il, vif_priv->ctx, sta->addr,
be663ab6
WYG
5797 is_ap, sta, &sta_id);
5798 if (ret) {
9406f797 5799 IL_ERR("Unable to add station %pM (%d)\n",
be663ab6
WYG
5800 sta->addr, ret);
5801 /* Should we return success if return code is EEXIST ? */
46bc8d4b 5802 mutex_unlock(&il->mutex);
be663ab6
WYG
5803 return ret;
5804 }
5805
5806 sta_priv->common.sta_id = sta_id;
5807
5808 /* Initialize rate scaling */
58de00a4 5809 D_INFO("Initializing rate scaling for station %pM\n",
be663ab6 5810 sta->addr);
46bc8d4b
SG
5811 il4965_rs_rate_init(il, sta, sta_id);
5812 mutex_unlock(&il->mutex);
be663ab6
WYG
5813
5814 return 0;
5815}
5816
e2ebc833 5817void il4965_mac_channel_switch(struct ieee80211_hw *hw,
be663ab6
WYG
5818 struct ieee80211_channel_switch *ch_switch)
5819{
46bc8d4b 5820 struct il_priv *il = hw->priv;
e2ebc833 5821 const struct il_channel_info *ch_info;
be663ab6
WYG
5822 struct ieee80211_conf *conf = &hw->conf;
5823 struct ieee80211_channel *channel = ch_switch->channel;
46bc8d4b 5824 struct il_ht_config *ht_conf = &il->current_ht_config;
be663ab6 5825
7c2cde2e 5826 struct il_rxon_context *ctx = &il->ctx;
be663ab6 5827 u16 ch;
be663ab6 5828
58de00a4 5829 D_MAC80211("enter\n");
be663ab6 5830
46bc8d4b 5831 mutex_lock(&il->mutex);
28a6e577 5832
46bc8d4b 5833 if (il_is_rfkill(il))
28a6e577 5834 goto out;
be663ab6 5835
a6766ccd
SG
5836 if (test_bit(S_EXIT_PENDING, &il->status) ||
5837 test_bit(S_SCANNING, &il->status) ||
5838 test_bit(S_CHANNEL_SWITCH_PENDING, &il->status))
28a6e577 5839 goto out;
be663ab6 5840
e2ebc833 5841 if (!il_is_associated_ctx(ctx))
28a6e577 5842 goto out;
be663ab6 5843
46bc8d4b 5844 if (!il->cfg->ops->lib->set_channel_switch)
7f1f9742 5845 goto out;
be663ab6 5846
7f1f9742
SG
5847 ch = channel->hw_value;
5848 if (le16_to_cpu(ctx->active.channel) == ch)
5849 goto out;
5850
46bc8d4b 5851 ch_info = il_get_channel_info(il, channel->band, ch);
e2ebc833 5852 if (!il_is_channel_valid(ch_info)) {
58de00a4 5853 D_MAC80211("invalid channel\n");
7f1f9742
SG
5854 goto out;
5855 }
5856
46bc8d4b 5857 spin_lock_irq(&il->lock);
7f1f9742 5858
46bc8d4b 5859 il->current_ht_config.smps = conf->smps_mode;
7f1f9742
SG
5860
5861 /* Configure HT40 channels */
5862 ctx->ht.enabled = conf_is_ht(conf);
5863 if (ctx->ht.enabled) {
5864 if (conf_is_ht40_minus(conf)) {
5865 ctx->ht.extension_chan_offset =
5866 IEEE80211_HT_PARAM_CHA_SEC_BELOW;
5867 ctx->ht.is_40mhz = true;
5868 } else if (conf_is_ht40_plus(conf)) {
5869 ctx->ht.extension_chan_offset =
5870 IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
5871 ctx->ht.is_40mhz = true;
5872 } else {
5873 ctx->ht.extension_chan_offset =
5874 IEEE80211_HT_PARAM_CHA_SEC_NONE;
5875 ctx->ht.is_40mhz = false;
be663ab6 5876 }
7f1f9742
SG
5877 } else
5878 ctx->ht.is_40mhz = false;
5879
5880 if ((le16_to_cpu(ctx->staging.channel) != ch))
5881 ctx->staging.flags = 0;
5882
46bc8d4b
SG
5883 il_set_rxon_channel(il, channel, ctx);
5884 il_set_rxon_ht(il, ht_conf);
5885 il_set_flags_for_band(il, ctx, channel->band, ctx->vif);
7f1f9742 5886
46bc8d4b 5887 spin_unlock_irq(&il->lock);
7f1f9742 5888
46bc8d4b 5889 il_set_rate(il);
7f1f9742
SG
5890 /*
5891 * at this point, staging_rxon has the
5892 * configuration for channel switch
5893 */
a6766ccd 5894 set_bit(S_CHANNEL_SWITCH_PENDING, &il->status);
46bc8d4b
SG
5895 il->switch_channel = cpu_to_le16(ch);
5896 if (il->cfg->ops->lib->set_channel_switch(il, ch_switch)) {
a6766ccd 5897 clear_bit(S_CHANNEL_SWITCH_PENDING, &il->status);
46bc8d4b 5898 il->switch_channel = 0;
7f1f9742 5899 ieee80211_chswitch_done(ctx->vif, false);
be663ab6 5900 }
7f1f9742 5901
be663ab6 5902out:
46bc8d4b 5903 mutex_unlock(&il->mutex);
58de00a4 5904 D_MAC80211("leave\n");
be663ab6
WYG
5905}
5906
e2ebc833 5907void il4965_configure_filter(struct ieee80211_hw *hw,
be663ab6
WYG
5908 unsigned int changed_flags,
5909 unsigned int *total_flags,
5910 u64 multicast)
5911{
46bc8d4b 5912 struct il_priv *il = hw->priv;
be663ab6 5913 __le32 filter_or = 0, filter_nand = 0;
be663ab6
WYG
5914
5915#define CHK(test, flag) do { \
5916 if (*total_flags & (test)) \
5917 filter_or |= (flag); \
5918 else \
5919 filter_nand |= (flag); \
5920 } while (0)
5921
58de00a4 5922 D_MAC80211("Enter: changed: 0x%x, total: 0x%x\n",
be663ab6
WYG
5923 changed_flags, *total_flags);
5924
5925 CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
5926 /* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */
5927 CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
5928 CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
5929
5930#undef CHK
5931
46bc8d4b 5932 mutex_lock(&il->mutex);
be663ab6 5933
17d6e557
SG
5934 il->ctx.staging.filter_flags &= ~filter_nand;
5935 il->ctx.staging.filter_flags |= filter_or;
be663ab6 5936
17d6e557
SG
5937 /*
5938 * Not committing directly because hardware can perform a scan,
5939 * but we'll eventually commit the filter flags change anyway.
5940 */
be663ab6 5941
46bc8d4b 5942 mutex_unlock(&il->mutex);
be663ab6
WYG
5943
5944 /*
5945 * Receiving all multicast frames is always enabled by the
e2ebc833 5946 * default flags setup in il_connection_init_rx_config()
be663ab6
WYG
5947 * since we currently do not support programming multicast
5948 * filters into the device.
5949 */
5950 *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
5951 FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
5952}
5953
5954/*****************************************************************************
5955 *
5956 * driver setup and teardown
5957 *
5958 *****************************************************************************/
5959
e2ebc833 5960static void il4965_bg_txpower_work(struct work_struct *work)
be663ab6 5961{
46bc8d4b 5962 struct il_priv *il = container_of(work, struct il_priv,
be663ab6
WYG
5963 txpower_work);
5964
46bc8d4b 5965 mutex_lock(&il->mutex);
f325757a 5966
be663ab6 5967 /* If a scan happened to start before we got here
ebf0d90d 5968 * then just return; the stats notification will
be663ab6
WYG
5969 * kick off another scheduled work to compensate for
5970 * any temperature delta we missed here. */
a6766ccd
SG
5971 if (test_bit(S_EXIT_PENDING, &il->status) ||
5972 test_bit(S_SCANNING, &il->status))
f325757a 5973 goto out;
be663ab6
WYG
5974
5975 /* Regardless of if we are associated, we must reconfigure the
5976 * TX power since frames can be sent on non-radar channels while
5977 * not associated */
46bc8d4b 5978 il->cfg->ops->lib->send_tx_power(il);
be663ab6
WYG
5979
5980 /* Update last_temperature to keep is_calib_needed from running
5981 * when it isn't needed... */
46bc8d4b 5982 il->last_temperature = il->temperature;
f325757a 5983out:
46bc8d4b 5984 mutex_unlock(&il->mutex);
be663ab6
WYG
5985}
5986
46bc8d4b 5987static void il4965_setup_deferred_work(struct il_priv *il)
be663ab6 5988{
46bc8d4b 5989 il->workqueue = create_singlethread_workqueue(DRV_NAME);
be663ab6 5990
46bc8d4b 5991 init_waitqueue_head(&il->wait_command_queue);
be663ab6 5992
46bc8d4b
SG
5993 INIT_WORK(&il->restart, il4965_bg_restart);
5994 INIT_WORK(&il->rx_replenish, il4965_bg_rx_replenish);
5995 INIT_WORK(&il->run_time_calib_work, il4965_bg_run_time_calib_work);
5996 INIT_DELAYED_WORK(&il->init_alive_start, il4965_bg_init_alive_start);
5997 INIT_DELAYED_WORK(&il->alive_start, il4965_bg_alive_start);
be663ab6 5998
46bc8d4b 5999 il_setup_scan_deferred_work(il);
be663ab6 6000
46bc8d4b 6001 INIT_WORK(&il->txpower_work, il4965_bg_txpower_work);
be663ab6 6002
ebf0d90d
SG
6003 init_timer(&il->stats_periodic);
6004 il->stats_periodic.data = (unsigned long)il;
6005 il->stats_periodic.function = il4965_bg_stats_periodic;
be663ab6 6006
46bc8d4b
SG
6007 init_timer(&il->watchdog);
6008 il->watchdog.data = (unsigned long)il;
6009 il->watchdog.function = il_bg_watchdog;
be663ab6 6010
46bc8d4b
SG
6011 tasklet_init(&il->irq_tasklet, (void (*)(unsigned long))
6012 il4965_irq_tasklet, (unsigned long)il);
be663ab6
WYG
6013}
6014
46bc8d4b 6015static void il4965_cancel_deferred_work(struct il_priv *il)
be663ab6 6016{
46bc8d4b
SG
6017 cancel_work_sync(&il->txpower_work);
6018 cancel_delayed_work_sync(&il->init_alive_start);
6019 cancel_delayed_work(&il->alive_start);
6020 cancel_work_sync(&il->run_time_calib_work);
be663ab6 6021
46bc8d4b 6022 il_cancel_scan_deferred_work(il);
be663ab6 6023
ebf0d90d 6024 del_timer_sync(&il->stats_periodic);
be663ab6
WYG
6025}
6026
46bc8d4b 6027static void il4965_init_hw_rates(struct il_priv *il,
be663ab6
WYG
6028 struct ieee80211_rate *rates)
6029{
6030 int i;
6031
2eb05816 6032 for (i = 0; i < RATE_COUNT_LEGACY; i++) {
d2ddf621 6033 rates[i].bitrate = il_rates[i].ieee * 5;
0c2c8852 6034 rates[i].hw_value = i; /* Rate scaling will work on idxes */
be663ab6
WYG
6035 rates[i].hw_value_short = i;
6036 rates[i].flags = 0;
e2ebc833 6037 if ((i >= IL_FIRST_CCK_RATE) && (i <= IL_LAST_CCK_RATE)) {
be663ab6
WYG
6038 /*
6039 * If CCK != 1M then set short preamble rate flag.
6040 */
6041 rates[i].flags |=
2eb05816 6042 (il_rates[i].plcp == RATE_1M_PLCP) ?
be663ab6
WYG
6043 0 : IEEE80211_RATE_SHORT_PREAMBLE;
6044 }
6045 }
6046}
6047/*
46bc8d4b 6048 * Acquire il->lock before calling this function !
be663ab6 6049 */
0c2c8852 6050void il4965_set_wr_ptrs(struct il_priv *il, int txq_id, u32 idx)
be663ab6 6051{
0c1a94e2 6052 il_wr(il, HBUS_TARG_WRPTR,
0c2c8852
SG
6053 (idx & 0xff) | (txq_id << 8));
6054 il_wr_prph(il, IL49_SCD_QUEUE_RDPTR(txq_id), idx);
be663ab6
WYG
6055}
6056
46bc8d4b 6057void il4965_tx_queue_set_status(struct il_priv *il,
e2ebc833 6058 struct il_tx_queue *txq,
be663ab6
WYG
6059 int tx_fifo_id, int scd_retry)
6060{
6061 int txq_id = txq->q.id;
6062
6063 /* Find out whether to activate Tx queue */
46bc8d4b 6064 int active = test_bit(txq_id, &il->txq_ctx_active_msk) ? 1 : 0;
be663ab6
WYG
6065
6066 /* Set up and activate */
d3175167
SG
6067 il_wr_prph(il, IL49_SCD_QUEUE_STATUS_BITS(txq_id),
6068 (active << IL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
6069 (tx_fifo_id << IL49_SCD_QUEUE_STTS_REG_POS_TXF) |
6070 (scd_retry << IL49_SCD_QUEUE_STTS_REG_POS_WSL) |
6071 (scd_retry << IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
6072 IL49_SCD_QUEUE_STTS_REG_MSK);
be663ab6
WYG
6073
6074 txq->sched_retry = scd_retry;
6075
58de00a4 6076 D_INFO("%s %s Queue %d on AC %d\n",
be663ab6
WYG
6077 active ? "Activate" : "Deactivate",
6078 scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
6079}
6080
6081
46bc8d4b 6082static int il4965_init_drv(struct il_priv *il)
be663ab6
WYG
6083{
6084 int ret;
6085
46bc8d4b
SG
6086 spin_lock_init(&il->sta_lock);
6087 spin_lock_init(&il->hcmd_lock);
be663ab6 6088
46bc8d4b 6089 INIT_LIST_HEAD(&il->free_frames);
be663ab6 6090
46bc8d4b 6091 mutex_init(&il->mutex);
be663ab6 6092
46bc8d4b
SG
6093 il->ieee_channels = NULL;
6094 il->ieee_rates = NULL;
6095 il->band = IEEE80211_BAND_2GHZ;
be663ab6 6096
46bc8d4b
SG
6097 il->iw_mode = NL80211_IFTYPE_STATION;
6098 il->current_ht_config.smps = IEEE80211_SMPS_STATIC;
6099 il->missed_beacon_threshold = IL_MISSED_BEACON_THRESHOLD_DEF;
be663ab6
WYG
6100
6101 /* initialize force reset */
46bc8d4b 6102 il->force_reset.reset_duration = IL_DELAY_NEXT_FORCE_FW_RELOAD;
be663ab6
WYG
6103
6104 /* Choose which receivers/antennas to use */
46bc8d4b
SG
6105 if (il->cfg->ops->hcmd->set_rxon_chain)
6106 il->cfg->ops->hcmd->set_rxon_chain(il,
7c2cde2e 6107 &il->ctx);
be663ab6 6108
46bc8d4b 6109 il_init_scan_params(il);
be663ab6 6110
46bc8d4b 6111 ret = il_init_channel_map(il);
be663ab6 6112 if (ret) {
9406f797 6113 IL_ERR("initializing regulatory failed: %d\n", ret);
be663ab6
WYG
6114 goto err;
6115 }
6116
46bc8d4b 6117 ret = il_init_geos(il);
be663ab6 6118 if (ret) {
9406f797 6119 IL_ERR("initializing geos failed: %d\n", ret);
be663ab6
WYG
6120 goto err_free_channel_map;
6121 }
46bc8d4b 6122 il4965_init_hw_rates(il, il->ieee_rates);
be663ab6
WYG
6123
6124 return 0;
6125
6126err_free_channel_map:
46bc8d4b 6127 il_free_channel_map(il);
be663ab6
WYG
6128err:
6129 return ret;
6130}
6131
46bc8d4b 6132static void il4965_uninit_drv(struct il_priv *il)
be663ab6 6133{
46bc8d4b
SG
6134 il4965_calib_free_results(il);
6135 il_free_geos(il);
6136 il_free_channel_map(il);
6137 kfree(il->scan_cmd);
be663ab6
WYG
6138}
6139
46bc8d4b 6140static void il4965_hw_detect(struct il_priv *il)
be663ab6 6141{
841b2cca
SG
6142 il->hw_rev = _il_rd(il, CSR_HW_REV);
6143 il->hw_wa_rev = _il_rd(il, CSR_HW_REV_WA_REG);
46bc8d4b 6144 il->rev_id = il->pci_dev->revision;
58de00a4 6145 D_INFO("HW Revision ID = 0x%X\n", il->rev_id);
be663ab6
WYG
6146}
6147
46bc8d4b 6148static int il4965_set_hw_params(struct il_priv *il)
be663ab6 6149{
46bc8d4b
SG
6150 il->hw_params.max_rxq_size = RX_QUEUE_SIZE;
6151 il->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
6152 if (il->cfg->mod_params->amsdu_size_8K)
6153 il->hw_params.rx_page_order = get_order(IL_RX_BUF_SIZE_8K);
be663ab6 6154 else
46bc8d4b 6155 il->hw_params.rx_page_order = get_order(IL_RX_BUF_SIZE_4K);
be663ab6 6156
46bc8d4b 6157 il->hw_params.max_beacon_itrvl = IL_MAX_UCODE_BEACON_INTERVAL;
be663ab6 6158
46bc8d4b
SG
6159 if (il->cfg->mod_params->disable_11n)
6160 il->cfg->sku &= ~IL_SKU_N;
be663ab6
WYG
6161
6162 /* Device-specific setup */
46bc8d4b 6163 return il->cfg->ops->lib->set_hw_params(il);
be663ab6
WYG
6164}
6165
e2ebc833
SG
6166static const u8 il4965_bss_ac_to_fifo[] = {
6167 IL_TX_FIFO_VO,
6168 IL_TX_FIFO_VI,
6169 IL_TX_FIFO_BE,
6170 IL_TX_FIFO_BK,
be663ab6
WYG
6171};
6172
e2ebc833 6173static const u8 il4965_bss_ac_to_queue[] = {
be663ab6
WYG
6174 0, 1, 2, 3,
6175};
6176
6177static int
e2ebc833 6178il4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
be663ab6 6179{
7c2cde2e 6180 int err = 0;
46bc8d4b 6181 struct il_priv *il;
be663ab6 6182 struct ieee80211_hw *hw;
e2ebc833 6183 struct il_cfg *cfg = (struct il_cfg *)(ent->driver_data);
be663ab6
WYG
6184 unsigned long flags;
6185 u16 pci_cmd;
6186
6187 /************************
6188 * 1. Allocating HW data
6189 ************************/
6190
e2ebc833 6191 hw = il_alloc_all(cfg);
be663ab6
WYG
6192 if (!hw) {
6193 err = -ENOMEM;
6194 goto out;
6195 }
46bc8d4b
SG
6196 il = hw->priv;
6197 /* At this point both hw and il are allocated. */
be663ab6 6198
7c2cde2e
SG
6199 il->ctx.ctxid = 0;
6200
6201 il->ctx.always_active = true;
6202 il->ctx.is_active = true;
6203 il->ctx.rxon_cmd = REPLY_RXON;
6204 il->ctx.rxon_timing_cmd = REPLY_RXON_TIMING;
6205 il->ctx.rxon_assoc_cmd = REPLY_RXON_ASSOC;
6206 il->ctx.qos_cmd = REPLY_QOS_PARAM;
6207 il->ctx.ap_sta_id = IL_AP_ID;
6208 il->ctx.wep_key_cmd = REPLY_WEPKEY;
6209 il->ctx.ac_to_fifo = il4965_bss_ac_to_fifo;
6210 il->ctx.ac_to_queue = il4965_bss_ac_to_queue;
6211 il->ctx.exclusive_interface_modes =
be663ab6 6212 BIT(NL80211_IFTYPE_ADHOC);
7c2cde2e 6213 il->ctx.interface_modes =
be663ab6 6214 BIT(NL80211_IFTYPE_STATION);
7c2cde2e
SG
6215 il->ctx.ap_devtype = RXON_DEV_TYPE_AP;
6216 il->ctx.ibss_devtype = RXON_DEV_TYPE_IBSS;
6217 il->ctx.station_devtype = RXON_DEV_TYPE_ESS;
6218 il->ctx.unused_devtype = RXON_DEV_TYPE_ESS;
be663ab6
WYG
6219
6220 SET_IEEE80211_DEV(hw, &pdev->dev);
6221
58de00a4 6222 D_INFO("*** LOAD DRIVER ***\n");
46bc8d4b
SG
6223 il->cfg = cfg;
6224 il->pci_dev = pdev;
6225 il->inta_mask = CSR_INI_SET_MASK;
be663ab6 6226
46bc8d4b 6227 if (il_alloc_traffic_mem(il))
9406f797 6228 IL_ERR("Not enough memory to generate traffic log\n");
be663ab6
WYG
6229
6230 /**************************
6231 * 2. Initializing PCI bus
6232 **************************/
6233 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
6234 PCIE_LINK_STATE_CLKPM);
6235
6236 if (pci_enable_device(pdev)) {
6237 err = -ENODEV;
6238 goto out_ieee80211_free_hw;
6239 }
6240
6241 pci_set_master(pdev);
6242
6243 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
6244 if (!err)
6245 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
6246 if (err) {
6247 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
6248 if (!err)
6249 err = pci_set_consistent_dma_mask(pdev,
6250 DMA_BIT_MASK(32));
6251 /* both attempts failed: */
6252 if (err) {
9406f797 6253 IL_WARN("No suitable DMA available.\n");
be663ab6
WYG
6254 goto out_pci_disable_device;
6255 }
6256 }
6257
6258 err = pci_request_regions(pdev, DRV_NAME);
6259 if (err)
6260 goto out_pci_disable_device;
6261
46bc8d4b 6262 pci_set_drvdata(pdev, il);
be663ab6
WYG
6263
6264
6265 /***********************
6266 * 3. Read REV register
6267 ***********************/
46bc8d4b
SG
6268 il->hw_base = pci_iomap(pdev, 0, 0);
6269 if (!il->hw_base) {
be663ab6
WYG
6270 err = -ENODEV;
6271 goto out_pci_release_regions;
6272 }
6273
58de00a4 6274 D_INFO("pci_resource_len = 0x%08llx\n",
be663ab6 6275 (unsigned long long) pci_resource_len(pdev, 0));
58de00a4 6276 D_INFO("pci_resource_base = %p\n", il->hw_base);
be663ab6
WYG
6277
6278 /* these spin locks will be used in apm_ops.init and EEPROM access
6279 * we should init now
6280 */
46bc8d4b
SG
6281 spin_lock_init(&il->reg_lock);
6282 spin_lock_init(&il->lock);
be663ab6
WYG
6283
6284 /*
6285 * stop and reset the on-board processor just in case it is in a
6286 * strange state ... like being left stranded by a primary kernel
6287 * and this is now the kdump kernel trying to start up
6288 */
841b2cca 6289 _il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
be663ab6 6290
46bc8d4b 6291 il4965_hw_detect(il);
9406f797 6292 IL_INFO("Detected %s, REV=0x%X\n",
46bc8d4b 6293 il->cfg->name, il->hw_rev);
be663ab6
WYG
6294
6295 /* We disable the RETRY_TIMEOUT register (0x41) to keep
6296 * PCI Tx retries from interfering with C3 CPU state */
6297 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
6298
46bc8d4b
SG
6299 il4965_prepare_card_hw(il);
6300 if (!il->hw_ready) {
9406f797 6301 IL_WARN("Failed, HW not ready\n");
be663ab6
WYG
6302 goto out_iounmap;
6303 }
6304
6305 /*****************
6306 * 4. Read EEPROM
6307 *****************/
6308 /* Read the EEPROM */
46bc8d4b 6309 err = il_eeprom_init(il);
be663ab6 6310 if (err) {
9406f797 6311 IL_ERR("Unable to init EEPROM\n");
be663ab6
WYG
6312 goto out_iounmap;
6313 }
46bc8d4b 6314 err = il4965_eeprom_check_version(il);
be663ab6
WYG
6315 if (err)
6316 goto out_free_eeprom;
6317
6318 if (err)
6319 goto out_free_eeprom;
6320
6321 /* extract MAC Address */
46bc8d4b 6322 il4965_eeprom_get_mac(il, il->addresses[0].addr);
58de00a4 6323 D_INFO("MAC address: %pM\n", il->addresses[0].addr);
46bc8d4b
SG
6324 il->hw->wiphy->addresses = il->addresses;
6325 il->hw->wiphy->n_addresses = 1;
be663ab6
WYG
6326
6327 /************************
6328 * 5. Setup HW constants
6329 ************************/
46bc8d4b 6330 if (il4965_set_hw_params(il)) {
9406f797 6331 IL_ERR("failed to set hw parameters\n");
be663ab6
WYG
6332 goto out_free_eeprom;
6333 }
6334
6335 /*******************
46bc8d4b 6336 * 6. Setup il
be663ab6
WYG
6337 *******************/
6338
46bc8d4b 6339 err = il4965_init_drv(il);
be663ab6
WYG
6340 if (err)
6341 goto out_free_eeprom;
46bc8d4b 6342 /* At this point both hw and il are initialized. */
be663ab6
WYG
6343
6344 /********************
6345 * 7. Setup services
6346 ********************/
46bc8d4b
SG
6347 spin_lock_irqsave(&il->lock, flags);
6348 il_disable_interrupts(il);
6349 spin_unlock_irqrestore(&il->lock, flags);
be663ab6 6350
46bc8d4b 6351 pci_enable_msi(il->pci_dev);
be663ab6 6352
46bc8d4b
SG
6353 err = request_irq(il->pci_dev->irq, il_isr,
6354 IRQF_SHARED, DRV_NAME, il);
be663ab6 6355 if (err) {
9406f797 6356 IL_ERR("Error allocating IRQ %d\n", il->pci_dev->irq);
be663ab6
WYG
6357 goto out_disable_msi;
6358 }
6359
46bc8d4b
SG
6360 il4965_setup_deferred_work(il);
6361 il4965_setup_rx_handlers(il);
be663ab6
WYG
6362
6363 /*********************************************
6364 * 8. Enable interrupts and read RFKILL state
6365 *********************************************/
6366
a078a1fd 6367 /* enable rfkill interrupt: hw bug w/a */
46bc8d4b 6368 pci_read_config_word(il->pci_dev, PCI_COMMAND, &pci_cmd);
be663ab6
WYG
6369 if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
6370 pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
46bc8d4b 6371 pci_write_config_word(il->pci_dev, PCI_COMMAND, pci_cmd);
be663ab6
WYG
6372 }
6373
46bc8d4b 6374 il_enable_rfkill_int(il);
be663ab6
WYG
6375
6376 /* If platform's RF_KILL switch is NOT set to KILL */
841b2cca 6377 if (_il_rd(il, CSR_GP_CNTRL) &
be663ab6 6378 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
a6766ccd 6379 clear_bit(S_RF_KILL_HW, &il->status);
be663ab6 6380 else
a6766ccd 6381 set_bit(S_RF_KILL_HW, &il->status);
be663ab6 6382
46bc8d4b 6383 wiphy_rfkill_set_hw_state(il->hw->wiphy,
a6766ccd 6384 test_bit(S_RF_KILL_HW, &il->status));
be663ab6 6385
46bc8d4b 6386 il_power_initialize(il);
be663ab6 6387
46bc8d4b 6388 init_completion(&il->_4965.firmware_loading_complete);
be663ab6 6389
46bc8d4b 6390 err = il4965_request_firmware(il, true);
be663ab6
WYG
6391 if (err)
6392 goto out_destroy_workqueue;
6393
6394 return 0;
6395
6396 out_destroy_workqueue:
46bc8d4b
SG
6397 destroy_workqueue(il->workqueue);
6398 il->workqueue = NULL;
6399 free_irq(il->pci_dev->irq, il);
be663ab6 6400 out_disable_msi:
46bc8d4b
SG
6401 pci_disable_msi(il->pci_dev);
6402 il4965_uninit_drv(il);
be663ab6 6403 out_free_eeprom:
46bc8d4b 6404 il_eeprom_free(il);
be663ab6 6405 out_iounmap:
46bc8d4b 6406 pci_iounmap(pdev, il->hw_base);
be663ab6
WYG
6407 out_pci_release_regions:
6408 pci_set_drvdata(pdev, NULL);
6409 pci_release_regions(pdev);
6410 out_pci_disable_device:
6411 pci_disable_device(pdev);
6412 out_ieee80211_free_hw:
46bc8d4b
SG
6413 il_free_traffic_mem(il);
6414 ieee80211_free_hw(il->hw);
be663ab6
WYG
6415 out:
6416 return err;
6417}
6418
e2ebc833 6419static void __devexit il4965_pci_remove(struct pci_dev *pdev)
be663ab6 6420{
46bc8d4b 6421 struct il_priv *il = pci_get_drvdata(pdev);
be663ab6
WYG
6422 unsigned long flags;
6423
46bc8d4b 6424 if (!il)
be663ab6
WYG
6425 return;
6426
46bc8d4b 6427 wait_for_completion(&il->_4965.firmware_loading_complete);
be663ab6 6428
58de00a4 6429 D_INFO("*** UNLOAD DRIVER ***\n");
be663ab6 6430
46bc8d4b 6431 il_dbgfs_unregister(il);
e2ebc833 6432 sysfs_remove_group(&pdev->dev.kobj, &il_attribute_group);
be663ab6 6433
e2ebc833
SG
6434 /* ieee80211_unregister_hw call wil cause il_mac_stop to
6435 * to be called and il4965_down since we are removing the device
a6766ccd 6436 * we need to set S_EXIT_PENDING bit.
be663ab6 6437 */
a6766ccd 6438 set_bit(S_EXIT_PENDING, &il->status);
be663ab6 6439
46bc8d4b 6440 il_leds_exit(il);
be663ab6 6441
46bc8d4b
SG
6442 if (il->mac80211_registered) {
6443 ieee80211_unregister_hw(il->hw);
6444 il->mac80211_registered = 0;
be663ab6 6445 } else {
46bc8d4b 6446 il4965_down(il);
be663ab6
WYG
6447 }
6448
6449 /*
6450 * Make sure device is reset to low power before unloading driver.
e2ebc833
SG
6451 * This may be redundant with il4965_down(), but there are paths to
6452 * run il4965_down() without calling apm_ops.stop(), and there are
6453 * paths to avoid running il4965_down() at all before leaving driver.
be663ab6
WYG
6454 * This (inexpensive) call *makes sure* device is reset.
6455 */
46bc8d4b 6456 il_apm_stop(il);
be663ab6
WYG
6457
6458 /* make sure we flush any pending irq or
6459 * tasklet for the driver
6460 */
46bc8d4b
SG
6461 spin_lock_irqsave(&il->lock, flags);
6462 il_disable_interrupts(il);
6463 spin_unlock_irqrestore(&il->lock, flags);
be663ab6 6464
46bc8d4b 6465 il4965_synchronize_irq(il);
be663ab6 6466
46bc8d4b 6467 il4965_dealloc_ucode_pci(il);
be663ab6 6468
46bc8d4b
SG
6469 if (il->rxq.bd)
6470 il4965_rx_queue_free(il, &il->rxq);
6471 il4965_hw_txq_ctx_free(il);
be663ab6 6472
46bc8d4b 6473 il_eeprom_free(il);
be663ab6
WYG
6474
6475
6476 /*netif_stop_queue(dev); */
46bc8d4b 6477 flush_workqueue(il->workqueue);
be663ab6 6478
e2ebc833 6479 /* ieee80211_unregister_hw calls il_mac_stop, which flushes
46bc8d4b 6480 * il->workqueue... so we can't take down the workqueue
be663ab6 6481 * until now... */
46bc8d4b
SG
6482 destroy_workqueue(il->workqueue);
6483 il->workqueue = NULL;
6484 il_free_traffic_mem(il);
be663ab6 6485
46bc8d4b
SG
6486 free_irq(il->pci_dev->irq, il);
6487 pci_disable_msi(il->pci_dev);
6488 pci_iounmap(pdev, il->hw_base);
be663ab6
WYG
6489 pci_release_regions(pdev);
6490 pci_disable_device(pdev);
6491 pci_set_drvdata(pdev, NULL);
6492
46bc8d4b 6493 il4965_uninit_drv(il);
be663ab6 6494
46bc8d4b 6495 dev_kfree_skb(il->beacon_skb);
be663ab6 6496
46bc8d4b 6497 ieee80211_free_hw(il->hw);
be663ab6
WYG
6498}
6499
6500/*
6501 * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask
46bc8d4b 6502 * must be called under il->lock and mac access
be663ab6 6503 */
46bc8d4b 6504void il4965_txq_set_sched(struct il_priv *il, u32 mask)
be663ab6 6505{
d3175167 6506 il_wr_prph(il, IL49_SCD_TXFACT, mask);
be663ab6
WYG
6507}
6508
6509/*****************************************************************************
6510 *
6511 * driver and module entry point
6512 *
6513 *****************************************************************************/
6514
6515/* Hardware specific file defines the PCI IDs table for that hardware module */
e2ebc833 6516static DEFINE_PCI_DEVICE_TABLE(il4965_hw_card_ids) = {
e2ebc833
SG
6517 {IL_PCI_DEVICE(0x4229, PCI_ANY_ID, il4965_cfg)},
6518 {IL_PCI_DEVICE(0x4230, PCI_ANY_ID, il4965_cfg)},
be663ab6
WYG
6519 {0}
6520};
e2ebc833 6521MODULE_DEVICE_TABLE(pci, il4965_hw_card_ids);
be663ab6 6522
e2ebc833 6523static struct pci_driver il4965_driver = {
be663ab6 6524 .name = DRV_NAME,
e2ebc833
SG
6525 .id_table = il4965_hw_card_ids,
6526 .probe = il4965_pci_probe,
6527 .remove = __devexit_p(il4965_pci_remove),
6528 .driver.pm = IL_LEGACY_PM_OPS,
be663ab6
WYG
6529};
6530
e2ebc833 6531static int __init il4965_init(void)
be663ab6
WYG
6532{
6533
6534 int ret;
6535 pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
6536 pr_info(DRV_COPYRIGHT "\n");
6537
e2ebc833 6538 ret = il4965_rate_control_register();
be663ab6
WYG
6539 if (ret) {
6540 pr_err("Unable to register rate control algorithm: %d\n", ret);
6541 return ret;
6542 }
6543
e2ebc833 6544 ret = pci_register_driver(&il4965_driver);
be663ab6
WYG
6545 if (ret) {
6546 pr_err("Unable to initialize PCI module\n");
6547 goto error_register;
6548 }
6549
6550 return ret;
6551
6552error_register:
e2ebc833 6553 il4965_rate_control_unregister();
be663ab6
WYG
6554 return ret;
6555}
6556
e2ebc833 6557static void __exit il4965_exit(void)
be663ab6 6558{
e2ebc833
SG
6559 pci_unregister_driver(&il4965_driver);
6560 il4965_rate_control_unregister();
be663ab6
WYG
6561}
6562
e2ebc833
SG
6563module_exit(il4965_exit);
6564module_init(il4965_init);
be663ab6 6565
d3175167 6566#ifdef CONFIG_IWLEGACY_DEBUG
d2ddf621 6567module_param_named(debug, il_debug_level, uint, S_IRUGO | S_IWUSR);
be663ab6
WYG
6568MODULE_PARM_DESC(debug, "debug output mask");
6569#endif
6570
e2ebc833 6571module_param_named(swcrypto, il4965_mod_params.sw_crypto, int, S_IRUGO);
be663ab6 6572MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
e2ebc833 6573module_param_named(queues_num, il4965_mod_params.num_of_queues, int, S_IRUGO);
be663ab6 6574MODULE_PARM_DESC(queues_num, "number of hw queues.");
e2ebc833 6575module_param_named(11n_disable, il4965_mod_params.disable_11n, int, S_IRUGO);
be663ab6 6576MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
e2ebc833 6577module_param_named(amsdu_size_8K, il4965_mod_params.amsdu_size_8K,
be663ab6
WYG
6578 int, S_IRUGO);
6579MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
e2ebc833 6580module_param_named(fw_restart, il4965_mod_params.restart_fw, int, S_IRUGO);
be663ab6 6581MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
This page took 0.492252 seconds and 5 git commands to generate.