Commit | Line | Data |
---|---|---|
be663ab6 WYG |
1 | /****************************************************************************** |
2 | * | |
3 | * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. | |
4 | * | |
5 | * Portions of this file are derived from the ipw3945 project, as well | |
6 | * as portions of the ieee80211 subsystem header files. | |
7 | * | |
8 | * This program is free software; you can redistribute it and/or modify it | |
9 | * under the terms of version 2 of the GNU General Public License as | |
10 | * published by the Free Software Foundation. | |
11 | * | |
12 | * This program is distributed in the hope that it will be useful, but WITHOUT | |
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
15 | * more details. | |
16 | * | |
17 | * You should have received a copy of the GNU General Public License along with | |
18 | * this program; if not, write to the Free Software Foundation, Inc., | |
19 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA | |
20 | * | |
21 | * The full GNU General Public License is included in this distribution in the | |
22 | * file called LICENSE. | |
23 | * | |
24 | * Contact Information: | |
25 | * Intel Linux Wireless <ilw@linux.intel.com> | |
26 | * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | |
27 | * | |
28 | *****************************************************************************/ | |
29 | ||
30 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | |
31 | ||
32 | #include <linux/kernel.h> | |
33 | #include <linux/module.h> | |
34 | #include <linux/init.h> | |
35 | #include <linux/pci.h> | |
36 | #include <linux/pci-aspm.h> | |
37 | #include <linux/slab.h> | |
38 | #include <linux/dma-mapping.h> | |
39 | #include <linux/delay.h> | |
40 | #include <linux/sched.h> | |
41 | #include <linux/skbuff.h> | |
42 | #include <linux/netdevice.h> | |
be663ab6 WYG |
43 | #include <linux/firmware.h> |
44 | #include <linux/etherdevice.h> | |
45 | #include <linux/if_arp.h> | |
46 | ||
47 | #include <net/mac80211.h> | |
48 | ||
49 | #include <asm/div64.h> | |
50 | ||
51 | #define DRV_NAME "iwl4965" | |
52 | ||
98613be0 | 53 | #include "common.h" |
af038f40 | 54 | #include "4965.h" |
be663ab6 | 55 | |
be663ab6 WYG |
/******************************************************************************
 *
 * module boiler plate
 *
 ******************************************************************************/

/*
 * module name, copyright, version, etc.
 */
#define DRV_DESCRIPTION "Intel(R) Wireless WiFi 4965 driver for Linux"

/* Append "d" to the driver version string when debug support is compiled
 * in, so a debug build is distinguishable via modinfo/ethtool. */
#ifdef CONFIG_IWLEGACY_DEBUG
#define VD "d"
#else
#define VD
#endif

#define DRV_VERSION IWLWIFI_VERSION VD

/* NOTE(review): DRV_COPYRIGHT, DRV_AUTHOR and IWLWIFI_VERSION are not
 * defined in this file — presumably they come from common.h; confirm. */
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
MODULE_LICENSE("GPL");
MODULE_ALIAS("iwl4965");
e7392364 SG |
81 | void |
82 | il4965_check_abort_status(struct il_priv *il, u8 frame_count, u32 status) | |
fcb74588 SG |
83 | { |
84 | if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) { | |
85 | IL_ERR("Tx flush command to flush out all frames\n"); | |
a6766ccd | 86 | if (!test_bit(S_EXIT_PENDING, &il->status)) |
fcb74588 SG |
87 | queue_work(il->workqueue, &il->tx_flush); |
88 | } | |
89 | } | |
90 | ||
/*
 * EEPROM
 */
/* Default module parameters; any field not listed starts at 0.
 * Per the field names: amsdu_size_8K=1 selects 8K Rx buffers and
 * restart_fw=1 enables firmware restart on error — TODO confirm against
 * the struct il_mod_params definition in common.h. */
struct il_mod_params il4965_mod_params = {
	.amsdu_size_8K = 1,
	.restart_fw = 1,
	/* the rest are 0 by default */
};
99 | ||
/*
 * il4965_rx_queue_reset - return every Rx buffer to the "used" pool
 *
 * Unmaps and frees any page still attached to a pool entry, moves all
 * pool entries onto rx_used, clears the receive queue array, and resets
 * the read/write indices. Runs entirely under rxq->lock.
 */
void
il4965_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq)
{
	unsigned long flags;
	int i;
	spin_lock_irqsave(&rxq->lock, flags);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);
	/* Fill the rx_used queue with _all_ of the Rx buffers */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		/* In the reset function, these buffers may have been allocated
		 * to an SKB, so we need to unmap and free potential storage */
		if (rxq->pool[i].page != NULL) {
			pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
				       PAGE_SIZE << il->hw_params.rx_page_order,
				       PCI_DMA_FROMDEVICE);
			__il_free_pages(il, rxq->pool[i].page);
			rxq->pool[i].page = NULL;
		}
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
	}

	for (i = 0; i < RX_QUEUE_SIZE; i++)
		rxq->queue[i] = NULL;

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	rxq->free_count = 0;
	spin_unlock_irqrestore(&rxq->lock, flags);
}
132 | ||
/*
 * il4965_rx_init - program the device's Rx DMA engine
 *
 * Stops channel-0 Rx DMA, points the device at the RBD circular buffer
 * and status area in DRAM, then re-enables DMA with the configured
 * buffer size (4K or 8K depending on the amsdu_size_8K module param).
 * Always returns 0.
 */
int
il4965_rx_init(struct il_priv *il, struct il_rx_queue *rxq)
{
	u32 rb_size;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG;	/* 256 RBDs */
	u32 rb_timeout = 0;

	/* Rx buffer size follows the A-MSDU module parameter. */
	if (il->cfg->mod_params->amsdu_size_8K)
		rb_size = FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
	else
		rb_size = FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

	/* Stop Rx DMA */
	il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	/* Reset driver's Rx queue write idx */
	il_wr(il, FH49_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	il_wr(il, FH49_RSCSR_CHNL0_RBDCB_BASE_REG, (u32) (rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	il_wr(il, FH49_RSCSR_CHNL0_STTS_WPTR_REG, rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG,
	      FH49_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
	      FH49_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
	      FH49_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
	      rb_size |
	      (rb_timeout << FH49_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	      (rfdnlog << FH49_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	/* Set interrupt coalescing timer to default (2048 usecs) */
	il_write8(il, CSR_INT_COALESCING, IL_HOST_INT_TIMEOUT_DEF);

	return 0;
}
176 | ||
/*
 * il4965_set_pwr_vmain - select V_MAIN as the device power source
 *
 * Writes the APMG power-control register to pick V_MAIN over V_AUX.
 */
static void
il4965_set_pwr_vmain(struct il_priv *il)
{
	/*
	 * (for documentation purposes)
	 * to set power to V_AUX, do:

	 if (pci_pme_capable(il->pci_dev, PCI_D3cold))
	 il_set_bits_mask_prph(il, APMG_PS_CTRL_REG,
	 APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
	 ~APMG_PS_CTRL_MSK_PWR_SRC);
	 */

	il_set_bits_mask_prph(il, APMG_PS_CTRL_REG,
			      APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
			      ~APMG_PS_CTRL_MSK_PWR_SRC);
}
194 | ||
/*
 * il4965_hw_nic_init - one-time NIC bring-up
 *
 * Initializes APM, power source and NIC config, then allocates (or
 * resets) the Rx queue and the Tx/command queues, replenishes Rx
 * buffers, and marks the driver S_INIT.
 *
 * Returns 0 on success, -ENOMEM if the Rx queue cannot be allocated, or
 * the error from il4965_txq_ctx_alloc().
 */
int
il4965_hw_nic_init(struct il_priv *il)
{
	unsigned long flags;
	struct il_rx_queue *rxq = &il->rxq;
	int ret;

	spin_lock_irqsave(&il->lock, flags);
	il_apm_init(il);
	/* Set interrupt coalescing calibration timer to default (512 usecs) */
	il_write8(il, CSR_INT_COALESCING, IL_HOST_INT_CALIB_TIMEOUT_DEF);
	spin_unlock_irqrestore(&il->lock, flags);

	il4965_set_pwr_vmain(il);
	il4965_nic_config(il);

	/* Allocate the RX queue, or reset if it is already allocated */
	if (!rxq->bd) {
		ret = il_rx_queue_alloc(il);
		if (ret) {
			IL_ERR("Unable to initialize Rx queue\n");
			return -ENOMEM;
		}
	} else
		il4965_rx_queue_reset(il, rxq);

	/* Fill the free list and hand buffers to the device. */
	il4965_rx_replenish(il);

	il4965_rx_init(il, rxq);

	spin_lock_irqsave(&il->lock, flags);

	rxq->need_update = 1;
	il_rx_queue_update_write_ptr(il, rxq);

	spin_unlock_irqrestore(&il->lock, flags);

	/* Allocate or reset and init all Tx and Command queues */
	if (!il->txq) {
		ret = il4965_txq_ctx_alloc(il);
		if (ret)
			return ret;
	} else
		il4965_txq_ctx_reset(il);

	set_bit(S_INIT, &il->status);

	return 0;
}
244 | ||
245 | /** | |
246 | * il4965_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr | |
247 | */ | |
e7392364 SG |
248 | static inline __le32 |
249 | il4965_dma_addr2rbd_ptr(struct il_priv *il, dma_addr_t dma_addr) | |
fcb74588 | 250 | { |
e7392364 | 251 | return cpu_to_le32((u32) (dma_addr >> 8)); |
fcb74588 SG |
252 | } |
253 | ||
/**
 * il4965_rx_queue_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' idx forward to catch up with 'processed', and
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
void
il4965_rx_queue_restock(struct il_priv *il)
{
	struct il_rx_queue *rxq = &il->rxq;
	struct list_head *element;
	struct il_rx_buf *rxb;
	unsigned long flags;

	spin_lock_irqsave(&rxq->lock, flags);
	while (il_rx_queue_space(rxq) > 0 && rxq->free_count) {
		/* The overwritten rxb must be a used one */
		rxb = rxq->queue[rxq->write];
		BUG_ON(rxb && rxb->page);

		/* Get next free Rx buffer, remove from free list */
		element = rxq->rx_free.next;
		rxb = list_entry(element, struct il_rx_buf, list);
		list_del(element);

		/* Point to Rx buffer via next RBD in circular buffer */
		rxq->bd[rxq->write] =
		    il4965_dma_addr2rbd_ptr(il, rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock_irqrestore(&rxq->lock, flags);
	/* If the pre-allocated buffer pool is dropping low, schedule to
	 * refill it */
	if (rxq->free_count <= RX_LOW_WATERMARK)
		queue_work(il->workqueue, &il->rx_replenish);

	/* If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8. */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock_irqsave(&rxq->lock, flags);
		rxq->need_update = 1;
		spin_unlock_irqrestore(&rxq->lock, flags);
		il_rx_queue_update_write_ptr(il, rxq);
	}
}
306 | ||
307 | /** | |
308 | * il4965_rx_replenish - Move all used packet from rx_used to rx_free | |
309 | * | |
310 | * When moving to rx_free an SKB is allocated for the slot. | |
311 | * | |
312 | * Also restock the Rx queue via il_rx_queue_restock. | |
313 | * This is called as a scheduled work item (except for during initialization) | |
314 | */ | |
e7392364 SG |
315 | static void |
316 | il4965_rx_allocate(struct il_priv *il, gfp_t priority) | |
fcb74588 SG |
317 | { |
318 | struct il_rx_queue *rxq = &il->rxq; | |
319 | struct list_head *element; | |
320 | struct il_rx_buf *rxb; | |
321 | struct page *page; | |
322 | unsigned long flags; | |
323 | gfp_t gfp_mask = priority; | |
324 | ||
325 | while (1) { | |
326 | spin_lock_irqsave(&rxq->lock, flags); | |
327 | if (list_empty(&rxq->rx_used)) { | |
328 | spin_unlock_irqrestore(&rxq->lock, flags); | |
329 | return; | |
330 | } | |
331 | spin_unlock_irqrestore(&rxq->lock, flags); | |
332 | ||
333 | if (rxq->free_count > RX_LOW_WATERMARK) | |
334 | gfp_mask |= __GFP_NOWARN; | |
335 | ||
336 | if (il->hw_params.rx_page_order > 0) | |
337 | gfp_mask |= __GFP_COMP; | |
338 | ||
339 | /* Alloc a new receive buffer */ | |
340 | page = alloc_pages(gfp_mask, il->hw_params.rx_page_order); | |
341 | if (!page) { | |
342 | if (net_ratelimit()) | |
e7392364 SG |
343 | D_INFO("alloc_pages failed, " "order: %d\n", |
344 | il->hw_params.rx_page_order); | |
fcb74588 SG |
345 | |
346 | if (rxq->free_count <= RX_LOW_WATERMARK && | |
347 | net_ratelimit()) | |
e7392364 SG |
348 | IL_ERR("Failed to alloc_pages with %s. " |
349 | "Only %u free buffers remaining.\n", | |
350 | priority == | |
351 | GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL", | |
352 | rxq->free_count); | |
fcb74588 SG |
353 | /* We don't reschedule replenish work here -- we will |
354 | * call the restock method and if it still needs | |
355 | * more buffers it will schedule replenish */ | |
356 | return; | |
357 | } | |
358 | ||
359 | spin_lock_irqsave(&rxq->lock, flags); | |
360 | ||
361 | if (list_empty(&rxq->rx_used)) { | |
362 | spin_unlock_irqrestore(&rxq->lock, flags); | |
363 | __free_pages(page, il->hw_params.rx_page_order); | |
364 | return; | |
365 | } | |
366 | element = rxq->rx_used.next; | |
367 | rxb = list_entry(element, struct il_rx_buf, list); | |
368 | list_del(element); | |
369 | ||
370 | spin_unlock_irqrestore(&rxq->lock, flags); | |
371 | ||
372 | BUG_ON(rxb->page); | |
373 | rxb->page = page; | |
374 | /* Get physical address of the RB */ | |
e7392364 SG |
375 | rxb->page_dma = |
376 | pci_map_page(il->pci_dev, page, 0, | |
377 | PAGE_SIZE << il->hw_params.rx_page_order, | |
378 | PCI_DMA_FROMDEVICE); | |
fcb74588 SG |
379 | /* dma address must be no more than 36 bits */ |
380 | BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36)); | |
381 | /* and also 256 byte aligned! */ | |
382 | BUG_ON(rxb->page_dma & DMA_BIT_MASK(8)); | |
383 | ||
384 | spin_lock_irqsave(&rxq->lock, flags); | |
385 | ||
386 | list_add_tail(&rxb->list, &rxq->rx_free); | |
387 | rxq->free_count++; | |
388 | il->alloc_rxb_page++; | |
389 | ||
390 | spin_unlock_irqrestore(&rxq->lock, flags); | |
391 | } | |
392 | } | |
393 | ||
/**
 * il4965_rx_replenish - allocate fresh Rx buffers and restock the queue
 *
 * Process-context variant: the GFP_KERNEL allocation may sleep, so it is
 * deliberately done before taking il->lock; only the restock runs under
 * the lock. Called as a scheduled work item (and during initialization).
 */
void
il4965_rx_replenish(struct il_priv *il)
{
	unsigned long flags;

	il4965_rx_allocate(il, GFP_KERNEL);

	spin_lock_irqsave(&il->lock, flags);
	il4965_rx_queue_restock(il);
	spin_unlock_irqrestore(&il->lock, flags);
}
405 | ||
/**
 * il4965_rx_replenish_now - atomic-context Rx replenish
 *
 * Like il4965_rx_replenish() but uses GFP_ATOMIC and does not take
 * il->lock around the restock — for use from the rx handling path.
 */
void
il4965_rx_replenish_now(struct il_priv *il)
{
	il4965_rx_allocate(il, GFP_ATOMIC);

	il4965_rx_queue_restock(il);
}
413 | ||
/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
 * If an SKB has been detached, the POOL needs to have its SKB set to NULL
 * This free routine walks the list of POOL entries and if SKB is set to
 * non NULL it is unmapped and freed
 */
/* NOTE(review): the pool entries actually hold pages (rxq->pool[i].page),
 * not SKBs — the comment above predates the page-based rx path. */
void
il4965_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq)
{
	int i;
	for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
		if (rxq->pool[i].page != NULL) {
			pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
				       PAGE_SIZE << il->hw_params.rx_page_order,
				       PCI_DMA_FROMDEVICE);
			__il_free_pages(il, rxq->pool[i].page);
			rxq->pool[i].page = NULL;
		}
	}

	/* Release the RBD ring (4 bytes per descriptor) and the Rx status
	 * area that were allocated by il_rx_queue_alloc(). */
	dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
			  rxq->bd_dma);
	dma_free_coherent(&il->pci_dev->dev, sizeof(struct il_rb_status),
			  rxq->rb_stts, rxq->rb_stts_dma);
	rxq->bd = NULL;
	rxq->rb_stts = NULL;
}
440 | ||
/*
 * il4965_rxq_stop - stop channel-0 Rx DMA and wait for it to go idle
 *
 * Polls the Rx status register for up to 1000 iterations after clearing
 * the channel config.
 *
 * NOTE(review): returns 0 unconditionally — a poll timeout is only
 * logged, never propagated to the caller.
 */
int
il4965_rxq_stop(struct il_priv *il)
{
	int ret;

	/* Stop Rx DMA */
	_il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	ret = _il_poll_bit(il, FH49_MEM_RSSR_RX_STATUS_REG,
			   FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
			   FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
			   1000);
	if (ret < 0)
		IL_ERR("Can't stop Rx DMA.\n");

	return 0;
}
456 | ||
e7392364 SG |
457 | int |
458 | il4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band) | |
fcb74588 SG |
459 | { |
460 | int idx = 0; | |
461 | int band_offset = 0; | |
462 | ||
463 | /* HT rate format: mac80211 wants an MCS number, which is just LSB */ | |
464 | if (rate_n_flags & RATE_MCS_HT_MSK) { | |
465 | idx = (rate_n_flags & 0xff); | |
466 | return idx; | |
e7392364 | 467 | /* Legacy rate format, search for match in table */ |
fcb74588 SG |
468 | } else { |
469 | if (band == IEEE80211_BAND_5GHZ) | |
470 | band_offset = IL_FIRST_OFDM_RATE; | |
471 | for (idx = band_offset; idx < RATE_COUNT_LEGACY; idx++) | |
472 | if (il_rates[idx].plcp == (rate_n_flags & 0xFF)) | |
473 | return idx - band_offset; | |
474 | } | |
475 | ||
476 | return -1; | |
477 | } | |
478 | ||
/*
 * il4965_calc_rssi - compute received signal strength in dBm
 *
 * Extracts the AGC value and the per-antenna RSSI readings from the
 * non-configuration PHY data, takes the maximum RSSI across the valid
 * antennas, and converts to dBm via the 4965 offset constant.
 */
static int
il4965_calc_rssi(struct il_priv *il, struct il_rx_phy_res *rx_resp)
{
	/* data from PHY/DSP regarding signal strength, etc.,
	 * contents are always there, not configurable by host. */
	struct il4965_rx_non_cfg_phy *ncphy =
	    (struct il4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
	u32 agc =
	    (le16_to_cpu(ncphy->agc_info) & IL49_AGC_DB_MASK) >>
	    IL49_AGC_DB_POS;

	u32 valid_antennae =
	    (le16_to_cpu(rx_resp->phy_flags) & IL49_RX_PHY_FLAGS_ANTENNAE_MASK)
	    >> IL49_RX_PHY_FLAGS_ANTENNAE_OFFSET;
	u8 max_rssi = 0;
	u32 i;

	/* Find max rssi among 3 possible receivers.
	 * These values are measured by the digital signal processor (DSP).
	 * They should stay fairly constant even as the signal strength varies,
	 * if the radio's automatic gain control (AGC) is working right.
	 * AGC value (see below) will provide the "interesting" info. */
	for (i = 0; i < 3; i++)
		if (valid_antennae & (1 << i))
			/* rssi_info holds one reading every other byte,
			 * hence the i << 1 indexing. */
			max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);

	D_STATS("Rssi In A %d B %d C %d Max %d AGC dB %d\n",
		ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
		max_rssi, agc);

	/* dBm = max_rssi dB - agc dB - constant.
	 * Higher AGC (higher radio gain) means lower signal. */
	return max_rssi - agc - IL4965_RSSI_OFFSET;
}
513 | ||
/*
 * il4965_translate_rx_status - translate MPDU decrypt status to legacy format
 *
 * Builds decrypt_out from the hardware's decrypt_in word: carries over
 * the security-type bits, then adds MIC/ICV/TTAK verdict flags per
 * cipher. Returns early (no verdict flags) when the packet was not
 * encrypted, used an unknown algorithm, or HW decryption did not run.
 */
static u32
il4965_translate_rx_status(struct il_priv *il, u32 decrypt_in)
{
	u32 decrypt_out = 0;

	if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
	    RX_RES_STATUS_STATION_FOUND)
		decrypt_out |=
		    (RX_RES_STATUS_STATION_FOUND |
		     RX_RES_STATUS_NO_STATION_INFO_MISMATCH);

	decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);

	/* packet was not encrypted */
	if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
	    RX_RES_STATUS_SEC_TYPE_NONE)
		return decrypt_out;

	/* packet was encrypted with unknown alg */
	if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
	    RX_RES_STATUS_SEC_TYPE_ERR)
		return decrypt_out;

	/* decryption was not done in HW */
	if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
	    RX_MPDU_RES_STATUS_DEC_DONE_MSK)
		return decrypt_out;

	switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {

	case RX_RES_STATUS_SEC_TYPE_CCMP:
		/* alg is CCM: check MIC only */
		if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
			/* Bad MIC */
			decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
		else
			decrypt_out |= RX_RES_STATUS_DECRYPT_OK;

		break;

	case RX_RES_STATUS_SEC_TYPE_TKIP:
		if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
			/* Bad TTAK */
			decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
			break;
		}
		/* fall through if TTAK OK */
	default:
		/* TKIP with good TTAK, and WEP/other ciphers: ICV decides. */
		if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
			decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
		else
			decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
		break;
	}

	D_RX("decrypt_in:0x%x decrypt_out = 0x%x\n", decrypt_in, decrypt_out);

	return decrypt_out;
}
573 | ||
/*
 * il4965_pass_packet_to_mac80211 - hand a received frame up to mac80211
 *
 * Drops the frame if the interface is closed or if HW crypto marked the
 * decryption bad. Otherwise builds a small skb whose payload is attached
 * as a page fragment from the Rx buffer (zero copy) and passes it to
 * ieee80211_rx(). On success, page ownership moves to the skb, so
 * rxb->page is cleared and the page accounting decremented.
 */
static void
il4965_pass_packet_to_mac80211(struct il_priv *il, struct ieee80211_hdr *hdr,
			       u16 len, u32 ampdu_status, struct il_rx_buf *rxb,
			       struct ieee80211_rx_status *stats)
{
	struct sk_buff *skb;
	__le16 fc = hdr->frame_control;

	/* We only process data packets if the interface is open */
	if (unlikely(!il->is_open)) {
		D_DROP("Dropping packet while interface is not open.\n");
		return;
	}

	/* In case of HW accelerated crypto and bad decryption, drop */
	if (!il->cfg->mod_params->sw_crypto &&
	    il_set_decrypted_flag(il, hdr, ampdu_status, stats))
		return;

	/* 128 bytes covers the skb head; the payload rides as a frag. */
	skb = dev_alloc_skb(128);
	if (!skb) {
		IL_ERR("dev_alloc_skb failed\n");
		return;
	}

	skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);

	il_update_stats(il, false, fc, len);
	memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));

	ieee80211_rx(il->hw, skb);
	il->alloc_rxb_page--;
	rxb->page = NULL;
}
608 | ||
/* Called for N_RX (legacy ABG frames), or
 * N_RX_MPDU (HT high-throughput N frames). */
/*
 * Validates the PHY data and frame status, fills in an
 * ieee80211_rx_status (band, frequency, rate, signal, antenna, flags),
 * and forwards the frame via il4965_pass_packet_to_mac80211().
 */
void
il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct ieee80211_hdr *header;
	struct ieee80211_rx_status rx_status;
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_rx_phy_res *phy_res;
	__le32 rx_pkt_status;
	struct il_rx_mpdu_res_start *amsdu;
	u32 len;
	u32 ampdu_status;
	u32 rate_n_flags;

	/**
	 * N_RX and N_RX_MPDU are handled differently.
	 * N_RX: physical layer info is in this buffer
	 * N_RX_MPDU: physical layer info was sent in separate
	 * command and cached in il->last_phy_res
	 *
	 * Here we set up local variables depending on which command is
	 * received.
	 */
	if (pkt->hdr.cmd == N_RX) {
		phy_res = (struct il_rx_phy_res *)pkt->u.raw;
		/* Header follows the PHY result and the variable-length
		 * configuration PHY data. */
		header =
		    (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res) +
					     phy_res->cfg_phy_cnt);

		len = le16_to_cpu(phy_res->byte_count);
		/* Status word sits immediately after the frame payload. */
		rx_pkt_status =
		    *(__le32 *) (pkt->u.raw + sizeof(*phy_res) +
				 phy_res->cfg_phy_cnt + len);
		ampdu_status = le32_to_cpu(rx_pkt_status);
	} else {
		if (!il->_4965.last_phy_res_valid) {
			IL_ERR("MPDU frame without cached PHY data\n");
			return;
		}
		phy_res = &il->_4965.last_phy_res;
		amsdu = (struct il_rx_mpdu_res_start *)pkt->u.raw;
		header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
		len = le16_to_cpu(amsdu->byte_count);
		rx_pkt_status = *(__le32 *) (pkt->u.raw + sizeof(*amsdu) + len);
		ampdu_status =
		    il4965_translate_rx_status(il, le32_to_cpu(rx_pkt_status));
	}

	if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
		/* NOTE(review): "%d/n" in this format string looks like a
		 * typo for "%d\n". */
		D_DROP("dsp size out of range [0,20]: %d/n",
		       phy_res->cfg_phy_cnt);
		return;
	}

	/* NOTE(review): rx_pkt_status is __le32 but is masked with
	 * host-order constants here — presumably intentional/benign on
	 * this hardware; confirm against the flag definitions. */
	if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
	    !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
		D_RX("Bad CRC or FIFO: 0x%08X.\n", le32_to_cpu(rx_pkt_status));
		return;
	}

	/* This will be used in several places later */
	rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);

	/* rx_status carries information about the packet to mac80211 */
	rx_status.mactime = le64_to_cpu(phy_res->timestamp);
	rx_status.band =
	    (phy_res->
	     phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? IEEE80211_BAND_2GHZ :
	    IEEE80211_BAND_5GHZ;
	rx_status.freq =
	    ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
					   rx_status.band);
	rx_status.rate_idx =
	    il4965_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
	rx_status.flag = 0;

	/* TSF isn't reliable. In order to allow smooth user experience,
	 * this W/A doesn't propagate it to the mac80211 */
	/*rx_status.flag |= RX_FLAG_MACTIME_MPDU; */

	il->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);

	/* Find max signal strength (dBm) among 3 antenna/receiver chains */
	rx_status.signal = il4965_calc_rssi(il, phy_res);

	D_STATS("Rssi %d, TSF %llu\n", rx_status.signal,
		(unsigned long long)rx_status.mactime);

	/*
	 * "antenna number"
	 *
	 * It seems that the antenna field in the phy flags value
	 * is actually a bit field. This is undefined by radiotap,
	 * it wants an actual antenna number but I always get "7"
	 * for most legacy frames I receive indicating that the
	 * same frame was received on all three RX chains.
	 *
	 * I think this field should be removed in favor of a
	 * new 802.11n radiotap field "RX chains" that is defined
	 * as a bitmask.
	 */
	rx_status.antenna =
	    (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK) >>
	    RX_RES_PHY_FLAGS_ANTENNA_POS;

	/* set the preamble flag if appropriate */
	if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
		rx_status.flag |= RX_FLAG_SHORTPRE;

	/* Set up the HT phy flags */
	if (rate_n_flags & RATE_MCS_HT_MSK)
		rx_status.flag |= RX_FLAG_HT;
	if (rate_n_flags & RATE_MCS_HT40_MSK)
		rx_status.flag |= RX_FLAG_40MHZ;
	if (rate_n_flags & RATE_MCS_SGI_MSK)
		rx_status.flag |= RX_FLAG_SHORT_GI;

	il4965_pass_packet_to_mac80211(il, header, len, ampdu_status, rxb,
				       &rx_status);
}
730 | ||
4d69c752 | 731 | /* Cache phy data (Rx signal strength, etc) for HT frame (N_RX_PHY). |
6e9848b4 | 732 | * This will be used later in il_hdl_rx() for N_RX_MPDU. */ |
e7392364 SG |
733 | void |
734 | il4965_hdl_rx_phy(struct il_priv *il, struct il_rx_buf *rxb) | |
fcb74588 SG |
735 | { |
736 | struct il_rx_pkt *pkt = rxb_addr(rxb); | |
737 | il->_4965.last_phy_res_valid = true; | |
738 | memcpy(&il->_4965.last_phy_res, pkt->u.raw, | |
739 | sizeof(struct il_rx_phy_res)); | |
740 | } | |
741 | ||
/*
 * il4965_get_channels_for_scan - build the scan command's channel list
 *
 * Walks il->scan_request->channels, filters to @band, and fills one
 * il_scan_channel entry per usable channel with scan type
 * (active/passive), dwell times, probe mask, and default Tx power.
 *
 * Returns the number of entries written to @scan_ch.
 */
static int
il4965_get_channels_for_scan(struct il_priv *il, struct ieee80211_vif *vif,
			     enum ieee80211_band band, u8 is_active,
			     u8 n_probes, struct il_scan_channel *scan_ch)
{
	struct ieee80211_channel *chan;
	const struct ieee80211_supported_band *sband;
	const struct il_channel_info *ch_info;
	u16 passive_dwell = 0;
	u16 active_dwell = 0;
	int added, i;
	u16 channel;

	sband = il_get_hw_mode(il, band);
	if (!sband)
		return 0;

	active_dwell = il_get_active_dwell_time(il, band, n_probes);
	passive_dwell = il_get_passive_dwell_time(il, band, vif);

	/* Passive dwell must exceed active dwell. */
	if (passive_dwell <= active_dwell)
		passive_dwell = active_dwell + 1;

	for (i = 0, added = 0; i < il->scan_request->n_channels; i++) {
		chan = il->scan_request->channels[i];

		if (chan->band != band)
			continue;

		channel = chan->hw_value;
		scan_ch->channel = cpu_to_le16(channel);

		ch_info = il_get_channel_info(il, band, channel);
		if (!il_is_channel_valid(ch_info)) {
			D_SCAN("Channel %d is INVALID for this band.\n",
			       channel);
			continue;
		}

		/* Passive scan when not actively scanning, or when the
		 * channel/regulatory rules require it. */
		if (!is_active || il_is_channel_passive(ch_info) ||
		    (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
			scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
		else
			scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;

		if (n_probes)
			scan_ch->type |= IL_SCAN_PROBE_MASK(n_probes);

		scan_ch->active_dwell = cpu_to_le16(active_dwell);
		scan_ch->passive_dwell = cpu_to_le16(passive_dwell);

		/* Set txpower levels to defaults */
		scan_ch->dsp_atten = 110;

		/* NOTE: if we were doing 6Mb OFDM for scans we'd use
		 * power level:
		 * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
		 */
		if (band == IEEE80211_BAND_5GHZ)
			scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
		else
			scan_ch->tx_gain = ((1 << 5) | (5 << 3));

		D_SCAN("Scanning ch=%d prob=0x%X [%s %d]\n", channel,
		       le32_to_cpu(scan_ch->type),
		       (scan_ch->
			type & SCAN_CHANNEL_TYPE_ACTIVE) ? "ACTIVE" : "PASSIVE",
		       (scan_ch->
			type & SCAN_CHANNEL_TYPE_ACTIVE) ? active_dwell :
		       passive_dwell);

		scan_ch++;
		added++;
	}

	D_SCAN("total channels to scan %d\n", added);
	return added;
}
820 | ||
a0c1ef3b SG |
821 | static void |
822 | il4965_toggle_tx_ant(struct il_priv *il, u8 *ant, u8 valid) | |
823 | { | |
824 | int i; | |
825 | u8 ind = *ant; | |
826 | ||
827 | for (i = 0; i < RATE_ANT_NUM - 1; i++) { | |
828 | ind = (ind + 1) < RATE_ANT_NUM ? ind + 1 : 0; | |
829 | if (valid & BIT(ind)) { | |
830 | *ant = ind; | |
831 | return; | |
832 | } | |
833 | } | |
834 | } | |
835 | ||
e7392364 SG |
/*
 * il4965_request_scan - build and send a C_SCAN host command to the uCode
 *
 * Allocates (once, cached in il->scan_cmd) a scan command buffer, fills in
 * dwell/quiet times, direct-probe SSIDs, TX rate/antenna, RX chain config
 * and the per-channel list, then sends it synchronously.
 *
 * Must be called with il->mutex held.  Returns 0 on success, -ENOMEM on
 * allocation failure, -EIO on an invalid band or empty channel list, or
 * the error from il_send_cmd_sync().
 */
int
il4965_request_scan(struct il_priv *il, struct ieee80211_vif *vif)
{
	struct il_host_cmd cmd = {
		.id = C_SCAN,
		.len = sizeof(struct il_scan_cmd),
		.flags = CMD_SIZE_HUGE,
	};
	struct il_scan_cmd *scan;
	u32 rate_flags = 0;
	u16 cmd_len;
	u16 rx_chain = 0;
	enum ieee80211_band band;
	u8 n_probes = 0;
	u8 rx_ant = il->hw_params.valid_rx_ant;
	u8 rate;
	bool is_active = false;
	int chan_mod;
	u8 active_chains;
	u8 scan_tx_antennas = il->hw_params.valid_tx_ant;
	int ret;

	lockdep_assert_held(&il->mutex);

	/* Lazily allocate the (huge) scan command buffer; it is reused for
	 * every subsequent scan and freed elsewhere on teardown. */
	if (!il->scan_cmd) {
		il->scan_cmd =
		    kmalloc(sizeof(struct il_scan_cmd) + IL_MAX_SCAN_SIZE,
			    GFP_KERNEL);
		if (!il->scan_cmd) {
			D_SCAN("fail to allocate memory for scan\n");
			return -ENOMEM;
		}
	}
	scan = il->scan_cmd;
	memset(scan, 0, sizeof(struct il_scan_cmd) + IL_MAX_SCAN_SIZE);

	scan->quiet_plcp_th = IL_PLCP_QUIET_THRESH;
	scan->quiet_time = IL_ACTIVE_QUIET_TIME;

	if (il_is_any_associated(il)) {
		u16 interval;
		u32 extra;
		u32 suspend_time = 100;
		u32 scan_suspend_time = 100;

		D_INFO("Scanning while associated...\n");
		interval = vif->bss_conf.beacon_int;

		scan->suspend_time = 0;
		scan->max_out_time = cpu_to_le32(200 * 1024);
		if (!interval)
			interval = suspend_time;

		/* Encode suspend time as beacons (<<22) plus remainder in
		 * usec-like 1024 units, matching the uCode's expected
		 * packed format. */
		extra = (suspend_time / interval) << 22;
		scan_suspend_time =
		    (extra | ((suspend_time % interval) * 1024));
		scan->suspend_time = cpu_to_le32(scan_suspend_time);
		D_SCAN("suspend_time 0x%X beacon interval %d\n",
		       scan_suspend_time, interval);
	}

	/* Copy requested SSIDs into direct-scan slots; each one adds a
	 * directed probe request (n_probes). */
	if (il->scan_request->n_ssids) {
		int i, p = 0;
		D_SCAN("Kicking off active scan\n");
		for (i = 0; i < il->scan_request->n_ssids; i++) {
			/* always does wildcard anyway */
			if (!il->scan_request->ssids[i].ssid_len)
				continue;
			scan->direct_scan[p].id = WLAN_EID_SSID;
			scan->direct_scan[p].len =
			    il->scan_request->ssids[i].ssid_len;
			memcpy(scan->direct_scan[p].ssid,
			       il->scan_request->ssids[i].ssid,
			       il->scan_request->ssids[i].ssid_len);
			n_probes++;
			p++;
		}
		is_active = true;
	} else
		D_SCAN("Start passive scan.\n");

	scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
	scan->tx_cmd.sta_id = il->hw_params.bcast_id;
	scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;

	/* Pick the probe TX rate per band: CCK 1M on 2.4 GHz (unless in
	 * pure-40MHz channel mode), OFDM 6M on 5 GHz. */
	switch (il->scan_band) {
	case IEEE80211_BAND_2GHZ:
		scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
		chan_mod =
		    le32_to_cpu(il->active.flags & RXON_FLG_CHANNEL_MODE_MSK) >>
		    RXON_FLG_CHANNEL_MODE_POS;
		if (chan_mod == CHANNEL_MODE_PURE_40) {
			rate = RATE_6M_PLCP;
		} else {
			rate = RATE_1M_PLCP;
			rate_flags = RATE_MCS_CCK_MSK;
		}
		break;
	case IEEE80211_BAND_5GHZ:
		rate = RATE_6M_PLCP;
		break;
	default:
		IL_WARN("Invalid scan band\n");
		return -EIO;
	}

	/*
	 * If active scanning is requested but a certain channel is
	 * marked passive, we can do active scanning if we detect
	 * transmissions.
	 *
	 * There is an issue with some firmware versions that triggers
	 * a sysassert on a "good CRC threshold" of zero (== disabled),
	 * on a radar channel even though this means that we should NOT
	 * send probes.
	 *
	 * The "good CRC threshold" is the number of frames that we
	 * need to receive during our dwell time on a channel before
	 * sending out probes -- setting this to a huge value will
	 * mean we never reach it, but at the same time work around
	 * the aforementioned issue. Thus use IL_GOOD_CRC_TH_NEVER
	 * here instead of IL_GOOD_CRC_TH_DISABLED.
	 */
	scan->good_CRC_th =
	    is_active ? IL_GOOD_CRC_TH_DEFAULT : IL_GOOD_CRC_TH_NEVER;

	band = il->scan_band;

	if (il->cfg->scan_rx_antennas[band])
		rx_ant = il->cfg->scan_rx_antennas[band];

	/* Rotate the scan TX antenna each time to spread wear/use. */
	il4965_toggle_tx_ant(il, &il->scan_tx_ant[band], scan_tx_antennas);
	rate_flags |= BIT(il->scan_tx_ant[band]) << RATE_MCS_ANT_POS;
	scan->tx_cmd.rate_n_flags = cpu_to_le32(rate | rate_flags);

	/* In power save mode use one chain, otherwise use all chains */
	if (test_bit(S_POWER_PMI, &il->status)) {
		/* rx_ant has been set to all valid chains previously */
		active_chains =
		    rx_ant & ((u8) (il->chain_noise_data.active_chains));
		if (!active_chains)
			active_chains = rx_ant;

		D_SCAN("chain_noise_data.active_chains: %u\n",
		       il->chain_noise_data.active_chains);

		rx_ant = il4965_first_antenna(active_chains);
	}

	/* MIMO is not used here, but value is required */
	rx_chain |= il->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
	rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
	rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
	rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
	scan->rx_chain = cpu_to_le16(rx_chain);

	/* Probe request template goes into scan->data; channels follow it. */
	cmd_len =
	    il_fill_probe_req(il, (struct ieee80211_mgmt *)scan->data,
			      vif->addr, il->scan_request->ie,
			      il->scan_request->ie_len,
			      IL_MAX_SCAN_SIZE - sizeof(*scan));
	scan->tx_cmd.len = cpu_to_le16(cmd_len);

	scan->filter_flags |=
	    (RXON_FILTER_ACCEPT_GRP_MSK | RXON_FILTER_BCON_AWARE_MSK);

	scan->channel_count =
	    il4965_get_channels_for_scan(il, vif, band, is_active, n_probes,
					 (void *)&scan->data[cmd_len]);
	if (scan->channel_count == 0) {
		D_SCAN("channel count %d\n", scan->channel_count);
		return -EIO;
	}

	/* Total command length = header + probe template + channel array. */
	cmd.len +=
	    le16_to_cpu(scan->tx_cmd.len) +
	    scan->channel_count * sizeof(struct il_scan_channel);
	cmd.data = scan;
	scan->len = cpu_to_le16(cmd.len);

	/* Mark HW scan in progress; roll back if the command fails. */
	set_bit(S_SCAN_HW, &il->status);

	ret = il_send_cmd_sync(il, &cmd);
	if (ret)
		clear_bit(S_SCAN_HW, &il->status);

	return ret;
}
1024 | ||
e7392364 SG |
1025 | int |
1026 | il4965_manage_ibss_station(struct il_priv *il, struct ieee80211_vif *vif, | |
1027 | bool add) | |
fcb74588 SG |
1028 | { |
1029 | struct il_vif_priv *vif_priv = (void *)vif->drv_priv; | |
1030 | ||
1031 | if (add) | |
83007196 | 1032 | return il4965_add_bssid_station(il, vif->bss_conf.bssid, |
fcb74588 SG |
1033 | &vif_priv->ibss_bssid_sta_id); |
1034 | return il_remove_station(il, vif_priv->ibss_bssid_sta_id, | |
e7392364 | 1035 | vif->bss_conf.bssid); |
fcb74588 SG |
1036 | } |
1037 | ||
e7392364 SG |
1038 | void |
1039 | il4965_free_tfds_in_queue(struct il_priv *il, int sta_id, int tid, int freed) | |
fcb74588 SG |
1040 | { |
1041 | lockdep_assert_held(&il->sta_lock); | |
1042 | ||
1043 | if (il->stations[sta_id].tid[tid].tfds_in_queue >= freed) | |
1044 | il->stations[sta_id].tid[tid].tfds_in_queue -= freed; | |
1045 | else { | |
1046 | D_TX("free more than tfds_in_queue (%u:%d)\n", | |
e7392364 | 1047 | il->stations[sta_id].tid[tid].tfds_in_queue, freed); |
fcb74588 SG |
1048 | il->stations[sta_id].tid[tid].tfds_in_queue = 0; |
1049 | } | |
1050 | } | |
1051 | ||
/* Bitmask covering TX queues 0-19 (20 bits set). */
#define IL_TX_QUEUE_MSK 0xfffff
1053 | ||
e7392364 SG |
1054 | static bool |
1055 | il4965_is_single_rx_stream(struct il_priv *il) | |
fcb74588 SG |
1056 | { |
1057 | return il->current_ht_config.smps == IEEE80211_SMPS_STATIC || | |
e7392364 | 1058 | il->current_ht_config.single_chain_sufficient; |
fcb74588 SG |
1059 | } |
1060 | ||
/* RX chain counts used by the chain-selection logic below: how many chains
 * to enable while active and while idle, for single- vs multi-stream. */
#define IL_NUM_RX_CHAINS_MULTIPLE 3
#define IL_NUM_RX_CHAINS_SINGLE 2
#define IL_NUM_IDLE_CHAINS_DUAL 2
#define IL_NUM_IDLE_CHAINS_SINGLE 1
1065 | ||
1066 | /* | |
1067 | * Determine how many receiver/antenna chains to use. | |
1068 | * | |
1069 | * More provides better reception via diversity. Fewer saves power | |
1070 | * at the expense of throughput, but only when not in powersave to | |
1071 | * start with. | |
1072 | * | |
1073 | * MIMO (dual stream) requires at least 2, but works better with 3. | |
1074 | * This does not determine *which* chains to use, just how many. | |
1075 | */ | |
e7392364 SG |
1076 | static int |
1077 | il4965_get_active_rx_chain_count(struct il_priv *il) | |
fcb74588 SG |
1078 | { |
1079 | /* # of Rx chains to use when expecting MIMO. */ | |
1080 | if (il4965_is_single_rx_stream(il)) | |
1081 | return IL_NUM_RX_CHAINS_SINGLE; | |
1082 | else | |
1083 | return IL_NUM_RX_CHAINS_MULTIPLE; | |
1084 | } | |
1085 | ||
1086 | /* | |
1087 | * When we are in power saving mode, unless device support spatial | |
1088 | * multiplexing power save, use the active count for rx chain count. | |
1089 | */ | |
1090 | static int | |
1091 | il4965_get_idle_rx_chain_count(struct il_priv *il, int active_cnt) | |
1092 | { | |
1093 | /* # Rx chains when idling, depending on SMPS mode */ | |
1094 | switch (il->current_ht_config.smps) { | |
1095 | case IEEE80211_SMPS_STATIC: | |
1096 | case IEEE80211_SMPS_DYNAMIC: | |
1097 | return IL_NUM_IDLE_CHAINS_SINGLE; | |
1098 | case IEEE80211_SMPS_OFF: | |
1099 | return active_cnt; | |
1100 | default: | |
e7392364 | 1101 | WARN(1, "invalid SMPS mode %d", il->current_ht_config.smps); |
fcb74588 SG |
1102 | return active_cnt; |
1103 | } | |
1104 | } | |
1105 | ||
1106 | /* up to 4 chains */ | |
e7392364 SG |
1107 | static u8 |
1108 | il4965_count_chain_bitmap(u32 chain_bitmap) | |
fcb74588 SG |
1109 | { |
1110 | u8 res; | |
1111 | res = (chain_bitmap & BIT(0)) >> 0; | |
1112 | res += (chain_bitmap & BIT(1)) >> 1; | |
1113 | res += (chain_bitmap & BIT(2)) >> 2; | |
1114 | res += (chain_bitmap & BIT(3)) >> 3; | |
1115 | return res; | |
1116 | } | |
1117 | ||
1118 | /** | |
1119 | * il4965_set_rxon_chain - Set up Rx chain usage in "staging" RXON image | |
1120 | * | |
1121 | * Selects how many and which Rx receivers/antennas/chains to use. | |
1122 | * This should not be used for scan command ... it puts data in wrong place. | |
1123 | */ | |
e7392364 | 1124 | void |
83007196 | 1125 | il4965_set_rxon_chain(struct il_priv *il) |
fcb74588 SG |
1126 | { |
1127 | bool is_single = il4965_is_single_rx_stream(il); | |
a6766ccd | 1128 | bool is_cam = !test_bit(S_POWER_PMI, &il->status); |
fcb74588 SG |
1129 | u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt; |
1130 | u32 active_chains; | |
1131 | u16 rx_chain; | |
1132 | ||
1133 | /* Tell uCode which antennas are actually connected. | |
1134 | * Before first association, we assume all antennas are connected. | |
1135 | * Just after first association, il4965_chain_noise_calibration() | |
1136 | * checks which antennas actually *are* connected. */ | |
1137 | if (il->chain_noise_data.active_chains) | |
1138 | active_chains = il->chain_noise_data.active_chains; | |
1139 | else | |
1140 | active_chains = il->hw_params.valid_rx_ant; | |
1141 | ||
1142 | rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS; | |
1143 | ||
1144 | /* How many receivers should we use? */ | |
1145 | active_rx_cnt = il4965_get_active_rx_chain_count(il); | |
1146 | idle_rx_cnt = il4965_get_idle_rx_chain_count(il, active_rx_cnt); | |
1147 | ||
fcb74588 SG |
1148 | /* correct rx chain count according hw settings |
1149 | * and chain noise calibration | |
1150 | */ | |
1151 | valid_rx_cnt = il4965_count_chain_bitmap(active_chains); | |
1152 | if (valid_rx_cnt < active_rx_cnt) | |
1153 | active_rx_cnt = valid_rx_cnt; | |
1154 | ||
1155 | if (valid_rx_cnt < idle_rx_cnt) | |
1156 | idle_rx_cnt = valid_rx_cnt; | |
1157 | ||
1158 | rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS; | |
e7392364 | 1159 | rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS; |
fcb74588 | 1160 | |
c8b03958 | 1161 | il->staging.rx_chain = cpu_to_le16(rx_chain); |
fcb74588 SG |
1162 | |
1163 | if (!is_single && active_rx_cnt >= IL_NUM_RX_CHAINS_SINGLE && is_cam) | |
c8b03958 | 1164 | il->staging.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK; |
fcb74588 | 1165 | else |
c8b03958 | 1166 | il->staging.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK; |
fcb74588 | 1167 | |
c8b03958 | 1168 | D_ASSOC("rx_chain=0x%X active=%d idle=%d\n", il->staging.rx_chain, |
e7392364 | 1169 | active_rx_cnt, idle_rx_cnt); |
fcb74588 SG |
1170 | |
1171 | WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 || | |
1172 | active_rx_cnt < idle_rx_cnt); | |
1173 | } | |
1174 | ||
e7392364 SG |
/*
 * il4965_get_fh_string - map an FH49 register address to its symbolic name
 *
 * Used by il4965_dump_fh() for human-readable register dumps.  IL_CMD is
 * presumably a case-that-returns-the-stringified-symbol macro (defined
 * elsewhere — confirm in the common header).  Unknown values yield
 * "UNKNOWN".
 */
static const char *
il4965_get_fh_string(int cmd)
{
	switch (cmd) {
		IL_CMD(FH49_RSCSR_CHNL0_STTS_WPTR_REG);
		IL_CMD(FH49_RSCSR_CHNL0_RBDCB_BASE_REG);
		IL_CMD(FH49_RSCSR_CHNL0_WPTR);
		IL_CMD(FH49_MEM_RCSR_CHNL0_CONFIG_REG);
		IL_CMD(FH49_MEM_RSSR_SHARED_CTRL_REG);
		IL_CMD(FH49_MEM_RSSR_RX_STATUS_REG);
		IL_CMD(FH49_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
		IL_CMD(FH49_TSSR_TX_STATUS_REG);
		IL_CMD(FH49_TSSR_TX_ERROR_REG);
	default:
		return "UNKNOWN";
	}
}
1192 | ||
e7392364 SG |
/*
 * il4965_dump_fh - dump Flow Handler (FH49) register values
 *
 * With @display true (and CONFIG_IWLEGACY_DEBUG): allocates a buffer into
 * *buf, writes a formatted dump, and returns the number of bytes written;
 * the caller owns *buf and must kfree() it.  Otherwise the values are
 * logged via IL_ERR and 0 is returned.  Returns -ENOMEM if the buffer
 * allocation fails.
 */
int
il4965_dump_fh(struct il_priv *il, char **buf, bool display)
{
	int i;
#ifdef CONFIG_IWLEGACY_DEBUG
	int pos = 0;
	size_t bufsz = 0;
#endif
	static const u32 fh_tbl[] = {
		FH49_RSCSR_CHNL0_STTS_WPTR_REG,
		FH49_RSCSR_CHNL0_RBDCB_BASE_REG,
		FH49_RSCSR_CHNL0_WPTR,
		FH49_MEM_RCSR_CHNL0_CONFIG_REG,
		FH49_MEM_RSSR_SHARED_CTRL_REG,
		FH49_MEM_RSSR_RX_STATUS_REG,
		FH49_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
		FH49_TSSR_TX_STATUS_REG,
		FH49_TSSR_TX_ERROR_REG
	};
#ifdef CONFIG_IWLEGACY_DEBUG
	if (display) {
		/* ~48 bytes per register line plus header slack. */
		bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
		*buf = kmalloc(bufsz, GFP_KERNEL);
		if (!*buf)
			return -ENOMEM;
		pos +=
		    scnprintf(*buf + pos, bufsz - pos, "FH register values:\n");
		for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
			pos +=
			    scnprintf(*buf + pos, bufsz - pos,
				      " %34s: 0X%08x\n",
				      il4965_get_fh_string(fh_tbl[i]),
				      il_rd(il, fh_tbl[i]));
		}
		return pos;
	}
#endif
	IL_ERR("FH register values:\n");
	for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
		IL_ERR(" %34s: 0X%08x\n", il4965_get_fh_string(fh_tbl[i]),
		       il_rd(il, fh_tbl[i]));
	}
	return 0;
}
a1751b22 | 1237 | |
e7392364 SG |
/*
 * il4965_hdl_missed_beacon - handle a missed-beacon notification from uCode
 *
 * If the number of consecutively missed beacons exceeds the configured
 * threshold, re-initialize the sensitivity calibration — unless a scan is
 * in progress (scanning legitimately misses beacons).
 */
void
il4965_hdl_missed_beacon(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_missed_beacon_notif *missed_beacon;

	missed_beacon = &pkt->u.missed_beacon;
	if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
	    il->missed_beacon_threshold) {
		D_CALIB("missed bcn cnsq %d totl %d rcd %d expctd %d\n",
			le32_to_cpu(missed_beacon->consecutive_missed_beacons),
			le32_to_cpu(missed_beacon->total_missed_becons),
			le32_to_cpu(missed_beacon->num_recvd_beacons),
			le32_to_cpu(missed_beacon->num_expected_beacons));
		if (!test_bit(S_SCANNING, &il->status))
			il4965_init_sensitivity(il);
	}
}
1256 | ||
1257 | /* Calculate noise level, based on measurements during network silence just | |
1258 | * before arriving beacon. This measurement can be done only if we know | |
1259 | * exactly when to expect beacons, therefore only when we're associated. */ | |
e7392364 SG |
1260 | static void |
1261 | il4965_rx_calc_noise(struct il_priv *il) | |
a1751b22 SG |
1262 | { |
1263 | struct stats_rx_non_phy *rx_info; | |
1264 | int num_active_rx = 0; | |
1265 | int total_silence = 0; | |
1266 | int bcn_silence_a, bcn_silence_b, bcn_silence_c; | |
1267 | int last_rx_noise; | |
1268 | ||
1269 | rx_info = &(il->_4965.stats.rx.general); | |
1270 | bcn_silence_a = | |
e7392364 | 1271 | le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER; |
a1751b22 | 1272 | bcn_silence_b = |
e7392364 | 1273 | le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER; |
a1751b22 | 1274 | bcn_silence_c = |
e7392364 | 1275 | le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER; |
a1751b22 SG |
1276 | |
1277 | if (bcn_silence_a) { | |
1278 | total_silence += bcn_silence_a; | |
1279 | num_active_rx++; | |
1280 | } | |
1281 | if (bcn_silence_b) { | |
1282 | total_silence += bcn_silence_b; | |
1283 | num_active_rx++; | |
1284 | } | |
1285 | if (bcn_silence_c) { | |
1286 | total_silence += bcn_silence_c; | |
1287 | num_active_rx++; | |
1288 | } | |
1289 | ||
1290 | /* Average among active antennas */ | |
1291 | if (num_active_rx) | |
1292 | last_rx_noise = (total_silence / num_active_rx) - 107; | |
1293 | else | |
1294 | last_rx_noise = IL_NOISE_MEAS_NOT_AVAILABLE; | |
1295 | ||
e7392364 SG |
1296 | D_CALIB("inband silence a %u, b %u, c %u, dBm %d\n", bcn_silence_a, |
1297 | bcn_silence_b, bcn_silence_c, last_rx_noise); | |
a1751b22 SG |
1298 | } |
1299 | ||
1300 | #ifdef CONFIG_IWLEGACY_DEBUGFS | |
1301 | /* | |
1302 | * based on the assumption of all stats counter are in DWORD | |
1303 | * FIXME: This function is for debugging, do not deal with | |
1304 | * the case of counters roll-over. | |
1305 | */ | |
e7392364 SG |
1306 | static void |
1307 | il4965_accumulative_stats(struct il_priv *il, __le32 * stats) | |
a1751b22 SG |
1308 | { |
1309 | int i, size; | |
1310 | __le32 *prev_stats; | |
1311 | u32 *accum_stats; | |
1312 | u32 *delta, *max_delta; | |
1313 | struct stats_general_common *general, *accum_general; | |
1314 | struct stats_tx *tx, *accum_tx; | |
1315 | ||
1722f8e1 SG |
1316 | prev_stats = (__le32 *) &il->_4965.stats; |
1317 | accum_stats = (u32 *) &il->_4965.accum_stats; | |
a1751b22 SG |
1318 | size = sizeof(struct il_notif_stats); |
1319 | general = &il->_4965.stats.general.common; | |
1320 | accum_general = &il->_4965.accum_stats.general.common; | |
1321 | tx = &il->_4965.stats.tx; | |
1322 | accum_tx = &il->_4965.accum_stats.tx; | |
1722f8e1 SG |
1323 | delta = (u32 *) &il->_4965.delta_stats; |
1324 | max_delta = (u32 *) &il->_4965.max_delta; | |
a1751b22 SG |
1325 | |
1326 | for (i = sizeof(__le32); i < size; | |
e7392364 SG |
1327 | i += |
1328 | sizeof(__le32), stats++, prev_stats++, delta++, max_delta++, | |
1329 | accum_stats++) { | |
a1751b22 | 1330 | if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) { |
e7392364 SG |
1331 | *delta = |
1332 | (le32_to_cpu(*stats) - le32_to_cpu(*prev_stats)); | |
a1751b22 SG |
1333 | *accum_stats += *delta; |
1334 | if (*delta > *max_delta) | |
1335 | *max_delta = *delta; | |
1336 | } | |
1337 | } | |
1338 | ||
1339 | /* reset accumulative stats for "no-counter" type stats */ | |
1340 | accum_general->temperature = general->temperature; | |
1341 | accum_general->ttl_timestamp = general->ttl_timestamp; | |
1342 | } | |
1343 | #endif | |
1344 | ||
e7392364 SG |
/*
 * il4965_hdl_stats - handle a periodic statistics notification (N_STATS)
 *
 * Caches the uCode statistics blob, re-arms the periodic stats timer, and
 * kicks off noise calculation / run-time calibration when not scanning.
 * Temperature calibration runs only when the temperature or HT40 flag
 * actually changed relative to the previous notification.
 */
void
il4965_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb)
{
	const int recalib_seconds = 60;
	bool change;
	struct il_rx_pkt *pkt = rxb_addr(rxb);

	D_RX("Statistics notification received (%d vs %d).\n",
	     (int)sizeof(struct il_notif_stats),
	     le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK);

	/* Detect changes that warrant a temperature recalibration, comparing
	 * the new packet against the still-cached previous stats. */
	change =
	    ((il->_4965.stats.general.common.temperature !=
	      pkt->u.stats.general.common.temperature) ||
	     ((il->_4965.stats.flag & STATS_REPLY_FLG_HT40_MODE_MSK) !=
	      (pkt->u.stats.flag & STATS_REPLY_FLG_HT40_MODE_MSK)));
#ifdef CONFIG_IWLEGACY_DEBUGFS
	il4965_accumulative_stats(il, (__le32 *) &pkt->u.stats);
#endif

	/* TODO: reading some of stats is unneeded */
	memcpy(&il->_4965.stats, &pkt->u.stats, sizeof(il->_4965.stats));

	set_bit(S_STATS, &il->status);

	/*
	 * Reschedule the stats timer to occur in recalib_seconds to ensure
	 * we get a thermal update even if the uCode doesn't give us one
	 */
	mod_timer(&il->stats_periodic,
		  jiffies + msecs_to_jiffies(recalib_seconds * 1000));

	if (unlikely(!test_bit(S_SCANNING, &il->status)) &&
	    (pkt->hdr.cmd == N_STATS)) {
		il4965_rx_calc_noise(il);
		queue_work(il->workqueue, &il->run_time_calib_work);
	}

	if (change)
		il4965_temperature_calib(il);
}
1386 | ||
e7392364 SG |
1387 | void |
1388 | il4965_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb) | |
a1751b22 SG |
1389 | { |
1390 | struct il_rx_pkt *pkt = rxb_addr(rxb); | |
1391 | ||
db7746f7 | 1392 | if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATS_CLEAR_MSK) { |
a1751b22 SG |
1393 | #ifdef CONFIG_IWLEGACY_DEBUGFS |
1394 | memset(&il->_4965.accum_stats, 0, | |
e7392364 | 1395 | sizeof(struct il_notif_stats)); |
a1751b22 | 1396 | memset(&il->_4965.delta_stats, 0, |
e7392364 SG |
1397 | sizeof(struct il_notif_stats)); |
1398 | memset(&il->_4965.max_delta, 0, sizeof(struct il_notif_stats)); | |
a1751b22 SG |
1399 | #endif |
1400 | D_RX("Statistics have been cleared\n"); | |
1401 | } | |
d2dfb33e | 1402 | il4965_hdl_stats(il, rxb); |
a1751b22 SG |
1403 | } |
1404 | ||
8f29b456 SG |
1405 | |
1406 | /* | |
1407 | * mac80211 queues, ACs, hardware queues, FIFOs. | |
1408 | * | |
1409 | * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues | |
1410 | * | |
1411 | * Mac80211 uses the following numbers, which we get as from it | |
1412 | * by way of skb_get_queue_mapping(skb): | |
1413 | * | |
1414 | * VO 0 | |
1415 | * VI 1 | |
1416 | * BE 2 | |
1417 | * BK 3 | |
1418 | * | |
1419 | * | |
1420 | * Regular (not A-MPDU) frames are put into hardware queues corresponding | |
1421 | * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their | |
1422 | * own queue per aggregation session (RA/TID combination), such queues are | |
1423 | * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In | |
1424 | * order to map frames to the right queue, we also need an AC->hw queue | |
1425 | * mapping. This is implemented here. | |
1426 | * | |
1427 | * Due to the way hw queues are set up (by the hw specific modules like | |
af038f40 | 1428 | * 4965.c), the AC->hw queue mapping is the identity |
8f29b456 SG |
1429 | * mapping. |
1430 | */ | |
1431 | ||
a1751b22 SG |
/* TID (0-7) to mac80211 access category mapping; index is the TID. */
static const u8 tid_to_ac[] = {
	IEEE80211_AC_BE,
	IEEE80211_AC_BK,
	IEEE80211_AC_BK,
	IEEE80211_AC_BE,
	IEEE80211_AC_VI,
	IEEE80211_AC_VI,
	IEEE80211_AC_VO,
	IEEE80211_AC_VO
};
1442 | ||
e7392364 SG |
1443 | static inline int |
1444 | il4965_get_ac_from_tid(u16 tid) | |
a1751b22 SG |
1445 | { |
1446 | if (likely(tid < ARRAY_SIZE(tid_to_ac))) | |
1447 | return tid_to_ac[tid]; | |
1448 | ||
1449 | /* no support for TIDs 8-15 yet */ | |
1450 | return -EINVAL; | |
1451 | } | |
1452 | ||
1453 | static inline int | |
83007196 | 1454 | il4965_get_fifo_from_tid(u16 tid) |
a1751b22 | 1455 | { |
b75b3a70 SG |
1456 | const u8 ac_to_fifo[] = { |
1457 | IL_TX_FIFO_VO, | |
1458 | IL_TX_FIFO_VI, | |
1459 | IL_TX_FIFO_BE, | |
1460 | IL_TX_FIFO_BK, | |
1461 | }; | |
1462 | ||
a1751b22 | 1463 | if (likely(tid < ARRAY_SIZE(tid_to_ac))) |
b75b3a70 | 1464 | return ac_to_fifo[tid_to_ac[tid]]; |
a1751b22 SG |
1465 | |
1466 | /* no support for TIDs 8-15 yet */ | |
1467 | return -EINVAL; | |
1468 | } | |
1469 | ||
1470 | /* | |
4d69c752 | 1471 | * handle build C_TX command notification. |
a1751b22 | 1472 | */ |
e7392364 SG |
/*
 * il4965_tx_cmd_build_basic - fill the generic fields of a C_TX command
 *
 * Derives tx_flags (ACK, sequence control, TSF, BA, more-frags), the
 * station id, TID and power-management frame timeout from the frame's
 * 802.11 header and mac80211 TX info.  Rate selection is done separately
 * in il4965_tx_cmd_build_rate().
 */
static void
il4965_tx_cmd_build_basic(struct il_priv *il, struct sk_buff *skb,
			  struct il_tx_cmd *tx_cmd,
			  struct ieee80211_tx_info *info,
			  struct ieee80211_hdr *hdr, u8 std_id)
{
	__le16 fc = hdr->frame_control;
	__le32 tx_flags = tx_cmd->tx_flags;

	tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
		tx_flags |= TX_CMD_FLG_ACK_MSK;
		if (ieee80211_is_mgmt(fc))
			tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
		/* First fragment of a probe response gets the TSF flag. */
		if (ieee80211_is_probe_resp(fc) &&
		    !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
			tx_flags |= TX_CMD_FLG_TSF_MSK;
	} else {
		tx_flags &= (~TX_CMD_FLG_ACK_MSK);
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}

	if (ieee80211_is_back_req(fc))
		tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;

	tx_cmd->sta_id = std_id;
	if (ieee80211_has_morefrags(fc))
		tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;

	/* QoS data frames carry their TID; non-QoS frames let the uCode
	 * manage the sequence control itself. */
	if (ieee80211_is_data_qos(fc)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		tx_cmd->tid_tspec = qc[0] & 0xf;
		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
	} else {
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}

	il_tx_cmd_protection(il, info, fc, &tx_flags);

	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
	/* (Re)association requests get a slightly longer PM frame timeout. */
	if (ieee80211_is_mgmt(fc)) {
		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
		else
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
	} else {
		tx_cmd->timeout.pm_frame_timeout = 0;
	}

	tx_cmd->driver_txop = 0;
	tx_cmd->tx_flags = tx_flags;
	tx_cmd->next_frame_len = 0;
}
1526 | ||
e7392364 SG |
1527 | static void |
1528 | il4965_tx_cmd_build_rate(struct il_priv *il, struct il_tx_cmd *tx_cmd, | |
1529 | struct ieee80211_tx_info *info, __le16 fc) | |
a1751b22 | 1530 | { |
616107ed | 1531 | const u8 rts_retry_limit = 60; |
a1751b22 SG |
1532 | u32 rate_flags; |
1533 | int rate_idx; | |
a1751b22 SG |
1534 | u8 data_retry_limit; |
1535 | u8 rate_plcp; | |
1536 | ||
e7392364 | 1537 | /* Set retry limit on DATA packets and Probe Responses */ |
a1751b22 SG |
1538 | if (ieee80211_is_probe_resp(fc)) |
1539 | data_retry_limit = 3; | |
1540 | else | |
1541 | data_retry_limit = IL4965_DEFAULT_TX_RETRY; | |
1542 | tx_cmd->data_retry_limit = data_retry_limit; | |
a1751b22 | 1543 | /* Set retry limit on RTS packets */ |
616107ed | 1544 | tx_cmd->rts_retry_limit = min(data_retry_limit, rts_retry_limit); |
a1751b22 SG |
1545 | |
1546 | /* DATA packets will use the uCode station table for rate/antenna | |
1547 | * selection */ | |
1548 | if (ieee80211_is_data(fc)) { | |
1549 | tx_cmd->initial_rate_idx = 0; | |
1550 | tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK; | |
1551 | return; | |
1552 | } | |
1553 | ||
1554 | /** | |
1555 | * If the current TX rate stored in mac80211 has the MCS bit set, it's | |
1556 | * not really a TX rate. Thus, we use the lowest supported rate for | |
1557 | * this band. Also use the lowest supported rate if the stored rate | |
1558 | * idx is invalid. | |
1559 | */ | |
1560 | rate_idx = info->control.rates[0].idx; | |
e7392364 SG |
1561 | if ((info->control.rates[0].flags & IEEE80211_TX_RC_MCS) || rate_idx < 0 |
1562 | || rate_idx > RATE_COUNT_LEGACY) | |
1563 | rate_idx = | |
1564 | rate_lowest_index(&il->bands[info->band], | |
1565 | info->control.sta); | |
a1751b22 SG |
1566 | /* For 5 GHZ band, remap mac80211 rate indices into driver indices */ |
1567 | if (info->band == IEEE80211_BAND_5GHZ) | |
1568 | rate_idx += IL_FIRST_OFDM_RATE; | |
1569 | /* Get PLCP rate for tx_cmd->rate_n_flags */ | |
1570 | rate_plcp = il_rates[rate_idx].plcp; | |
1571 | /* Zero out flags for this packet */ | |
1572 | rate_flags = 0; | |
1573 | ||
1574 | /* Set CCK flag as needed */ | |
1575 | if (rate_idx >= IL_FIRST_CCK_RATE && rate_idx <= IL_LAST_CCK_RATE) | |
1576 | rate_flags |= RATE_MCS_CCK_MSK; | |
1577 | ||
1578 | /* Set up antennas */ | |
a0c1ef3b | 1579 | il4965_toggle_tx_ant(il, &il->mgmt_tx_ant, il->hw_params.valid_tx_ant); |
616107ed | 1580 | rate_flags |= BIT(il->mgmt_tx_ant) << RATE_MCS_ANT_POS; |
a1751b22 SG |
1581 | |
1582 | /* Set the rate in the TX cmd */ | |
616107ed | 1583 | tx_cmd->rate_n_flags = cpu_to_le32(rate_plcp | rate_flags); |
a1751b22 SG |
1584 | } |
1585 | ||
e7392364 SG |
/*
 * il4965_tx_cmd_build_hwcrypto - fill hardware-crypto fields of a C_TX cmd
 *
 * Copies the key material and security control flags for the frame's
 * configured cipher (CCMP, TKIP, WEP40/104) into the TX command.  Unknown
 * ciphers are logged and left unconfigured.
 */
static void
il4965_tx_cmd_build_hwcrypto(struct il_priv *il, struct ieee80211_tx_info *info,
			     struct il_tx_cmd *tx_cmd, struct sk_buff *skb_frag,
			     int sta_id)
{
	struct ieee80211_key_conf *keyconf = info->control.hw_key;

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_CCMP:
		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
		memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
		if (info->flags & IEEE80211_TX_CTL_AMPDU)
			tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
		D_TX("tx_cmd with AES hwcrypto\n");
		break;

	case WLAN_CIPHER_SUITE_TKIP:
		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
		/* Per-packet TKIP phase-2 key derived for this fragment. */
		ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
		D_TX("tx_cmd with tkip hwcrypto\n");
		break;

	case WLAN_CIPHER_SUITE_WEP104:
		tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
		/* fall through */
	case WLAN_CIPHER_SUITE_WEP40:
		tx_cmd->sec_ctl |=
		    (TX_CMD_SEC_WEP | (keyconf->keyidx & TX_CMD_SEC_MSK) <<
		     TX_CMD_SEC_SHIFT);

		/* WEP key bytes start at offset 3 in the key field. */
		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);

		D_TX("Configuring packet for WEP encryption " "with key %d\n",
		     keyconf->keyidx);
		break;

	default:
		IL_ERR("Unknown encode cipher %x\n", keyconf->cipher);
		break;
	}
}
1627 | ||
1628 | /* | |
4d69c752 | 1629 | * start C_TX command process |
a1751b22 | 1630 | */ |
e7392364 SG |
/*
 * il4965_tx_skb - start C_TX command process for one frame
 *
 * Builds the TX command (security, rate, flags), DMA-maps the command+MAC
 * header and the frame payload into a TFD, and advances the queue write
 * pointer. Returns 0 on success (frame handed to hardware; status arrives
 * later via TX notification), -1 if the frame was dropped.
 */
int
il4965_tx_skb(struct il_priv *il, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *sta = info->control.sta;
	struct il_station_priv *sta_priv = NULL;
	struct il_tx_queue *txq;
	struct il_queue *q;
	struct il_device_cmd *out_cmd;
	struct il_cmd_meta *out_meta;
	struct il_tx_cmd *tx_cmd;
	int txq_id;
	dma_addr_t phys_addr;
	dma_addr_t txcmd_phys;
	dma_addr_t scratch_phys;
	u16 len, firstlen, secondlen;
	u16 seq_number = 0;
	__le16 fc;
	u8 hdr_len;
	u8 sta_id;
	u8 wait_write_ptr = 0;
	u8 tid = 0;
	u8 *qc = NULL;
	unsigned long flags;
	bool is_agg = false;

	spin_lock_irqsave(&il->lock, flags);
	if (il_is_rfkill(il)) {
		D_DROP("Dropping - RF KILL\n");
		goto drop_unlock;
	}

	fc = hdr->frame_control;

#ifdef CONFIG_IWLEGACY_DEBUG
	if (ieee80211_is_auth(fc))
		D_TX("Sending AUTH frame\n");
	else if (ieee80211_is_assoc_req(fc))
		D_TX("Sending ASSOC frame\n");
	else if (ieee80211_is_reassoc_req(fc))
		D_TX("Sending REASSOC frame\n");
#endif

	hdr_len = ieee80211_hdrlen(fc);

	/* For management frames use broadcast id to do not break aggregation */
	if (!ieee80211_is_data(fc))
		sta_id = il->hw_params.bcast_id;
	else {
		/* Find idx into station table for destination station */
		sta_id = il_sta_id_or_broadcast(il, info->control.sta);

		if (sta_id == IL_INVALID_STATION) {
			D_DROP("Dropping - INVALID STATION: %pM\n", hdr->addr1);
			goto drop_unlock;
		}
	}

	D_TX("station Id %d\n", sta_id);

	if (sta)
		sta_priv = (void *)sta->drv_priv;

	if (sta_priv && sta_priv->asleep &&
	    (info->flags & IEEE80211_TX_CTL_POLL_RESPONSE)) {
		/*
		 * This sends an asynchronous command to the device,
		 * but we can rely on it being processed before the
		 * next frame is processed -- and the next frame to
		 * this station is the one that will consume this
		 * counter.
		 * For now set the counter to just 1 since we do not
		 * support uAPSD yet.
		 */
		il4965_sta_modify_sleep_tx_count(il, sta_id, 1);
	}

	/* FIXME: remove me ? */
	WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);

	/* Access category (AC) is also the queue number */
	txq_id = skb_get_queue_mapping(skb);

	/* irqs already disabled/saved above when locking il->lock */
	spin_lock(&il->sta_lock);

	if (ieee80211_is_data_qos(fc)) {
		qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
		if (WARN_ON_ONCE(tid >= MAX_TID_COUNT)) {
			spin_unlock(&il->sta_lock);
			goto drop_unlock;
		}
		/* Driver assigns the 802.11 sequence number for QoS data;
		 * keep the fragment bits the stack may have set. */
		seq_number = il->stations[sta_id].tid[tid].seq_number;
		seq_number &= IEEE80211_SCTL_SEQ;
		hdr->seq_ctrl =
		    hdr->seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(seq_number);
		seq_number += 0x10;
		/* aggregation is on for this <sta,tid> */
		if (info->flags & IEEE80211_TX_CTL_AMPDU &&
		    il->stations[sta_id].tid[tid].agg.state == IL_AGG_ON) {
			txq_id = il->stations[sta_id].tid[tid].agg.txq_id;
			is_agg = true;
		}
	}

	txq = &il->txq[txq_id];
	q = &txq->q;

	if (unlikely(il_queue_space(q) < q->high_mark)) {
		spin_unlock(&il->sta_lock);
		goto drop_unlock;
	}

	/* Only commit the advanced sequence number once the frame is
	 * definitely being queued (and is the last fragment). */
	if (ieee80211_is_data_qos(fc)) {
		il->stations[sta_id].tid[tid].tfds_in_queue++;
		if (!ieee80211_has_morefrags(fc))
			il->stations[sta_id].tid[tid].seq_number = seq_number;
	}

	spin_unlock(&il->sta_lock);

	txq->skbs[q->write_ptr] = skb;

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_cmd = txq->cmd[q->write_ptr];
	out_meta = &txq->meta[q->write_ptr];
	tx_cmd = &out_cmd->cmd.tx;
	memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
	memset(tx_cmd, 0, sizeof(struct il_tx_cmd));

	/*
	 * Set up the Tx-command (not MAC!) header.
	 * Store the chosen Tx queue and TFD idx within the sequence field;
	 * after Tx, uCode's Tx response will return this value so driver can
	 * locate the frame within the tx queue and do post-tx processing.
	 */
	out_cmd->hdr.cmd = C_TX;
	out_cmd->hdr.sequence =
	    cpu_to_le16((u16)
			(QUEUE_TO_SEQ(txq_id) | IDX_TO_SEQ(q->write_ptr)));

	/* Copy MAC header from skb into command buffer */
	memcpy(tx_cmd->hdr, hdr, hdr_len);

	/* Total # bytes to be transmitted */
	len = (u16) skb->len;
	tx_cmd->len = cpu_to_le16(len);

	if (info->control.hw_key)
		il4965_tx_cmd_build_hwcrypto(il, info, tx_cmd, skb, sta_id);

	/* TODO need this for burst mode later on */
	il4965_tx_cmd_build_basic(il, skb, tx_cmd, info, hdr, sta_id);

	il4965_tx_cmd_build_rate(il, tx_cmd, info, fc);

	il_update_stats(il, true, fc, len);
	/*
	 * Use the first empty entry in this queue's command buffer array
	 * to contain the Tx command and MAC header concatenated together
	 * (payload data will be in another buffer).
	 * Size of this varies, due to varying MAC header length.
	 * If end is not dword aligned, we'll have 2 extra bytes at the end
	 * of the MAC header (device reads on dword boundaries).
	 * We'll tell device about this padding later.
	 */
	len = sizeof(struct il_tx_cmd) + sizeof(struct il_cmd_header) + hdr_len;
	firstlen = (len + 3) & ~3;

	/* Tell NIC about any 2-byte padding after MAC header */
	if (firstlen != len)
		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;

	/* Physical address of this Tx command's header (not MAC header!),
	 * within command buffer array. */
	txcmd_phys =
	    pci_map_single(il->pci_dev, &out_cmd->hdr, firstlen,
			   PCI_DMA_BIDIRECTIONAL);
	dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
	dma_unmap_len_set(out_meta, len, firstlen);
	/* Add buffer containing Tx command and MAC(!) header to TFD's
	 * first entry */
	il->ops->txq_attach_buf_to_tfd(il, txq, txcmd_phys, firstlen, 1, 0);

	/* Without more fragments the write pointer can be pushed now;
	 * otherwise defer the doorbell until the queue fills. */
	if (!ieee80211_has_morefrags(hdr->frame_control)) {
		txq->need_update = 1;
	} else {
		wait_write_ptr = 1;
		txq->need_update = 0;
	}

	/* Set up TFD's 2nd entry to point directly to remainder of skb,
	 * if any (802.11 null frames have no payload). */
	secondlen = skb->len - hdr_len;
	if (secondlen > 0) {
		phys_addr =
		    pci_map_single(il->pci_dev, skb->data + hdr_len, secondlen,
				   PCI_DMA_TODEVICE);
		il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, secondlen,
					       0, 0);
	}

	scratch_phys =
	    txcmd_phys + sizeof(struct il_cmd_header) +
	    offsetof(struct il_tx_cmd, scratch);

	/* take back ownership of DMA buffer to enable update */
	pci_dma_sync_single_for_cpu(il->pci_dev, txcmd_phys, firstlen,
				    PCI_DMA_BIDIRECTIONAL);
	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = il_get_dma_hi_addr(scratch_phys);

	D_TX("sequence nr = 0X%x\n", le16_to_cpu(out_cmd->hdr.sequence));
	D_TX("tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
	il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd, sizeof(*tx_cmd));
	il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd->hdr, hdr_len);

	/* Set up entry for this TFD in Tx byte-count array */
	if (info->flags & IEEE80211_TX_CTL_AMPDU)
		il->ops->txq_update_byte_cnt_tbl(il, txq, le16_to_cpu(tx_cmd->len));

	/* Hand the DMA buffer back to the device after the CPU-side edits */
	pci_dma_sync_single_for_device(il->pci_dev, txcmd_phys, firstlen,
				       PCI_DMA_BIDIRECTIONAL);

	/* Tell device the write idx *just past* this latest filled TFD */
	q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
	il_txq_update_write_ptr(il, txq);
	spin_unlock_irqrestore(&il->lock, flags);

	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually,
	 * regardless of the value of ret. "ret" only indicates
	 * whether or not we should update the write pointer.
	 */

	/*
	 * Avoid atomic ops if it isn't an associated client.
	 * Also, if this is a packet for aggregation, don't
	 * increase the counter because the ucode will stop
	 * aggregation queues when their respective station
	 * goes to sleep.
	 */
	if (sta_priv && sta_priv->client && !is_agg)
		atomic_inc(&sta_priv->pending_frames);

	/* Queue nearly full: flush any deferred doorbell or tell mac80211
	 * to stop feeding us frames for this queue. */
	if (il_queue_space(q) < q->high_mark && il->mac80211_registered) {
		if (wait_write_ptr) {
			spin_lock_irqsave(&il->lock, flags);
			txq->need_update = 1;
			il_txq_update_write_ptr(il, txq);
			spin_unlock_irqrestore(&il->lock, flags);
		} else {
			il_stop_queue(il, txq);
		}
	}

	return 0;

drop_unlock:
	spin_unlock_irqrestore(&il->lock, flags);
	return -1;
}
1897 | ||
e7392364 SG |
1898 | static inline int |
1899 | il4965_alloc_dma_ptr(struct il_priv *il, struct il_dma_ptr *ptr, size_t size) | |
a1751b22 | 1900 | { |
e7392364 SG |
1901 | ptr->addr = |
1902 | dma_alloc_coherent(&il->pci_dev->dev, size, &ptr->dma, GFP_KERNEL); | |
a1751b22 SG |
1903 | if (!ptr->addr) |
1904 | return -ENOMEM; | |
1905 | ptr->size = size; | |
1906 | return 0; | |
1907 | } | |
1908 | ||
e7392364 SG |
1909 | static inline void |
1910 | il4965_free_dma_ptr(struct il_priv *il, struct il_dma_ptr *ptr) | |
a1751b22 SG |
1911 | { |
1912 | if (unlikely(!ptr->addr)) | |
1913 | return; | |
1914 | ||
1915 | dma_free_coherent(&il->pci_dev->dev, ptr->size, ptr->addr, ptr->dma); | |
1916 | memset(ptr, 0, sizeof(*ptr)); | |
1917 | } | |
1918 | ||
1919 | /** | |
1920 | * il4965_hw_txq_ctx_free - Free TXQ Context | |
1921 | * | |
1922 | * Destroy all TX DMA queues and structures | |
1923 | */ | |
e7392364 SG |
1924 | void |
1925 | il4965_hw_txq_ctx_free(struct il_priv *il) | |
a1751b22 SG |
1926 | { |
1927 | int txq_id; | |
1928 | ||
1929 | /* Tx queues */ | |
1930 | if (il->txq) { | |
1931 | for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) | |
1932 | if (txq_id == il->cmd_queue) | |
1933 | il_cmd_queue_free(il); | |
1934 | else | |
1935 | il_tx_queue_free(il, txq_id); | |
1936 | } | |
1937 | il4965_free_dma_ptr(il, &il->kw); | |
1938 | ||
1939 | il4965_free_dma_ptr(il, &il->scd_bc_tbls); | |
1940 | ||
1941 | /* free tx queue structure */ | |
1942 | il_txq_mem(il); | |
1943 | } | |
1944 | ||
/**
 * il4965_txq_ctx_alloc - allocate TX queue context
 * Allocate all Tx DMA structures and initialize them
 *
 * @param il
 * @return error code
 *
 * On failure, everything allocated so far is released via the goto
 * cleanup chain, in reverse order of acquisition.
 */
int
il4965_txq_ctx_alloc(struct il_priv *il)
{
	int ret;
	int txq_id, slots_num;
	unsigned long flags;

	/* Free all tx/cmd queues and keep-warm buffer */
	il4965_hw_txq_ctx_free(il);

	/* Scheduler byte-count tables (size comes from hw_params) */
	ret =
	    il4965_alloc_dma_ptr(il, &il->scd_bc_tbls,
				 il->hw_params.scd_bc_tbls_size);
	if (ret) {
		IL_ERR("Scheduler BC Table allocation failed\n");
		goto error_bc_tbls;
	}
	/* Alloc keep-warm buffer */
	ret = il4965_alloc_dma_ptr(il, &il->kw, IL_KW_SIZE);
	if (ret) {
		IL_ERR("Keep Warm allocation failed\n");
		goto error_kw;
	}

	/* allocate tx queue structure */
	ret = il_alloc_txq_mem(il);
	if (ret)
		goto error;

	spin_lock_irqsave(&il->lock, flags);

	/* Turn off all Tx DMA fifos */
	il4965_txq_set_sched(il, 0);

	/* Tell NIC where to find the "keep warm" buffer */
	il_wr(il, FH49_KW_MEM_ADDR_REG, il->kw.dma >> 4);

	spin_unlock_irqrestore(&il->lock, flags);

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) {
		/* the command queue uses a different slot count */
		slots_num =
		    (txq_id ==
		     il->cmd_queue) ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = il_tx_queue_init(il, &il->txq[txq_id], slots_num, txq_id);
		if (ret) {
			IL_ERR("Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return ret;

error:
	il4965_hw_txq_ctx_free(il);
	il4965_free_dma_ptr(il, &il->kw);
error_kw:
	il4965_free_dma_ptr(il, &il->scd_bc_tbls);
error_bc_tbls:
	return ret;
}
2013 | ||
e7392364 SG |
2014 | void |
2015 | il4965_txq_ctx_reset(struct il_priv *il) | |
a1751b22 SG |
2016 | { |
2017 | int txq_id, slots_num; | |
2018 | unsigned long flags; | |
2019 | ||
2020 | spin_lock_irqsave(&il->lock, flags); | |
2021 | ||
2022 | /* Turn off all Tx DMA fifos */ | |
2023 | il4965_txq_set_sched(il, 0); | |
2024 | ||
2025 | /* Tell NIC where to find the "keep warm" buffer */ | |
9a95b370 | 2026 | il_wr(il, FH49_KW_MEM_ADDR_REG, il->kw.dma >> 4); |
a1751b22 SG |
2027 | |
2028 | spin_unlock_irqrestore(&il->lock, flags); | |
2029 | ||
2030 | /* Alloc and init all Tx queues, including the command queue (#4) */ | |
2031 | for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) { | |
e7392364 SG |
2032 | slots_num = |
2033 | txq_id == il->cmd_queue ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; | |
2034 | il_tx_queue_reset(il, &il->txq[txq_id], slots_num, txq_id); | |
a1751b22 SG |
2035 | } |
2036 | } | |
2037 | ||
e7392364 | 2038 | void |
775ed8ab | 2039 | il4965_txq_ctx_unmap(struct il_priv *il) |
a1751b22 | 2040 | { |
775ed8ab | 2041 | int txq_id; |
a1751b22 SG |
2042 | |
2043 | if (!il->txq) | |
2044 | return; | |
2045 | ||
2046 | /* Unmap DMA from host system and free skb's */ | |
2047 | for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) | |
2048 | if (txq_id == il->cmd_queue) | |
2049 | il_cmd_queue_unmap(il); | |
2050 | else | |
2051 | il_tx_queue_unmap(il, txq_id); | |
2052 | } | |
2053 | ||
775ed8ab SG |
2054 | /** |
2055 | * il4965_txq_ctx_stop - Stop all Tx DMA channels | |
2056 | */ | |
2057 | void | |
2058 | il4965_txq_ctx_stop(struct il_priv *il) | |
2059 | { | |
2060 | int ch, ret; | |
2061 | ||
2062 | _il_wr_prph(il, IL49_SCD_TXFACT, 0); | |
2063 | ||
2064 | /* Stop each Tx DMA channel, and wait for it to be idle */ | |
2065 | for (ch = 0; ch < il->hw_params.dma_chnl_num; ch++) { | |
2066 | _il_wr(il, FH49_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0); | |
2067 | ret = | |
2068 | _il_poll_bit(il, FH49_TSSR_TX_STATUS_REG, | |
2069 | FH49_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), | |
2070 | FH49_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), | |
2071 | 1000); | |
2072 | if (ret < 0) | |
2073 | IL_ERR("Timeout stopping DMA channel %d [0x%08x]", | |
2074 | ch, _il_rd(il, FH49_TSSR_TX_STATUS_REG)); | |
2075 | } | |
2076 | } | |
2077 | ||
a1751b22 SG |
2078 | /* |
2079 | * Find first available (lowest unused) Tx Queue, mark it "active". | |
2080 | * Called only when finding queue for aggregation. | |
2081 | * Should never return anything < 7, because they should already | |
2082 | * be in use as EDCA AC (0-3), Command (4), reserved (5, 6) | |
2083 | */ | |
e7392364 SG |
2084 | static int |
2085 | il4965_txq_ctx_activate_free(struct il_priv *il) | |
a1751b22 SG |
2086 | { |
2087 | int txq_id; | |
2088 | ||
2089 | for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) | |
2090 | if (!test_and_set_bit(txq_id, &il->txq_ctx_active_msk)) | |
2091 | return txq_id; | |
2092 | return -1; | |
2093 | } | |
2094 | ||
2095 | /** | |
2096 | * il4965_tx_queue_stop_scheduler - Stop queue, but keep configuration | |
2097 | */ | |
e7392364 SG |
2098 | static void |
2099 | il4965_tx_queue_stop_scheduler(struct il_priv *il, u16 txq_id) | |
a1751b22 SG |
2100 | { |
2101 | /* Simply stop the queue, but don't change any configuration; | |
2102 | * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */ | |
e7392364 | 2103 | il_wr_prph(il, IL49_SCD_QUEUE_STATUS_BITS(txq_id), |
1722f8e1 SG |
2104 | (0 << IL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) | |
2105 | (1 << IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN)); | |
a1751b22 SG |
2106 | } |
2107 | ||
2108 | /** | |
2109 | * il4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue | |
2110 | */ | |
e7392364 SG |
2111 | static int |
2112 | il4965_tx_queue_set_q2ratid(struct il_priv *il, u16 ra_tid, u16 txq_id) | |
a1751b22 SG |
2113 | { |
2114 | u32 tbl_dw_addr; | |
2115 | u32 tbl_dw; | |
2116 | u16 scd_q2ratid; | |
2117 | ||
2118 | scd_q2ratid = ra_tid & IL_SCD_QUEUE_RA_TID_MAP_RATID_MSK; | |
2119 | ||
e7392364 SG |
2120 | tbl_dw_addr = |
2121 | il->scd_base_addr + IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id); | |
a1751b22 SG |
2122 | |
2123 | tbl_dw = il_read_targ_mem(il, tbl_dw_addr); | |
2124 | ||
2125 | if (txq_id & 0x1) | |
2126 | tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF); | |
2127 | else | |
2128 | tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000); | |
2129 | ||
2130 | il_write_targ_mem(il, tbl_dw_addr, tbl_dw); | |
2131 | ||
2132 | return 0; | |
2133 | } | |
2134 | ||
/**
 * il4965_txq_agg_enable - Set up & enable aggregation for selected queue
 *
 * NOTE: txq_id must be greater than IL49_FIRST_AMPDU_QUEUE,
 * i.e. it must be one of the higher queues used for aggregation
 *
 * Programs the scheduler SRAM and PRPH registers for one AMPDU queue:
 * RA/TID mapping, chain building, read/write pointers at @ssn_idx,
 * window size and frame limit, then activates the queue.
 */
static int
il4965_txq_agg_enable(struct il_priv *il, int txq_id, int tx_fifo, int sta_id,
		      int tid, u16 ssn_idx)
{
	unsigned long flags;
	u16 ra_tid;
	int ret;

	/* Reject queue numbers outside the AMPDU range */
	if ((IL49_FIRST_AMPDU_QUEUE > txq_id) ||
	    (IL49_FIRST_AMPDU_QUEUE +
	     il->cfg->num_of_ampdu_queues <= txq_id)) {
		IL_WARN("queue number out of range: %d, must be %d to %d\n",
			txq_id, IL49_FIRST_AMPDU_QUEUE,
			IL49_FIRST_AMPDU_QUEUE +
			il->cfg->num_of_ampdu_queues - 1);
		return -EINVAL;
	}

	ra_tid = BUILD_RAxTID(sta_id, tid);

	/* Modify device's station table to Tx this TID */
	ret = il4965_sta_tx_modify_enable_tid(il, sta_id, tid);
	if (ret)
		return ret;

	spin_lock_irqsave(&il->lock, flags);

	/* Stop this Tx queue before configuring it */
	il4965_tx_queue_stop_scheduler(il, txq_id);

	/* Map receiver-address / traffic-ID to this queue */
	il4965_tx_queue_set_q2ratid(il, ra_tid, txq_id);

	/* Set this queue as a chain-building queue */
	il_set_bits_prph(il, IL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));

	/* Place first TFD at idx corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
	il->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
	il->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
	il4965_set_wr_ptrs(il, txq_id, ssn_idx);

	/* Set up Tx win size and frame limit for this queue */
	il_write_targ_mem(il,
			  il->scd_base_addr +
			  IL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id),
			  (SCD_WIN_SIZE << IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS)
			  & IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);

	il_write_targ_mem(il,
			  il->scd_base_addr +
			  IL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
			  (SCD_FRAME_LIMIT <<
			   IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
			  IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);

	/* Enable scheduler interrupts for this queue */
	il_set_bits_prph(il, IL49_SCD_INTERRUPT_MASK, (1 << txq_id));

	/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
	il4965_tx_queue_set_status(il, &il->txq[txq_id], tx_fifo, 1);

	spin_unlock_irqrestore(&il->lock, flags);

	return 0;
}
2206 | ||
e7392364 SG |
/*
 * il4965_tx_agg_start - begin an aggregation (BA) session for <sta, tid>
 *
 * Picks a free AMPDU queue, configures it, and either completes the ADDBA
 * handshake immediately (HW queue empty) or defers it until the queue
 * drains (state IL_EMPTYING_HW_QUEUE_ADDBA, finished in
 * il4965_txq_check_empty()). Returns 0 or a negative errno.
 */
int
il4965_tx_agg_start(struct il_priv *il, struct ieee80211_vif *vif,
		    struct ieee80211_sta *sta, u16 tid, u16 * ssn)
{
	int sta_id;
	int tx_fifo;
	int txq_id;
	int ret;
	unsigned long flags;
	struct il_tid_data *tid_data;

	/* FIXME: warning if tx fifo not found ? */
	tx_fifo = il4965_get_fifo_from_tid(tid);
	if (unlikely(tx_fifo < 0))
		return tx_fifo;

	D_HT("%s on ra = %pM tid = %d\n", __func__, sta->addr, tid);

	sta_id = il_sta_id(sta);
	if (sta_id == IL_INVALID_STATION) {
		IL_ERR("Start AGG on invalid station\n");
		return -ENXIO;
	}
	if (unlikely(tid >= MAX_TID_COUNT))
		return -EINVAL;

	if (il->stations[sta_id].tid[tid].agg.state != IL_AGG_OFF) {
		IL_ERR("Start AGG when state is not IL_AGG_OFF !\n");
		return -ENXIO;
	}

	/* Claim the lowest unused AMPDU queue */
	txq_id = il4965_txq_ctx_activate_free(il);
	if (txq_id == -1) {
		IL_ERR("No free aggregation queue available\n");
		return -ENXIO;
	}

	spin_lock_irqsave(&il->sta_lock, flags);
	tid_data = &il->stations[sta_id].tid[tid];
	/* Report the starting sequence number back to mac80211 */
	*ssn = SEQ_TO_SN(tid_data->seq_number);
	tid_data->agg.txq_id = txq_id;
	il_set_swq_id(&il->txq[txq_id], il4965_get_ac_from_tid(tid), txq_id);
	spin_unlock_irqrestore(&il->sta_lock, flags);

	ret = il4965_txq_agg_enable(il, txq_id, tx_fifo, sta_id, tid, *ssn);
	if (ret)
		return ret;

	spin_lock_irqsave(&il->sta_lock, flags);
	tid_data = &il->stations[sta_id].tid[tid];
	if (tid_data->tfds_in_queue == 0) {
		D_HT("HW queue is empty\n");
		tid_data->agg.state = IL_AGG_ON;
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
	} else {
		/* Frames still pending on the legacy queue: finish the
		 * ADDBA flow once it drains. */
		D_HT("HW queue is NOT empty: %d packets in HW queue\n",
		     tid_data->tfds_in_queue);
		tid_data->agg.state = IL_EMPTYING_HW_QUEUE_ADDBA;
	}
	spin_unlock_irqrestore(&il->sta_lock, flags);
	return ret;
}
2269 | ||
/**
 * txq_id must be greater than IL49_FIRST_AMPDU_QUEUE
 * il->lock must be held by the caller
 *
 * Tears down one AMPDU queue: stops its scheduler, removes it from the
 * chain-building set, rewinds its pointers to @ssn_idx and deactivates it.
 */
static int
il4965_txq_agg_disable(struct il_priv *il, u16 txq_id, u16 ssn_idx, u8 tx_fifo)
{
	/* Reject queue numbers outside the AMPDU range */
	if ((IL49_FIRST_AMPDU_QUEUE > txq_id) ||
	    (IL49_FIRST_AMPDU_QUEUE +
	     il->cfg->num_of_ampdu_queues <= txq_id)) {
		IL_WARN("queue number out of range: %d, must be %d to %d\n",
			txq_id, IL49_FIRST_AMPDU_QUEUE,
			IL49_FIRST_AMPDU_QUEUE +
			il->cfg->num_of_ampdu_queues - 1);
		return -EINVAL;
	}

	il4965_tx_queue_stop_scheduler(il, txq_id);

	/* Remove the queue from the chain-building set */
	il_clear_bits_prph(il, IL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));

	il->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
	il->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
	/* supposes that ssn_idx is valid (!= 0xFFF) */
	il4965_set_wr_ptrs(il, txq_id, ssn_idx);

	/* Mask its interrupt, drop it from the active mask, mark inactive */
	il_clear_bits_prph(il, IL49_SCD_INTERRUPT_MASK, (1 << txq_id));
	il_txq_ctx_deactivate(il, txq_id);
	il4965_tx_queue_set_status(il, &il->txq[txq_id], tx_fifo, 0);

	return 0;
}
2302 | ||
e7392364 SG |
/*
 * il4965_tx_agg_stop - tear down the aggregation session for <sta, tid>
 *
 * If the AMPDU HW queue still holds frames, only marks the session
 * IL_EMPTYING_HW_QUEUE_DELBA; il4965_txq_check_empty() finishes the
 * teardown once the queue drains. Otherwise disables the queue now and
 * notifies mac80211.
 */
int
il4965_tx_agg_stop(struct il_priv *il, struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta, u16 tid)
{
	int tx_fifo_id, txq_id, sta_id, ssn;
	struct il_tid_data *tid_data;
	int write_ptr, read_ptr;
	unsigned long flags;

	/* FIXME: warning if tx_fifo_id not found ? */
	tx_fifo_id = il4965_get_fifo_from_tid(tid);
	if (unlikely(tx_fifo_id < 0))
		return tx_fifo_id;

	sta_id = il_sta_id(sta);

	if (sta_id == IL_INVALID_STATION) {
		IL_ERR("Invalid station for AGG tid %d\n", tid);
		return -ENXIO;
	}

	spin_lock_irqsave(&il->sta_lock, flags);

	tid_data = &il->stations[sta_id].tid[tid];
	ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
	txq_id = tid_data->agg.txq_id;

	switch (il->stations[sta_id].tid[tid].agg.state) {
	case IL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * This can happen if the peer stops aggregation
		 * again before we've had a chance to drain the
		 * queue we selected previously, i.e. before the
		 * session was really started completely.
		 */
		D_HT("AGG stop before setup done\n");
		goto turn_off;
	case IL_AGG_ON:
		break;
	default:
		IL_WARN("Stopping AGG while state not ON or starting\n");
	}

	write_ptr = il->txq[txq_id].q.write_ptr;
	read_ptr = il->txq[txq_id].q.read_ptr;

	/* The queue is not empty */
	if (write_ptr != read_ptr) {
		D_HT("Stopping a non empty AGG HW QUEUE\n");
		il->stations[sta_id].tid[tid].agg.state =
		    IL_EMPTYING_HW_QUEUE_DELBA;
		spin_unlock_irqrestore(&il->sta_lock, flags);
		return 0;
	}

	D_HT("HW queue is empty\n");
turn_off:
	il->stations[sta_id].tid[tid].agg.state = IL_AGG_OFF;

	/* do not restore/save irqs: swap sta_lock for il->lock while irqs
	 * stay disabled; `flags` from the irqsave above is restored below */
	spin_unlock(&il->sta_lock);
	spin_lock(&il->lock);

	/*
	 * the only reason this call can fail is queue number out of range,
	 * which can happen if uCode is reloaded and all the station
	 * information are lost. if it is outside the range, there is no need
	 * to deactivate the uCode queue, just return "success" to allow
	 * mac80211 to clean up it own data.
	 */
	il4965_txq_agg_disable(il, txq_id, ssn, tx_fifo_id);
	spin_unlock_irqrestore(&il->lock, flags);

	ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

	return 0;
}
2380 | ||
e7392364 SG |
/*
 * il4965_txq_check_empty - complete a deferred ADDBA/DELBA transition
 *
 * Called while reclaiming frames, with il->sta_lock held. When a queue in
 * one of the EMPTYING states finally drains, finishes the aggregation
 * start (ADDBA) or teardown (DELBA) flow and notifies mac80211.
 */
int
il4965_txq_check_empty(struct il_priv *il, int sta_id, u8 tid, int txq_id)
{
	struct il_queue *q = &il->txq[txq_id].q;
	u8 *addr = il->stations[sta_id].sta.sta.addr;
	struct il_tid_data *tid_data = &il->stations[sta_id].tid[tid];

	lockdep_assert_held(&il->sta_lock);

	switch (il->stations[sta_id].tid[tid].agg.state) {
	case IL_EMPTYING_HW_QUEUE_DELBA:
		/* We are reclaiming the last packet of the */
		/* aggregated HW queue */
		if (txq_id == tid_data->agg.txq_id &&
		    q->read_ptr == q->write_ptr) {
			u16 ssn = SEQ_TO_SN(tid_data->seq_number);
			int tx_fifo = il4965_get_fifo_from_tid(tid);
			D_HT("HW queue empty: continue DELBA flow\n");
			il4965_txq_agg_disable(il, txq_id, ssn, tx_fifo);
			tid_data->agg.state = IL_AGG_OFF;
			ieee80211_stop_tx_ba_cb_irqsafe(il->vif, addr, tid);
		}
		break;
	case IL_EMPTYING_HW_QUEUE_ADDBA:
		/* We are reclaiming the last packet of the queue */
		if (tid_data->tfds_in_queue == 0) {
			D_HT("HW queue empty: continue ADDBA flow\n");
			tid_data->agg.state = IL_AGG_ON;
			ieee80211_start_tx_ba_cb_irqsafe(il->vif, addr, tid);
		}
		break;
	}

	return 0;
}
2416 | ||
e7392364 | 2417 | static void |
83007196 | 2418 | il4965_non_agg_tx_status(struct il_priv *il, const u8 *addr1) |
a1751b22 SG |
2419 | { |
2420 | struct ieee80211_sta *sta; | |
2421 | struct il_station_priv *sta_priv; | |
2422 | ||
2423 | rcu_read_lock(); | |
83007196 | 2424 | sta = ieee80211_find_sta(il->vif, addr1); |
a1751b22 SG |
2425 | if (sta) { |
2426 | sta_priv = (void *)sta->drv_priv; | |
2427 | /* avoid atomic ops if this isn't a client */ | |
2428 | if (sta_priv->client && | |
2429 | atomic_dec_return(&sta_priv->pending_frames) == 0) | |
2430 | ieee80211_sta_block_awake(il->hw, sta, false); | |
2431 | } | |
2432 | rcu_read_unlock(); | |
2433 | } | |
2434 | ||
2435 | static void | |
00ea99e1 | 2436 | il4965_tx_status(struct il_priv *il, struct sk_buff *skb, bool is_agg) |
a1751b22 | 2437 | { |
00ea99e1 | 2438 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; |
a1751b22 SG |
2439 | |
2440 | if (!is_agg) | |
83007196 | 2441 | il4965_non_agg_tx_status(il, hdr->addr1); |
a1751b22 | 2442 | |
00ea99e1 | 2443 | ieee80211_tx_status_irqsafe(il->hw, skb); |
a1751b22 SG |
2444 | } |
2445 | ||
/*
 * il4965_tx_queue_reclaim - free TFDs up to (and including) index @idx
 *
 * Advances the queue's read_ptr from its current position to one past @idx,
 * completing each skb to mac80211 and freeing its TFD via the ops hook.
 *
 * Returns the number of QoS-data frames reclaimed (callers use this to
 * update per-TID tfds_in_queue accounting); 0 if @idx is invalid.
 */
int
il4965_tx_queue_reclaim(struct il_priv *il, int txq_id, int idx)
{
	struct il_tx_queue *txq = &il->txq[txq_id];
	struct il_queue *q = &txq->q;
	int nfreed = 0;
	struct ieee80211_hdr *hdr;
	struct sk_buff *skb;

	/* reject indices outside the ring or not currently in use */
	if (idx >= q->n_bd || il_queue_used(q, idx) == 0) {
		IL_ERR("Read idx for DMA queue txq id (%d), idx %d, "
		       "is out of range [0-%d] %d %d.\n", txq_id, idx, q->n_bd,
		       q->write_ptr, q->read_ptr);
		return 0;
	}

	/* walk read_ptr forward until it passes idx (wrapping at n_bd) */
	for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
	     q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		skb = txq->skbs[txq->q.read_ptr];

		/* a NULL slot indicates driver/firmware state skew; skip it */
		if (WARN_ON_ONCE(skb == NULL))
			continue;

		hdr = (struct ieee80211_hdr *) skb->data;
		/* only QoS-data frames count toward the TID accounting */
		if (ieee80211_is_data_qos(hdr->frame_control))
			nfreed++;

		/* queues >= IL4965_FIRST_AMPDU_QUEUE carry aggregated traffic */
		il4965_tx_status(il, skb, txq_id >= IL4965_FIRST_AMPDU_QUEUE);

		txq->skbs[txq->q.read_ptr] = NULL;
		il->ops->txq_free_tfd(il, txq);
	}
	return nfreed;
}
2481 | ||
/**
 * il4965_tx_status_reply_compressed_ba - Update tx status from block-ack
 *
 * Go through block-ack's bitmap of ACK'd frames, update driver's record of
 * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
 *
 * Returns 0 on success, -EINVAL if no BA was expected, -1 if the BA covers
 * more frames than the 64-bit bitmap can represent after alignment.
 */
static int
il4965_tx_status_reply_compressed_ba(struct il_priv *il, struct il_ht_agg *agg,
				     struct il_compressed_ba_resp *ba_resp)
{
	int i, sh, ack;
	u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
	int successes = 0;
	struct ieee80211_tx_info *info;
	u64 bitmap, sent_bitmap;

	/* a BA that nobody is waiting for is a firmware anomaly */
	if (unlikely(!agg->wait_for_ba)) {
		if (unlikely(ba_resp->bitmap))
			IL_ERR("Received BA when not expected\n");
		return -EINVAL;
	}

	/* Mark that the expected block-ack response arrived */
	agg->wait_for_ba = 0;
	D_TX_REPLY("BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);

	/* Calculate shift to align block-ack bits with our Tx win bits */
	sh = agg->start_idx - SEQ_TO_IDX(seq_ctl >> 4);
	if (sh < 0)		/* tbw something is wrong with indices */
		sh += 0x100;

	if (agg->frame_count > (64 - sh)) {
		D_TX_REPLY("more frames than bitmap size");
		return -1;
	}

	/* don't use 64-bit values for now */
	bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;

	/* check for success or failure according to the
	 * transmitted bitmap and block-ack bitmap */
	sent_bitmap = bitmap & agg->bitmap;

	/* For each frame attempted in aggregation,
	 * update driver's record of tx frame's status. */
	i = 0;
	while (sent_bitmap) {
		ack = sent_bitmap & 1ULL;
		successes += ack;
		D_TX_REPLY("%s ON i=%d idx=%d raw=%d\n", ack ? "ACK" : "NACK",
			   i, (agg->start_idx + i) & 0xff, agg->start_idx + i);
		sent_bitmap >>= 1;
		++i;
	}

	D_TX_REPLY("Bitmap %llx\n", (unsigned long long)bitmap);

	/* report the aggregate result on the first frame's tx_info only;
	 * mac80211 interprets it as the status of the whole A-MPDU */
	info = IEEE80211_SKB_CB(il->txq[scd_flow].skbs[agg->start_idx]);
	memset(&info->status, 0, sizeof(info->status));
	info->flags |= IEEE80211_TX_STAT_ACK;
	info->flags |= IEEE80211_TX_STAT_AMPDU;
	info->status.ampdu_ack_len = successes;
	info->status.ampdu_len = agg->frame_count;
	il4965_hwrate_to_tx_control(il, agg->rate_n_flags, info);

	return 0;
}
2550 | ||
3dfea27d SG |
2551 | static inline bool |
2552 | il4965_is_tx_success(u32 status) | |
2553 | { | |
2554 | status &= TX_STATUS_MSK; | |
2555 | return (status == TX_STATUS_SUCCESS || status == TX_STATUS_DIRECT_DONE); | |
2556 | } | |
2557 | ||
/*
 * il4965_find_station - map a MAC address to a driver station-table index
 *
 * Broadcast addresses map straight to the broadcast station id.  Otherwise
 * the station table is scanned under sta_lock (IBSS mode skips the slots
 * below IL_STA_ID, which are reserved — presumably for AP/broadcast
 * entries; verify against the table layout).
 *
 * Returns the table index, or IL_INVALID_STATION if the address is unknown
 * or the matching entry is not yet fully active in the uCode.
 */
static u8
il4965_find_station(struct il_priv *il, const u8 *addr)
{
	int i;
	int start = 0;
	int ret = IL_INVALID_STATION;
	unsigned long flags;

	if (il->iw_mode == NL80211_IFTYPE_ADHOC)
		start = IL_STA_ID;

	if (is_broadcast_ether_addr(addr))
		return il->hw_params.bcast_id;

	spin_lock_irqsave(&il->sta_lock, flags);
	for (i = start; i < il->hw_params.max_stations; i++)
		if (il->stations[i].used &&
		    (!compare_ether_addr(il->stations[i].sta.sta.addr, addr))) {
			ret = i;
			goto out;
		}

	D_ASSOC("can not find STA %pM total %d\n", addr, il->num_stations);

out:
	/*
	 * It may be possible that more commands interacting with stations
	 * arrive before we completed processing the adding of
	 * station
	 */
	if (ret != IL_INVALID_STATION &&
	    (!(il->stations[ret].used & IL_STA_UCODE_ACTIVE) ||
	     ((il->stations[ret].used & IL_STA_UCODE_ACTIVE) &&
	      (il->stations[ret].used & IL_STA_UCODE_INPROGRESS)))) {
		IL_ERR("Requested station info for sta %d before ready.\n",
		       ret);
		ret = IL_INVALID_STATION;
	}
	spin_unlock_irqrestore(&il->sta_lock, flags);
	return ret;
}
2599 | ||
2600 | static int | |
2601 | il4965_get_ra_sta_id(struct il_priv *il, struct ieee80211_hdr *hdr) | |
2602 | { | |
2603 | if (il->iw_mode == NL80211_IFTYPE_STATION) | |
2604 | return IL_AP_ID; | |
2605 | else { | |
2606 | u8 *da = ieee80211_get_DA(hdr); | |
2607 | ||
2608 | return il4965_find_station(il, da); | |
2609 | } | |
2610 | } | |
2611 | ||
/*
 * il4965_get_scd_ssn - extract the scheduler SSN from a tx response
 *
 * The SSN word is laid out immediately after the per-frame status entries,
 * i.e. frame_count 32-bit words past u.status; mask down to the sequence
 * number range with MAX_SN.
 */
static inline u32
il4965_get_scd_ssn(struct il4965_tx_resp *tx_resp)
{
	return le32_to_cpup(&tx_resp->u.status + tx_resp->frame_count) & MAX_SN;
}
2617 | ||
2618 | static inline u32 | |
2619 | il4965_tx_status_to_mac80211(u32 status) | |
2620 | { | |
2621 | status &= TX_STATUS_MSK; | |
2622 | ||
2623 | switch (status) { | |
2624 | case TX_STATUS_SUCCESS: | |
2625 | case TX_STATUS_DIRECT_DONE: | |
2626 | return IEEE80211_TX_STAT_ACK; | |
2627 | case TX_STATUS_FAIL_DEST_PS: | |
2628 | return IEEE80211_TX_STAT_TX_FILTERED; | |
2629 | default: | |
2630 | return 0; | |
2631 | } | |
2632 | } | |
2633 | ||
/**
 * il4965_tx_status_reply_tx - Handle Tx response for frames in aggregation queue
 *
 * Records the attempted-frame window in @agg so a later compressed BA can
 * be matched against it.  For a single-frame "aggregate" the status is
 * final and reported to mac80211 immediately; for two or more frames a
 * bitmap of pending indices is built and wait_for_ba is set.
 *
 * Returns 0 on success, -1 if an skb slot is unexpectedly empty or a
 * frame's sequence control disagrees with its queue index.
 */
static int
il4965_tx_status_reply_tx(struct il_priv *il, struct il_ht_agg *agg,
			  struct il4965_tx_resp *tx_resp, int txq_id,
			  u16 start_idx)
{
	u16 status;
	struct agg_tx_status *frame_status = tx_resp->u.agg_status;
	struct ieee80211_tx_info *info = NULL;
	struct ieee80211_hdr *hdr = NULL;
	u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
	int i, sh, idx;
	u16 seq;
	if (agg->wait_for_ba)
		D_TX_REPLY("got tx response w/o block-ack\n");

	agg->frame_count = tx_resp->frame_count;
	agg->start_idx = start_idx;
	agg->rate_n_flags = rate_n_flags;
	agg->bitmap = 0;

	/* num frames attempted by Tx command */
	if (agg->frame_count == 1) {
		/* Only one frame was attempted; no block-ack will arrive */
		status = le16_to_cpu(frame_status[0].status);
		idx = start_idx;

		D_TX_REPLY("FrameCnt = %d, StartIdx=%d idx=%d\n",
			   agg->frame_count, agg->start_idx, idx);

		/* report final status straight to mac80211 */
		info = IEEE80211_SKB_CB(il->txq[txq_id].skbs[idx]);
		info->status.rates[0].count = tx_resp->failure_frame + 1;
		info->flags &= ~IEEE80211_TX_CTL_AMPDU;
		info->flags |= il4965_tx_status_to_mac80211(status);
		il4965_hwrate_to_tx_control(il, rate_n_flags, info);

		D_TX_REPLY("1 Frame 0x%x failure :%d\n", status & 0xff,
			   tx_resp->failure_frame);
		D_TX_REPLY("Rate Info rate_n_flags=%x\n", rate_n_flags);

		agg->wait_for_ba = 0;
	} else {
		/* Two or more frames were attempted; expect block-ack */
		u64 bitmap = 0;
		int start = agg->start_idx;
		struct sk_buff *skb;

		/* Construct bit-map of pending frames within Tx win */
		for (i = 0; i < agg->frame_count; i++) {
			u16 sc;
			status = le16_to_cpu(frame_status[i].status);
			seq = le16_to_cpu(frame_status[i].sequence);
			idx = SEQ_TO_IDX(seq);
			txq_id = SEQ_TO_QUEUE(seq);

			/* frames postponed or aborted never hit the air;
			 * they don't belong in the pending bitmap */
			if (status &
			    (AGG_TX_STATE_FEW_BYTES_MSK |
			     AGG_TX_STATE_ABORT_MSK))
				continue;

			D_TX_REPLY("FrameCnt = %d, txq_id=%d idx=%d\n",
				   agg->frame_count, txq_id, idx);

			skb = il->txq[txq_id].skbs[idx];
			if (WARN_ON_ONCE(skb == NULL))
				return -1;
			hdr = (struct ieee80211_hdr *) skb->data;

			/* sanity: queue index must match the frame's SN */
			sc = le16_to_cpu(hdr->seq_ctrl);
			if (idx != (SEQ_TO_SN(sc) & 0xff)) {
				IL_ERR("BUG_ON idx doesn't match seq control"
				       " idx=%d, seq_idx=%d, seq=%d\n", idx,
				       SEQ_TO_SN(sc), hdr->seq_ctrl);
				return -1;
			}

			D_TX_REPLY("AGG Frame i=%d idx %d seq=%d\n", i, idx,
				   SEQ_TO_SN(sc));

			/* slide/extend the 64-bit window so bit (idx-start)
			 * fits; indices wrap modulo 0x100 */
			sh = idx - start;
			if (sh > 64) {
				sh = (start - idx) + 0xff;
				bitmap = bitmap << sh;
				sh = 0;
				start = idx;
			} else if (sh < -64)
				sh = 0xff - (start - idx);
			else if (sh < 0) {
				sh = start - idx;
				start = idx;
				bitmap = bitmap << sh;
				sh = 0;
			}
			bitmap |= 1ULL << sh;
			D_TX_REPLY("start=%d bitmap=0x%llx\n", start,
				   (unsigned long long)bitmap);
		}

		agg->bitmap = bitmap;
		agg->start_idx = start;
		D_TX_REPLY("Frames %d start_idx=%d bitmap=0x%llx\n",
			   agg->frame_count, agg->start_idx,
			   (unsigned long long)agg->bitmap);

		/* only wait for a BA if at least one frame is pending */
		if (bitmap)
			agg->wait_for_ba = 1;
	}
	return 0;
}
2745 | ||
/**
 * il4965_hdl_tx - Handle standard (non-aggregation) Tx response
 *
 * Invoked for C_TX responses from the uCode.  Two paths:
 *  - scheduled-retry (aggregation) queues: feed the per-frame statuses into
 *    the aggregation state machine, then reclaim up to the scheduler SSN;
 *  - normal queues: report the status to mac80211 and reclaim up to @idx.
 * In both cases the mac80211 queue is woken when enough space frees up.
 * Station/aggregation state is protected by sta_lock.
 */
static void
il4965_hdl_tx(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int idx = SEQ_TO_IDX(sequence);
	struct il_tx_queue *txq = &il->txq[txq_id];
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *info;
	struct il4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
	u32 status = le32_to_cpu(tx_resp->u.status);
	/* tid is only assigned for QoS frames; guarded by qc != NULL below */
	int uninitialized_var(tid);
	int sta_id;
	int freed;
	u8 *qc = NULL;
	unsigned long flags;

	if (idx >= txq->q.n_bd || il_queue_used(&txq->q, idx) == 0) {
		IL_ERR("Read idx for DMA queue txq_id (%d) idx %d "
		       "is out of range [0-%d] %d %d\n", txq_id, idx,
		       txq->q.n_bd, txq->q.write_ptr, txq->q.read_ptr);
		return;
	}

	/* note activity for the stuck-queue watchdog */
	txq->time_stamp = jiffies;

	skb = txq->skbs[txq->q.read_ptr];
	info = IEEE80211_SKB_CB(skb);
	memset(&info->status, 0, sizeof(info->status));

	hdr = (struct ieee80211_hdr *) skb->data;
	if (ieee80211_is_data_qos(hdr->frame_control)) {
		qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & 0xf;
	}

	sta_id = il4965_get_ra_sta_id(il, hdr);
	if (txq->sched_retry && unlikely(sta_id == IL_INVALID_STATION)) {
		IL_ERR("Station not known\n");
		return;
	}

	spin_lock_irqsave(&il->sta_lock, flags);
	if (txq->sched_retry) {
		/* aggregation queue: SSN tells us how far to reclaim */
		const u32 scd_ssn = il4965_get_scd_ssn(tx_resp);
		struct il_ht_agg *agg = NULL;
		WARN_ON(!qc);

		agg = &il->stations[sta_id].tid[tid].agg;

		il4965_tx_status_reply_tx(il, agg, tx_resp, txq_id, idx);

		/* check if BAR is needed */
		if (tx_resp->frame_count == 1 &&
		    !il4965_is_tx_success(status))
			info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;

		if (txq->q.read_ptr != (scd_ssn & 0xff)) {
			idx = il_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
			D_TX_REPLY("Retry scheduler reclaim scd_ssn "
				   "%d idx %d\n", scd_ssn, idx);
			freed = il4965_tx_queue_reclaim(il, txq_id, idx);
			if (qc)
				il4965_free_tfds_in_queue(il, sta_id, tid,
							  freed);

			/* don't wake while tearing down the BA session */
			if (il->mac80211_registered &&
			    il_queue_space(&txq->q) > txq->q.low_mark &&
			    agg->state != IL_EMPTYING_HW_QUEUE_DELBA)
				il_wake_queue(il, txq);
		}
	} else {
		/* normal queue: single-frame status, reclaim up to idx */
		info->status.rates[0].count = tx_resp->failure_frame + 1;
		info->flags |= il4965_tx_status_to_mac80211(status);
		il4965_hwrate_to_tx_control(il,
					    le32_to_cpu(tx_resp->rate_n_flags),
					    info);

		D_TX_REPLY("TXQ %d status %s (0x%08x) "
			   "rate_n_flags 0x%x retries %d\n", txq_id,
			   il4965_get_tx_fail_reason(status), status,
			   le32_to_cpu(tx_resp->rate_n_flags),
			   tx_resp->failure_frame);

		freed = il4965_tx_queue_reclaim(il, txq_id, idx);
		if (qc && likely(sta_id != IL_INVALID_STATION))
			il4965_free_tfds_in_queue(il, sta_id, tid, freed);
		else if (sta_id == IL_INVALID_STATION)
			D_TX_REPLY("Station not known\n");

		if (il->mac80211_registered &&
		    il_queue_space(&txq->q) > txq->q.low_mark)
			il_wake_queue(il, txq);
	}
	if (qc && likely(sta_id != IL_INVALID_STATION))
		il4965_txq_check_empty(il, sta_id, tid, txq_id);

	il4965_check_abort_status(il, tx_resp->frame_count, status);

	spin_unlock_irqrestore(&il->sta_lock, flags);
}
2852 | ||
/**
 * translate ucode response to mac80211 tx status control values
 *
 * Decodes the hardware rate_n_flags word into info->control.rates[0]:
 * antenna selection, HT/greenfield/40MHz/duplicate/short-GI flags, and the
 * mac80211 rate index for the current band.
 */
void
il4965_hwrate_to_tx_control(struct il_priv *il, u32 rate_n_flags,
			    struct ieee80211_tx_info *info)
{
	struct ieee80211_tx_rate *r = &info->control.rates[0];

	info->antenna_sel_tx =
	    ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
	/* map each hardware rate flag onto its mac80211 counterpart */
	if (rate_n_flags & RATE_MCS_HT_MSK)
		r->flags |= IEEE80211_TX_RC_MCS;
	if (rate_n_flags & RATE_MCS_GF_MSK)
		r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
	if (rate_n_flags & RATE_MCS_HT40_MSK)
		r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
	if (rate_n_flags & RATE_MCS_DUP_MSK)
		r->flags |= IEEE80211_TX_RC_DUP_DATA;
	if (rate_n_flags & RATE_MCS_SGI_MSK)
		r->flags |= IEEE80211_TX_RC_SHORT_GI;
	r->idx = il4965_hwrate_to_mac80211_idx(rate_n_flags, info->band);
}
2876 | ||
/**
 * il4965_hdl_compressed_ba - Handler for N_COMPRESSED_BA
 *
 * Handles block-acknowledge notification from device, which reports success
 * of frames sent via aggregation.  Matches the BA bitmap against the
 * recorded tx window, then reclaims everything in front of the BA window's
 * start (ssn) and wakes the queue if space opened up.
 */
void
il4965_hdl_compressed_ba(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
	struct il_tx_queue *txq = NULL;
	struct il_ht_agg *agg;
	int idx;
	int sta_id;
	int tid;
	unsigned long flags;

	/* "flow" corresponds to Tx queue */
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);

	/* "ssn" is start of block-ack Tx win, corresponds to idx
	 * (in Tx queue's circular buffer) of first TFD/frame in win */
	u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);

	if (scd_flow >= il->hw_params.max_txq_num) {
		IL_ERR("BUG_ON scd_flow is bigger than number of queues\n");
		return;
	}

	txq = &il->txq[scd_flow];
	sta_id = ba_resp->sta_id;
	tid = ba_resp->tid;
	agg = &il->stations[sta_id].tid[tid].agg;
	if (unlikely(agg->txq_id != scd_flow)) {
		/*
		 * FIXME: this is a uCode bug which need to be addressed,
		 * log the information and return for now!
		 * since it is possible happen very often and in order
		 * not to fill the syslog, don't enable the logging by default
		 */
		D_TX_REPLY("BA scd_flow %d does not match txq_id %d\n",
			   scd_flow, agg->txq_id);
		return;
	}

	/* Find idx just before block-ack win */
	idx = il_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);

	spin_lock_irqsave(&il->sta_lock, flags);

	D_TX_REPLY("N_COMPRESSED_BA [%d] Received from %pM, " "sta_id = %d\n",
		   agg->wait_for_ba, (u8 *) &ba_resp->sta_addr_lo32,
		   ba_resp->sta_id);
	D_TX_REPLY("TID = %d, SeqCtl = %d, bitmap = 0x%llx," "scd_flow = "
		   "%d, scd_ssn = %d\n", ba_resp->tid, ba_resp->seq_ctl,
		   (unsigned long long)le64_to_cpu(ba_resp->bitmap),
		   ba_resp->scd_flow, ba_resp->scd_ssn);
	D_TX_REPLY("DAT start_idx = %d, bitmap = 0x%llx\n", agg->start_idx,
		   (unsigned long long)agg->bitmap);

	/* Update driver's record of ACK vs. not for each frame in win */
	il4965_tx_status_reply_compressed_ba(il, agg, ba_resp);

	/* Release all TFDs before the SSN, i.e. all TFDs in front of
	 * block-ack win (we assume that they've been successfully
	 * transmitted ... if not, it's too late anyway). */
	if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
		/* calculate mac80211 ampdu sw queue to wake */
		int freed = il4965_tx_queue_reclaim(il, scd_flow, idx);
		il4965_free_tfds_in_queue(il, sta_id, tid, freed);

		if (il_queue_space(&txq->q) > txq->q.low_mark &&
		    il->mac80211_registered &&
		    agg->state != IL_EMPTYING_HW_QUEUE_DELBA)
			il_wake_queue(il, txq);

		il4965_txq_check_empty(il, sta_id, tid, scd_flow);
	}

	spin_unlock_irqrestore(&il->sta_lock, flags);
}
2959 | ||
#ifdef CONFIG_IWLEGACY_DEBUG
/*
 * il4965_get_tx_fail_reason - human-readable name for a uCode tx status
 *
 * Debug-only helper: the TX_STATUS_FAIL/TX_STATUS_POSTPONE macros expand
 * each enumerator into a "case X: return #X" arm, keeping the case labels
 * and strings mechanically in sync.  Unknown codes yield "UNKNOWN".
 */
const char *
il4965_get_tx_fail_reason(u32 status)
{
#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x

	switch (status & TX_STATUS_MSK) {
	case TX_STATUS_SUCCESS:
		return "SUCCESS";
		TX_STATUS_POSTPONE(DELAY);
		TX_STATUS_POSTPONE(FEW_BYTES);
		TX_STATUS_POSTPONE(QUIET_PERIOD);
		TX_STATUS_POSTPONE(CALC_TTAK);
		TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
		TX_STATUS_FAIL(SHORT_LIMIT);
		TX_STATUS_FAIL(LONG_LIMIT);
		TX_STATUS_FAIL(FIFO_UNDERRUN);
		TX_STATUS_FAIL(DRAIN_FLOW);
		TX_STATUS_FAIL(RFKILL_FLUSH);
		TX_STATUS_FAIL(LIFE_EXPIRE);
		TX_STATUS_FAIL(DEST_PS);
		TX_STATUS_FAIL(HOST_ABORTED);
		TX_STATUS_FAIL(BT_RETRY);
		TX_STATUS_FAIL(STA_INVALID);
		TX_STATUS_FAIL(FRAG_DROPPED);
		TX_STATUS_FAIL(TID_DISABLE);
		TX_STATUS_FAIL(FIFO_FLUSHED);
		TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
		TX_STATUS_FAIL(PASSIVE_NO_RX);
		TX_STATUS_FAIL(NO_BEACON_ON_RADAR);
	}

	return "UNKNOWN";

#undef TX_STATUS_FAIL
#undef TX_STATUS_POSTPONE
}
#endif /* CONFIG_IWLEGACY_DEBUG */
2999 | ||
/*
 * il4965_sta_alloc_lq - allocate and fill a default link-quality command
 *
 * Builds an LQ command whose entire retry table is pinned to the band's
 * lowest mandatory rate (6M on 5GHz, 1M on 2.4GHz) with sensible antenna
 * masks, so a new station has a working rate table until rate scaling
 * takes over.
 *
 * Returns a kzalloc'd command (caller owns and must free it, typically by
 * storing it in stations[].lq), or NULL on allocation failure.
 */
static struct il_link_quality_cmd *
il4965_sta_alloc_lq(struct il_priv *il, u8 sta_id)
{
	int i, r;
	struct il_link_quality_cmd *link_cmd;
	u32 rate_flags = 0;
	__le32 rate_n_flags;

	link_cmd = kzalloc(sizeof(struct il_link_quality_cmd), GFP_KERNEL);
	if (!link_cmd) {
		IL_ERR("Unable to allocate memory for LQ cmd.\n");
		return NULL;
	}
	/* Set up the rate scaling to start at selected rate, fall back
	 * all the way down to 1M in IEEE order, and then spin on 1M */
	if (il->band == IEEE80211_BAND_5GHZ)
		r = RATE_6M_IDX;
	else
		r = RATE_1M_IDX;

	if (r >= IL_FIRST_CCK_RATE && r <= IL_LAST_CCK_RATE)
		rate_flags |= RATE_MCS_CCK_MSK;

	rate_flags |=
	    il4965_first_antenna(il->hw_params.
				 valid_tx_ant) << RATE_MCS_ANT_POS;
	rate_n_flags = cpu_to_le32(il_rates[r].plcp | rate_flags);
	/* every retry step uses the same safe rate */
	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
		link_cmd->rs_table[i].rate_n_flags = rate_n_flags;

	link_cmd->general_params.single_stream_ant_msk =
	    il4965_first_antenna(il->hw_params.valid_tx_ant);

	link_cmd->general_params.dual_stream_ant_msk =
	    il->hw_params.valid_tx_ant & ~il4965_first_antenna(il->hw_params.
							       valid_tx_ant);
	if (!link_cmd->general_params.dual_stream_ant_msk) {
		link_cmd->general_params.dual_stream_ant_msk = ANT_AB;
	} else if (il4965_num_of_ant(il->hw_params.valid_tx_ant) == 2) {
		link_cmd->general_params.dual_stream_ant_msk =
		    il->hw_params.valid_tx_ant;
	}

	link_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
	link_cmd->agg_params.agg_time_limit =
	    cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);

	link_cmd->sta_id = sta_id;

	return link_cmd;
}
3051 | ||
/*
 * il4965_add_bssid_station - Add the special IBSS BSSID station
 *
 * Adds the station to the device's table, marks it as a driver-local
 * entry, and installs a default link-quality (rate) table for it.
 * On success *sta_id_r (if non-NULL) receives the station index.
 *
 * Function sleeps.
 */
int
il4965_add_bssid_station(struct il_priv *il, const u8 *addr, u8 *sta_id_r)
{
	int ret;
	u8 sta_id;
	struct il_link_quality_cmd *link_cmd;
	unsigned long flags;

	if (sta_id_r)
		*sta_id_r = IL_INVALID_STATION;

	ret = il_add_station_common(il, addr, 0, NULL, &sta_id);
	if (ret) {
		IL_ERR("Unable to add station %pM\n", addr);
		return ret;
	}

	if (sta_id_r)
		*sta_id_r = sta_id;

	spin_lock_irqsave(&il->sta_lock, flags);
	il->stations[sta_id].used |= IL_STA_LOCAL;
	spin_unlock_irqrestore(&il->sta_lock, flags);

	/* Set up default rate scaling table in device's station table */
	link_cmd = il4965_sta_alloc_lq(il, sta_id);
	if (!link_cmd) {
		IL_ERR("Unable to initialize rate scaling for station %pM.\n",
		       addr);
		return -ENOMEM;
	}

	ret = il_send_lq_cmd(il, link_cmd, CMD_SYNC, true);
	if (ret)
		IL_ERR("Link quality command failed (%d)\n", ret);

	/* ownership of link_cmd passes to the station table entry */
	spin_lock_irqsave(&il->sta_lock, flags);
	il->stations[sta_id].lq = link_cmd;
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return 0;
}
3099 | ||
/*
 * il4965_static_wepkey_cmd - push all static (default) WEP keys to uCode
 *
 * Builds a C_WEPKEY command covering all WEP_KEYS_MAX slots from the
 * driver's cached keys.  Empty slots get WEP_INVALID_OFFSET.  The command
 * is only sent if at least one key is set, or if @send_if_empty forces it
 * (used to clear keys on the device).
 *
 * Sleeps (synchronous host command).  Returns 0 or il_send_cmd() error.
 */
static int
il4965_static_wepkey_cmd(struct il_priv *il, bool send_if_empty)
{
	int i;
	u8 buff[sizeof(struct il_wep_cmd) +
		sizeof(struct il_wep_key) * WEP_KEYS_MAX];
	struct il_wep_cmd *wep_cmd = (struct il_wep_cmd *)buff;
	size_t cmd_size = sizeof(struct il_wep_cmd);
	struct il_host_cmd cmd = {
		.id = C_WEPKEY,
		.data = wep_cmd,
		.flags = CMD_SYNC,
	};
	bool not_empty = false;

	might_sleep();

	memset(wep_cmd, 0,
	       cmd_size + (sizeof(struct il_wep_key) * WEP_KEYS_MAX));

	for (i = 0; i < WEP_KEYS_MAX; i++) {
		u8 key_size = il->_4965.wep_keys[i].key_size;

		wep_cmd->key[i].key_idx = i;
		if (key_size) {
			wep_cmd->key[i].key_offset = i;
			not_empty = true;
		} else
			wep_cmd->key[i].key_offset = WEP_INVALID_OFFSET;

		wep_cmd->key[i].key_size = key_size;
		/* key material starts at offset 3 in the uCode key field —
		 * presumably the first 3 bytes are reserved for IV/format;
		 * matches il4965_set_wep_dynamic_key_info */
		memcpy(&wep_cmd->key[i].key[3], il->_4965.wep_keys[i].key, key_size);
	}

	wep_cmd->global_key_type = WEP_KEY_WEP_TYPE;
	wep_cmd->num_keys = WEP_KEYS_MAX;

	cmd_size += sizeof(struct il_wep_key) * WEP_KEYS_MAX;
	cmd.len = cmd_size;

	if (not_empty || send_if_empty)
		return il_send_cmd(il, &cmd);
	else
		return 0;
}
3145 | ||
/*
 * il4965_restore_default_wep_keys - resend cached static WEP keys to uCode
 *
 * Thin wrapper used after (re)init; does not force an empty-key command.
 * Caller must hold il->mutex (the command path sleeps).
 */
int
il4965_restore_default_wep_keys(struct il_priv *il)
{
	lockdep_assert_held(&il->mutex);

	return il4965_static_wepkey_cmd(il, false);
}
3153 | ||
e7392364 | 3154 | int |
83007196 | 3155 | il4965_remove_default_wep_key(struct il_priv *il, |
e7392364 | 3156 | struct ieee80211_key_conf *keyconf) |
eb3cdfb7 SG |
3157 | { |
3158 | int ret; | |
d735f921 | 3159 | int idx = keyconf->keyidx; |
eb3cdfb7 SG |
3160 | |
3161 | lockdep_assert_held(&il->mutex); | |
3162 | ||
d735f921 | 3163 | D_WEP("Removing default WEP key: idx=%d\n", idx); |
eb3cdfb7 | 3164 | |
d735f921 | 3165 | memset(&il->_4965.wep_keys[idx], 0, sizeof(struct il_wep_key)); |
eb3cdfb7 | 3166 | if (il_is_rfkill(il)) { |
e7392364 | 3167 | D_WEP("Not sending C_WEPKEY command due to RFKILL.\n"); |
eb3cdfb7 SG |
3168 | /* but keys in device are clear anyway so return success */ |
3169 | return 0; | |
3170 | } | |
83007196 | 3171 | ret = il4965_static_wepkey_cmd(il, 1); |
d735f921 | 3172 | D_WEP("Remove default WEP key: idx=%d ret=%d\n", idx, ret); |
eb3cdfb7 SG |
3173 | |
3174 | return ret; | |
3175 | } | |
3176 | ||
/*
 * il4965_set_default_wep_key - install one static WEP key slot
 *
 * Validates the key length (40- or 104-bit), caches the key in the
 * driver's _4965.wep_keys table, and pushes the full key set to uCode.
 * GENERATE_IV is cleared because static WEP IVs are handled in hardware.
 *
 * Caller must hold il->mutex.  Returns 0, -EINVAL on bad length, or the
 * host-command error.
 */
int
il4965_set_default_wep_key(struct il_priv *il,
			   struct ieee80211_key_conf *keyconf)
{
	int ret;
	int len = keyconf->keylen;
	int idx = keyconf->keyidx;

	lockdep_assert_held(&il->mutex);

	if (len != WEP_KEY_LEN_128 && len != WEP_KEY_LEN_64) {
		D_WEP("Bad WEP key length %d\n", keyconf->keylen);
		return -EINVAL;
	}

	keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
	keyconf->hw_key_idx = HW_KEY_DEFAULT;
	il->stations[IL_AP_ID].keyinfo.cipher = keyconf->cipher;

	il->_4965.wep_keys[idx].key_size = len;
	memcpy(&il->_4965.wep_keys[idx].key, &keyconf->key, len);

	ret = il4965_static_wepkey_cmd(il, false);

	D_WEP("Set default WEP key: len=%d idx=%d ret=%d\n", len, idx, ret);
	return ret;
}
3204 | ||
/*
 * il4965_set_wep_dynamic_key_info - install a per-station (dynamic) WEP key
 *
 * Caches the key in the station entry and sends an ADD_STA command with
 * STA_MODIFY_KEY_MASK to program it into the uCode.  A free uCode key
 * offset is allocated only when the slot does not already hold a key.
 *
 * Caller must hold il->mutex; station state is updated under sta_lock and
 * the command is built from a stack copy so the lock is not held across
 * the (sleeping) il_send_add_sta call.
 */
static int
il4965_set_wep_dynamic_key_info(struct il_priv *il,
				struct ieee80211_key_conf *keyconf, u8 sta_id)
{
	unsigned long flags;
	__le16 key_flags = 0;
	struct il_addsta_cmd sta_cmd;

	lockdep_assert_held(&il->mutex);

	/* hardware generates the IV for this key */
	keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;

	key_flags |= (STA_KEY_FLG_WEP | STA_KEY_FLG_MAP_KEY_MSK);
	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
	key_flags &= ~STA_KEY_FLG_INVALID;

	if (keyconf->keylen == WEP_KEY_LEN_128)
		key_flags |= STA_KEY_FLG_KEY_SIZE_MSK;

	if (sta_id == il->hw_params.bcast_id)
		key_flags |= STA_KEY_MULTICAST_MSK;

	spin_lock_irqsave(&il->sta_lock, flags);

	il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
	il->stations[sta_id].keyinfo.keylen = keyconf->keylen;
	il->stations[sta_id].keyinfo.keyidx = keyconf->keyidx;

	memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, keyconf->keylen);

	/* WEP key material starts at offset 3 in the uCode key field;
	 * matches the layout used by il4965_static_wepkey_cmd */
	memcpy(&il->stations[sta_id].sta.key.key[3], keyconf->key,
	       keyconf->keylen);

	if ((il->stations[sta_id].sta.key.
	     key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
		il->stations[sta_id].sta.key.key_offset =
		    il_get_free_ucode_key_idx(il);
	/* else, we are overriding an existing key => no need to allocated room
	 * in uCode. */

	WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
	     "no space for a new key");

	il->stations[sta_id].sta.key.key_flags = key_flags;
	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;

	/* snapshot the command so sta_lock isn't held while sleeping */
	memcpy(&sta_cmd, &il->stations[sta_id].sta,
	       sizeof(struct il_addsta_cmd));
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
}
3258 | ||
/*
 * il4965_set_ccmp_dynamic_key_info - install a dynamic CCMP key for a station
 *
 * Builds the STA key flags (CCMP cipher, key index, multicast when targeting
 * the broadcast station), stores the key material both in the driver's
 * station table and in the ADD_STA key payload, allocates a uCode key-table
 * slot if the station holds no encrypted key yet, and pushes the updated
 * station entry to the device synchronously.
 *
 * Caller must hold il->mutex; il->sta_lock is taken internally around the
 * station-table update.  Returns the result of il_send_add_sta().
 */
static int
il4965_set_ccmp_dynamic_key_info(struct il_priv *il,
				 struct ieee80211_key_conf *keyconf, u8 sta_id)
{
	unsigned long flags;
	__le16 key_flags = 0;
	struct il_addsta_cmd sta_cmd;

	lockdep_assert_held(&il->mutex);

	key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
	key_flags &= ~STA_KEY_FLG_INVALID;

	if (sta_id == il->hw_params.bcast_id)
		key_flags |= STA_KEY_MULTICAST_MSK;

	/* Ask the stack to generate the IV for frames using this key. */
	keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;

	spin_lock_irqsave(&il->sta_lock, flags);
	il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
	il->stations[sta_id].keyinfo.keylen = keyconf->keylen;

	memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, keyconf->keylen);

	memcpy(il->stations[sta_id].sta.key.key, keyconf->key, keyconf->keylen);

	/* Grab a free uCode key-table slot only when the station currently
	 * carries no encrypted key. */
	if ((il->stations[sta_id].sta.key.
	     key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
		il->stations[sta_id].sta.key.key_offset =
		    il_get_free_ucode_key_idx(il);
	/* else, we are overriding an existing key => no need to allocate room
	 * in uCode. */

	WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
	     "no space for a new key");

	il->stations[sta_id].sta.key.key_flags = key_flags;
	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;

	/* Snapshot the command under the lock, send it after releasing it. */
	memcpy(&sta_cmd, &il->stations[sta_id].sta,
	       sizeof(struct il_addsta_cmd));
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
}
3306 | ||
/*
 * il4965_set_tkip_dynamic_key_info - install a dynamic TKIP key for a station
 *
 * Records the TKIP key in the driver's station table and in the ADD_STA key
 * payload and reserves a uCode key-table slot if needed.  Unlike the CCMP
 * path, no ADD_STA command is sent from here; the device is updated later
 * (e.g. via il4965_update_tkip_key() with the phase-1 key, and per-TX key
 * material).  Always returns 0.
 *
 * il->sta_lock is taken internally while the station table is modified.
 */
static int
il4965_set_tkip_dynamic_key_info(struct il_priv *il,
				 struct ieee80211_key_conf *keyconf, u8 sta_id)
{
	unsigned long flags;
	int ret = 0;
	__le16 key_flags = 0;

	key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK);
	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
	key_flags &= ~STA_KEY_FLG_INVALID;

	if (sta_id == il->hw_params.bcast_id)
		key_flags |= STA_KEY_MULTICAST_MSK;

	/* Stack generates IV and Michael MIC; hardware handles TKIP crypto. */
	keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
	keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;

	spin_lock_irqsave(&il->sta_lock, flags);

	il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
	/* TKIP temporal key is always 16 bytes (TX/RX MIC keys are separate) */
	il->stations[sta_id].keyinfo.keylen = 16;

	if ((il->stations[sta_id].sta.key.
	     key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
		il->stations[sta_id].sta.key.key_offset =
		    il_get_free_ucode_key_idx(il);
	/* else, we are overriding an existing key => no need to allocate room
	 * in uCode. */

	WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
	     "no space for a new key");

	il->stations[sta_id].sta.key.key_flags = key_flags;

	/* This copy is actually not needed: we get the key with each TX */
	memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, 16);

	memcpy(il->stations[sta_id].sta.key.key, keyconf->key, 16);

	spin_unlock_irqrestore(&il->sta_lock, flags);

	return ret;
}
3351 | ||
/*
 * il4965_update_tkip_key - push a new TKIP phase-1 (TTAK) key to the device
 *
 * Called when the TSC IV32 rolls over and a fresh phase-1 RX key has been
 * derived.  Cancels any scan in progress first (if the cancel fails, we
 * bail out and rely briefly on software decryption), then writes the new
 * TTAK words into the station entry and sends it asynchronously.
 */
void
il4965_update_tkip_key(struct il_priv *il, struct ieee80211_key_conf *keyconf,
		       struct ieee80211_sta *sta, u32 iv32, u16 *phase1key)
{
	u8 sta_id;
	unsigned long flags;
	int i;

	if (il_scan_cancel(il)) {
		/* cancel scan failed, just live w/ bad key and rely
		   briefly on SW decryption */
		return;
	}

	/* NULL sta maps to the broadcast station (group key case). */
	sta_id = il_sta_id_or_broadcast(il, sta);
	if (sta_id == IL_INVALID_STATION)
		return;

	spin_lock_irqsave(&il->sta_lock, flags);

	il->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32;

	/* 5 x 16-bit words of the phase-1 (TTAK) key, device-endian. */
	for (i = 0; i < 5; i++)
		il->stations[sta_id].sta.key.tkip_rx_ttak[i] =
		    cpu_to_le16(phase1key[i]);

	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;

	/* ASYNC send: safe to issue while holding the spinlock. */
	il_send_add_sta(il, &il->stations[sta_id].sta, CMD_ASYNC);

	spin_unlock_irqrestore(&il->sta_lock, flags);
}
3385 | ||
/*
 * il4965_remove_dynamic_key - remove a dynamic (pairwise/group) key
 *
 * Clears the key from the driver's station table, releases its uCode
 * key-table slot, and tells the device via a synchronous ADD_STA with
 * "no encryption / invalid" key flags.
 *
 * Returns 0 without touching the device when the key was already replaced
 * (index mismatch) or already gone (invalid offset), or when RFKILL is
 * active (restore will reprogram keys anyway).
 *
 * Caller must hold il->mutex; il->sta_lock is taken internally.
 */
int
il4965_remove_dynamic_key(struct il_priv *il,
			  struct ieee80211_key_conf *keyconf, u8 sta_id)
{
	unsigned long flags;
	u16 key_flags;
	u8 keyidx;
	struct il_addsta_cmd sta_cmd;

	lockdep_assert_held(&il->mutex);

	il->_4965.key_mapping_keys--;

	spin_lock_irqsave(&il->sta_lock, flags);
	key_flags = le16_to_cpu(il->stations[sta_id].sta.key.key_flags);
	keyidx = (key_flags >> STA_KEY_FLG_KEYID_POS) & 0x3;

	D_WEP("Remove dynamic key: idx=%d sta=%d\n", keyconf->keyidx, sta_id);

	if (keyconf->keyidx != keyidx) {
		/* We need to remove a key with idx different that the one
		 * in the uCode. This means that the key we need to remove has
		 * been replaced by another one with different idx.
		 * Don't do anything and return ok
		 */
		spin_unlock_irqrestore(&il->sta_lock, flags);
		return 0;
	}

	if (il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) {
		/* No uCode slot assigned - nothing to remove on the device. */
		IL_WARN("Removing wrong key %d 0x%x\n", keyconf->keyidx,
			key_flags);
		spin_unlock_irqrestore(&il->sta_lock, flags);
		return 0;
	}

	/* Free the uCode key-table slot; warn if bookkeeping was stale. */
	if (!test_and_clear_bit
	    (il->stations[sta_id].sta.key.key_offset, &il->ucode_key_table))
		IL_ERR("idx %d not used in uCode key table.\n",
		       il->stations[sta_id].sta.key.key_offset);
	memset(&il->stations[sta_id].keyinfo, 0, sizeof(struct il_hw_key));
	memset(&il->stations[sta_id].sta.key, 0, sizeof(struct il4965_keyinfo));
	il->stations[sta_id].sta.key.key_flags =
	    STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
	il->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET;
	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;

	if (il_is_rfkill(il)) {
		D_WEP
		    ("Not sending C_ADD_STA command because RFKILL enabled.\n");
		spin_unlock_irqrestore(&il->sta_lock, flags);
		return 0;
	}
	/* Copy the command out of the locked region before sending. */
	memcpy(&sta_cmd, &il->stations[sta_id].sta,
	       sizeof(struct il_addsta_cmd));
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
}
3446 | ||
e7392364 | 3447 | int |
83007196 SG |
3448 | il4965_set_dynamic_key(struct il_priv *il, struct ieee80211_key_conf *keyconf, |
3449 | u8 sta_id) | |
eb3cdfb7 SG |
3450 | { |
3451 | int ret; | |
3452 | ||
3453 | lockdep_assert_held(&il->mutex); | |
3454 | ||
d735f921 | 3455 | il->_4965.key_mapping_keys++; |
eb3cdfb7 SG |
3456 | keyconf->hw_key_idx = HW_KEY_DYNAMIC; |
3457 | ||
3458 | switch (keyconf->cipher) { | |
3459 | case WLAN_CIPHER_SUITE_CCMP: | |
e7392364 | 3460 | ret = |
83007196 | 3461 | il4965_set_ccmp_dynamic_key_info(il, keyconf, sta_id); |
eb3cdfb7 SG |
3462 | break; |
3463 | case WLAN_CIPHER_SUITE_TKIP: | |
e7392364 | 3464 | ret = |
83007196 | 3465 | il4965_set_tkip_dynamic_key_info(il, keyconf, sta_id); |
eb3cdfb7 SG |
3466 | break; |
3467 | case WLAN_CIPHER_SUITE_WEP40: | |
3468 | case WLAN_CIPHER_SUITE_WEP104: | |
83007196 | 3469 | ret = il4965_set_wep_dynamic_key_info(il, keyconf, sta_id); |
eb3cdfb7 SG |
3470 | break; |
3471 | default: | |
e7392364 SG |
3472 | IL_ERR("Unknown alg: %s cipher = %x\n", __func__, |
3473 | keyconf->cipher); | |
eb3cdfb7 SG |
3474 | ret = -EINVAL; |
3475 | } | |
3476 | ||
e7392364 SG |
3477 | D_WEP("Set dynamic key: cipher=%x len=%d idx=%d sta=%d ret=%d\n", |
3478 | keyconf->cipher, keyconf->keylen, keyconf->keyidx, sta_id, ret); | |
eb3cdfb7 SG |
3479 | |
3480 | return ret; | |
3481 | } | |
3482 | ||
/**
 * il4965_alloc_bcast_station - add broadcast station into driver's station table.
 *
 * This adds the broadcast station into the driver's station table
 * and marks it driver active, so that it will be restored to the
 * device at the next best time.
 *
 * Returns 0 on success, -EINVAL if the station slot could not be prepared,
 * or -ENOMEM if the link-quality (rate scaling) command allocation fails.
 */
int
il4965_alloc_bcast_station(struct il_priv *il)
{
	struct il_link_quality_cmd *link_cmd;
	unsigned long flags;
	u8 sta_id;

	spin_lock_irqsave(&il->sta_lock, flags);
	/* Reserve a station-table entry for the broadcast address. */
	sta_id = il_prep_station(il, il_bcast_addr, false, NULL);
	if (sta_id == IL_INVALID_STATION) {
		IL_ERR("Unable to prepare broadcast station\n");
		spin_unlock_irqrestore(&il->sta_lock, flags);

		return -EINVAL;
	}

	il->stations[sta_id].used |= IL_STA_DRIVER_ACTIVE;
	il->stations[sta_id].used |= IL_STA_BCAST;
	spin_unlock_irqrestore(&il->sta_lock, flags);

	/* Allocate the initial rate-scaling (LQ) command outside the lock. */
	link_cmd = il4965_sta_alloc_lq(il, sta_id);
	if (!link_cmd) {
		IL_ERR
		    ("Unable to initialize rate scaling for bcast station.\n");
		return -ENOMEM;
	}

	spin_lock_irqsave(&il->sta_lock, flags);
	il->stations[sta_id].lq = link_cmd;
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return 0;
}
3523 | ||
3524 | /** | |
3525 | * il4965_update_bcast_station - update broadcast station's LQ command | |
3526 | * | |
3527 | * Only used by iwl4965. Placed here to have all bcast station management | |
3528 | * code together. | |
3529 | */ | |
e7392364 | 3530 | static int |
83007196 | 3531 | il4965_update_bcast_station(struct il_priv *il) |
eb3cdfb7 SG |
3532 | { |
3533 | unsigned long flags; | |
3534 | struct il_link_quality_cmd *link_cmd; | |
b16db50a | 3535 | u8 sta_id = il->hw_params.bcast_id; |
eb3cdfb7 SG |
3536 | |
3537 | link_cmd = il4965_sta_alloc_lq(il, sta_id); | |
3538 | if (!link_cmd) { | |
1722f8e1 | 3539 | IL_ERR("Unable to initialize rate scaling for bcast sta.\n"); |
eb3cdfb7 SG |
3540 | return -ENOMEM; |
3541 | } | |
3542 | ||
3543 | spin_lock_irqsave(&il->sta_lock, flags); | |
3544 | if (il->stations[sta_id].lq) | |
3545 | kfree(il->stations[sta_id].lq); | |
3546 | else | |
1722f8e1 | 3547 | D_INFO("Bcast sta rate scaling has not been initialized.\n"); |
eb3cdfb7 SG |
3548 | il->stations[sta_id].lq = link_cmd; |
3549 | spin_unlock_irqrestore(&il->sta_lock, flags); | |
3550 | ||
3551 | return 0; | |
3552 | } | |
3553 | ||
/*
 * il4965_update_bcast_stations - refresh LQ for all broadcast stations.
 * 4965 has a single broadcast station context, so this simply forwards.
 */
int
il4965_update_bcast_stations(struct il_priv *il)
{
	return il4965_update_bcast_station(il);
}
3559 | ||
/**
 * il4965_sta_tx_modify_enable_tid - Enable Tx for this TID in station table
 *
 * Clears the per-TID "disable Tx" bit in the station entry and sends the
 * updated entry to the device synchronously.  Caller must hold il->mutex;
 * il->sta_lock is taken internally.  Returns il_send_add_sta()'s result.
 */
int
il4965_sta_tx_modify_enable_tid(struct il_priv *il, int sta_id, int tid)
{
	unsigned long flags;
	struct il_addsta_cmd sta_cmd;

	lockdep_assert_held(&il->mutex);

	/* Remove "disable" flag, to enable Tx for this TID */
	spin_lock_irqsave(&il->sta_lock, flags);
	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
	il->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
	/* Snapshot under lock; send after unlocking. */
	memcpy(&sta_cmd, &il->stations[sta_id].sta,
	       sizeof(struct il_addsta_cmd));
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
}
3582 | ||
/*
 * il4965_sta_rx_agg_start - tell the device to accept an RX BA session
 *
 * Programs the "add immediate block-ack" TID and starting sequence number
 * into the station entry and sends it synchronously.
 *
 * Caller must hold il->mutex.  Returns -ENXIO when the station is unknown,
 * otherwise il_send_add_sta()'s result.
 */
int
il4965_sta_rx_agg_start(struct il_priv *il, struct ieee80211_sta *sta, int tid,
			u16 ssn)
{
	unsigned long flags;
	int sta_id;
	struct il_addsta_cmd sta_cmd;

	lockdep_assert_held(&il->mutex);

	sta_id = il_sta_id(sta);
	if (sta_id == IL_INVALID_STATION)
		return -ENXIO;

	spin_lock_irqsave(&il->sta_lock, flags);
	il->stations[sta_id].sta.station_flags_msk = 0;
	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
	il->stations[sta_id].sta.add_immediate_ba_tid = (u8) tid;
	il->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
	/* Snapshot under lock; send after unlocking. */
	memcpy(&sta_cmd, &il->stations[sta_id].sta,
	       sizeof(struct il_addsta_cmd));
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
}
3609 | ||
/*
 * il4965_sta_rx_agg_stop - tear down an RX block-ack session for a TID
 *
 * Programs the "remove immediate block-ack" TID into the station entry and
 * sends it synchronously.
 *
 * Caller must hold il->mutex.  Returns -ENXIO when the station is unknown,
 * otherwise il_send_add_sta()'s result.
 */
int
il4965_sta_rx_agg_stop(struct il_priv *il, struct ieee80211_sta *sta, int tid)
{
	unsigned long flags;
	int sta_id;
	struct il_addsta_cmd sta_cmd;

	lockdep_assert_held(&il->mutex);

	sta_id = il_sta_id(sta);
	if (sta_id == IL_INVALID_STATION) {
		IL_ERR("Invalid station for AGG tid %d\n", tid);
		return -ENXIO;
	}

	spin_lock_irqsave(&il->sta_lock, flags);
	il->stations[sta_id].sta.station_flags_msk = 0;
	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
	il->stations[sta_id].sta.remove_immediate_ba_tid = (u8) tid;
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
	/* Snapshot under lock; send after unlocking. */
	memcpy(&sta_cmd, &il->stations[sta_id].sta,
	       sizeof(struct il_addsta_cmd));
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
}
3636 | ||
/*
 * il4965_sta_modify_sleep_tx_count - allow @cnt frames to a power-saving STA
 *
 * Marks the station as in power-save and sets how many frames the device
 * may still transmit to it (e.g. for PS-Poll / U-APSD service periods).
 * The ADD_STA is sent asynchronously while il->sta_lock is held, which is
 * safe for CMD_ASYNC.
 */
void
il4965_sta_modify_sleep_tx_count(struct il_priv *il, int sta_id, int cnt)
{
	unsigned long flags;

	spin_lock_irqsave(&il->sta_lock, flags);
	il->stations[sta_id].sta.station_flags |= STA_FLG_PWR_SAVE_MSK;
	il->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
	il->stations[sta_id].sta.sta.modify_mask =
	    STA_MODIFY_SLEEP_TX_COUNT_MSK;
	il->stations[sta_id].sta.sleep_tx_count = cpu_to_le16(cnt);
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
	il_send_add_sta(il, &il->stations[sta_id].sta, CMD_ASYNC);
	spin_unlock_irqrestore(&il->sta_lock, flags);

}
3653 | ||
e7392364 SG |
3654 | void |
3655 | il4965_update_chain_flags(struct il_priv *il) | |
be663ab6 | 3656 | { |
c9363551 SG |
3657 | if (il->ops->set_rxon_chain) { |
3658 | il->ops->set_rxon_chain(il); | |
c8b03958 | 3659 | if (il->active.rx_chain != il->staging.rx_chain) |
83007196 | 3660 | il_commit_rxon(il); |
be663ab6 WYG |
3661 | } |
3662 | } | |
3663 | ||
e7392364 SG |
3664 | static void |
3665 | il4965_clear_free_frames(struct il_priv *il) | |
be663ab6 WYG |
3666 | { |
3667 | struct list_head *element; | |
3668 | ||
e7392364 | 3669 | D_INFO("%d frames on pre-allocated heap on clear.\n", il->frames_count); |
be663ab6 | 3670 | |
46bc8d4b SG |
3671 | while (!list_empty(&il->free_frames)) { |
3672 | element = il->free_frames.next; | |
be663ab6 | 3673 | list_del(element); |
e2ebc833 | 3674 | kfree(list_entry(element, struct il_frame, list)); |
46bc8d4b | 3675 | il->frames_count--; |
be663ab6 WYG |
3676 | } |
3677 | ||
46bc8d4b | 3678 | if (il->frames_count) { |
9406f797 | 3679 | IL_WARN("%d frames still in use. Did we lose one?\n", |
e7392364 | 3680 | il->frames_count); |
46bc8d4b | 3681 | il->frames_count = 0; |
be663ab6 WYG |
3682 | } |
3683 | } | |
3684 | ||
e7392364 SG |
3685 | static struct il_frame * |
3686 | il4965_get_free_frame(struct il_priv *il) | |
be663ab6 | 3687 | { |
e2ebc833 | 3688 | struct il_frame *frame; |
be663ab6 | 3689 | struct list_head *element; |
46bc8d4b | 3690 | if (list_empty(&il->free_frames)) { |
be663ab6 WYG |
3691 | frame = kzalloc(sizeof(*frame), GFP_KERNEL); |
3692 | if (!frame) { | |
9406f797 | 3693 | IL_ERR("Could not allocate frame!\n"); |
be663ab6 WYG |
3694 | return NULL; |
3695 | } | |
3696 | ||
46bc8d4b | 3697 | il->frames_count++; |
be663ab6 WYG |
3698 | return frame; |
3699 | } | |
3700 | ||
46bc8d4b | 3701 | element = il->free_frames.next; |
be663ab6 | 3702 | list_del(element); |
e2ebc833 | 3703 | return list_entry(element, struct il_frame, list); |
be663ab6 WYG |
3704 | } |
3705 | ||
/*
 * il4965_free_frame - return a frame to the free pool
 *
 * Scrubs the frame's contents and links it back onto il->free_frames for
 * reuse by il4965_get_free_frame().
 */
static void
il4965_free_frame(struct il_priv *il, struct il_frame *frame)
{
	memset(frame, 0, sizeof(*frame));
	list_add(&frame->list, &il->free_frames);
}
3712 | ||
e7392364 SG |
3713 | static u32 |
3714 | il4965_fill_beacon_frame(struct il_priv *il, struct ieee80211_hdr *hdr, | |
3715 | int left) | |
be663ab6 | 3716 | { |
46bc8d4b | 3717 | lockdep_assert_held(&il->mutex); |
be663ab6 | 3718 | |
46bc8d4b | 3719 | if (!il->beacon_skb) |
be663ab6 WYG |
3720 | return 0; |
3721 | ||
46bc8d4b | 3722 | if (il->beacon_skb->len > left) |
be663ab6 WYG |
3723 | return 0; |
3724 | ||
46bc8d4b | 3725 | memcpy(hdr, il->beacon_skb->data, il->beacon_skb->len); |
be663ab6 | 3726 | |
46bc8d4b | 3727 | return il->beacon_skb->len; |
be663ab6 WYG |
3728 | } |
3729 | ||
/*
 * Parse the beacon frame to find the TIM element and set tim_idx & tim_size.
 *
 * Walks the beacon's variable-length information elements (each element is
 * <id, len, data...>) looking for WLAN_EID_TIM and records its offset and
 * length in the TX beacon command.  Warns when no TIM element is found.
 */
static void
il4965_set_beacon_tim(struct il_priv *il,
		      struct il_tx_beacon_cmd *tx_beacon_cmd, u8 * beacon,
		      u32 frame_size)
{
	u16 tim_idx;
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon;

	/*
	 * The idx is relative to frame start but we start looking at the
	 * variable-length part of the beacon.
	 */
	tim_idx = mgmt->u.beacon.variable - beacon;

	/* Parse variable-length elements of beacon to find WLAN_EID_TIM.
	 * The "frame_size - 2" bound keeps the id and length bytes of the
	 * element being examined inside the frame. */
	while ((tim_idx < (frame_size - 2)) &&
	       (beacon[tim_idx] != WLAN_EID_TIM))
		tim_idx += beacon[tim_idx + 1] + 2;

	/* If TIM field was found, set variables */
	if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
		tx_beacon_cmd->tim_idx = cpu_to_le16(tim_idx);
		tx_beacon_cmd->tim_size = beacon[tim_idx + 1];
	} else
		IL_WARN("Unable to find TIM Element in beacon\n");
}
3757 | ||
/*
 * il4965_hw_get_beacon_cmd - build a complete TX_BEACON command in @frame
 *
 * Fills in the beacon contents (from the cached beacon skb), the generic TX
 * command fields, the TIM element location, and the rate/antenna selection.
 * Returns the total command size (header + beacon payload), or 0 when
 * beaconing is disabled, the beacon is missing/too large, or it would
 * exceed MAX_MPDU_SIZE.  Caller must hold il->mutex.
 */
static unsigned int
il4965_hw_get_beacon_cmd(struct il_priv *il, struct il_frame *frame)
{
	struct il_tx_beacon_cmd *tx_beacon_cmd;
	u32 frame_size;
	u32 rate_flags;
	u32 rate;
	/*
	 * We have to set up the TX command, the TX Beacon command, and the
	 * beacon contents.
	 */

	lockdep_assert_held(&il->mutex);

	if (!il->beacon_enabled) {
		IL_ERR("Trying to build beacon without beaconing enabled\n");
		return 0;
	}

	/* Initialize memory */
	tx_beacon_cmd = &frame->u.beacon;
	memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));

	/* Set up TX beacon contents */
	frame_size =
	    il4965_fill_beacon_frame(il, tx_beacon_cmd->frame,
				     sizeof(frame->u) - sizeof(*tx_beacon_cmd));
	if (WARN_ON_ONCE(frame_size > MAX_MPDU_SIZE))
		return 0;
	if (!frame_size)
		return 0;

	/* Set up TX command fields */
	tx_beacon_cmd->tx.len = cpu_to_le16((u16) frame_size);
	tx_beacon_cmd->tx.sta_id = il->hw_params.bcast_id;
	tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
	tx_beacon_cmd->tx.tx_flags =
	    TX_CMD_FLG_SEQ_CTL_MSK | TX_CMD_FLG_TSF_MSK |
	    TX_CMD_FLG_STA_RATE_MSK;

	/* Set up TX beacon command fields */
	il4965_set_beacon_tim(il, tx_beacon_cmd, (u8 *) tx_beacon_cmd->frame,
			      frame_size);

	/* Set up packet rate and flags: lowest mandatory rate, rotating
	 * across the valid TX antennas, with the CCK flag for CCK rates. */
	rate = il_get_lowest_plcp(il);
	il4965_toggle_tx_ant(il, &il->mgmt_tx_ant, il->hw_params.valid_tx_ant);
	rate_flags = BIT(il->mgmt_tx_ant) << RATE_MCS_ANT_POS;
	if ((rate >= IL_FIRST_CCK_RATE) && (rate <= IL_LAST_CCK_RATE))
		rate_flags |= RATE_MCS_CCK_MSK;
	tx_beacon_cmd->tx.rate_n_flags = cpu_to_le32(rate | rate_flags);

	return sizeof(*tx_beacon_cmd) + frame_size;
}
3812 | ||
e7392364 SG |
3813 | int |
3814 | il4965_send_beacon_cmd(struct il_priv *il) | |
be663ab6 | 3815 | { |
e2ebc833 | 3816 | struct il_frame *frame; |
be663ab6 WYG |
3817 | unsigned int frame_size; |
3818 | int rc; | |
3819 | ||
46bc8d4b | 3820 | frame = il4965_get_free_frame(il); |
be663ab6 | 3821 | if (!frame) { |
9406f797 | 3822 | IL_ERR("Could not obtain free frame buffer for beacon " |
e7392364 | 3823 | "command.\n"); |
be663ab6 WYG |
3824 | return -ENOMEM; |
3825 | } | |
3826 | ||
46bc8d4b | 3827 | frame_size = il4965_hw_get_beacon_cmd(il, frame); |
be663ab6 | 3828 | if (!frame_size) { |
9406f797 | 3829 | IL_ERR("Error configuring the beacon command\n"); |
46bc8d4b | 3830 | il4965_free_frame(il, frame); |
be663ab6 WYG |
3831 | return -EINVAL; |
3832 | } | |
3833 | ||
e7392364 | 3834 | rc = il_send_cmd_pdu(il, C_TX_BEACON, frame_size, &frame->u.cmd[0]); |
be663ab6 | 3835 | |
46bc8d4b | 3836 | il4965_free_frame(il, frame); |
be663ab6 WYG |
3837 | |
3838 | return rc; | |
3839 | } | |
3840 | ||
/*
 * il4965_tfd_tb_get_addr - read the DMA address of TB @idx from a TFD
 *
 * The 36-bit address is stored as a 32-bit low word plus 4 extra bits in
 * hi_n_len.  The shift is split as (<<16)<<16 so it is well-defined even
 * when dma_addr_t is only 32 bits wide (a single <<32 would be UB); in
 * that case the high bits are simply dropped.
 */
static inline dma_addr_t
il4965_tfd_tb_get_addr(struct il_tfd *tfd, u8 idx)
{
	struct il_tfd_tb *tb = &tfd->tbs[idx];

	dma_addr_t addr = get_unaligned_le32(&tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		addr |=
		    ((dma_addr_t) (le16_to_cpu(tb->hi_n_len) & 0xF) << 16) <<
		    16;

	return addr;
}
3854 | ||
e7392364 SG |
3855 | static inline u16 |
3856 | il4965_tfd_tb_get_len(struct il_tfd *tfd, u8 idx) | |
be663ab6 | 3857 | { |
e2ebc833 | 3858 | struct il_tfd_tb *tb = &tfd->tbs[idx]; |
be663ab6 WYG |
3859 | |
3860 | return le16_to_cpu(tb->hi_n_len) >> 4; | |
3861 | } | |
3862 | ||
/*
 * il4965_tfd_set_tb - write TB @idx of a TFD: 32 low address bits in lo,
 * 4 high address bits and the 12-bit length packed into hi_n_len.
 * Also bumps num_tbs so @idx becomes the last valid buffer.
 * The (>>16)>>16 split keeps the shift defined for 32-bit dma_addr_t.
 */
static inline void
il4965_tfd_set_tb(struct il_tfd *tfd, u8 idx, dma_addr_t addr, u16 len)
{
	struct il_tfd_tb *tb = &tfd->tbs[idx];
	u16 hi_n_len = len << 4;

	put_unaligned_le32(addr, &tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		hi_n_len |= ((addr >> 16) >> 16) & 0xF;

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	tfd->num_tbs = idx + 1;
}
3877 | ||
/*
 * il4965_tfd_get_num_tbs - number of valid Tx buffers in a TFD.
 * Only the low 5 bits of num_tbs carry the count.
 */
static inline u8
il4965_tfd_get_num_tbs(struct il_tfd *tfd)
{
	return tfd->num_tbs & 0x1f;
}
3883 | ||
/**
 * il4965_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 * @il - driver private data
 * @txq - tx queue
 *
 * Does NOT advance any TFD circular buffer read/write idxes
 * Does NOT free the TFD itself (which is within circular buffer)
 *
 * Unmaps the command buffer (TB 0, bidirectional) and any payload chunks
 * (TBs 1..n, to-device), then frees the attached skb if one is recorded.
 */
void
il4965_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq)
{
	struct il_tfd *tfd_tmp = (struct il_tfd *)txq->tfds;
	struct il_tfd *tfd;
	struct pci_dev *dev = il->pci_dev;
	int idx = txq->q.read_ptr;
	int i;
	int num_tbs;

	tfd = &tfd_tmp[idx];

	/* Sanity check on number of chunks */
	num_tbs = il4965_tfd_get_num_tbs(tfd);

	if (num_tbs >= IL_NUM_OF_TBS) {
		IL_ERR("Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, it is quite serious situation */
		return;
	}

	/* Unmap tx_cmd */
	if (num_tbs)
		pci_unmap_single(dev, dma_unmap_addr(&txq->meta[idx], mapping),
				 dma_unmap_len(&txq->meta[idx], len),
				 PCI_DMA_BIDIRECTIONAL);

	/* Unmap chunks, if any. */
	for (i = 1; i < num_tbs; i++)
		pci_unmap_single(dev, il4965_tfd_tb_get_addr(tfd, i),
				 il4965_tfd_tb_get_len(tfd, i),
				 PCI_DMA_TODEVICE);

	/* free SKB */
	if (txq->skbs) {
		struct sk_buff *skb = txq->skbs[txq->q.read_ptr];

		/* can be called from irqs-disabled context */
		if (skb) {
			dev_kfree_skb_any(skb);
			txq->skbs[txq->q.read_ptr] = NULL;
		}
	}
}
3936 | ||
/*
 * il4965_hw_txq_attach_buf_to_tfd - append one DMA chunk to the current TFD
 *
 * Adds (@addr, @len) as the next Tx buffer of the TFD at the queue's write
 * pointer, optionally zeroing the TFD first when @reset is set.  Returns
 * -EINVAL when the TFD already holds IL_NUM_OF_TBS buffers, 0 on success.
 * Addresses must fit in 36 bits; misaligned ones are logged.
 */
int
il4965_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq,
				dma_addr_t addr, u16 len, u8 reset, u8 pad)
{
	struct il_queue *q;
	struct il_tfd *tfd, *tfd_tmp;
	u32 num_tbs;

	q = &txq->q;
	tfd_tmp = (struct il_tfd *)txq->tfds;
	tfd = &tfd_tmp[q->write_ptr];

	/* Starting a fresh frame: clear out any stale TBs. */
	if (reset)
		memset(tfd, 0, sizeof(*tfd));

	num_tbs = il4965_tfd_get_num_tbs(tfd);

	/* Each TFD can point to a maximum 20 Tx buffers */
	if (num_tbs >= IL_NUM_OF_TBS) {
		IL_ERR("Error can not send more than %d chunks\n",
		       IL_NUM_OF_TBS);
		return -EINVAL;
	}

	BUG_ON(addr & ~DMA_BIT_MASK(36));
	if (unlikely(addr & ~IL_TX_DMA_MASK))
		IL_ERR("Unaligned address = %llx\n", (unsigned long long)addr);

	il4965_tfd_set_tb(tfd, num_tbs, addr, len);

	return 0;
}
3969 | ||
3970 | /* | |
3971 | * Tell nic where to find circular buffer of Tx Frame Descriptors for | |
3972 | * given Tx queue, and enable the DMA channel used for that queue. | |
3973 | * | |
3974 | * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA | |
3975 | * channels supported in hardware. | |
3976 | */ | |
e7392364 SG |
3977 | int |
3978 | il4965_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq) | |
be663ab6 WYG |
3979 | { |
3980 | int txq_id = txq->q.id; | |
3981 | ||
3982 | /* Circular buffer (TFD queue in DRAM) physical base address */ | |
e7392364 | 3983 | il_wr(il, FH49_MEM_CBBC_QUEUE(txq_id), txq->q.dma_addr >> 8); |
be663ab6 WYG |
3984 | |
3985 | return 0; | |
3986 | } | |
3987 | ||
3988 | /****************************************************************************** | |
3989 | * | |
3990 | * Generic RX handler implementations | |
3991 | * | |
3992 | ******************************************************************************/ | |
/*
 * il4965_hdl_alive - handle the uCode ALIVE notification
 *
 * Stores the alive response (either the INITIALIZE-phase or the runtime
 * variant, distinguished by ver_subtype) and schedules the matching
 * *_alive_start work.  A bad status is only logged.
 */
static void
il4965_hdl_alive(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_alive_resp *palive;
	struct delayed_work *pwork;

	palive = &pkt->u.alive_frame;

	D_INFO("Alive ucode status 0x%08X revision " "0x%01X 0x%01X\n",
	       palive->is_valid, palive->ver_type, palive->ver_subtype);

	if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
		D_INFO("Initialization Alive received.\n");
		memcpy(&il->card_alive_init, &pkt->u.alive_frame,
		       sizeof(struct il_init_alive_resp));
		pwork = &il->init_alive_start;
	} else {
		D_INFO("Runtime Alive received.\n");
		memcpy(&il->card_alive, &pkt->u.alive_frame,
		       sizeof(struct il_alive_resp));
		pwork = &il->alive_start;
	}

	/* We delay the ALIVE response by 5ms to
	 * give the HW RF Kill time to activate... */
	if (palive->is_valid == UCODE_VALID_OK)
		queue_delayed_work(il->workqueue, pwork, msecs_to_jiffies(5));
	else
		IL_WARN("uCode did not respond OK.\n");
}
4024 | ||
4025 | /** | |
ebf0d90d | 4026 | * il4965_bg_stats_periodic - Timer callback to queue stats |
be663ab6 | 4027 | * |
ebf0d90d | 4028 | * This callback is provided in order to send a stats request. |
be663ab6 WYG |
4029 | * |
4030 | * This timer function is continually reset to execute within | |
527901d0 SG |
4031 | * 60 seconds since the last N_STATS was received. We need to |
4032 | * ensure we receive the stats in order to update the temperature | |
4033 | * used for calibrating the TXPOWER. | |
be663ab6 | 4034 | */ |
e7392364 SG |
4035 | static void |
4036 | il4965_bg_stats_periodic(unsigned long data) | |
be663ab6 | 4037 | { |
46bc8d4b | 4038 | struct il_priv *il = (struct il_priv *)data; |
be663ab6 | 4039 | |
a6766ccd | 4040 | if (test_bit(S_EXIT_PENDING, &il->status)) |
be663ab6 WYG |
4041 | return; |
4042 | ||
4043 | /* dont send host command if rf-kill is on */ | |
46bc8d4b | 4044 | if (!il_is_ready_rf(il)) |
be663ab6 WYG |
4045 | return; |
4046 | ||
ebf0d90d | 4047 | il_send_stats_request(il, CMD_ASYNC, false); |
be663ab6 WYG |
4048 | } |
4049 | ||
e7392364 SG |
4050 | static void |
4051 | il4965_hdl_beacon(struct il_priv *il, struct il_rx_buf *rxb) | |
be663ab6 | 4052 | { |
dcae1c64 | 4053 | struct il_rx_pkt *pkt = rxb_addr(rxb); |
e2ebc833 | 4054 | struct il4965_beacon_notif *beacon = |
e7392364 | 4055 | (struct il4965_beacon_notif *)pkt->u.raw; |
d3175167 | 4056 | #ifdef CONFIG_IWLEGACY_DEBUG |
e2ebc833 | 4057 | u8 rate = il4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags); |
be663ab6 | 4058 | |
5bf0dac4 | 4059 | D_RX("beacon status %x retries %d iss %d tsf:0x%.8x%.8x rate %d\n", |
e7392364 SG |
4060 | le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK, |
4061 | beacon->beacon_notify_hdr.failure_frame, | |
4062 | le32_to_cpu(beacon->ibss_mgr_status), | |
4063 | le32_to_cpu(beacon->high_tsf), le32_to_cpu(beacon->low_tsf), rate); | |
be663ab6 | 4064 | #endif |
46bc8d4b | 4065 | il->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status); |
be663ab6 WYG |
4066 | } |
4067 | ||
/*
 * il4965_perform_ct_kill_task - react to a critical-temperature (CT) kill
 *
 * Stops all mac80211 Tx queues and tells the uCode (via the driver GP1
 * scratch register) that the driver has seen the CT-kill condition.
 * The exact sequence of register accesses below is order-sensitive; do
 * not reorder.
 */
static void
il4965_perform_ct_kill_task(struct il_priv *il)
{
	unsigned long flags;

	D_POWER("Stop all queues\n");

	/* Only poke mac80211 once we have actually registered with it. */
	if (il->mac80211_registered)
		ieee80211_stop_queues(il->hw);

	_il_wr(il, CSR_UCODE_DRV_GP1_SET,
	       CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
	/* Read-back after the write — presumably to flush the posted
	 * PCI write before proceeding; TODO confirm. */
	_il_rd(il, CSR_UCODE_DRV_GP1);

	/* Briefly grab and release NIC access under reg_lock; the grab
	 * wakes the NIC, the matching release lets it sleep again. */
	spin_lock_irqsave(&il->reg_lock, flags);
	if (likely(_il_grab_nic_access(il)))
		_il_release_nic_access(il);
	spin_unlock_irqrestore(&il->reg_lock, flags);
}
4087 | ||
/* Handle notification from uCode that card's power state is changing
 * due to software, hardware, or critical temperature RFKILL */
static void
il4965_hdl_card_state(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
	/* Snapshot of the status bits before we modify them, used below
	 * to detect whether the HW rf-kill state actually changed. */
	unsigned long status = il->status;

	D_RF_KILL("Card state received: HW:%s SW:%s CT:%s\n",
		  (flags & HW_CARD_DISABLED) ? "Kill" : "On",
		  (flags & SW_CARD_DISABLED) ? "Kill" : "On",
		  (flags & CT_CARD_DISABLED) ? "Reached" : "Not reached");

	if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED | CT_CARD_DISABLED)) {

		/* Block further host commands while the card is disabled. */
		_il_wr(il, CSR_UCODE_DRV_GP1_SET,
		       CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

		il_wr(il, HBUS_TARG_MBX_C, HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);

		/* If RXON itself is not disabled, immediately unblock
		 * commands again (card is only partially disabled). */
		if (!(flags & RXON_CARD_DISABLED)) {
			_il_wr(il, CSR_UCODE_DRV_GP1_CLR,
			       CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
			il_wr(il, HBUS_TARG_MBX_C,
			      HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
		}
	}

	/* Critical temperature: stop queues and notify the uCode. */
	if (flags & CT_CARD_DISABLED)
		il4965_perform_ct_kill_task(il);

	/* Mirror the HW rf-kill switch state into our status bits. */
	if (flags & HW_CARD_DISABLED)
		set_bit(S_RF_KILL_HW, &il->status);
	else
		clear_bit(S_RF_KILL_HW, &il->status);

	if (!(flags & RXON_CARD_DISABLED))
		il_scan_cancel(il);

	/* If the HW rf-kill state changed, tell wiphy; otherwise wake
	 * any command waiter that may be blocked on this notification. */
	if ((test_bit(S_RF_KILL_HW, &status) !=
	     test_bit(S_RF_KILL_HW, &il->status)))
		wiphy_rfkill_set_hw_state(il->hw->wiphy,
					  test_bit(S_RF_KILL_HW, &il->status));
	else
		wake_up(&il->wait_command_queue);
}
4135 | ||
4136 | /** | |
d0c72347 | 4137 | * il4965_setup_handlers - Initialize Rx handler callbacks |
be663ab6 WYG |
4138 | * |
4139 | * Setup the RX handlers for each of the reply types sent from the uCode | |
4140 | * to the host. | |
4141 | * | |
4142 | * This function chains into the hardware specific files for them to setup | |
4143 | * any hardware specific handlers as well. | |
4144 | */ | |
e7392364 SG |
4145 | static void |
4146 | il4965_setup_handlers(struct il_priv *il) | |
be663ab6 | 4147 | { |
6e9848b4 SG |
4148 | il->handlers[N_ALIVE] = il4965_hdl_alive; |
4149 | il->handlers[N_ERROR] = il_hdl_error; | |
d2dfb33e | 4150 | il->handlers[N_CHANNEL_SWITCH] = il_hdl_csa; |
e7392364 | 4151 | il->handlers[N_SPECTRUM_MEASUREMENT] = il_hdl_spectrum_measurement; |
d2dfb33e | 4152 | il->handlers[N_PM_SLEEP] = il_hdl_pm_sleep; |
e7392364 | 4153 | il->handlers[N_PM_DEBUG_STATS] = il_hdl_pm_debug_stats; |
d2dfb33e | 4154 | il->handlers[N_BEACON] = il4965_hdl_beacon; |
be663ab6 WYG |
4155 | |
4156 | /* | |
4157 | * The same handler is used for both the REPLY to a discrete | |
ebf0d90d SG |
4158 | * stats request from the host as well as for the periodic |
4159 | * stats notifications (after received beacons) from the uCode. | |
be663ab6 | 4160 | */ |
d2dfb33e SG |
4161 | il->handlers[C_STATS] = il4965_hdl_c_stats; |
4162 | il->handlers[N_STATS] = il4965_hdl_stats; | |
be663ab6 | 4163 | |
46bc8d4b | 4164 | il_setup_rx_scan_handlers(il); |
be663ab6 WYG |
4165 | |
4166 | /* status change handler */ | |
e7392364 | 4167 | il->handlers[N_CARD_STATE] = il4965_hdl_card_state; |
be663ab6 | 4168 | |
e7392364 | 4169 | il->handlers[N_MISSED_BEACONS] = il4965_hdl_missed_beacon; |
be663ab6 | 4170 | /* Rx handlers */ |
6e9848b4 SG |
4171 | il->handlers[N_RX_PHY] = il4965_hdl_rx_phy; |
4172 | il->handlers[N_RX_MPDU] = il4965_hdl_rx; | |
3dfea27d | 4173 | il->handlers[N_RX] = il4965_hdl_rx; |
be663ab6 | 4174 | /* block ack */ |
6e9848b4 | 4175 | il->handlers[N_COMPRESSED_BA] = il4965_hdl_compressed_ba; |
3dfea27d SG |
4176 | /* Tx response */ |
4177 | il->handlers[C_TX] = il4965_hdl_tx; | |
be663ab6 WYG |
4178 | } |
4179 | ||
/**
 * il4965_rx_handle - Main entry function for receiving responses from uCode
 *
 * Uses the il->handlers callback function array to invoke
 * the appropriate handlers, including command responses,
 * frame-received notifications, and other notifications.
 */
void
il4965_rx_handle(struct il_priv *il)
{
	struct il_rx_buf *rxb;
	struct il_rx_pkt *pkt;
	struct il_rx_queue *rxq = &il->rxq;
	u32 r, i;
	int reclaim;
	unsigned long flags;
	u8 fill_rx = 0;
	u32 count = 8;
	int total_empty;

	/* uCode's read idx (stored in shared DRAM) indicates the last Rx
	 * buffer that the driver may process (last buffer filled by ucode). */
	r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
	i = rxq->read;

	/* Rx interrupt, but nothing sent from uCode */
	if (i == r)
		D_RX("r = %d, i = %d\n", r, i);

	/* calculate total frames need to be restock after handling RX */
	total_empty = r - rxq->write_actual;
	if (total_empty < 0)
		total_empty += RX_QUEUE_SIZE;

	/* More than half the ring empty: replenish aggressively inside
	 * the loop below instead of once at the end. */
	if (total_empty > (RX_QUEUE_SIZE / 2))
		fill_rx = 1;

	while (i != r) {
		int len;

		rxb = rxq->queue[i];

		/* If an RXB doesn't have a Rx queue slot associated with it,
		 * then a bug has been introduced in the queue refilling
		 * routines -- catch it here */
		BUG_ON(rxb == NULL);

		rxq->queue[i] = NULL;

		/* Give the CPU ownership of the page before touching it. */
		pci_unmap_page(il->pci_dev, rxb->page_dma,
			       PAGE_SIZE << il->hw_params.rx_page_order,
			       PCI_DMA_FROMDEVICE);
		pkt = rxb_addr(rxb);

		/* NOTE(review): len is computed here but not used further
		 * in this function — looks vestigial. */
		len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
		len += sizeof(u32);	/* account for status word */

		/* Reclaim a command buffer only if this packet is a response
		 *   to a (driver-originated) command.
		 * If the packet (e.g. Rx frame) originated from uCode,
		 *   there is no command buffer to reclaim.
		 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
		 *   but apparently a few don't get set; catch them here. */
		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
		    (pkt->hdr.cmd != N_RX_PHY) && (pkt->hdr.cmd != N_RX) &&
		    (pkt->hdr.cmd != N_RX_MPDU) &&
		    (pkt->hdr.cmd != N_COMPRESSED_BA) &&
		    (pkt->hdr.cmd != N_STATS) && (pkt->hdr.cmd != C_TX);

		/* Based on type of command response or notification,
		 *   handle those that need handling via function in
		 *   handlers table.  See il4965_setup_handlers() */
		if (il->handlers[pkt->hdr.cmd]) {
			D_RX("r = %d, i = %d, %s, 0x%02x\n", r, i,
			     il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
			il->isr_stats.handlers[pkt->hdr.cmd]++;
			il->handlers[pkt->hdr.cmd] (il, rxb);
		} else {
			/* No handling needed */
			D_RX("r %d i %d No handler needed for %s, 0x%02x\n", r,
			     i, il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
		}

		/*
		 * XXX: After here, we should always check rxb->page
		 * against NULL before touching it or its virtual
		 * memory (pkt). Because some handler might have
		 * already taken or freed the pages.
		 */

		if (reclaim) {
			/* Invoke any callbacks, transfer the buffer to caller,
			 * and fire off the (possibly) blocking il_send_cmd()
			 * as we reclaim the driver command queue */
			if (rxb->page)
				il_tx_cmd_complete(il, rxb);
			else
				IL_WARN("Claim null rxb?\n");
		}

		/* Reuse the page if possible. For notification packets and
		 * SKBs that fail to Rx correctly, add them back into the
		 * rx_free list for reuse later. */
		spin_lock_irqsave(&rxq->lock, flags);
		if (rxb->page != NULL) {
			/* Re-map the page for the device and put the
			 * buffer back on the free list. */
			rxb->page_dma =
			    pci_map_page(il->pci_dev, rxb->page, 0,
					 PAGE_SIZE << il->hw_params.
					 rx_page_order, PCI_DMA_FROMDEVICE);
			list_add_tail(&rxb->list, &rxq->rx_free);
			rxq->free_count++;
		} else
			list_add_tail(&rxb->list, &rxq->rx_used);

		spin_unlock_irqrestore(&rxq->lock, flags);

		i = (i + 1) & RX_QUEUE_MASK;
		/* If there are a lot of unused frames,
		 * restock the Rx queue so ucode wont assert. */
		if (fill_rx) {
			count++;
			if (count >= 8) {
				rxq->read = i;
				il4965_rx_replenish_now(il);
				count = 0;
			}
		}
	}

	/* Backtrack one entry */
	rxq->read = i;
	if (fill_rx)
		il4965_rx_replenish_now(il);
	else
		il4965_rx_queue_restock(il);
}
4316 | ||
/* call this function to flush any scheduled tasklet */
static inline void
il4965_synchronize_irq(struct il_priv *il)
{
	/* wait to make sure we flush pending tasklet */
	/* Order matters: wait for a running ISR to finish first, then
	 * kill the tasklet it may have scheduled. */
	synchronize_irq(il->pci_dev->irq);
	tasklet_kill(&il->irq_tasklet);
}
4325 | ||
/*
 * il4965_irq_tasklet - bottom half of the interrupt handler
 *
 * Reads and acknowledges the pending interrupt causes (CSR_INT and the
 * flow-handler CSR_FH_INT_STATUS), then services each discovered cause
 * in turn: HW error, rf-kill toggle, CT-kill, uCode SW error, wakeup,
 * Rx, and uCode-load Tx.  Interrupts are re-enabled at the end unless
 * a fatal HW error caused an early return.
 */
static void
il4965_irq_tasklet(struct il_priv *il)
{
	u32 inta, handled = 0;
	u32 inta_fh;
	unsigned long flags;
	u32 i;
#ifdef CONFIG_IWLEGACY_DEBUG
	u32 inta_mask;
#endif

	spin_lock_irqsave(&il->lock, flags);

	/* Ack/clear/reset pending uCode interrupts.
	 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
	 * and will clear only when CSR_FH_INT_STATUS gets cleared. */
	inta = _il_rd(il, CSR_INT);
	_il_wr(il, CSR_INT, inta);

	/* Ack/clear/reset pending flow-handler (DMA) interrupts.
	 * Any new interrupts that happen after this, either while we're
	 * in this tasklet, or later, will show up in next ISR/tasklet. */
	inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
	_il_wr(il, CSR_FH_INT_STATUS, inta_fh);

#ifdef CONFIG_IWLEGACY_DEBUG
	if (il_get_debug_level(il) & IL_DL_ISR) {
		/* just for debug */
		inta_mask = _il_rd(il, CSR_INT_MASK);
		D_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", inta,
		      inta_mask, inta_fh);
	}
#endif

	spin_unlock_irqrestore(&il->lock, flags);

	/* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
	 * atomic, make sure that inta covers all the interrupts that
	 * we've discovered, even if FH interrupt came in just after
	 * reading CSR_INT. */
	if (inta_fh & CSR49_FH_INT_RX_MASK)
		inta |= CSR_INT_BIT_FH_RX;
	if (inta_fh & CSR49_FH_INT_TX_MASK)
		inta |= CSR_INT_BIT_FH_TX;

	/* Now service all interrupt bits discovered above. */
	if (inta & CSR_INT_BIT_HW_ERR) {
		IL_ERR("Hardware error detected. Restarting.\n");

		/* Tell the device to stop sending interrupts */
		il_disable_interrupts(il);

		il->isr_stats.hw++;
		il_irq_handle_error(il);

		/* NOTE(review): handled is dead here — we return
		 * immediately and never read it again. */
		handled |= CSR_INT_BIT_HW_ERR;

		return;
	}
#ifdef CONFIG_IWLEGACY_DEBUG
	if (il_get_debug_level(il) & (IL_DL_ISR)) {
		/* NIC fires this, but we don't use it, redundant with WAKEUP */
		if (inta & CSR_INT_BIT_SCD) {
			D_ISR("Scheduler finished to transmit "
			      "the frame/frames.\n");
			il->isr_stats.sch++;
		}

		/* Alive notification via Rx interrupt will do the real work */
		if (inta & CSR_INT_BIT_ALIVE) {
			D_ISR("Alive interrupt\n");
			il->isr_stats.alive++;
		}
	}
#endif
	/* Safely ignore these bits for debug checks below */
	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);

	/* HW RF KILL switch toggled */
	if (inta & CSR_INT_BIT_RF_KILL) {
		int hw_rf_kill = 0;

		if (!(_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
			hw_rf_kill = 1;

		IL_WARN("RF_KILL bit toggled to %s.\n",
			hw_rf_kill ? "disable radio" : "enable radio");

		il->isr_stats.rfkill++;

		/* driver only loads ucode once setting the interface up.
		 * the driver allows loading the ucode even if the radio
		 * is killed. Hence update the killswitch state here. The
		 * rfkill handler will care about restarting if needed.
		 */
		if (!test_bit(S_ALIVE, &il->status)) {
			if (hw_rf_kill)
				set_bit(S_RF_KILL_HW, &il->status);
			else
				clear_bit(S_RF_KILL_HW, &il->status);
			wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rf_kill);
		}

		handled |= CSR_INT_BIT_RF_KILL;
	}

	/* Chip got too hot and stopped itself */
	if (inta & CSR_INT_BIT_CT_KILL) {
		IL_ERR("Microcode CT kill error detected.\n");
		il->isr_stats.ctkill++;
		handled |= CSR_INT_BIT_CT_KILL;
	}

	/* Error detected by uCode */
	if (inta & CSR_INT_BIT_SW_ERR) {
		IL_ERR("Microcode SW error detected. " " Restarting 0x%X.\n",
		       inta);
		il->isr_stats.sw++;
		il_irq_handle_error(il);
		handled |= CSR_INT_BIT_SW_ERR;
	}

	/*
	 * uCode wakes up after power-down sleep.
	 * Tell device about any new tx or host commands enqueued,
	 * and about any Rx buffers made available while asleep.
	 */
	if (inta & CSR_INT_BIT_WAKEUP) {
		D_ISR("Wakeup interrupt\n");
		il_rx_queue_update_write_ptr(il, &il->rxq);
		for (i = 0; i < il->hw_params.max_txq_num; i++)
			il_txq_update_write_ptr(il, &il->txq[i]);
		il->isr_stats.wakeup++;
		handled |= CSR_INT_BIT_WAKEUP;
	}

	/* All uCode command responses, including Tx command responses,
	 * Rx "responses" (frame-received notification), and other
	 * notifications from uCode come through here*/
	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
		il4965_rx_handle(il);
		il->isr_stats.rx++;
		handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
	}

	/* This "Tx" DMA channel is used only for loading uCode */
	if (inta & CSR_INT_BIT_FH_TX) {
		D_ISR("uCode load interrupt\n");
		il->isr_stats.tx++;
		handled |= CSR_INT_BIT_FH_TX;
		/* Wake up uCode load routine, now that load is complete */
		il->ucode_write_complete = 1;
		wake_up(&il->wait_command_queue);
	}

	/* Anything left in inta was asserted but has no service code. */
	if (inta & ~handled) {
		IL_ERR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
		il->isr_stats.unhandled++;
	}

	if (inta & ~(il->inta_mask)) {
		IL_WARN("Disabled INTA bits 0x%08x were pending\n",
			inta & ~il->inta_mask);
		IL_WARN(" with FH49_INT = 0x%08x\n", inta_fh);
	}

	/* Re-enable all interrupts */
	/* only Re-enable if disabled by irq */
	if (test_bit(S_INT_ENABLED, &il->status))
		il_enable_interrupts(il);
	/* Re-enable RF_KILL if it occurred */
	else if (handled & CSR_INT_BIT_RF_KILL)
		il_enable_rfkill_int(il);

#ifdef CONFIG_IWLEGACY_DEBUG
	if (il_get_debug_level(il) & (IL_DL_ISR)) {
		inta = _il_rd(il, CSR_INT);
		inta_mask = _il_rd(il, CSR_INT_MASK);
		inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
		D_ISR("End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
		      "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
	}
#endif
}
4510 | ||
4511 | /***************************************************************************** | |
4512 | * | |
4513 | * sysfs attributes | |
4514 | * | |
4515 | *****************************************************************************/ | |
4516 | ||
d3175167 | 4517 | #ifdef CONFIG_IWLEGACY_DEBUG |
be663ab6 WYG |
4518 | |
4519 | /* | |
4520 | * The following adds a new attribute to the sysfs representation | |
4521 | * of this device driver (i.e. a new file in /sys/class/net/wlan0/device/) | |
4522 | * used for controlling the debug level. | |
4523 | * | |
4524 | * See the level definitions in iwl for details. | |
4525 | * | |
4526 | * The debug_level being managed using sysfs below is a per device debug | |
4527 | * level that is used instead of the global debug level if it (the per | |
4528 | * device debug level) is set. | |
4529 | */ | |
e7392364 SG |
4530 | static ssize_t |
4531 | il4965_show_debug_level(struct device *d, struct device_attribute *attr, | |
4532 | char *buf) | |
be663ab6 | 4533 | { |
46bc8d4b SG |
4534 | struct il_priv *il = dev_get_drvdata(d); |
4535 | return sprintf(buf, "0x%08X\n", il_get_debug_level(il)); | |
be663ab6 | 4536 | } |
e7392364 SG |
4537 | |
4538 | static ssize_t | |
4539 | il4965_store_debug_level(struct device *d, struct device_attribute *attr, | |
4540 | const char *buf, size_t count) | |
be663ab6 | 4541 | { |
46bc8d4b | 4542 | struct il_priv *il = dev_get_drvdata(d); |
be663ab6 WYG |
4543 | unsigned long val; |
4544 | int ret; | |
4545 | ||
4546 | ret = strict_strtoul(buf, 0, &val); | |
4547 | if (ret) | |
9406f797 | 4548 | IL_ERR("%s is not in hex or decimal form.\n", buf); |
288f9954 | 4549 | else |
46bc8d4b | 4550 | il->debug_level = val; |
288f9954 | 4551 | |
be663ab6 WYG |
4552 | return strnlen(buf, count); |
4553 | } | |
4554 | ||
e7392364 SG |
4555 | static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO, il4965_show_debug_level, |
4556 | il4965_store_debug_level); | |
be663ab6 | 4557 | |
d3175167 | 4558 | #endif /* CONFIG_IWLEGACY_DEBUG */ |
be663ab6 | 4559 | |
e7392364 SG |
4560 | static ssize_t |
4561 | il4965_show_temperature(struct device *d, struct device_attribute *attr, | |
4562 | char *buf) | |
be663ab6 | 4563 | { |
46bc8d4b | 4564 | struct il_priv *il = dev_get_drvdata(d); |
be663ab6 | 4565 | |
46bc8d4b | 4566 | if (!il_is_alive(il)) |
be663ab6 WYG |
4567 | return -EAGAIN; |
4568 | ||
46bc8d4b | 4569 | return sprintf(buf, "%d\n", il->temperature); |
be663ab6 WYG |
4570 | } |
4571 | ||
e2ebc833 | 4572 | static DEVICE_ATTR(temperature, S_IRUGO, il4965_show_temperature, NULL); |
be663ab6 | 4573 | |
e7392364 SG |
4574 | static ssize_t |
4575 | il4965_show_tx_power(struct device *d, struct device_attribute *attr, char *buf) | |
be663ab6 | 4576 | { |
46bc8d4b | 4577 | struct il_priv *il = dev_get_drvdata(d); |
be663ab6 | 4578 | |
46bc8d4b | 4579 | if (!il_is_ready_rf(il)) |
be663ab6 WYG |
4580 | return sprintf(buf, "off\n"); |
4581 | else | |
46bc8d4b | 4582 | return sprintf(buf, "%d\n", il->tx_power_user_lmt); |
be663ab6 WYG |
4583 | } |
4584 | ||
e7392364 SG |
4585 | static ssize_t |
4586 | il4965_store_tx_power(struct device *d, struct device_attribute *attr, | |
4587 | const char *buf, size_t count) | |
be663ab6 | 4588 | { |
46bc8d4b | 4589 | struct il_priv *il = dev_get_drvdata(d); |
be663ab6 WYG |
4590 | unsigned long val; |
4591 | int ret; | |
4592 | ||
4593 | ret = strict_strtoul(buf, 10, &val); | |
4594 | if (ret) | |
9406f797 | 4595 | IL_INFO("%s is not in decimal form.\n", buf); |
be663ab6 | 4596 | else { |
46bc8d4b | 4597 | ret = il_set_tx_power(il, val, false); |
be663ab6 | 4598 | if (ret) |
e7392364 | 4599 | IL_ERR("failed setting tx power (0x%d).\n", ret); |
be663ab6 WYG |
4600 | else |
4601 | ret = count; | |
4602 | } | |
4603 | return ret; | |
4604 | } | |
4605 | ||
e7392364 SG |
4606 | static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, il4965_show_tx_power, |
4607 | il4965_store_tx_power); | |
be663ab6 | 4608 | |
/* sysfs attributes exported in the device directory; debug_level only
 * exists on CONFIG_IWLEGACY_DEBUG builds. */
static struct attribute *il_sysfs_entries[] = {
	&dev_attr_temperature.attr,
	&dev_attr_tx_power.attr,
#ifdef CONFIG_IWLEGACY_DEBUG
	&dev_attr_debug_level.attr,
#endif
	NULL
};

static struct attribute_group il_attribute_group = {
	.name = NULL,		/* put in device directory */
	.attrs = il_sysfs_entries,
};
4622 | ||
4623 | /****************************************************************************** | |
4624 | * | |
4625 | * uCode download functions | |
4626 | * | |
4627 | ******************************************************************************/ | |
4628 | ||
/*
 * il4965_dealloc_ucode_pci - free the DMA buffers holding uCode images
 *
 * Releases every firmware image descriptor: runtime code/data, the
 * data backup copy, init code/data, and the bootstrap image.
 */
static void
il4965_dealloc_ucode_pci(struct il_priv *il)
{
	il_free_fw_desc(il->pci_dev, &il->ucode_code);
	il_free_fw_desc(il->pci_dev, &il->ucode_data);
	il_free_fw_desc(il->pci_dev, &il->ucode_data_backup);
	il_free_fw_desc(il->pci_dev, &il->ucode_init);
	il_free_fw_desc(il->pci_dev, &il->ucode_init_data);
	il_free_fw_desc(il->pci_dev, &il->ucode_boot);
}
4639 | ||
/* il4965_nic_start - take the NIC out of reset by clearing CSR_RESET. */
static void
il4965_nic_start(struct il_priv *il)
{
	/* Remove all resets to allow NIC to operate */
	_il_wr(il, CSR_RESET, 0);
}
4646 | ||
e2ebc833 | 4647 | static void il4965_ucode_callback(const struct firmware *ucode_raw, |
e7392364 SG |
4648 | void *context); |
4649 | static int il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length); | |
be663ab6 | 4650 | |
e7392364 SG |
4651 | static int __must_check |
4652 | il4965_request_firmware(struct il_priv *il, bool first) | |
be663ab6 | 4653 | { |
46bc8d4b | 4654 | const char *name_pre = il->cfg->fw_name_pre; |
be663ab6 WYG |
4655 | char tag[8]; |
4656 | ||
4657 | if (first) { | |
0c2c8852 SG |
4658 | il->fw_idx = il->cfg->ucode_api_max; |
4659 | sprintf(tag, "%d", il->fw_idx); | |
be663ab6 | 4660 | } else { |
0c2c8852 SG |
4661 | il->fw_idx--; |
4662 | sprintf(tag, "%d", il->fw_idx); | |
be663ab6 WYG |
4663 | } |
4664 | ||
0c2c8852 | 4665 | if (il->fw_idx < il->cfg->ucode_api_min) { |
9406f797 | 4666 | IL_ERR("no suitable firmware found!\n"); |
be663ab6 WYG |
4667 | return -ENOENT; |
4668 | } | |
4669 | ||
46bc8d4b | 4670 | sprintf(il->firmware_name, "%s%s%s", name_pre, tag, ".ucode"); |
be663ab6 | 4671 | |
e7392364 | 4672 | D_INFO("attempting to load firmware '%s'\n", il->firmware_name); |
be663ab6 | 4673 | |
46bc8d4b SG |
4674 | return request_firmware_nowait(THIS_MODULE, 1, il->firmware_name, |
4675 | &il->pci_dev->dev, GFP_KERNEL, il, | |
e2ebc833 | 4676 | il4965_ucode_callback); |
be663ab6 WYG |
4677 | } |
4678 | ||
/* Section pointers/sizes carved out of the raw firmware blob by
 * il4965_load_firmware(); the pointers reference into ucode_raw->data,
 * nothing here is separately allocated. */
struct il4965_firmware_pieces {
	const void *inst, *data, *init, *init_data, *boot;
	size_t inst_size, data_size, init_size, init_data_size, boot_size;
};
4683 | ||
/*
 * il4965_load_firmware - parse a raw uCode file into its image sections
 *
 * Reads the v1 header (API versions 0-2 and anything newer fall into
 * the same layout here), validates that the file size matches the sum
 * of the advertised section sizes, and fills @pieces with pointers into
 * @ucode_raw->data.  Returns 0 on success or -EINVAL on a malformed
 * file.
 */
static int
il4965_load_firmware(struct il_priv *il, const struct firmware *ucode_raw,
		     struct il4965_firmware_pieces *pieces)
{
	struct il_ucode_header *ucode = (void *)ucode_raw->data;
	u32 api_ver, hdr_size;
	const u8 *src;

	il->ucode_ver = le32_to_cpu(ucode->ver);
	api_ver = IL_UCODE_API(il->ucode_ver);

	switch (api_ver) {
	default:
	case 0:
	case 1:
	case 2:
		/* v1 header: fixed 24-byte header followed by the images. */
		hdr_size = 24;
		if (ucode_raw->size < hdr_size) {
			IL_ERR("File size too small!\n");
			return -EINVAL;
		}
		pieces->inst_size = le32_to_cpu(ucode->v1.inst_size);
		pieces->data_size = le32_to_cpu(ucode->v1.data_size);
		pieces->init_size = le32_to_cpu(ucode->v1.init_size);
		pieces->init_data_size = le32_to_cpu(ucode->v1.init_data_size);
		pieces->boot_size = le32_to_cpu(ucode->v1.boot_size);
		src = ucode->v1.data;
		break;
	}

	/* Verify size of file vs. image size info in file's header */
	/* NOTE(review): the sum of five file-supplied u32 sizes could
	 * wrap on a 32-bit size_t — presumably benign since the file is
	 * vendor firmware, but worth confirming. */
	if (ucode_raw->size !=
	    hdr_size + pieces->inst_size + pieces->data_size +
	    pieces->init_size + pieces->init_data_size + pieces->boot_size) {

		IL_ERR("uCode file size %d does not match expected size\n",
		       (int)ucode_raw->size);
		return -EINVAL;
	}

	/* Carve the blob into consecutive sections in header order. */
	pieces->inst = src;
	src += pieces->inst_size;
	pieces->data = src;
	src += pieces->data_size;
	pieces->init = src;
	src += pieces->init_size;
	pieces->init_data = src;
	src += pieces->init_data_size;
	pieces->boot = src;
	src += pieces->boot_size;

	return 0;
}
4737 | ||
4738 | /** | |
e2ebc833 | 4739 | * il4965_ucode_callback - callback when firmware was loaded |
be663ab6 WYG |
4740 | * |
4741 | * If loaded successfully, copies the firmware into buffers | |
4742 | * for the card to fetch (via DMA). | |
4743 | */ | |
4744 | static void | |
e2ebc833 | 4745 | il4965_ucode_callback(const struct firmware *ucode_raw, void *context) |
be663ab6 | 4746 | { |
46bc8d4b | 4747 | struct il_priv *il = context; |
e2ebc833 | 4748 | struct il_ucode_header *ucode; |
be663ab6 | 4749 | int err; |
e2ebc833 | 4750 | struct il4965_firmware_pieces pieces; |
46bc8d4b SG |
4751 | const unsigned int api_max = il->cfg->ucode_api_max; |
4752 | const unsigned int api_min = il->cfg->ucode_api_min; | |
be663ab6 WYG |
4753 | u32 api_ver; |
4754 | ||
4755 | u32 max_probe_length = 200; | |
4756 | u32 standard_phy_calibration_size = | |
e7392364 | 4757 | IL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE; |
be663ab6 WYG |
4758 | |
4759 | memset(&pieces, 0, sizeof(pieces)); | |
4760 | ||
4761 | if (!ucode_raw) { | |
0c2c8852 | 4762 | if (il->fw_idx <= il->cfg->ucode_api_max) |
e7392364 SG |
4763 | IL_ERR("request for firmware file '%s' failed.\n", |
4764 | il->firmware_name); | |
be663ab6 WYG |
4765 | goto try_again; |
4766 | } | |
4767 | ||
e7392364 SG |
4768 | D_INFO("Loaded firmware file '%s' (%zd bytes).\n", il->firmware_name, |
4769 | ucode_raw->size); | |
be663ab6 WYG |
4770 | |
4771 | /* Make sure that we got at least the API version number */ | |
4772 | if (ucode_raw->size < 4) { | |
9406f797 | 4773 | IL_ERR("File size way too small!\n"); |
be663ab6 WYG |
4774 | goto try_again; |
4775 | } | |
4776 | ||
4777 | /* Data from ucode file: header followed by uCode images */ | |
e2ebc833 | 4778 | ucode = (struct il_ucode_header *)ucode_raw->data; |
be663ab6 | 4779 | |
46bc8d4b | 4780 | err = il4965_load_firmware(il, ucode_raw, &pieces); |
be663ab6 WYG |
4781 | |
4782 | if (err) | |
4783 | goto try_again; | |
4784 | ||
46bc8d4b | 4785 | api_ver = IL_UCODE_API(il->ucode_ver); |
be663ab6 WYG |
4786 | |
4787 | /* | |
4788 | * api_ver should match the api version forming part of the | |
4789 | * firmware filename ... but we don't check for that and only rely | |
4790 | * on the API version read from firmware header from here on forward | |
4791 | */ | |
4792 | if (api_ver < api_min || api_ver > api_max) { | |
e7392364 SG |
4793 | IL_ERR("Driver unable to support your firmware API. " |
4794 | "Driver supports v%u, firmware is v%u.\n", api_max, | |
4795 | api_ver); | |
be663ab6 WYG |
4796 | goto try_again; |
4797 | } | |
4798 | ||
4799 | if (api_ver != api_max) | |
e7392364 SG |
4800 | IL_ERR("Firmware has old API version. Expected v%u, " |
4801 | "got v%u. New firmware can be obtained " | |
4802 | "from http://www.intellinuxwireless.org.\n", api_max, | |
4803 | api_ver); | |
be663ab6 | 4804 | |
9406f797 | 4805 | IL_INFO("loaded firmware version %u.%u.%u.%u\n", |
e7392364 SG |
4806 | IL_UCODE_MAJOR(il->ucode_ver), IL_UCODE_MINOR(il->ucode_ver), |
4807 | IL_UCODE_API(il->ucode_ver), IL_UCODE_SERIAL(il->ucode_ver)); | |
be663ab6 | 4808 | |
e7392364 SG |
4809 | snprintf(il->hw->wiphy->fw_version, sizeof(il->hw->wiphy->fw_version), |
4810 | "%u.%u.%u.%u", IL_UCODE_MAJOR(il->ucode_ver), | |
4811 | IL_UCODE_MINOR(il->ucode_ver), IL_UCODE_API(il->ucode_ver), | |
46bc8d4b | 4812 | IL_UCODE_SERIAL(il->ucode_ver)); |
be663ab6 WYG |
4813 | |
4814 | /* | |
4815 | * For any of the failures below (before allocating pci memory) | |
4816 | * we will try to load a version with a smaller API -- maybe the | |
4817 | * user just got a corrupted version of the latest API. | |
4818 | */ | |
4819 | ||
e7392364 SG |
4820 | D_INFO("f/w package hdr ucode version raw = 0x%x\n", il->ucode_ver); |
4821 | D_INFO("f/w package hdr runtime inst size = %Zd\n", pieces.inst_size); | |
4822 | D_INFO("f/w package hdr runtime data size = %Zd\n", pieces.data_size); | |
4823 | D_INFO("f/w package hdr init inst size = %Zd\n", pieces.init_size); | |
4824 | D_INFO("f/w package hdr init data size = %Zd\n", pieces.init_data_size); | |
4825 | D_INFO("f/w package hdr boot inst size = %Zd\n", pieces.boot_size); | |
be663ab6 WYG |
4826 | |
4827 | /* Verify that uCode images will fit in card's SRAM */ | |
46bc8d4b | 4828 | if (pieces.inst_size > il->hw_params.max_inst_size) { |
9406f797 | 4829 | IL_ERR("uCode instr len %Zd too large to fit in\n", |
e7392364 | 4830 | pieces.inst_size); |
be663ab6 WYG |
4831 | goto try_again; |
4832 | } | |
4833 | ||
46bc8d4b | 4834 | if (pieces.data_size > il->hw_params.max_data_size) { |
9406f797 | 4835 | IL_ERR("uCode data len %Zd too large to fit in\n", |
e7392364 | 4836 | pieces.data_size); |
be663ab6 WYG |
4837 | goto try_again; |
4838 | } | |
4839 | ||
46bc8d4b | 4840 | if (pieces.init_size > il->hw_params.max_inst_size) { |
9406f797 | 4841 | IL_ERR("uCode init instr len %Zd too large to fit in\n", |
e7392364 | 4842 | pieces.init_size); |
be663ab6 WYG |
4843 | goto try_again; |
4844 | } | |
4845 | ||
46bc8d4b | 4846 | if (pieces.init_data_size > il->hw_params.max_data_size) { |
9406f797 | 4847 | IL_ERR("uCode init data len %Zd too large to fit in\n", |
e7392364 | 4848 | pieces.init_data_size); |
be663ab6 WYG |
4849 | goto try_again; |
4850 | } | |
4851 | ||
46bc8d4b | 4852 | if (pieces.boot_size > il->hw_params.max_bsm_size) { |
9406f797 | 4853 | IL_ERR("uCode boot instr len %Zd too large to fit in\n", |
e7392364 | 4854 | pieces.boot_size); |
be663ab6 WYG |
4855 | goto try_again; |
4856 | } | |
4857 | ||
4858 | /* Allocate ucode buffers for card's bus-master loading ... */ | |
4859 | ||
4860 | /* Runtime instructions and 2 copies of data: | |
4861 | * 1) unmodified from disk | |
4862 | * 2) backup cache for save/restore during power-downs */ | |
46bc8d4b SG |
4863 | il->ucode_code.len = pieces.inst_size; |
4864 | il_alloc_fw_desc(il->pci_dev, &il->ucode_code); | |
be663ab6 | 4865 | |
46bc8d4b SG |
4866 | il->ucode_data.len = pieces.data_size; |
4867 | il_alloc_fw_desc(il->pci_dev, &il->ucode_data); | |
be663ab6 | 4868 | |
46bc8d4b SG |
4869 | il->ucode_data_backup.len = pieces.data_size; |
4870 | il_alloc_fw_desc(il->pci_dev, &il->ucode_data_backup); | |
be663ab6 | 4871 | |
46bc8d4b SG |
4872 | if (!il->ucode_code.v_addr || !il->ucode_data.v_addr || |
4873 | !il->ucode_data_backup.v_addr) | |
be663ab6 WYG |
4874 | goto err_pci_alloc; |
4875 | ||
4876 | /* Initialization instructions and data */ | |
4877 | if (pieces.init_size && pieces.init_data_size) { | |
46bc8d4b SG |
4878 | il->ucode_init.len = pieces.init_size; |
4879 | il_alloc_fw_desc(il->pci_dev, &il->ucode_init); | |
be663ab6 | 4880 | |
46bc8d4b SG |
4881 | il->ucode_init_data.len = pieces.init_data_size; |
4882 | il_alloc_fw_desc(il->pci_dev, &il->ucode_init_data); | |
be663ab6 | 4883 | |
46bc8d4b | 4884 | if (!il->ucode_init.v_addr || !il->ucode_init_data.v_addr) |
be663ab6 WYG |
4885 | goto err_pci_alloc; |
4886 | } | |
4887 | ||
4888 | /* Bootstrap (instructions only, no data) */ | |
4889 | if (pieces.boot_size) { | |
46bc8d4b SG |
4890 | il->ucode_boot.len = pieces.boot_size; |
4891 | il_alloc_fw_desc(il->pci_dev, &il->ucode_boot); | |
be663ab6 | 4892 | |
46bc8d4b | 4893 | if (!il->ucode_boot.v_addr) |
be663ab6 WYG |
4894 | goto err_pci_alloc; |
4895 | } | |
4896 | ||
4897 | /* Now that we can no longer fail, copy information */ | |
4898 | ||
46bc8d4b | 4899 | il->sta_key_max_num = STA_KEY_MAX_NUM; |
be663ab6 WYG |
4900 | |
4901 | /* Copy images into buffers for card's bus-master reads ... */ | |
4902 | ||
4903 | /* Runtime instructions (first block of data in file) */ | |
58de00a4 | 4904 | D_INFO("Copying (but not loading) uCode instr len %Zd\n", |
e7392364 | 4905 | pieces.inst_size); |
46bc8d4b | 4906 | memcpy(il->ucode_code.v_addr, pieces.inst, pieces.inst_size); |
be663ab6 | 4907 | |
58de00a4 | 4908 | D_INFO("uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n", |
e7392364 | 4909 | il->ucode_code.v_addr, (u32) il->ucode_code.p_addr); |
be663ab6 WYG |
4910 | |
4911 | /* | |
4912 | * Runtime data | |
e2ebc833 | 4913 | * NOTE: Copy into backup buffer will be done in il_up() |
be663ab6 | 4914 | */ |
58de00a4 | 4915 | D_INFO("Copying (but not loading) uCode data len %Zd\n", |
e7392364 | 4916 | pieces.data_size); |
46bc8d4b SG |
4917 | memcpy(il->ucode_data.v_addr, pieces.data, pieces.data_size); |
4918 | memcpy(il->ucode_data_backup.v_addr, pieces.data, pieces.data_size); | |
be663ab6 WYG |
4919 | |
4920 | /* Initialization instructions */ | |
4921 | if (pieces.init_size) { | |
e7392364 SG |
4922 | D_INFO("Copying (but not loading) init instr len %Zd\n", |
4923 | pieces.init_size); | |
46bc8d4b | 4924 | memcpy(il->ucode_init.v_addr, pieces.init, pieces.init_size); |
be663ab6 WYG |
4925 | } |
4926 | ||
4927 | /* Initialization data */ | |
4928 | if (pieces.init_data_size) { | |
e7392364 SG |
4929 | D_INFO("Copying (but not loading) init data len %Zd\n", |
4930 | pieces.init_data_size); | |
46bc8d4b | 4931 | memcpy(il->ucode_init_data.v_addr, pieces.init_data, |
be663ab6 WYG |
4932 | pieces.init_data_size); |
4933 | } | |
4934 | ||
4935 | /* Bootstrap instructions */ | |
58de00a4 | 4936 | D_INFO("Copying (but not loading) boot instr len %Zd\n", |
e7392364 | 4937 | pieces.boot_size); |
46bc8d4b | 4938 | memcpy(il->ucode_boot.v_addr, pieces.boot, pieces.boot_size); |
be663ab6 WYG |
4939 | |
4940 | /* | |
4941 | * figure out the offset of chain noise reset and gain commands | |
4942 | * base on the size of standard phy calibration commands table size | |
4943 | */ | |
46bc8d4b | 4944 | il->_4965.phy_calib_chain_noise_reset_cmd = |
e7392364 | 4945 | standard_phy_calibration_size; |
46bc8d4b | 4946 | il->_4965.phy_calib_chain_noise_gain_cmd = |
e7392364 | 4947 | standard_phy_calibration_size + 1; |
be663ab6 WYG |
4948 | |
4949 | /************************************************** | |
4950 | * This is still part of probe() in a sense... | |
4951 | * | |
4952 | * 9. Setup and register with mac80211 and debugfs | |
4953 | **************************************************/ | |
46bc8d4b | 4954 | err = il4965_mac_setup_register(il, max_probe_length); |
be663ab6 WYG |
4955 | if (err) |
4956 | goto out_unbind; | |
4957 | ||
46bc8d4b | 4958 | err = il_dbgfs_register(il, DRV_NAME); |
be663ab6 | 4959 | if (err) |
e7392364 SG |
4960 | IL_ERR("failed to create debugfs files. Ignoring error: %d\n", |
4961 | err); | |
be663ab6 | 4962 | |
e7392364 | 4963 | err = sysfs_create_group(&il->pci_dev->dev.kobj, &il_attribute_group); |
be663ab6 | 4964 | if (err) { |
9406f797 | 4965 | IL_ERR("failed to create sysfs device attributes\n"); |
be663ab6 WYG |
4966 | goto out_unbind; |
4967 | } | |
4968 | ||
4969 | /* We have our copies now, allow OS release its copies */ | |
4970 | release_firmware(ucode_raw); | |
46bc8d4b | 4971 | complete(&il->_4965.firmware_loading_complete); |
be663ab6 WYG |
4972 | return; |
4973 | ||
e7392364 | 4974 | try_again: |
be663ab6 | 4975 | /* try next, if any */ |
46bc8d4b | 4976 | if (il4965_request_firmware(il, false)) |
be663ab6 WYG |
4977 | goto out_unbind; |
4978 | release_firmware(ucode_raw); | |
4979 | return; | |
4980 | ||
e7392364 | 4981 | err_pci_alloc: |
9406f797 | 4982 | IL_ERR("failed to allocate pci memory\n"); |
46bc8d4b | 4983 | il4965_dealloc_ucode_pci(il); |
e7392364 | 4984 | out_unbind: |
46bc8d4b SG |
4985 | complete(&il->_4965.firmware_loading_complete); |
4986 | device_release_driver(&il->pci_dev->dev); | |
be663ab6 WYG |
4987 | release_firmware(ucode_raw); |
4988 | } | |
4989 | ||
e7392364 | 4990 | static const char *const desc_lookup_text[] = { |
be663ab6 WYG |
4991 | "OK", |
4992 | "FAIL", | |
4993 | "BAD_PARAM", | |
4994 | "BAD_CHECKSUM", | |
4995 | "NMI_INTERRUPT_WDG", | |
4996 | "SYSASSERT", | |
4997 | "FATAL_ERROR", | |
4998 | "BAD_COMMAND", | |
4999 | "HW_ERROR_TUNE_LOCK", | |
5000 | "HW_ERROR_TEMPERATURE", | |
5001 | "ILLEGAL_CHAN_FREQ", | |
3b98c7f4 | 5002 | "VCC_NOT_STBL", |
9a95b370 | 5003 | "FH49_ERROR", |
be663ab6 WYG |
5004 | "NMI_INTERRUPT_HOST", |
5005 | "NMI_INTERRUPT_ACTION_PT", | |
5006 | "NMI_INTERRUPT_UNKNOWN", | |
5007 | "UCODE_VERSION_MISMATCH", | |
5008 | "HW_ERROR_ABS_LOCK", | |
5009 | "HW_ERROR_CAL_LOCK_FAIL", | |
5010 | "NMI_INTERRUPT_INST_ACTION_PT", | |
5011 | "NMI_INTERRUPT_DATA_ACTION_PT", | |
5012 | "NMI_TRM_HW_ER", | |
5013 | "NMI_INTERRUPT_TRM", | |
861d9c3f | 5014 | "NMI_INTERRUPT_BREAK_POINT", |
be663ab6 WYG |
5015 | "DEBUG_0", |
5016 | "DEBUG_1", | |
5017 | "DEBUG_2", | |
5018 | "DEBUG_3", | |
5019 | }; | |
5020 | ||
e7392364 SG |
5021 | static struct { |
5022 | char *name; | |
5023 | u8 num; | |
5024 | } advanced_lookup[] = { | |
5025 | { | |
5026 | "NMI_INTERRUPT_WDG", 0x34}, { | |
5027 | "SYSASSERT", 0x35}, { | |
5028 | "UCODE_VERSION_MISMATCH", 0x37}, { | |
5029 | "BAD_COMMAND", 0x38}, { | |
5030 | "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C}, { | |
5031 | "FATAL_ERROR", 0x3D}, { | |
5032 | "NMI_TRM_HW_ERR", 0x46}, { | |
5033 | "NMI_INTERRUPT_TRM", 0x4C}, { | |
5034 | "NMI_INTERRUPT_BREAK_POINT", 0x54}, { | |
5035 | "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C}, { | |
5036 | "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64}, { | |
5037 | "NMI_INTERRUPT_HOST", 0x66}, { | |
5038 | "NMI_INTERRUPT_ACTION_PT", 0x7C}, { | |
5039 | "NMI_INTERRUPT_UNKNOWN", 0x84}, { | |
5040 | "NMI_INTERRUPT_INST_ACTION_PT", 0x86}, { | |
5041 | "ADVANCED_SYSASSERT", 0},}; | |
5042 | ||
5043 | static const char * | |
5044 | il4965_desc_lookup(u32 num) | |
be663ab6 WYG |
5045 | { |
5046 | int i; | |
5047 | int max = ARRAY_SIZE(desc_lookup_text); | |
5048 | ||
5049 | if (num < max) | |
5050 | return desc_lookup_text[num]; | |
5051 | ||
5052 | max = ARRAY_SIZE(advanced_lookup) - 1; | |
5053 | for (i = 0; i < max; i++) { | |
5054 | if (advanced_lookup[i].num == num) | |
5055 | break; | |
5056 | } | |
5057 | return advanced_lookup[i].name; | |
5058 | } | |
5059 | ||
/* Layout of the uCode error log in device SRAM: a one-word entry count
 * followed by 7-word error records. */
#define ERROR_START_OFFSET (1 * sizeof(u32))
#define ERROR_ELEM_SIZE (7 * sizeof(u32))

/*
 * il4965_dump_nic_error_log - dump the uCode error log from device memory
 *
 * Locates the error-event table via the pointer reported in the (init or
 * runtime) alive response, validates it, then reads the latest error
 * record word by word from target memory and prints it via IL_ERR.
 * Also latches the error code into il->isr_stats.err_code.
 */
void
il4965_dump_nic_error_log(struct il_priv *il)
{
	u32 data2, line;
	u32 desc, time, count, base, data1;
	u32 blink1, blink2, ilink1, ilink2;
	u32 pc, hcmd;

	/* The table location depends on which uCode image is running. */
	if (il->ucode_type == UCODE_INIT)
		base = le32_to_cpu(il->card_alive_init.error_event_table_ptr);
	else
		base = le32_to_cpu(il->card_alive.error_event_table_ptr);

	if (!il->ops->is_valid_rtc_data_addr(base)) {
		IL_ERR("Not valid error log pointer 0x%08X for %s uCode\n",
		       base, (il->ucode_type == UCODE_INIT) ? "Init" : "RT");
		return;
	}

	/* First word of the table is the number of logged entries. */
	count = il_read_targ_mem(il, base);

	if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
		IL_ERR("Start IWL Error Log Dump:\n");
		IL_ERR("Status: 0x%08lX, count: %d\n", il->status, count);
	}

	/* Fixed word offsets below follow the device's table layout. */
	desc = il_read_targ_mem(il, base + 1 * sizeof(u32));
	il->isr_stats.err_code = desc;
	pc = il_read_targ_mem(il, base + 2 * sizeof(u32));
	blink1 = il_read_targ_mem(il, base + 3 * sizeof(u32));
	blink2 = il_read_targ_mem(il, base + 4 * sizeof(u32));
	ilink1 = il_read_targ_mem(il, base + 5 * sizeof(u32));
	ilink2 = il_read_targ_mem(il, base + 6 * sizeof(u32));
	data1 = il_read_targ_mem(il, base + 7 * sizeof(u32));
	data2 = il_read_targ_mem(il, base + 8 * sizeof(u32));
	line = il_read_targ_mem(il, base + 9 * sizeof(u32));
	time = il_read_targ_mem(il, base + 11 * sizeof(u32));
	hcmd = il_read_targ_mem(il, base + 22 * sizeof(u32));

	IL_ERR("Desc Time "
	       "data1 data2 line\n");
	IL_ERR("%-28s (0x%04X) %010u 0x%08X 0x%08X %u\n",
	       il4965_desc_lookup(desc), desc, time, data1, data2, line);
	IL_ERR("pc blink1 blink2 ilink1 ilink2 hcmd\n");
	IL_ERR("0x%05X 0x%05X 0x%05X 0x%05X 0x%05X 0x%05X\n", pc, blink1,
	       blink2, ilink1, ilink2, hcmd);
}
5110 | ||
e7392364 SG |
5111 | static void |
5112 | il4965_rf_kill_ct_config(struct il_priv *il) | |
be663ab6 | 5113 | { |
e2ebc833 | 5114 | struct il_ct_kill_config cmd; |
be663ab6 WYG |
5115 | unsigned long flags; |
5116 | int ret = 0; | |
5117 | ||
46bc8d4b | 5118 | spin_lock_irqsave(&il->lock, flags); |
841b2cca | 5119 | _il_wr(il, CSR_UCODE_DRV_GP1_CLR, |
e7392364 | 5120 | CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT); |
46bc8d4b | 5121 | spin_unlock_irqrestore(&il->lock, flags); |
be663ab6 WYG |
5122 | |
5123 | cmd.critical_temperature_R = | |
e7392364 | 5124 | cpu_to_le32(il->hw_params.ct_kill_threshold); |
be663ab6 | 5125 | |
e7392364 | 5126 | ret = il_send_cmd_pdu(il, C_CT_KILL_CONFIG, sizeof(cmd), &cmd); |
be663ab6 | 5127 | if (ret) |
4d69c752 | 5128 | IL_ERR("C_CT_KILL_CONFIG failed\n"); |
be663ab6 | 5129 | else |
e7392364 SG |
5130 | D_INFO("C_CT_KILL_CONFIG " "succeeded, " |
5131 | "critical temperature is %d\n", | |
5132 | il->hw_params.ct_kill_threshold); | |
be663ab6 WYG |
5133 | } |
5134 | ||
/* Default Tx queue -> HW FIFO mapping for the 4965's seven queues:
 * four WMM AC queues, the command FIFO, and two unused slots
 * (il4965_alive_notify() skips IL_TX_FIFO_UNUSED entries). */
static const s8 default_queue_to_tx_fifo[] = {
	IL_TX_FIFO_VO,
	IL_TX_FIFO_VI,
	IL_TX_FIFO_BE,
	IL_TX_FIFO_BK,
	IL49_CMD_FIFO_NUM,
	IL_TX_FIFO_UNUSED,
	IL_TX_FIFO_UNUSED,
};

/* Build a contiguous bitmask with bits lo..hi (inclusive) set. */
#define IL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
5146 | ||
e7392364 SG |
5147 | static int |
5148 | il4965_alive_notify(struct il_priv *il) | |
be663ab6 WYG |
5149 | { |
5150 | u32 a; | |
5151 | unsigned long flags; | |
5152 | int i, chan; | |
5153 | u32 reg_val; | |
5154 | ||
46bc8d4b | 5155 | spin_lock_irqsave(&il->lock, flags); |
be663ab6 WYG |
5156 | |
5157 | /* Clear 4965's internal Tx Scheduler data base */ | |
e7392364 | 5158 | il->scd_base_addr = il_rd_prph(il, IL49_SCD_SRAM_BASE_ADDR); |
d3175167 SG |
5159 | a = il->scd_base_addr + IL49_SCD_CONTEXT_DATA_OFFSET; |
5160 | for (; a < il->scd_base_addr + IL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4) | |
46bc8d4b | 5161 | il_write_targ_mem(il, a, 0); |
d3175167 | 5162 | for (; a < il->scd_base_addr + IL49_SCD_TRANSLATE_TBL_OFFSET; a += 4) |
46bc8d4b | 5163 | il_write_targ_mem(il, a, 0); |
e7392364 SG |
5164 | for (; |
5165 | a < | |
5166 | il->scd_base_addr + | |
5167 | IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(il->hw_params.max_txq_num); | |
5168 | a += 4) | |
46bc8d4b | 5169 | il_write_targ_mem(il, a, 0); |
be663ab6 WYG |
5170 | |
5171 | /* Tel 4965 where to find Tx byte count tables */ | |
e7392364 | 5172 | il_wr_prph(il, IL49_SCD_DRAM_BASE_ADDR, il->scd_bc_tbls.dma >> 10); |
be663ab6 WYG |
5173 | |
5174 | /* Enable DMA channel */ | |
e7392364 SG |
5175 | for (chan = 0; chan < FH49_TCSR_CHNL_NUM; chan++) |
5176 | il_wr(il, FH49_TCSR_CHNL_TX_CONFIG_REG(chan), | |
5177 | FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | | |
5178 | FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE); | |
be663ab6 WYG |
5179 | |
5180 | /* Update FH chicken bits */ | |
9a95b370 SG |
5181 | reg_val = il_rd(il, FH49_TX_CHICKEN_BITS_REG); |
5182 | il_wr(il, FH49_TX_CHICKEN_BITS_REG, | |
e7392364 | 5183 | reg_val | FH49_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN); |
be663ab6 WYG |
5184 | |
5185 | /* Disable chain mode for all queues */ | |
d3175167 | 5186 | il_wr_prph(il, IL49_SCD_QUEUECHAIN_SEL, 0); |
be663ab6 WYG |
5187 | |
5188 | /* Initialize each Tx queue (including the command queue) */ | |
46bc8d4b | 5189 | for (i = 0; i < il->hw_params.max_txq_num; i++) { |
be663ab6 | 5190 | |
0c2c8852 | 5191 | /* TFD circular buffer read/write idxes */ |
d3175167 | 5192 | il_wr_prph(il, IL49_SCD_QUEUE_RDPTR(i), 0); |
0c1a94e2 | 5193 | il_wr(il, HBUS_TARG_WRPTR, 0 | (i << 8)); |
be663ab6 WYG |
5194 | |
5195 | /* Max Tx Window size for Scheduler-ACK mode */ | |
e7392364 SG |
5196 | il_write_targ_mem(il, |
5197 | il->scd_base_addr + | |
5198 | IL49_SCD_CONTEXT_QUEUE_OFFSET(i), | |
5199 | (SCD_WIN_SIZE << | |
5200 | IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) & | |
5201 | IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK); | |
be663ab6 WYG |
5202 | |
5203 | /* Frame limit */ | |
e7392364 SG |
5204 | il_write_targ_mem(il, |
5205 | il->scd_base_addr + | |
5206 | IL49_SCD_CONTEXT_QUEUE_OFFSET(i) + | |
5207 | sizeof(u32), | |
5208 | (SCD_FRAME_LIMIT << | |
5209 | IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) & | |
5210 | IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK); | |
be663ab6 WYG |
5211 | |
5212 | } | |
d3175167 | 5213 | il_wr_prph(il, IL49_SCD_INTERRUPT_MASK, |
e7392364 | 5214 | (1 << il->hw_params.max_txq_num) - 1); |
be663ab6 WYG |
5215 | |
5216 | /* Activate all Tx DMA/FIFO channels */ | |
46bc8d4b | 5217 | il4965_txq_set_sched(il, IL_MASK(0, 6)); |
be663ab6 | 5218 | |
46bc8d4b | 5219 | il4965_set_wr_ptrs(il, IL_DEFAULT_CMD_QUEUE_NUM, 0); |
be663ab6 WYG |
5220 | |
5221 | /* make sure all queue are not stopped */ | |
46bc8d4b | 5222 | memset(&il->queue_stopped[0], 0, sizeof(il->queue_stopped)); |
be663ab6 | 5223 | for (i = 0; i < 4; i++) |
46bc8d4b | 5224 | atomic_set(&il->queue_stop_count[i], 0); |
be663ab6 WYG |
5225 | |
5226 | /* reset to 0 to enable all the queue first */ | |
46bc8d4b | 5227 | il->txq_ctx_active_msk = 0; |
be663ab6 WYG |
5228 | /* Map each Tx/cmd queue to its corresponding fifo */ |
5229 | BUILD_BUG_ON(ARRAY_SIZE(default_queue_to_tx_fifo) != 7); | |
5230 | ||
5231 | for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) { | |
5232 | int ac = default_queue_to_tx_fifo[i]; | |
5233 | ||
46bc8d4b | 5234 | il_txq_ctx_activate(il, i); |
be663ab6 | 5235 | |
e2ebc833 | 5236 | if (ac == IL_TX_FIFO_UNUSED) |
be663ab6 WYG |
5237 | continue; |
5238 | ||
46bc8d4b | 5239 | il4965_tx_queue_set_status(il, &il->txq[i], ac, 0); |
be663ab6 WYG |
5240 | } |
5241 | ||
46bc8d4b | 5242 | spin_unlock_irqrestore(&il->lock, flags); |
be663ab6 WYG |
5243 | |
5244 | return 0; | |
5245 | } | |
5246 | ||
/**
 * il4965_alive_start - called after N_ALIVE notification received
 * from protocol/runtime uCode (initialization uCode's
 * Alive gets handled by il_init_alive_start()).
 *
 * Validates the alive response and the loaded runtime image, programs
 * the Tx scheduler, then brings the device to operational state:
 * watchdog, RXON configuration, BT coex, calibration and power mode.
 * Any failure queues the restart work item.
 */
static void
il4965_alive_start(struct il_priv *il)
{
	int ret = 0;

	D_INFO("Runtime Alive received.\n");

	if (il->card_alive.is_valid != UCODE_VALID_OK) {
		/* We had an error bringing up the hardware, so take it
		 * all the way back down so we can try again */
		D_INFO("Alive failed.\n");
		goto restart;
	}

	/* Initialize uCode has loaded Runtime uCode ... verify inst image.
	 * This is a paranoid check, because we would not have gotten the
	 * "runtime" alive if code weren't properly loaded. */
	if (il4965_verify_ucode(il)) {
		/* Runtime instruction load was bad;
		 * take it all the way back down so we can try again */
		D_INFO("Bad runtime uCode load.\n");
		goto restart;
	}

	/* Program the Tx scheduler now that the runtime image is up. */
	ret = il4965_alive_notify(il);
	if (ret) {
		IL_WARN("Could not complete ALIVE transition [ntf]: %d\n", ret);
		goto restart;
	}

	/* After the ALIVE response, we can send host commands to the uCode */
	set_bit(S_ALIVE, &il->status);

	/* Enable watchdog to monitor the driver tx queues */
	il_setup_watchdog(il);

	/* With rfkill asserted, stop here; queues stay stopped. */
	if (il_is_rfkill(il))
		return;

	ieee80211_wake_queues(il->hw);

	/* Reset the active rate mask to the full set. */
	il->active_rate = RATES_MASK;

	if (il_is_associated(il)) {
		struct il_rxon_cmd *active_rxon =
		    (struct il_rxon_cmd *)&il->active;
		/* apply any changes in staging */
		il->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
		active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	} else {
		/* Initialize our rx_config data */
		il_connection_init_rx_config(il);

		if (il->ops->set_rxon_chain)
			il->ops->set_rxon_chain(il);
	}

	/* Configure bluetooth coexistence if enabled */
	il_send_bt_config(il);

	il4965_reset_run_time_calib(il);

	set_bit(S_READY, &il->status);

	/* Configure the adapter for unassociated operation */
	il_commit_rxon(il);

	/* At this point, the NIC is initialized and operational */
	il4965_rf_kill_ct_config(il);

	D_INFO("ALIVE processing complete.\n");
	wake_up(&il->wait_command_queue);

	il_power_update_mode(il, true);
	D_INFO("Updated power mode\n");

	return;

restart:
	queue_work(il->workqueue, &il->restart);
}
5333 | ||
46bc8d4b | 5334 | static void il4965_cancel_deferred_work(struct il_priv *il); |
be663ab6 | 5335 | |
/*
 * __il4965_down - take the device all the way down
 *
 * Cancels scanning, stops the watchdog, clears station/key state, masks
 * interrupts, resets the on-board processor and stops DMA, leaving only
 * the RF-kill related status bits set.  Called with il->mutex held
 * (see il4965_down()).
 */
static void
__il4965_down(struct il_priv *il)
{
	unsigned long flags;
	int exit_pending;

	D_INFO(DRV_NAME " is going down\n");

	il_scan_cancel_timeout(il, 200);

	/* Remember whether a module exit was already pending so we only
	 * clear S_EXIT_PENDING below if we set it ourselves. */
	exit_pending = test_and_set_bit(S_EXIT_PENDING, &il->status);

	/* Stop TX queues watchdog. We need to have S_EXIT_PENDING bit set
	 * to prevent rearm timer */
	del_timer_sync(&il->watchdog);

	il_clear_ucode_stations(il);

	/* FIXME: race conditions ? */
	spin_lock_irq(&il->sta_lock);
	/*
	 * Remove all key information that is not stored as part
	 * of station information since mac80211 may not have had
	 * a chance to remove all the keys. When device is
	 * reconfigured by mac80211 after an error all keys will
	 * be reconfigured.
	 */
	memset(il->_4965.wep_keys, 0, sizeof(il->_4965.wep_keys));
	il->_4965.key_mapping_keys = 0;
	spin_unlock_irq(&il->sta_lock);

	il_dealloc_bcast_stations(il);
	il_clear_driver_stations(il);

	/* Unblock any waiting calls */
	wake_up_all(&il->wait_command_queue);

	/* Wipe out the EXIT_PENDING status bit if we are not actually
	 * exiting the module */
	if (!exit_pending)
		clear_bit(S_EXIT_PENDING, &il->status);

	/* stop and reset the on-board processor */
	_il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

	/* tell the device to stop sending interrupts */
	spin_lock_irqsave(&il->lock, flags);
	il_disable_interrupts(il);
	spin_unlock_irqrestore(&il->lock, flags);
	il4965_synchronize_irq(il);

	if (il->mac80211_registered)
		ieee80211_stop_queues(il->hw);

	/* If we have not previously called il_init() then
	 * clear all bits but the RF Kill bit and return */
	if (!il_is_init(il)) {
		il->status =
		    test_bit(S_RF_KILL_HW, &il->status) << S_RF_KILL_HW |
		    test_bit(S_GEO_CONFIGURED, &il->status) << S_GEO_CONFIGURED |
		    test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING;
		goto exit;
	}

	/* ...otherwise clear out all the status bits but the RF Kill
	 * bit and continue taking the NIC down. */
	il->status &=
	    test_bit(S_RF_KILL_HW, &il->status) << S_RF_KILL_HW |
	    test_bit(S_GEO_CONFIGURED, &il->status) << S_GEO_CONFIGURED |
	    test_bit(S_FW_ERROR, &il->status) << S_FW_ERROR |
	    test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING;

	/*
	 * We disabled and synchronized interrupt, and priv->mutex is taken, so
	 * here is the only thread which will program device registers, but
	 * still have lockdep assertions, so we are taking reg_lock.
	 */
	spin_lock_irq(&il->reg_lock);
	/* FIXME: il_grab_nic_access if rfkill is off ? */

	il4965_txq_ctx_stop(il);
	il4965_rxq_stop(il);
	/* Power-down device's busmaster DMA clocks */
	_il_wr_prph(il, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
	udelay(5);
	/* Make sure (redundant) we've released our request to stay awake */
	_il_clear_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	/* Stop the device, and put it in low power state */
	_il_apm_stop(il);

	spin_unlock_irq(&il->reg_lock);

	il4965_txq_ctx_unmap(il);
exit:
	memset(&il->card_alive, 0, sizeof(struct il_alive_resp));

	/* dev_kfree_skb() tolerates NULL, so no guard is needed here. */
	dev_kfree_skb(il->beacon_skb);
	il->beacon_skb = NULL;

	/* clear out any free frames */
	il4965_clear_free_frames(il);
}
5438 | ||
/*
 * il4965_down - stop the device
 *
 * Serializes the actual teardown (__il4965_down()) with il->mutex;
 * deferred work is cancelled after the mutex is released.
 */
static void
il4965_down(struct il_priv *il)
{
	mutex_lock(&il->mutex);
	__il4965_down(il);
	mutex_unlock(&il->mutex);

	il4965_cancel_deferred_work(il);
}
5448 | ||
be663ab6 | 5449 | |
71e0c6c2 | 5450 | static void |
e7392364 | 5451 | il4965_set_hw_ready(struct il_priv *il) |
be663ab6 | 5452 | { |
71e0c6c2 | 5453 | int ret; |
be663ab6 | 5454 | |
46bc8d4b | 5455 | il_set_bit(il, CSR_HW_IF_CONFIG_REG, |
e7392364 | 5456 | CSR_HW_IF_CONFIG_REG_BIT_NIC_READY); |
be663ab6 WYG |
5457 | |
5458 | /* See if we got it */ | |
71e0c6c2 SG |
5459 | ret = _il_poll_bit(il, CSR_HW_IF_CONFIG_REG, |
5460 | CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, | |
5461 | CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, | |
5462 | 100); | |
5463 | if (ret >= 0) | |
46bc8d4b | 5464 | il->hw_ready = true; |
be663ab6 | 5465 | |
71e0c6c2 | 5466 | D_INFO("hardware %s ready\n", (il->hw_ready) ? "" : "not"); |
be663ab6 WYG |
5467 | } |
5468 | ||
71e0c6c2 | 5469 | static void |
e7392364 | 5470 | il4965_prepare_card_hw(struct il_priv *il) |
be663ab6 | 5471 | { |
71e0c6c2 | 5472 | int ret; |
be663ab6 | 5473 | |
71e0c6c2 | 5474 | il->hw_ready = false; |
be663ab6 | 5475 | |
71e0c6c2 | 5476 | il4965_set_hw_ready(il); |
46bc8d4b | 5477 | if (il->hw_ready) |
71e0c6c2 | 5478 | return; |
be663ab6 WYG |
5479 | |
5480 | /* If HW is not ready, prepare the conditions to check again */ | |
e7392364 | 5481 | il_set_bit(il, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_PREPARE); |
be663ab6 | 5482 | |
e7392364 SG |
5483 | ret = |
5484 | _il_poll_bit(il, CSR_HW_IF_CONFIG_REG, | |
5485 | ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, | |
5486 | CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000); | |
be663ab6 WYG |
5487 | |
5488 | /* HW should be ready by now, check again. */ | |
5489 | if (ret != -ETIMEDOUT) | |
46bc8d4b | 5490 | il4965_set_hw_ready(il); |
be663ab6 WYG |
5491 | } |
5492 | ||
#define MAX_HW_RESTARTS 5

/*
 * __il4965_up - bring the NIC up (caller must hold il->mutex)
 *
 * Performs the full bring-up sequence: sanity checks, broadcast station
 * allocation, HW prepare, rfkill check, NIC init, interrupt enable, and
 * up to MAX_HW_RESTARTS attempts to load the bootstrap uCode.
 *
 * Returns 0 on success (including the rfkill-asserted case, where the
 * radio stays off but bring-up is considered done), -EIO or the error
 * from il4965_hw_nic_init()/il4965_alloc_bcast_station() on failure.
 */
static int
__il4965_up(struct il_priv *il)
{
	int i;
	int ret;

	if (test_bit(S_EXIT_PENDING, &il->status)) {
		IL_WARN("Exit pending; will not bring the NIC up\n");
		return -EIO;
	}

	/* uCode images must already be loaded from disk by this point */
	if (!il->ucode_data_backup.v_addr || !il->ucode_data.v_addr) {
		IL_ERR("ucode not available for device bringup\n");
		return -EIO;
	}

	ret = il4965_alloc_bcast_station(il);
	if (ret) {
		il_dealloc_bcast_stations(il);
		return ret;
	}

	/* Sets il->hw_ready as a side effect */
	il4965_prepare_card_hw(il);
	if (!il->hw_ready) {
		IL_ERR("HW not ready\n");
		return -EIO;
	}

	/* If platform's RF_KILL switch is NOT set to KILL */
	if (_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
		clear_bit(S_RF_KILL_HW, &il->status);
	else {
		set_bit(S_RF_KILL_HW, &il->status);
		wiphy_rfkill_set_hw_state(il->hw->wiphy, true);

		/* keep listening for rfkill state changes while down */
		il_enable_rfkill_int(il);
		IL_WARN("Radio disabled by HW RF Kill switch\n");
		return 0;
	}

	/* Ack/clear any pending interrupts before enabling */
	_il_wr(il, CSR_INT, 0xFFFFFFFF);

	/* must be initialised before il_hw_nic_init */
	il->cmd_queue = IL_DEFAULT_CMD_QUEUE_NUM;

	ret = il4965_hw_nic_init(il);
	if (ret) {
		IL_ERR("Unable to init nic\n");
		return ret;
	}

	/* make sure rfkill handshake bits are cleared */
	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	_il_wr(il, CSR_INT, 0xFFFFFFFF);
	il_enable_interrupts(il);

	/* really make sure rfkill handshake bits are cleared */
	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	/* Copy original ucode data image from disk into backup cache.
	 * This will be used to initialize the on-board processor's
	 * data SRAM for a clean start when the runtime program first loads. */
	memcpy(il->ucode_data_backup.v_addr, il->ucode_data.v_addr,
	       il->ucode_data.len);

	for (i = 0; i < MAX_HW_RESTARTS; i++) {

		/* load bootstrap state machine,
		 * load bootstrap program into processor's memory,
		 * prepare to load the "initialize" uCode */
		ret = il->ops->load_ucode(il);

		if (ret) {
			IL_ERR("Unable to set up bootstrap uCode: %d\n", ret);
			continue;
		}

		/* start card; "initialize" will load runtime ucode */
		il4965_nic_start(il);

		D_INFO(DRV_NAME " is coming up\n");

		return 0;
	}

	/* All restart attempts failed: force the device back down */
	set_bit(S_EXIT_PENDING, &il->status);
	__il4965_down(il);
	clear_bit(S_EXIT_PENDING, &il->status);

	/* tried to restart and config the device for as long as our
	 * patience could withstand */
	IL_ERR("Unable to initialize device after %d attempts.\n", i);
	return -EIO;
}
5593 | ||
be663ab6 WYG |
5594 | /***************************************************************************** |
5595 | * | |
5596 | * Workqueue callbacks | |
5597 | * | |
5598 | *****************************************************************************/ | |
5599 | ||
e7392364 SG |
5600 | static void |
5601 | il4965_bg_init_alive_start(struct work_struct *data) | |
be663ab6 | 5602 | { |
46bc8d4b | 5603 | struct il_priv *il = |
e2ebc833 | 5604 | container_of(data, struct il_priv, init_alive_start.work); |
be663ab6 | 5605 | |
46bc8d4b | 5606 | mutex_lock(&il->mutex); |
a6766ccd | 5607 | if (test_bit(S_EXIT_PENDING, &il->status)) |
28a6e577 | 5608 | goto out; |
be663ab6 | 5609 | |
1600b875 | 5610 | il->ops->init_alive_start(il); |
28a6e577 | 5611 | out: |
46bc8d4b | 5612 | mutex_unlock(&il->mutex); |
be663ab6 WYG |
5613 | } |
5614 | ||
e7392364 SG |
5615 | static void |
5616 | il4965_bg_alive_start(struct work_struct *data) | |
be663ab6 | 5617 | { |
46bc8d4b | 5618 | struct il_priv *il = |
e2ebc833 | 5619 | container_of(data, struct il_priv, alive_start.work); |
be663ab6 | 5620 | |
46bc8d4b | 5621 | mutex_lock(&il->mutex); |
a6766ccd | 5622 | if (test_bit(S_EXIT_PENDING, &il->status)) |
28a6e577 | 5623 | goto out; |
be663ab6 | 5624 | |
46bc8d4b | 5625 | il4965_alive_start(il); |
28a6e577 | 5626 | out: |
46bc8d4b | 5627 | mutex_unlock(&il->mutex); |
be663ab6 WYG |
5628 | } |
5629 | ||
e7392364 SG |
5630 | static void |
5631 | il4965_bg_run_time_calib_work(struct work_struct *work) | |
be663ab6 | 5632 | { |
46bc8d4b | 5633 | struct il_priv *il = container_of(work, struct il_priv, |
e7392364 | 5634 | run_time_calib_work); |
be663ab6 | 5635 | |
46bc8d4b | 5636 | mutex_lock(&il->mutex); |
be663ab6 | 5637 | |
a6766ccd SG |
5638 | if (test_bit(S_EXIT_PENDING, &il->status) || |
5639 | test_bit(S_SCANNING, &il->status)) { | |
46bc8d4b | 5640 | mutex_unlock(&il->mutex); |
be663ab6 WYG |
5641 | return; |
5642 | } | |
5643 | ||
46bc8d4b | 5644 | if (il->start_calib) { |
e7392364 SG |
5645 | il4965_chain_noise_calibration(il, (void *)&il->_4965.stats); |
5646 | il4965_sensitivity_calibration(il, (void *)&il->_4965.stats); | |
be663ab6 WYG |
5647 | } |
5648 | ||
46bc8d4b | 5649 | mutex_unlock(&il->mutex); |
be663ab6 WYG |
5650 | } |
5651 | ||
e7392364 SG |
/*
 * il4965_bg_restart - work handler restarting the device
 *
 * Two paths:
 *  - firmware error (S_FW_ERROR set): tear the device down and ask
 *    mac80211 to restart the hardware via ieee80211_restart_hw();
 *  - otherwise: a plain down/up cycle driven by this driver.
 */
static void
il4965_bg_restart(struct work_struct *data)
{
	struct il_priv *il = container_of(data, struct il_priv, restart);

	if (test_bit(S_EXIT_PENDING, &il->status))
		return;

	if (test_and_clear_bit(S_FW_ERROR, &il->status)) {
		mutex_lock(&il->mutex);
		/* FIXME: do we dereference vif without mutex locked ? */
		il->vif = NULL;
		il->is_open = 0;

		__il4965_down(il);

		/* cancel pending work outside the mutex to avoid deadlock
		 * with work items that take il->mutex themselves */
		mutex_unlock(&il->mutex);
		il4965_cancel_deferred_work(il);
		ieee80211_restart_hw(il->hw);
	} else {
		il4965_down(il);

		mutex_lock(&il->mutex);
		/* re-check: il4965_down() may have raced with an unload */
		if (test_bit(S_EXIT_PENDING, &il->status)) {
			mutex_unlock(&il->mutex);
			return;
		}

		__il4965_up(il);
		mutex_unlock(&il->mutex);
	}
}
5684 | ||
e7392364 SG |
5685 | static void |
5686 | il4965_bg_rx_replenish(struct work_struct *data) | |
be663ab6 | 5687 | { |
e7392364 | 5688 | struct il_priv *il = container_of(data, struct il_priv, rx_replenish); |
be663ab6 | 5689 | |
a6766ccd | 5690 | if (test_bit(S_EXIT_PENDING, &il->status)) |
be663ab6 WYG |
5691 | return; |
5692 | ||
46bc8d4b SG |
5693 | mutex_lock(&il->mutex); |
5694 | il4965_rx_replenish(il); | |
5695 | mutex_unlock(&il->mutex); | |
be663ab6 WYG |
5696 | } |
5697 | ||
5698 | /***************************************************************************** | |
5699 | * | |
5700 | * mac80211 entry point functions | |
5701 | * | |
5702 | *****************************************************************************/ | |
5703 | ||
5704 | #define UCODE_READY_TIMEOUT (4 * HZ) | |
5705 | ||
5706 | /* | |
5707 | * Not a mac80211 entry point function, but it fits in with all the | |
5708 | * other mac80211 functions grouped here. | |
5709 | */ | |
e7392364 SG |
/*
 * il4965_mac_setup_register - describe our capabilities to mac80211
 * and register the hardware
 *
 * @max_probe_length: max probe request size the uCode accepts; used to
 *	derive wiphy->max_scan_ie_len (minus 24-byte 802.11 header and
 *	2-byte zero-length SSID element we build ourselves).
 *
 * Returns 0 on success or the error from ieee80211_register_hw().
 */
static int
il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length)
{
	int ret;
	struct ieee80211_hw *hw = il->hw;

	hw->rate_control_algorithm = "iwl-4965-rs";

	/* Tell mac80211 our characteristics */
	hw->flags =
	    IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_AMPDU_AGGREGATION |
	    IEEE80211_HW_NEED_DTIM_PERIOD | IEEE80211_HW_SPECTRUM_MGMT |
	    IEEE80211_HW_REPORTS_TX_ACK_STATUS;

	/* SMPS only advertised on 11n-capable SKUs */
	if (il->cfg->sku & IL_SKU_N)
		hw->flags |=
		    IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
		    IEEE80211_HW_SUPPORTS_STATIC_SMPS;

	hw->sta_data_size = sizeof(struct il_station_priv);
	hw->vif_data_size = sizeof(struct il_vif_priv);

	hw->wiphy->interface_modes =
	    BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC);

	hw->wiphy->flags |=
	    WIPHY_FLAG_CUSTOM_REGULATORY | WIPHY_FLAG_DISABLE_BEACON_HINTS;

	/*
	 * For now, disable PS by default because it affects
	 * RX performance significantly.
	 */
	hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;

	hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
	/* we create the 802.11 header and a zero-length SSID element */
	hw->wiphy->max_scan_ie_len = max_probe_length - 24 - 2;

	/* Default value; 4 EDCA QOS priorities */
	hw->queues = 4;

	hw->max_listen_interval = IL_CONN_MAX_LISTEN_INTERVAL;

	/* Only export bands that actually have channels on this SKU */
	if (il->bands[IEEE80211_BAND_2GHZ].n_channels)
		il->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
		    &il->bands[IEEE80211_BAND_2GHZ];
	if (il->bands[IEEE80211_BAND_5GHZ].n_channels)
		il->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
		    &il->bands[IEEE80211_BAND_5GHZ];

	il_leds_init(il);

	ret = ieee80211_register_hw(il->hw);
	if (ret) {
		IL_ERR("Failed to register hw (error %d)\n", ret);
		return ret;
	}
	il->mac80211_registered = 1;

	return 0;
}
5771 | ||
e7392364 SG |
/*
 * il4965_mac_start - mac80211 start() callback
 *
 * Brings the NIC up and waits up to UCODE_READY_TIMEOUT for the
 * runtime uCode to report S_READY. When rfkill is asserted, bring-up
 * is considered successful and the interface is marked open so the
 * user sees rfkill state changes.
 *
 * Returns 0 on success, error from __il4965_up(), or -ETIMEDOUT if
 * the uCode never reported ready.
 */
int
il4965_mac_start(struct ieee80211_hw *hw)
{
	struct il_priv *il = hw->priv;
	int ret;

	D_MAC80211("enter\n");

	/* we should be verifying the device is ready to be opened */
	mutex_lock(&il->mutex);
	ret = __il4965_up(il);
	mutex_unlock(&il->mutex);

	if (ret)
		return ret;

	if (il_is_rfkill(il))
		goto out;

	D_INFO("Start UP work done.\n");

	/* Wait for START_ALIVE from Run Time ucode. Otherwise callbacks from
	 * mac80211 will not be run successfully. */
	ret = wait_event_timeout(il->wait_command_queue,
				 test_bit(S_READY, &il->status),
				 UCODE_READY_TIMEOUT);
	if (!ret) {
		/* re-check the bit: wait may have timed out just as the
		 * uCode came alive */
		if (!test_bit(S_READY, &il->status)) {
			IL_ERR("START_ALIVE timeout after %dms.\n",
				jiffies_to_msecs(UCODE_READY_TIMEOUT));
			return -ETIMEDOUT;
		}
	}

	il4965_led_enable(il);

out:
	il->is_open = 1;
	D_MAC80211("leave\n");
	return 0;
}
5813 | ||
e7392364 SG |
/*
 * il4965_mac_stop - mac80211 stop() callback
 *
 * Takes the device down, flushes deferred work, then re-enables the
 * rfkill interrupt so userspace still sees rfkill changes while the
 * interface is down.
 */
void
il4965_mac_stop(struct ieee80211_hw *hw)
{
	struct il_priv *il = hw->priv;

	D_MAC80211("enter\n");

	if (!il->is_open)
		return;

	il->is_open = 0;

	il4965_down(il);

	flush_workqueue(il->workqueue);

	/* User space software may expect getting rfkill changes
	 * even if interface is down */
	_il_wr(il, CSR_INT, 0xFFFFFFFF);
	il_enable_rfkill_int(il);

	D_MAC80211("leave\n");
}
5837 | ||
e7392364 SG |
5838 | void |
5839 | il4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) | |
be663ab6 | 5840 | { |
46bc8d4b | 5841 | struct il_priv *il = hw->priv; |
be663ab6 | 5842 | |
58de00a4 | 5843 | D_MACDUMP("enter\n"); |
be663ab6 | 5844 | |
58de00a4 | 5845 | D_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len, |
e7392364 | 5846 | ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate); |
be663ab6 | 5847 | |
46bc8d4b | 5848 | if (il4965_tx_skb(il, skb)) |
be663ab6 WYG |
5849 | dev_kfree_skb_any(skb); |
5850 | ||
58de00a4 | 5851 | D_MACDUMP("leave\n"); |
be663ab6 WYG |
5852 | } |
5853 | ||
e7392364 SG |
5854 | void |
5855 | il4965_mac_update_tkip_key(struct ieee80211_hw *hw, struct ieee80211_vif *vif, | |
5856 | struct ieee80211_key_conf *keyconf, | |
5857 | struct ieee80211_sta *sta, u32 iv32, u16 * phase1key) | |
be663ab6 | 5858 | { |
46bc8d4b | 5859 | struct il_priv *il = hw->priv; |
be663ab6 | 5860 | |
58de00a4 | 5861 | D_MAC80211("enter\n"); |
be663ab6 | 5862 | |
83007196 | 5863 | il4965_update_tkip_key(il, keyconf, sta, iv32, phase1key); |
be663ab6 | 5864 | |
58de00a4 | 5865 | D_MAC80211("leave\n"); |
be663ab6 WYG |
5866 | } |
5867 | ||
e7392364 SG |
/*
 * il4965_mac_set_key - mac80211 set_key() callback
 *
 * Installs or removes a hardware crypto key. Group WEP keys with no
 * station attached and no key-mapping keys installed are treated as
 * legacy "default" WEP keys, which use a different host command.
 *
 * Returns 0 on success, -EOPNOTSUPP when sw_crypto is forced,
 * -EINVAL for an unknown station or command.
 */
int
il4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
		   struct ieee80211_vif *vif, struct ieee80211_sta *sta,
		   struct ieee80211_key_conf *key)
{
	struct il_priv *il = hw->priv;
	int ret;
	u8 sta_id;
	bool is_default_wep_key = false;

	D_MAC80211("enter\n");

	if (il->cfg->mod_params->sw_crypto) {
		D_MAC80211("leave - hwcrypto disabled\n");
		return -EOPNOTSUPP;
	}

	/* NULL sta maps to the broadcast station */
	sta_id = il_sta_id_or_broadcast(il, sta);
	if (sta_id == IL_INVALID_STATION)
		return -EINVAL;

	mutex_lock(&il->mutex);
	/* key changes must not race with an active scan */
	il_scan_cancel_timeout(il, 100);

	/*
	 * If we are getting WEP group key and we didn't receive any key mapping
	 * so far, we are in legacy wep mode (group key only), otherwise we are
	 * in 1X mode.
	 * In legacy wep mode, we use another host command to the uCode.
	 */
	if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	     key->cipher == WLAN_CIPHER_SUITE_WEP104) && !sta) {
		if (cmd == SET_KEY)
			is_default_wep_key = !il->_4965.key_mapping_keys;
		else
			is_default_wep_key =
			    (key->hw_key_idx == HW_KEY_DEFAULT);
	}

	switch (cmd) {
	case SET_KEY:
		if (is_default_wep_key)
			ret = il4965_set_default_wep_key(il, key);
		else
			ret = il4965_set_dynamic_key(il, key, sta_id);

		D_MAC80211("enable hwcrypto key\n");
		break;
	case DISABLE_KEY:
		if (is_default_wep_key)
			ret = il4965_remove_default_wep_key(il, key);
		else
			ret = il4965_remove_dynamic_key(il, key, sta_id);

		D_MAC80211("disable hwcrypto key\n");
		break;
	default:
		ret = -EINVAL;
	}

	mutex_unlock(&il->mutex);
	D_MAC80211("leave\n");

	return ret;
}
5933 | ||
e7392364 SG |
/*
 * il4965_mac_ampdu_action - mac80211 ampdu_action() callback
 *
 * Dispatches A-MPDU (aggregation) session start/stop for both RX and
 * TX. Stop failures are ignored while a shutdown is pending, since the
 * session is being torn down anyway.
 *
 * Returns 0 on success, -EACCES on non-11n SKUs, -EINVAL for
 * unhandled actions, or the underlying agg start/stop error.
 */
int
il4965_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			enum ieee80211_ampdu_mlme_action action,
			struct ieee80211_sta *sta, u16 tid, u16 * ssn,
			u8 buf_size)
{
	struct il_priv *il = hw->priv;
	int ret = -EINVAL;	/* stays -EINVAL for unhandled actions */

	D_HT("A-MPDU action on addr %pM tid %d\n", sta->addr, tid);

	if (!(il->cfg->sku & IL_SKU_N))
		return -EACCES;

	mutex_lock(&il->mutex);

	switch (action) {
	case IEEE80211_AMPDU_RX_START:
		D_HT("start Rx\n");
		ret = il4965_sta_rx_agg_start(il, sta, tid, *ssn);
		break;
	case IEEE80211_AMPDU_RX_STOP:
		D_HT("stop Rx\n");
		ret = il4965_sta_rx_agg_stop(il, sta, tid);
		if (test_bit(S_EXIT_PENDING, &il->status))
			ret = 0;
		break;
	case IEEE80211_AMPDU_TX_START:
		D_HT("start Tx\n");
		ret = il4965_tx_agg_start(il, vif, sta, tid, ssn);
		break;
	case IEEE80211_AMPDU_TX_STOP:
		D_HT("stop Tx\n");
		ret = il4965_tx_agg_stop(il, vif, sta, tid);
		if (test_bit(S_EXIT_PENDING, &il->status))
			ret = 0;
		break;
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		ret = 0;
		break;
	}
	mutex_unlock(&il->mutex);

	return ret;
}
5979 | ||
e7392364 SG |
/*
 * il4965_mac_sta_add - mac80211 sta_add() callback
 *
 * Adds the station to the uCode station table and initializes rate
 * scaling for it. In STA mode the peer is the AP, hence is_ap is
 * derived from vif->type == NL80211_IFTYPE_STATION.
 *
 * Returns 0 on success or the il_add_station_common() error.
 */
int
il4965_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta)
{
	struct il_priv *il = hw->priv;
	struct il_station_priv *sta_priv = (void *)sta->drv_priv;
	bool is_ap = vif->type == NL80211_IFTYPE_STATION;
	int ret;
	u8 sta_id;

	D_INFO("received request to add station %pM\n", sta->addr);
	mutex_lock(&il->mutex);
	D_INFO("proceeding to add station %pM\n", sta->addr);
	/* mark invalid until the uCode accepts the station */
	sta_priv->common.sta_id = IL_INVALID_STATION;

	atomic_set(&sta_priv->pending_frames, 0);

	ret =
	    il_add_station_common(il, sta->addr, is_ap, sta, &sta_id);
	if (ret) {
		IL_ERR("Unable to add station %pM (%d)\n", sta->addr, ret);
		/* Should we return success if return code is EEXIST ? */
		mutex_unlock(&il->mutex);
		return ret;
	}

	sta_priv->common.sta_id = sta_id;

	/* Initialize rate scaling */
	D_INFO("Initializing rate scaling for station %pM\n", sta->addr);
	il4965_rs_rate_init(il, sta, sta_id);
	mutex_unlock(&il->mutex);

	return 0;
}
6015 | ||
e7392364 SG |
/*
 * il4965_mac_channel_switch - mac80211 channel_switch() callback
 *
 * Prepares a CSA-driven channel switch: validates the target channel,
 * updates the staging RXON (channel, HT40 configuration, band flags)
 * under il->lock, then asks the uCode to perform the switch. On uCode
 * failure the pending state is rolled back and mac80211 is notified
 * via ieee80211_chswitch_done(..., false).
 */
void
il4965_mac_channel_switch(struct ieee80211_hw *hw,
			  struct ieee80211_channel_switch *ch_switch)
{
	struct il_priv *il = hw->priv;
	const struct il_channel_info *ch_info;
	struct ieee80211_conf *conf = &hw->conf;
	struct ieee80211_channel *channel = ch_switch->channel;
	struct il_ht_config *ht_conf = &il->current_ht_config;
	u16 ch;

	D_MAC80211("enter\n");

	mutex_lock(&il->mutex);

	if (il_is_rfkill(il))
		goto out;

	/* no switch while exiting, scanning, or one already in flight */
	if (test_bit(S_EXIT_PENDING, &il->status) ||
	    test_bit(S_SCANNING, &il->status) ||
	    test_bit(S_CHANNEL_SWITCH_PENDING, &il->status))
		goto out;

	if (!il_is_associated(il))
		goto out;

	if (!il->ops->set_channel_switch)
		goto out;

	ch = channel->hw_value;
	/* already on the target channel: nothing to do */
	if (le16_to_cpu(il->active.channel) == ch)
		goto out;

	ch_info = il_get_channel_info(il, channel->band, ch);
	if (!il_is_channel_valid(ch_info)) {
		D_MAC80211("invalid channel\n");
		goto out;
	}

	spin_lock_irq(&il->lock);

	il->current_ht_config.smps = conf->smps_mode;

	/* Configure HT40 channels */
	il->ht.enabled = conf_is_ht(conf);
	if (il->ht.enabled) {
		if (conf_is_ht40_minus(conf)) {
			il->ht.extension_chan_offset =
			    IEEE80211_HT_PARAM_CHA_SEC_BELOW;
			il->ht.is_40mhz = true;
		} else if (conf_is_ht40_plus(conf)) {
			il->ht.extension_chan_offset =
			    IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
			il->ht.is_40mhz = true;
		} else {
			il->ht.extension_chan_offset =
			    IEEE80211_HT_PARAM_CHA_SEC_NONE;
			il->ht.is_40mhz = false;
		}
	} else
		il->ht.is_40mhz = false;

	/* changing channel invalidates the staged RXON flags */
	if ((le16_to_cpu(il->staging.channel) != ch))
		il->staging.flags = 0;

	il_set_rxon_channel(il, channel);
	il_set_rxon_ht(il, ht_conf);
	il_set_flags_for_band(il, channel->band, il->vif);

	spin_unlock_irq(&il->lock);

	il_set_rate(il);
	/*
	 * at this point, staging_rxon has the
	 * configuration for channel switch
	 */
	set_bit(S_CHANNEL_SWITCH_PENDING, &il->status);
	il->switch_channel = cpu_to_le16(ch);
	if (il->ops->set_channel_switch(il, ch_switch)) {
		/* uCode rejected the switch: undo pending state */
		clear_bit(S_CHANNEL_SWITCH_PENDING, &il->status);
		il->switch_channel = 0;
		ieee80211_chswitch_done(il->vif, false);
	}

out:
	mutex_unlock(&il->mutex);
	D_MAC80211("leave\n");
}
6104 | ||
e7392364 SG |
/*
 * il4965_configure_filter - mac80211 configure_filter() callback
 *
 * Translates mac80211 FIF_* filter flags into staged RXON filter
 * flags. The change is not committed to the uCode here (a scan may be
 * running); it gets committed with the next RXON update. On return,
 * *total_flags is trimmed to the subset of filters we can honor.
 */
void
il4965_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
			unsigned int *total_flags, u64 multicast)
{
	struct il_priv *il = hw->priv;
	__le32 filter_or = 0, filter_nand = 0;

/* Accumulate RXON bits to set (filter_or) or clear (filter_nand)
 * depending on whether the FIF_* bit is requested. */
#define CHK(test, flag) do { \
	if (*total_flags & (test)) \
		filter_or |= (flag); \
	else \
		filter_nand |= (flag); \
	} while (0)

	D_MAC80211("Enter: changed: 0x%x, total: 0x%x\n", changed_flags,
		   *total_flags);

	CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
	/* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */
	CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
	CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);

#undef CHK

	mutex_lock(&il->mutex);

	il->staging.filter_flags &= ~filter_nand;
	il->staging.filter_flags |= filter_or;

	/*
	 * Not committing directly because hardware can perform a scan,
	 * but we'll eventually commit the filter flags change anyway.
	 */

	mutex_unlock(&il->mutex);

	/*
	 * Receiving all multicast frames is always enabled by the
	 * default flags setup in il_connection_init_rx_config()
	 * since we currently do not support programming multicast
	 * filters into the device.
	 */
	*total_flags &=
	    FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
	    FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
}
6151 | ||
6152 | /***************************************************************************** | |
6153 | * | |
6154 | * driver setup and teardown | |
6155 | * | |
6156 | *****************************************************************************/ | |
6157 | ||
e7392364 SG |
/*
 * il4965_bg_txpower_work - work handler recalibrating TX power
 *
 * Scheduled when the temperature has drifted enough that the uCode TX
 * power tables need to be resent.
 */
static void
il4965_bg_txpower_work(struct work_struct *work)
{
	struct il_priv *il = container_of(work, struct il_priv,
					  txpower_work);

	mutex_lock(&il->mutex);

	/* If a scan happened to start before we got here
	 * then just return; the stats notification will
	 * kick off another scheduled work to compensate for
	 * any temperature delta we missed here. */
	if (test_bit(S_EXIT_PENDING, &il->status) ||
	    test_bit(S_SCANNING, &il->status))
		goto out;

	/* Regardless of if we are associated, we must reconfigure the
	 * TX power since frames can be sent on non-radar channels while
	 * not associated */
	il->ops->send_tx_power(il);

	/* Update last_temperature to keep is_calib_needed from running
	 * when it isn't needed... */
	il->last_temperature = il->temperature;
out:
	mutex_unlock(&il->mutex);
}
6185 | ||
e7392364 SG |
/*
 * il4965_setup_deferred_work - set up workqueue, work items, timers
 * and the IRQ tasklet used by this driver
 *
 * Counterpart of il4965_cancel_deferred_work(). Called once during
 * probe, before interrupts can fire.
 */
static void
il4965_setup_deferred_work(struct il_priv *il)
{
	/* single-threaded so our work items never run concurrently */
	il->workqueue = create_singlethread_workqueue(DRV_NAME);

	init_waitqueue_head(&il->wait_command_queue);

	INIT_WORK(&il->restart, il4965_bg_restart);
	INIT_WORK(&il->rx_replenish, il4965_bg_rx_replenish);
	INIT_WORK(&il->run_time_calib_work, il4965_bg_run_time_calib_work);
	INIT_DELAYED_WORK(&il->init_alive_start, il4965_bg_init_alive_start);
	INIT_DELAYED_WORK(&il->alive_start, il4965_bg_alive_start);

	il_setup_scan_deferred_work(il);

	INIT_WORK(&il->txpower_work, il4965_bg_txpower_work);

	init_timer(&il->stats_periodic);
	il->stats_periodic.data = (unsigned long)il;
	il->stats_periodic.function = il4965_bg_stats_periodic;

	init_timer(&il->watchdog);
	il->watchdog.data = (unsigned long)il;
	il->watchdog.function = il_bg_watchdog;

	/* interrupt bottom half */
	tasklet_init(&il->irq_tasklet,
		     (void (*)(unsigned long))il4965_irq_tasklet,
		     (unsigned long)il);
}
6215 | ||
e7392364 SG |
/*
 * il4965_cancel_deferred_work - cancel pending work items and timers
 *
 * Note: alive_start is cancelled without _sync — presumably because
 * this can be called from paths where waiting on it could deadlock;
 * TODO confirm against callers.
 */
static void
il4965_cancel_deferred_work(struct il_priv *il)
{
	cancel_work_sync(&il->txpower_work);
	cancel_delayed_work_sync(&il->init_alive_start);
	cancel_delayed_work(&il->alive_start);
	cancel_work_sync(&il->run_time_calib_work);

	il_cancel_scan_deferred_work(il);

	del_timer_sync(&il->stats_periodic);
}
6228 | ||
e7392364 SG |
/*
 * il4965_init_hw_rates - fill the mac80211 legacy rate table
 *
 * @rates: array of at least RATE_COUNT_LEGACY entries to populate.
 *
 * il_rates[].ieee holds the rate in 500 kbps units, so *5 yields the
 * bitrate in 100 kbps units that mac80211 expects.
 */
static void
il4965_init_hw_rates(struct il_priv *il, struct ieee80211_rate *rates)
{
	int i;

	for (i = 0; i < RATE_COUNT_LEGACY; i++) {
		rates[i].bitrate = il_rates[i].ieee * 5;
		rates[i].hw_value = i;	/* Rate scaling will work on idxes */
		rates[i].hw_value_short = i;
		rates[i].flags = 0;
		if ((i >= IL_FIRST_CCK_RATE) && (i <= IL_LAST_CCK_RATE)) {
			/*
			 * If CCK != 1M then set short preamble rate flag.
			 */
			rates[i].flags |=
			    (il_rates[i].plcp ==
			     RATE_1M_PLCP) ? 0 : IEEE80211_RATE_SHORT_PREAMBLE;
		}
	}
}
e7392364 | 6249 | |
be663ab6 | 6250 | /* |
46bc8d4b | 6251 | * Acquire il->lock before calling this function ! |
be663ab6 | 6252 | */ |
e7392364 SG |
6253 | void |
6254 | il4965_set_wr_ptrs(struct il_priv *il, int txq_id, u32 idx) | |
be663ab6 | 6255 | { |
e7392364 | 6256 | il_wr(il, HBUS_TARG_WRPTR, (idx & 0xff) | (txq_id << 8)); |
0c2c8852 | 6257 | il_wr_prph(il, IL49_SCD_QUEUE_RDPTR(txq_id), idx); |
be663ab6 WYG |
6258 | } |
6259 | ||
e7392364 SG |
/*
 * il4965_tx_queue_set_status - (de)activate a TX queue in the
 * scheduler and bind it to a TX FIFO
 *
 * @tx_fifo_id: hardware FIFO the queue drains into
 * @scd_retry: non-zero for aggregation (BA) queues with scheduler
 *	retries, zero for plain AC queues
 */
void
il4965_tx_queue_set_status(struct il_priv *il, struct il_tx_queue *txq,
			   int tx_fifo_id, int scd_retry)
{
	int txq_id = txq->q.id;

	/* Find out whether to activate Tx queue */
	int active = test_bit(txq_id, &il->txq_ctx_active_msk) ? 1 : 0;

	/* Set up and activate */
	il_wr_prph(il, IL49_SCD_QUEUE_STATUS_BITS(txq_id),
		   (active << IL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		   (tx_fifo_id << IL49_SCD_QUEUE_STTS_REG_POS_TXF) |
		   (scd_retry << IL49_SCD_QUEUE_STTS_REG_POS_WSL) |
		   (scd_retry << IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
		   IL49_SCD_QUEUE_STTS_REG_MSK);

	txq->sched_retry = scd_retry;

	D_INFO("%s %s Queue %d on AC %d\n", active ? "Activate" : "Deactivate",
	       scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
}
6282 | ||
c39ae9fd SG |
/* mac80211 callbacks for the 4965: a mix of 4965-specific handlers
 * (il4965_*) and handlers shared with other iwlegacy devices (il_*). */
const struct ieee80211_ops il4965_mac_ops = {
	.tx = il4965_mac_tx,
	.start = il4965_mac_start,
	.stop = il4965_mac_stop,
	.add_interface = il_mac_add_interface,
	.remove_interface = il_mac_remove_interface,
	.change_interface = il_mac_change_interface,
	.config = il_mac_config,
	.configure_filter = il4965_configure_filter,
	.set_key = il4965_mac_set_key,
	.update_tkip_key = il4965_mac_update_tkip_key,
	.conf_tx = il_mac_conf_tx,
	.reset_tsf = il_mac_reset_tsf,
	.bss_info_changed = il_mac_bss_info_changed,
	.ampdu_action = il4965_mac_ampdu_action,
	.hw_scan = il_mac_hw_scan,
	.sta_add = il4965_mac_sta_add,
	.sta_remove = il_mac_sta_remove,
	.channel_switch = il4965_mac_channel_switch,
	.tx_last_beacon = il_mac_tx_last_beacon,
};
6304 | ||
e7392364 SG |
/*
 * il4965_init_drv - initialize driver-private state during probe
 *
 * Sets up locks, defaults, the channel map and band (geo) structures.
 * Counterpart of il4965_uninit_drv().
 *
 * Returns 0 on success; on failure, returns the error from channel
 * map or geo init with any partial allocation rolled back.
 */
static int
il4965_init_drv(struct il_priv *il)
{
	int ret;

	spin_lock_init(&il->sta_lock);
	spin_lock_init(&il->hcmd_lock);

	INIT_LIST_HEAD(&il->free_frames);

	mutex_init(&il->mutex);

	il->ieee_channels = NULL;
	il->ieee_rates = NULL;
	il->band = IEEE80211_BAND_2GHZ;

	il->iw_mode = NL80211_IFTYPE_STATION;
	il->current_ht_config.smps = IEEE80211_SMPS_STATIC;
	il->missed_beacon_threshold = IL_MISSED_BEACON_THRESHOLD_DEF;

	/* initialize force reset */
	il->force_reset.reset_duration = IL_DELAY_NEXT_FORCE_FW_RELOAD;

	/* Choose which receivers/antennas to use */
	if (il->ops->set_rxon_chain)
		il->ops->set_rxon_chain(il);

	il_init_scan_params(il);

	ret = il_init_channel_map(il);
	if (ret) {
		IL_ERR("initializing regulatory failed: %d\n", ret);
		goto err;
	}

	ret = il_init_geos(il);
	if (ret) {
		IL_ERR("initializing geos failed: %d\n", ret);
		goto err_free_channel_map;
	}
	il4965_init_hw_rates(il, il->ieee_rates);

	return 0;

err_free_channel_map:
	il_free_channel_map(il);
err:
	return ret;
}
6354 | ||
/*
 * il4965_uninit_drv - release software state set up by il4965_init_drv()
 *
 * Frees calibration results, the geo tables, the channel map and the
 * scan command buffer.  kfree(NULL) is a no-op, so a never-allocated
 * scan_cmd is safe here.
 */
static void
il4965_uninit_drv(struct il_priv *il)
{
	il4965_calib_free_results(il);
	il_free_geos(il);
	il_free_channel_map(il);
	kfree(il->scan_cmd);
}
6363 | ||
/*
 * il4965_hw_detect - cache hardware revision identifiers
 *
 * Reads the CSR hardware revision registers and the PCI revision ID
 * into @il so later code can key off the exact silicon stepping.
 */
static void
il4965_hw_detect(struct il_priv *il)
{
	il->hw_rev = _il_rd(il, CSR_HW_REV);
	il->hw_wa_rev = _il_rd(il, CSR_HW_REV_WA_REG);
	il->rev_id = il->pci_dev->revision;
	D_INFO("HW Revision ID = 0x%X\n", il->rev_id);
}
6372 | ||
/*
 * Default energy/auto-correlation detection thresholds for the 4965 PHY.
 * Installed as il->hw_params.sens by il4965_set_hw_params() and used by
 * the runtime sensitivity calibration code.  Values are raw hardware
 * units; *_mrc variants apply when maximal-ratio combining is active.
 */
static struct il_sensitivity_ranges il4965_sensitivity = {
	.min_nrg_cck = 97,
	.max_nrg_cck = 0,	/* not used, set to 0 */

	.auto_corr_min_ofdm = 85,
	.auto_corr_min_ofdm_mrc = 170,
	.auto_corr_min_ofdm_x1 = 105,
	.auto_corr_min_ofdm_mrc_x1 = 220,

	.auto_corr_max_ofdm = 120,
	.auto_corr_max_ofdm_mrc = 210,
	.auto_corr_max_ofdm_x1 = 140,
	.auto_corr_max_ofdm_mrc_x1 = 270,

	.auto_corr_min_cck = 125,
	.auto_corr_max_cck = 200,
	.auto_corr_min_cck_mrc = 200,
	.auto_corr_max_cck_mrc = 400,

	.nrg_th_cck = 100,
	.nrg_th_ofdm = 100,

	.barker_corr_th_min = 190,
	.barker_corr_th_min_mrc = 390,
	.nrg_th_cca = 62,
};
6399 | ||
/*
 * il4965_set_hw_params - fill il->hw_params with 4965 device constants
 *
 * Applies module-parameter overrides (amsdu_size_8K, disable_11n,
 * queues_num) on top of the per-device cfg, then records queue sizes,
 * DMA channel counts, station limits and calibration thresholds that
 * the rest of the driver reads from il->hw_params.
 */
static void
il4965_set_hw_params(struct il_priv *il)
{
	il->hw_params.bcast_id = IL4965_BROADCAST_ID;
	il->hw_params.max_rxq_size = RX_QUEUE_SIZE;
	il->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
	/* Receive buffer order follows the A-MSDU size module parameter. */
	if (il->cfg->mod_params->amsdu_size_8K)
		il->hw_params.rx_page_order = get_order(IL_RX_BUF_SIZE_8K);
	else
		il->hw_params.rx_page_order = get_order(IL_RX_BUF_SIZE_4K);

	il->hw_params.max_beacon_itrvl = IL_MAX_UCODE_BEACON_INTERVAL;

	if (il->cfg->mod_params->disable_11n)
		il->cfg->sku &= ~IL_SKU_N;

	/* Honor the queues_num module parameter when it is in range. */
	if (il->cfg->mod_params->num_of_queues >= IL_MIN_NUM_QUEUES &&
	    il->cfg->mod_params->num_of_queues <= IL49_NUM_QUEUES)
		il->cfg->num_of_queues =
		    il->cfg->mod_params->num_of_queues;

	il->hw_params.max_txq_num = il->cfg->num_of_queues;
	il->hw_params.dma_chnl_num = FH49_TCSR_CHNL_NUM;
	/* Byte-count tables: one per Tx queue. */
	il->hw_params.scd_bc_tbls_size =
	    il->cfg->num_of_queues *
	    sizeof(struct il4965_scd_bc_tbl);

	il->hw_params.tfd_size = sizeof(struct il_tfd);
	il->hw_params.max_stations = IL4965_STATION_COUNT;
	il->hw_params.max_data_size = IL49_RTC_DATA_SIZE;
	il->hw_params.max_inst_size = IL49_RTC_INST_SIZE;
	il->hw_params.max_bsm_size = BSM_SRAM_SIZE;
	/* 4965 supports HT40 on the 5 GHz band only. */
	il->hw_params.ht40_channel = BIT(IEEE80211_BAND_5GHZ);

	il->hw_params.rx_wrt_ptr_reg = FH49_RSCSR_CHNL0_WPTR;

	il->hw_params.tx_chains_num = il4965_num_of_ant(il->cfg->valid_tx_ant);
	il->hw_params.rx_chains_num = il4965_num_of_ant(il->cfg->valid_rx_ant);
	il->hw_params.valid_tx_ant = il->cfg->valid_tx_ant;
	il->hw_params.valid_rx_ant = il->cfg->valid_rx_ant;

	il->hw_params.ct_kill_threshold =
	    CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY);

	il->hw_params.sens = &il4965_sensitivity;
	il->hw_params.beacon_time_tsf_bits = IL4965_EXT_BEACON_TIME_POS;
}
6447 | ||
be663ab6 | 6448 | static int |
e2ebc833 | 6449 | il4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
be663ab6 | 6450 | { |
7c2cde2e | 6451 | int err = 0; |
46bc8d4b | 6452 | struct il_priv *il; |
be663ab6 | 6453 | struct ieee80211_hw *hw; |
e2ebc833 | 6454 | struct il_cfg *cfg = (struct il_cfg *)(ent->driver_data); |
be663ab6 WYG |
6455 | unsigned long flags; |
6456 | u16 pci_cmd; | |
6457 | ||
6458 | /************************ | |
6459 | * 1. Allocating HW data | |
6460 | ************************/ | |
6461 | ||
c39ae9fd | 6462 | hw = ieee80211_alloc_hw(sizeof(struct il_priv), &il4965_mac_ops); |
be663ab6 WYG |
6463 | if (!hw) { |
6464 | err = -ENOMEM; | |
6465 | goto out; | |
6466 | } | |
46bc8d4b | 6467 | il = hw->priv; |
c39ae9fd | 6468 | il->hw = hw; |
be663ab6 WYG |
6469 | SET_IEEE80211_DEV(hw, &pdev->dev); |
6470 | ||
58de00a4 | 6471 | D_INFO("*** LOAD DRIVER ***\n"); |
46bc8d4b | 6472 | il->cfg = cfg; |
c39ae9fd | 6473 | il->ops = &il4965_ops; |
93b7654e SG |
6474 | #ifdef CONFIG_IWLEGACY_DEBUGFS |
6475 | il->debugfs_ops = &il4965_debugfs_ops; | |
6476 | #endif | |
46bc8d4b SG |
6477 | il->pci_dev = pdev; |
6478 | il->inta_mask = CSR_INI_SET_MASK; | |
be663ab6 | 6479 | |
be663ab6 WYG |
6480 | /************************** |
6481 | * 2. Initializing PCI bus | |
6482 | **************************/ | |
e7392364 SG |
6483 | pci_disable_link_state(pdev, |
6484 | PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | | |
6485 | PCIE_LINK_STATE_CLKPM); | |
be663ab6 WYG |
6486 | |
6487 | if (pci_enable_device(pdev)) { | |
6488 | err = -ENODEV; | |
6489 | goto out_ieee80211_free_hw; | |
6490 | } | |
6491 | ||
6492 | pci_set_master(pdev); | |
6493 | ||
6494 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36)); | |
6495 | if (!err) | |
6496 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36)); | |
6497 | if (err) { | |
6498 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | |
6499 | if (!err) | |
e7392364 SG |
6500 | err = |
6501 | pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); | |
be663ab6 WYG |
6502 | /* both attempts failed: */ |
6503 | if (err) { | |
9406f797 | 6504 | IL_WARN("No suitable DMA available.\n"); |
be663ab6 WYG |
6505 | goto out_pci_disable_device; |
6506 | } | |
6507 | } | |
6508 | ||
6509 | err = pci_request_regions(pdev, DRV_NAME); | |
6510 | if (err) | |
6511 | goto out_pci_disable_device; | |
6512 | ||
46bc8d4b | 6513 | pci_set_drvdata(pdev, il); |
be663ab6 | 6514 | |
be663ab6 WYG |
6515 | /*********************** |
6516 | * 3. Read REV register | |
6517 | ***********************/ | |
a5f16137 | 6518 | il->hw_base = pci_ioremap_bar(pdev, 0); |
46bc8d4b | 6519 | if (!il->hw_base) { |
be663ab6 WYG |
6520 | err = -ENODEV; |
6521 | goto out_pci_release_regions; | |
6522 | } | |
6523 | ||
58de00a4 | 6524 | D_INFO("pci_resource_len = 0x%08llx\n", |
e7392364 | 6525 | (unsigned long long)pci_resource_len(pdev, 0)); |
58de00a4 | 6526 | D_INFO("pci_resource_base = %p\n", il->hw_base); |
be663ab6 WYG |
6527 | |
6528 | /* these spin locks will be used in apm_ops.init and EEPROM access | |
6529 | * we should init now | |
6530 | */ | |
46bc8d4b SG |
6531 | spin_lock_init(&il->reg_lock); |
6532 | spin_lock_init(&il->lock); | |
be663ab6 WYG |
6533 | |
6534 | /* | |
6535 | * stop and reset the on-board processor just in case it is in a | |
6536 | * strange state ... like being left stranded by a primary kernel | |
6537 | * and this is now the kdump kernel trying to start up | |
6538 | */ | |
841b2cca | 6539 | _il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); |
be663ab6 | 6540 | |
46bc8d4b | 6541 | il4965_hw_detect(il); |
e7392364 | 6542 | IL_INFO("Detected %s, REV=0x%X\n", il->cfg->name, il->hw_rev); |
be663ab6 WYG |
6543 | |
6544 | /* We disable the RETRY_TIMEOUT register (0x41) to keep | |
6545 | * PCI Tx retries from interfering with C3 CPU state */ | |
6546 | pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); | |
6547 | ||
46bc8d4b SG |
6548 | il4965_prepare_card_hw(il); |
6549 | if (!il->hw_ready) { | |
9406f797 | 6550 | IL_WARN("Failed, HW not ready\n"); |
be663ab6 WYG |
6551 | goto out_iounmap; |
6552 | } | |
6553 | ||
6554 | /***************** | |
6555 | * 4. Read EEPROM | |
6556 | *****************/ | |
6557 | /* Read the EEPROM */ | |
46bc8d4b | 6558 | err = il_eeprom_init(il); |
be663ab6 | 6559 | if (err) { |
9406f797 | 6560 | IL_ERR("Unable to init EEPROM\n"); |
be663ab6 WYG |
6561 | goto out_iounmap; |
6562 | } | |
46bc8d4b | 6563 | err = il4965_eeprom_check_version(il); |
be663ab6 WYG |
6564 | if (err) |
6565 | goto out_free_eeprom; | |
6566 | ||
6567 | if (err) | |
6568 | goto out_free_eeprom; | |
6569 | ||
6570 | /* extract MAC Address */ | |
46bc8d4b | 6571 | il4965_eeprom_get_mac(il, il->addresses[0].addr); |
58de00a4 | 6572 | D_INFO("MAC address: %pM\n", il->addresses[0].addr); |
46bc8d4b SG |
6573 | il->hw->wiphy->addresses = il->addresses; |
6574 | il->hw->wiphy->n_addresses = 1; | |
be663ab6 WYG |
6575 | |
6576 | /************************ | |
6577 | * 5. Setup HW constants | |
6578 | ************************/ | |
1023f3bc | 6579 | il4965_set_hw_params(il); |
be663ab6 WYG |
6580 | |
6581 | /******************* | |
46bc8d4b | 6582 | * 6. Setup il |
be663ab6 WYG |
6583 | *******************/ |
6584 | ||
46bc8d4b | 6585 | err = il4965_init_drv(il); |
be663ab6 WYG |
6586 | if (err) |
6587 | goto out_free_eeprom; | |
46bc8d4b | 6588 | /* At this point both hw and il are initialized. */ |
be663ab6 WYG |
6589 | |
6590 | /******************** | |
6591 | * 7. Setup services | |
6592 | ********************/ | |
46bc8d4b SG |
6593 | spin_lock_irqsave(&il->lock, flags); |
6594 | il_disable_interrupts(il); | |
6595 | spin_unlock_irqrestore(&il->lock, flags); | |
be663ab6 | 6596 | |
46bc8d4b | 6597 | pci_enable_msi(il->pci_dev); |
be663ab6 | 6598 | |
e7392364 | 6599 | err = request_irq(il->pci_dev->irq, il_isr, IRQF_SHARED, DRV_NAME, il); |
be663ab6 | 6600 | if (err) { |
9406f797 | 6601 | IL_ERR("Error allocating IRQ %d\n", il->pci_dev->irq); |
be663ab6 WYG |
6602 | goto out_disable_msi; |
6603 | } | |
6604 | ||
46bc8d4b | 6605 | il4965_setup_deferred_work(il); |
d0c72347 | 6606 | il4965_setup_handlers(il); |
be663ab6 WYG |
6607 | |
6608 | /********************************************* | |
6609 | * 8. Enable interrupts and read RFKILL state | |
6610 | *********************************************/ | |
6611 | ||
a078a1fd | 6612 | /* enable rfkill interrupt: hw bug w/a */ |
46bc8d4b | 6613 | pci_read_config_word(il->pci_dev, PCI_COMMAND, &pci_cmd); |
be663ab6 WYG |
6614 | if (pci_cmd & PCI_COMMAND_INTX_DISABLE) { |
6615 | pci_cmd &= ~PCI_COMMAND_INTX_DISABLE; | |
46bc8d4b | 6616 | pci_write_config_word(il->pci_dev, PCI_COMMAND, pci_cmd); |
be663ab6 WYG |
6617 | } |
6618 | ||
46bc8d4b | 6619 | il_enable_rfkill_int(il); |
be663ab6 WYG |
6620 | |
6621 | /* If platform's RF_KILL switch is NOT set to KILL */ | |
e7392364 | 6622 | if (_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) |
a6766ccd | 6623 | clear_bit(S_RF_KILL_HW, &il->status); |
be663ab6 | 6624 | else |
a6766ccd | 6625 | set_bit(S_RF_KILL_HW, &il->status); |
be663ab6 | 6626 | |
46bc8d4b | 6627 | wiphy_rfkill_set_hw_state(il->hw->wiphy, |
e7392364 | 6628 | test_bit(S_RF_KILL_HW, &il->status)); |
be663ab6 | 6629 | |
46bc8d4b | 6630 | il_power_initialize(il); |
be663ab6 | 6631 | |
46bc8d4b | 6632 | init_completion(&il->_4965.firmware_loading_complete); |
be663ab6 | 6633 | |
46bc8d4b | 6634 | err = il4965_request_firmware(il, true); |
be663ab6 WYG |
6635 | if (err) |
6636 | goto out_destroy_workqueue; | |
6637 | ||
6638 | return 0; | |
6639 | ||
e7392364 | 6640 | out_destroy_workqueue: |
46bc8d4b SG |
6641 | destroy_workqueue(il->workqueue); |
6642 | il->workqueue = NULL; | |
6643 | free_irq(il->pci_dev->irq, il); | |
e7392364 | 6644 | out_disable_msi: |
46bc8d4b SG |
6645 | pci_disable_msi(il->pci_dev); |
6646 | il4965_uninit_drv(il); | |
e7392364 | 6647 | out_free_eeprom: |
46bc8d4b | 6648 | il_eeprom_free(il); |
e7392364 | 6649 | out_iounmap: |
a5f16137 | 6650 | iounmap(il->hw_base); |
e7392364 | 6651 | out_pci_release_regions: |
be663ab6 WYG |
6652 | pci_set_drvdata(pdev, NULL); |
6653 | pci_release_regions(pdev); | |
e7392364 | 6654 | out_pci_disable_device: |
be663ab6 | 6655 | pci_disable_device(pdev); |
e7392364 | 6656 | out_ieee80211_free_hw: |
46bc8d4b | 6657 | ieee80211_free_hw(il->hw); |
e7392364 | 6658 | out: |
be663ab6 WYG |
6659 | return err; |
6660 | } | |
6661 | ||
/*
 * il4965_pci_remove - PCI remove entry point
 *
 * Tears down everything il4965_pci_probe() set up, in the reverse
 * order where it matters: wait for async firmware load to finish,
 * unregister from mac80211, quiesce the hardware and interrupts,
 * free queues/EEPROM, and only then destroy the workqueue (which
 * mac80211 unregistration may still have flushed into).
 */
static void __devexit
il4965_pci_remove(struct pci_dev *pdev)
{
	struct il_priv *il = pci_get_drvdata(pdev);
	unsigned long flags;

	if (!il)
		return;

	/* Firmware loading completes asynchronously; don't race it. */
	wait_for_completion(&il->_4965.firmware_loading_complete);

	D_INFO("*** UNLOAD DRIVER ***\n");

	il_dbgfs_unregister(il);
	sysfs_remove_group(&pdev->dev.kobj, &il_attribute_group);

	/* ieee80211_unregister_hw call will cause il_mac_stop
	 * to be called and il4965_down since we are removing the device
	 * we need to set S_EXIT_PENDING bit.
	 */
	set_bit(S_EXIT_PENDING, &il->status);

	il_leds_exit(il);

	if (il->mac80211_registered) {
		ieee80211_unregister_hw(il->hw);
		il->mac80211_registered = 0;
	} else {
		il4965_down(il);
	}

	/*
	 * Make sure device is reset to low power before unloading driver.
	 * This may be redundant with il4965_down(), but there are paths to
	 * run il4965_down() without calling apm_ops.stop(), and there are
	 * paths to avoid running il4965_down() at all before leaving driver.
	 * This (inexpensive) call *makes sure* device is reset.
	 */
	il_apm_stop(il);

	/* make sure we flush any pending irq or
	 * tasklet for the driver
	 */
	spin_lock_irqsave(&il->lock, flags);
	il_disable_interrupts(il);
	spin_unlock_irqrestore(&il->lock, flags);

	il4965_synchronize_irq(il);

	il4965_dealloc_ucode_pci(il);

	if (il->rxq.bd)
		il4965_rx_queue_free(il, &il->rxq);
	il4965_hw_txq_ctx_free(il);

	il_eeprom_free(il);

	/*netif_stop_queue(dev); */
	flush_workqueue(il->workqueue);

	/* ieee80211_unregister_hw calls il_mac_stop, which flushes
	 * il->workqueue... so we can't take down the workqueue
	 * until now... */
	destroy_workqueue(il->workqueue);
	il->workqueue = NULL;

	free_irq(il->pci_dev->irq, il);
	pci_disable_msi(il->pci_dev);
	iounmap(il->hw_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	il4965_uninit_drv(il);

	/* dev_kfree_skb(NULL) is a no-op, safe if no beacon was cached. */
	dev_kfree_skb(il->beacon_skb);

	ieee80211_free_hw(il->hw);
}
6741 | ||
/*
 * Activate/Deactivate Tx DMA/FIFO channels according to the tx fifos mask;
 * each bit in @mask enables the corresponding scheduler Tx FIFO.
 * Must be called under il->lock and mac access.
 */
void
il4965_txq_set_sched(struct il_priv *il, u32 mask)
{
	il_wr_prph(il, IL49_SCD_TXFACT, mask);
}
6751 | ||
6752 | /***************************************************************************** | |
6753 | * | |
6754 | * driver and module entry point | |
6755 | * | |
6756 | *****************************************************************************/ | |
6757 | ||
/* Hardware specific file defines the PCI IDs table for that hardware module */
/* Intel vendor ID is implied by IL_PCI_DEVICE; 0x4229/0x4230 are the
 * 4965AGN device IDs, both bound to the same il4965_cfg. */
static DEFINE_PCI_DEVICE_TABLE(il4965_hw_card_ids) = {
	{IL_PCI_DEVICE(0x4229, PCI_ANY_ID, il4965_cfg)},
	{IL_PCI_DEVICE(0x4230, PCI_ANY_ID, il4965_cfg)},
	{0}
};
MODULE_DEVICE_TABLE(pci, il4965_hw_card_ids);
be663ab6 | 6765 | |
e2ebc833 | 6766 | static struct pci_driver il4965_driver = { |
be663ab6 | 6767 | .name = DRV_NAME, |
e2ebc833 SG |
6768 | .id_table = il4965_hw_card_ids, |
6769 | .probe = il4965_pci_probe, | |
6770 | .remove = __devexit_p(il4965_pci_remove), | |
6771 | .driver.pm = IL_LEGACY_PM_OPS, | |
be663ab6 WYG |
6772 | }; |
6773 | ||
e7392364 SG |
6774 | static int __init |
6775 | il4965_init(void) | |
be663ab6 WYG |
6776 | { |
6777 | ||
6778 | int ret; | |
6779 | pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n"); | |
6780 | pr_info(DRV_COPYRIGHT "\n"); | |
6781 | ||
e2ebc833 | 6782 | ret = il4965_rate_control_register(); |
be663ab6 WYG |
6783 | if (ret) { |
6784 | pr_err("Unable to register rate control algorithm: %d\n", ret); | |
6785 | return ret; | |
6786 | } | |
6787 | ||
e2ebc833 | 6788 | ret = pci_register_driver(&il4965_driver); |
be663ab6 WYG |
6789 | if (ret) { |
6790 | pr_err("Unable to initialize PCI module\n"); | |
6791 | goto error_register; | |
6792 | } | |
6793 | ||
6794 | return ret; | |
6795 | ||
6796 | error_register: | |
e2ebc833 | 6797 | il4965_rate_control_unregister(); |
be663ab6 WYG |
6798 | return ret; |
6799 | } | |
6800 | ||
/*
 * il4965_exit - module exit: reverse of il4965_init()
 *
 * Unregister the PCI driver first (triggers il4965_pci_remove() for
 * bound devices), then drop the rate control algorithm.
 */
static void __exit
il4965_exit(void)
{
	pci_unregister_driver(&il4965_driver);
	il4965_rate_control_unregister();
}
6807 | ||
module_exit(il4965_exit);
module_init(il4965_init);

/* Module parameters; all map into il4965_mod_params, which
 * il4965_set_hw_params() and friends consult at probe time. */
#ifdef CONFIG_IWLEGACY_DEBUG
module_param_named(debug, il_debug_level, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "debug output mask");
#endif

module_param_named(swcrypto, il4965_mod_params.sw_crypto, int, S_IRUGO);
MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
module_param_named(queues_num, il4965_mod_params.num_of_queues, int, S_IRUGO);
MODULE_PARM_DESC(queues_num, "number of hw queues.");
module_param_named(11n_disable, il4965_mod_params.disable_11n, int, S_IRUGO);
MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
module_param_named(amsdu_size_8K, il4965_mod_params.amsdu_size_8K, int,
		   S_IRUGO);
MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
module_param_named(fw_restart, il4965_mod_params.restart_fw, int, S_IRUGO);
MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
be663ab6 | 6826 | MODULE_PARM_DESC(fw_restart, "restart firmware in case of error"); |