iwlwifi: removes the RUN_TIME_CALIB ifdef
drivers/net/wireless/iwlwifi/iwl-rx.c
/******************************************************************************
 *
 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

#include <net/mac80211.h>
#include "iwl-eeprom.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-sta.h"
#include "iwl-io.h"
#include "iwl-calib.h"
#include "iwl-helpers.h"

/************************** RX-FUNCTIONS ****************************/
/*
 * Rx theory of operation
 *
 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
 * each of which points to a Receive Buffer to be filled by the NIC. These get
 * used not only for Rx frames, but for any command response or notification
 * from the NIC. The driver and NIC manage the Rx buffers by means
 * of indexes into the circular buffer.
 *
 * Rx Queue Indexes
 * The host/firmware share two index registers for managing the Rx buffers.
 *
 * The READ index maps to the first position that the firmware may be writing
 * to -- the driver can read up to (but not including) this position and get
 * good data.
 * The READ index is managed by the firmware once the card is enabled.
 *
 * The WRITE index maps to the last position the driver has read from -- the
 * position preceding WRITE is the last slot in which the firmware can place
 * a packet.
 *
 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
 * WRITE = READ.
 *
 * During initialization, the host sets up the READ queue position to the first
 * INDEX position, and WRITE to the last (READ - 1, wrapped).
 *
 * When the firmware places a packet in a buffer, it will advance the READ
 * index and fire the RX interrupt. The driver can then query the READ index
 * and process as many packets as possible, moving the WRITE index forward as
 * it resets the Rx queue buffers with new memory.
 *
 * The management in the driver is as follows:
 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
 *   iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
 *   to replenish the iwl->rxq->rx_free.
 * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the
 *   iwl->rxq is replenished and the READ INDEX is updated (updating the
 *   'processed' and 'read' driver indexes as well)
 * + A received packet is processed and handed to the kernel network stack,
 *   detached from the iwl->rxq. The driver 'processed' index is updated.
 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
 *   list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
 *   INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
 *   were enough free buffers and RX_STALLED is set it is cleared.
 *
 *
 * Driver sequence:
 *
 * iwl_rx_queue_alloc()   Allocates rx_free
 * iwl_rx_replenish()     Replenishes rx_free list from rx_used, and calls
 *                        iwl_rx_queue_restock
 * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx
 *                        queue, updates firmware pointers, and updates
 *                        the WRITE index. If insufficient rx_free buffers
 *                        are available, schedules iwl_rx_replenish
 *
 * -- enable interrupts --
 * ISR - iwl_rx()         Detach iwl_rx_mem_buffers from pool up to the
 *                        READ INDEX, detaching the SKB from the pool.
 *                        Moves the packet buffer from queue to rx_used.
 *                        Calls iwl_rx_queue_restock to refill any empty
 *                        slots.
 * ...
 *
 */
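
/*
 * A minimal illustrative sketch (not part of the driver) of the empty/full
 * rules above. The helper names are hypothetical; RX_QUEUE_MASK is the wrap
 * mask used elsewhere in this file.
 */
static inline int iwl_rxq_empty_sketch(u32 read, u32 write)
{
	/* Empty: WRITE sits one slot behind READ (READ - 1, wrapped) */
	return write == ((read - 1) & RX_QUEUE_MASK);
}

static inline int iwl_rxq_full_sketch(u32 read, u32 write)
{
	/* Full: WRITE has caught up with READ */
	return write == read;
}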

/**
 * iwl_rx_queue_space - Return number of free slots available in queue.
 */
int iwl_rx_queue_space(const struct iwl_rx_queue *q)
{
	int s = q->read - q->write;
	if (s <= 0)
		s += RX_QUEUE_SIZE;
	/* keep some buffer so a full queue is never mistaken for an empty one */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}
EXPORT_SYMBOL(iwl_rx_queue_space);
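
/*
 * Worked example: with RX_QUEUE_SIZE = 256, read = 10 and write = 250 gives
 * s = (10 - 250) + 256 - 2 = 14 free slots. The two slots held back are what
 * keep a completely full queue (write == read) distinguishable from an
 * empty one.
 */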

/**
 * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
 */
int iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q)
{
	u32 reg = 0;
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);

	if (q->need_update == 0)
		goto exit_unlock;

	/* If power-saving is in use, make sure device is awake */
	if (test_bit(STATUS_POWER_PMI, &priv->status)) {
		reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			iwl_set_bit(priv, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			goto exit_unlock;
		}

		ret = iwl_grab_nic_access(priv);
		if (ret)
			goto exit_unlock;

		/* Device expects a multiple of 8 */
		iwl_write_direct32(priv, FH_RSCSR_CHNL0_WPTR,
				   q->write & ~0x7);
		iwl_release_nic_access(priv);

	/* Else device is assumed to be awake */
	} else
		/* Device expects a multiple of 8 */
		iwl_write32(priv, FH_RSCSR_CHNL0_WPTR, q->write & ~0x7);

	q->need_update = 0;

 exit_unlock:
	spin_unlock_irqrestore(&q->lock, flags);
	return ret;
}
EXPORT_SYMBOL(iwl_rx_queue_update_write_ptr);

/**
 * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
 */
static inline __le32 iwl_dma_addr2rbd_ptr(struct iwl_priv *priv,
					  dma_addr_t dma_addr)
{
	return cpu_to_le32((u32)(dma_addr >> 8));
}
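
/*
 * Example: a receive buffer at bus address 0x12345600 is stored in its RBD
 * as 0x00123456. Since the low 8 bits are dropped, the device effectively
 * assumes Rx buffers are 256-byte aligned.
 */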

/**
 * iwl_rx_queue_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' index forward to catch up with 'processed', and
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
int iwl_rx_queue_restock(struct iwl_priv *priv)
{
	struct iwl_rx_queue *rxq = &priv->rxq;
	struct list_head *element;
	struct iwl_rx_mem_buffer *rxb;
	unsigned long flags;
	int write;
	int ret = 0;

	spin_lock_irqsave(&rxq->lock, flags);
	write = rxq->write & ~0x7;
	while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
		/* Get next free Rx buffer, remove from free list */
		element = rxq->rx_free.next;
		rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
		list_del(element);

		/* Point to Rx buffer via next RBD in circular buffer */
		rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(priv, rxb->dma_addr);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock_irqrestore(&rxq->lock, flags);
	/* If the pre-allocated buffer pool is dropping low, schedule to
	 * refill it */
	if (rxq->free_count <= RX_LOW_WATERMARK)
		queue_work(priv->workqueue, &priv->rx_replenish);

	/* If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8. */
	if ((write != (rxq->write & ~0x7))
	    || (abs(rxq->write - rxq->read) > 7)) {
		spin_lock_irqsave(&rxq->lock, flags);
		rxq->need_update = 1;
		spin_unlock_irqrestore(&rxq->lock, flags);
		ret = iwl_rx_queue_update_write_ptr(priv, rxq);
	}

	return ret;
}
EXPORT_SYMBOL(iwl_rx_queue_restock);

/**
 * iwl_rx_allocate - Move all used buffers from rx_used to rx_free
 *
 * An SKB is allocated for each buffer as it moves to rx_free; if allocation
 * fails, the remaining buffers are left on rx_used for a later retry.
 */
void iwl_rx_allocate(struct iwl_priv *priv)
{
	struct iwl_rx_queue *rxq = &priv->rxq;
	struct list_head *element;
	struct iwl_rx_mem_buffer *rxb;
	unsigned long flags;
	spin_lock_irqsave(&rxq->lock, flags);
	while (!list_empty(&rxq->rx_used)) {
		element = rxq->rx_used.next;
		rxb = list_entry(element, struct iwl_rx_mem_buffer, list);

		/* Alloc a new receive buffer */
		rxb->skb = alloc_skb(priv->hw_params.rx_buf_size,
				     __GFP_NOWARN | GFP_ATOMIC);
		if (!rxb->skb) {
			if (net_ratelimit())
				printk(KERN_CRIT DRV_NAME
				       ": Can not allocate SKB buffers\n");
			/* We don't reschedule replenish work here -- we will
			 * call the restock method and if it still needs
			 * more buffers it will schedule replenish */
			break;
		}
		priv->alloc_rxb_skb++;
		list_del(element);

		/* Get physical address of RB/SKB */
		rxb->dma_addr =
		    pci_map_single(priv->pci_dev, rxb->skb->data,
				   priv->hw_params.rx_buf_size, PCI_DMA_FROMDEVICE);
		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;
	}
	spin_unlock_irqrestore(&rxq->lock, flags);
}
EXPORT_SYMBOL(iwl_rx_allocate);

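/**
 * iwl_rx_replenish - Allocate fresh buffers and restock the Rx queue
 *
 * Calls iwl_rx_allocate, then restocks via iwl_rx_queue_restock. This is
 * called as a scheduled work item (except during initialization). Note that
 * iwl_rx_allocate takes only rxq->lock, so SKB allocation runs outside
 * priv->lock; only the restock step is serialized against the driver lock.
 */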
void iwl_rx_replenish(struct iwl_priv *priv)
{
	unsigned long flags;

	iwl_rx_allocate(priv);

	spin_lock_irqsave(&priv->lock, flags);
	iwl_rx_queue_restock(priv);
	spin_unlock_irqrestore(&priv->lock, flags);
}
EXPORT_SYMBOL(iwl_rx_replenish);

/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
 * If an SKB has been detached, the POOL needs to have its SKB set to NULL.
 * This free routine walks the list of POOL entries and, for each entry whose
 * SKB is non-NULL, unmaps and frees it.
 */
void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
{
	int i;
	for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
		if (rxq->pool[i].skb != NULL) {
			pci_unmap_single(priv->pci_dev,
					 rxq->pool[i].dma_addr,
					 priv->hw_params.rx_buf_size,
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(rxq->pool[i].skb);
		}
	}

	pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd,
			    rxq->dma_addr);
	rxq->bd = NULL;
}
EXPORT_SYMBOL(iwl_rx_queue_free);

int iwl_rx_queue_alloc(struct iwl_priv *priv)
{
	struct iwl_rx_queue *rxq = &priv->rxq;
	struct pci_dev *dev = priv->pci_dev;
	int i;

	spin_lock_init(&rxq->lock);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);

	/* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
	rxq->bd = pci_alloc_consistent(dev, 4 * RX_QUEUE_SIZE, &rxq->dma_addr);
	if (!rxq->bd)
		return -ENOMEM;

	/* Fill the rx_used queue with _all_ of the Rx buffers */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->free_count = 0;
	rxq->need_update = 0;
	return 0;
}
EXPORT_SYMBOL(iwl_rx_queue_alloc);

void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
{
	unsigned long flags;
	int i;
	spin_lock_irqsave(&rxq->lock, flags);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);
	/* Fill the rx_used queue with _all_ of the Rx buffers */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		/* In the reset function, these buffers may have been allocated
		 * to an SKB, so we need to unmap and free potential storage */
		if (rxq->pool[i].skb != NULL) {
			pci_unmap_single(priv->pci_dev,
					 rxq->pool[i].dma_addr,
					 priv->hw_params.rx_buf_size,
					 PCI_DMA_FROMDEVICE);
			priv->alloc_rxb_skb--;
			dev_kfree_skb(rxq->pool[i].skb);
			rxq->pool[i].skb = NULL;
		}
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
	}

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->free_count = 0;
	spin_unlock_irqrestore(&rxq->lock, flags);
}
EXPORT_SYMBOL(iwl_rx_queue_reset);

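/*
 * How these pieces fit together: a sketch of a plausible caller
 * (hypothetical, error handling elided), mirroring the "Driver sequence"
 * described at the top of this file:
 *
 *	if (!priv->rxq.bd)
 *		iwl_rx_queue_alloc(priv);		-- first-time setup
 *	else
 *		iwl_rx_queue_reset(priv, &priv->rxq);	-- re-initialization
 *	iwl_rx_replenish(priv);
 *	iwl_rx_init(priv, &priv->rxq);
 */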
int iwl_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
{
	int ret;
	unsigned long flags;
	unsigned int rb_size;

	spin_lock_irqsave(&priv->lock, flags);
	ret = iwl_grab_nic_access(priv);
	if (ret) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return ret;
	}

	if (priv->cfg->mod_params->amsdu_size_8K)
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
	else
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

	/* Stop Rx DMA */
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	/* Reset driver's Rx queue write index */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
			   rxq->dma_addr >> 8);

	/* Tell device where in DRAM to update its Rx status */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
			   (priv->shared_phys + priv->rb_closed_offset) >> 4);

	/* Enable Rx DMA, enable host interrupt, Rx buffer size 4k (or 8k
	 * with the amsdu_size_8K module parameter), 256 RBDs */
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
			   rb_size |
			   /* 0x10 << 4 | */
			   (RX_QUEUE_SIZE_LOG <<
			    FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT));

	/*
	 * iwl_write32(priv,CSR_INT_COAL_REG,0);
	 */

	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}

int iwl_rxq_stop(struct iwl_priv *priv)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	ret = iwl_grab_nic_access(priv);
	if (unlikely(ret)) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return ret;
	}

	/* stop Rx DMA */
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
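	/* Poll until the Rx DMA channel reports idle; the hard-coded
	 * (1 << 24) below appears to be the channel-idle flag of the
	 * Rx status register. */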
	ret = iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
				  (1 << 24), 1000);
	if (ret < 0)
		IWL_ERROR("Can't stop Rx DMA.\n");

	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}
EXPORT_SYMBOL(iwl_rxq_stop);

void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
				struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
	struct iwl4965_missed_beacon_notif *missed_beacon;

	missed_beacon = &pkt->u.missed_beacon;
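	/* Threshold is hard-coded: after more than 5 consecutive missed
	 * beacons, re-run sensitivity calibration, unless a scan is in
	 * progress. */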
	if (le32_to_cpu(missed_beacon->consequtive_missed_beacons) > 5) {
		IWL_DEBUG_CALIB("missed bcn cnsq %d totl %d rcd %d expctd %d\n",
		    le32_to_cpu(missed_beacon->consequtive_missed_beacons),
		    le32_to_cpu(missed_beacon->total_missed_becons),
		    le32_to_cpu(missed_beacon->num_recvd_beacons),
		    le32_to_cpu(missed_beacon->num_expected_beacons));
		if (!test_bit(STATUS_SCANNING, &priv->status))
			iwl_init_sensitivity(priv);
	}
}
EXPORT_SYMBOL(iwl_rx_missed_beacon_notif);