drivers/net/wireless/iwlwifi/pcie/rx.c
/******************************************************************************
 *
 * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/gfp.h>

#include "iwl-prph.h"
#include "iwl-io.h"
#include "internal.h"
#include "iwl-op-mode.h"

/******************************************************************************
 *
 * RX path functions
 *
 ******************************************************************************/

/*
 * Rx theory of operation
 *
 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
 * each of which points to Receive Buffers to be filled by the NIC. These get
 * used not only for Rx frames, but for any command response or notification
 * from the NIC. The driver and NIC manage the Rx buffers by means
 * of indexes into the circular buffer.
 *
 * Rx Queue Indexes
 * The host/firmware share two index registers for managing the Rx buffers.
 *
 * The READ index maps to the first position that the firmware may be writing
 * to -- the driver can read up to (but not including) this position and get
 * good data.
 * The READ index is managed by the firmware once the card is enabled.
 *
 * The WRITE index maps to the last position the driver has read from -- the
 * position preceding WRITE is the last slot the firmware can place a packet.
 *
 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
 * WRITE = READ.
 *
 * During initialization, the host sets up the READ queue position to the first
 * INDEX position, and WRITE to the last (READ - 1 wrapped).
 *
 * When the firmware places a packet in a buffer, it will advance the READ
 * index and fire the RX interrupt. The driver can then query the READ index
 * and process as many packets as possible, moving the WRITE index forward as
 * it resets the Rx queue buffers with new memory.
 *
 * The management in the driver is as follows:
 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
 *   iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
 *   to replenish the iwl->rxq->rx_free.
 * + In iwl_pcie_rx_replenish (scheduled) if 'processed' != 'read' then the
 *   iwl->rxq is replenished and the READ INDEX is updated (updating the
 *   'processed' and 'read' driver indexes as well).
 * + A received packet is processed and handed to the kernel network stack,
 *   detached from the iwl->rxq. The driver 'processed' index is updated.
 * + The Host/Firmware iwl->rxq is replenished at irq thread time from the
 *   rx_free list. If there are no allocated buffers in iwl->rxq->rx_free,
 *   the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
 *   If there were enough free buffers and RX_STALLED is set, it is cleared.
 *
 *
 * Driver sequence:
 *
 * iwl_rxq_alloc()            Allocates rx_free
 * iwl_pcie_rx_replenish()    Replenishes rx_free list from rx_used, and calls
 *                            iwl_pcie_rxq_restock
 * iwl_pcie_rxq_restock()     Moves available buffers from rx_free into Rx
 *                            queue, updates firmware pointers, and updates
 *                            the WRITE index. If insufficient rx_free buffers
 *                            are available, schedules iwl_pcie_rx_replenish
 *
 * -- enable interrupts --
 * ISR - iwl_rx()             Detach iwl_rx_mem_buffers from pool up to the
 *                            READ INDEX, detaching the SKB from the pool.
 *                            Moves the packet buffer from queue to rx_used.
 *                            Calls iwl_pcie_rxq_restock to refill any empty
 *                            slots.
 *                            ...
 *
 */
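
/*
 * Illustrative walk-through of the index scheme above (hypothetical
 * numbers, assuming RX_QUEUE_SIZE = 256): at init READ = 0 and
 * WRITE = 255 (READ - 1, wrapped), i.e. the queue is empty and the
 * firmware owns all slots. After the firmware fills two buffers it
 * advances READ to 2 and fires the RX interrupt; the driver may now
 * process slots 0 and 1. Once it has attached fresh pages to those
 * slots, it advances WRITE to hand them back to the firmware.
 */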

/*
 * iwl_rxq_space - Return number of free slots available in queue.
 */
static int iwl_rxq_space(const struct iwl_rxq *rxq)
{
        int s = rxq->read - rxq->write;

        if (s <= 0)
                s += RX_QUEUE_SIZE;
        /* keep some buffer so we don't confuse a full queue with an empty one */
        s -= 2;
        if (s < 0)
                s = 0;
        return s;
}
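
/*
 * Worked example for the arithmetic above (hypothetical values, with
 * RX_QUEUE_SIZE = 256): read = 10, write = 250 gives s = -240, which
 * wraps to 16; the 2-slot guard band leaves 14 usable slots. The guard
 * band keeps 'write' from catching up with 'read', which would make a
 * full queue indistinguishable from an empty one.
 */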

/*
 * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
 */
static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
{
        return cpu_to_le32((u32)(dma_addr >> 8));
}
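
/*
 * Example (hypothetical address): the RBD word stores bits 35..8 of the
 * DMA address, so 0x8_1234_5600 becomes 0x08123456. This is lossless
 * because, as the BUG_ON checks in iwl_pcie_rxq_alloc_rbs() enforce,
 * buffer addresses fit in 36 bits and are 256-byte aligned.
 */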

/*
 * iwl_pcie_rx_stop - stops the Rx DMA
 */
int iwl_pcie_rx_stop(struct iwl_trans *trans)
{
        iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
        return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
                                   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
}

/*
 * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
 */
static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
                                    struct iwl_rxq *rxq)
{
        unsigned long flags;
        u32 reg;

        spin_lock_irqsave(&rxq->lock, flags);

        if (rxq->need_update == 0)
                goto exit_unlock;

        if (trans->cfg->base_params->shadow_reg_enable) {
                /* shadow register enabled */
                /* Device expects a multiple of 8 */
                rxq->write_actual = (rxq->write & ~0x7);
                iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
        } else {
                struct iwl_trans_pcie *trans_pcie =
                        IWL_TRANS_GET_PCIE_TRANS(trans);

                /* If power-saving is in use, make sure device is awake */
                if (test_bit(STATUS_TPOWER_PMI, &trans_pcie->status)) {
                        reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

                        if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
                                IWL_DEBUG_INFO(trans,
                                               "Rx queue requesting wakeup, GP1 = 0x%x\n",
                                               reg);
                                iwl_set_bit(trans, CSR_GP_CNTRL,
                                            CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
                                goto exit_unlock;
                        }

                        rxq->write_actual = (rxq->write & ~0x7);
                        iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
                                           rxq->write_actual);

                /* Else device is assumed to be awake */
                } else {
                        /* Device expects a multiple of 8 */
                        rxq->write_actual = (rxq->write & ~0x7);
                        iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
                                           rxq->write_actual);
                }
        }
        rxq->need_update = 0;

exit_unlock:
        spin_unlock_irqrestore(&rxq->lock, flags);
}

/*
 * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' index forward to catch up with 'processed', and
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
        struct iwl_rx_mem_buffer *rxb;
        unsigned long flags;

        /*
         * If the device isn't enabled there's no need to try to add
         * buffers... This can happen when we stop the device and still have
         * an interrupt pending. We stop the APM before we sync the
         * interrupts because we have to (see comment there). On the other
         * hand, since the APM is stopped, we cannot access the HW (in
         * particular not prph). So don't try to restock if the APM has
         * already been stopped.
         */
        if (!test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status))
                return;

        spin_lock_irqsave(&rxq->lock, flags);
        while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
                /* The overwritten rxb must be a used one */
                rxb = rxq->queue[rxq->write];
                BUG_ON(rxb && rxb->page);

                /* Get next free Rx buffer, remove from free list */
                rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
                                       list);
                list_del(&rxb->list);

                /* Point to Rx buffer via next RBD in circular buffer */
                rxq->bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
                rxq->queue[rxq->write] = rxb;
                rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
                rxq->free_count--;
        }
        spin_unlock_irqrestore(&rxq->lock, flags);
        /* If the pre-allocated buffer pool is dropping low, schedule to
         * refill it */
        if (rxq->free_count <= RX_LOW_WATERMARK)
                schedule_work(&trans_pcie->rx_replenish);

        /* If we've added more space for the firmware to place data, tell it.
         * Increment device's write pointer in multiples of 8. */
        if (rxq->write_actual != (rxq->write & ~0x7)) {
                spin_lock_irqsave(&rxq->lock, flags);
                rxq->need_update = 1;
                spin_unlock_irqrestore(&rxq->lock, flags);
                iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
        }
}

/*
 * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
 *
 * A used RBD is an Rx buffer that has been given to the stack. To use it again
 * a page must be allocated and the RBD must point to the page. This function
 * doesn't change the HW pointer but handles the list of pages that is used by
 * iwl_pcie_rxq_restock. The latter function will update the HW to use the
 * newly allocated buffers.
 */
static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
        struct iwl_rx_mem_buffer *rxb;
        struct page *page;
        unsigned long flags;
        gfp_t gfp_mask = priority;

        while (1) {
                spin_lock_irqsave(&rxq->lock, flags);
                if (list_empty(&rxq->rx_used)) {
                        spin_unlock_irqrestore(&rxq->lock, flags);
                        return;
                }
                spin_unlock_irqrestore(&rxq->lock, flags);

                /* Allocation failures are tolerable while the pool is still
                 * comfortably above the watermark, so don't warn about them */
                if (rxq->free_count > RX_LOW_WATERMARK)
                        gfp_mask |= __GFP_NOWARN;

                if (trans_pcie->rx_page_order > 0)
                        gfp_mask |= __GFP_COMP;

                /* Alloc a new receive buffer */
                page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
                if (!page) {
                        if (net_ratelimit())
                                IWL_DEBUG_INFO(trans,
                                               "alloc_pages failed, order: %d\n",
                                               trans_pcie->rx_page_order);

                        if ((rxq->free_count <= RX_LOW_WATERMARK) &&
                            net_ratelimit())
                                IWL_CRIT(trans,
                                         "Failed to alloc_pages with %s. Only %u free buffers remaining.\n",
                                         priority == GFP_ATOMIC ?
                                         "GFP_ATOMIC" : "GFP_KERNEL",
                                         rxq->free_count);
                        /* We don't reschedule replenish work here -- we will
                         * call the restock method and if it still needs
                         * more buffers it will schedule replenish */
                        return;
                }

                spin_lock_irqsave(&rxq->lock, flags);

                if (list_empty(&rxq->rx_used)) {
                        spin_unlock_irqrestore(&rxq->lock, flags);
                        __free_pages(page, trans_pcie->rx_page_order);
                        return;
                }
                rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
                                       list);
                list_del(&rxb->list);
                spin_unlock_irqrestore(&rxq->lock, flags);

                BUG_ON(rxb->page);
                rxb->page = page;
                /* Get physical address of the RB */
                rxb->page_dma =
                        dma_map_page(trans->dev, page, 0,
                                     PAGE_SIZE << trans_pcie->rx_page_order,
                                     DMA_FROM_DEVICE);
                if (dma_mapping_error(trans->dev, rxb->page_dma)) {
                        rxb->page = NULL;
                        spin_lock_irqsave(&rxq->lock, flags);
                        list_add(&rxb->list, &rxq->rx_used);
                        spin_unlock_irqrestore(&rxq->lock, flags);
                        __free_pages(page, trans_pcie->rx_page_order);
                        return;
                }
                /* dma address must be no more than 36 bits */
                BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
                /* and also 256 byte aligned! */
                BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));

                spin_lock_irqsave(&rxq->lock, flags);

                list_add_tail(&rxb->list, &rxq->rx_free);
                rxq->free_count++;

                spin_unlock_irqrestore(&rxq->lock, flags);
        }
}

static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
        int i;

        lockdep_assert_held(&rxq->lock);

        for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
                if (!rxq->pool[i].page)
                        continue;
                dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
                               PAGE_SIZE << trans_pcie->rx_page_order,
                               DMA_FROM_DEVICE);
                __free_pages(rxq->pool[i].page, trans_pcie->rx_page_order);
                rxq->pool[i].page = NULL;
        }
}

/*
 * iwl_pcie_rx_replenish - Move all used buffers from rx_used to rx_free
 *
 * When moving to rx_free, a page is allocated for the slot.
 *
 * Also restock the Rx queue via iwl_pcie_rxq_restock.
 * This is called as a scheduled work item (except for during initialization)
 */
static void iwl_pcie_rx_replenish(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        unsigned long flags;

        iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL);

        spin_lock_irqsave(&trans_pcie->irq_lock, flags);
        iwl_pcie_rxq_restock(trans);
        spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}

static void iwl_pcie_rx_replenish_now(struct iwl_trans *trans)
{
        iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC);

        iwl_pcie_rxq_restock(trans);
}

static void iwl_pcie_rx_replenish_work(struct work_struct *data)
{
        struct iwl_trans_pcie *trans_pcie =
                container_of(data, struct iwl_trans_pcie, rx_replenish);

        iwl_pcie_rx_replenish(trans_pcie->trans);
}

static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
        struct device *dev = trans->dev;

        memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));

        spin_lock_init(&rxq->lock);

        if (WARN_ON(rxq->bd || rxq->rb_stts))
                return -EINVAL;

        /* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
        rxq->bd = dma_zalloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
                                      &rxq->bd_dma, GFP_KERNEL);
        if (!rxq->bd)
                goto err_bd;

        /* Allocate the driver's pointer to receive buffer status */
        rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
                                           &rxq->rb_stts_dma, GFP_KERNEL);
        if (!rxq->rb_stts)
                goto err_rb_stts;

        return 0;

err_rb_stts:
        dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
                          rxq->bd, rxq->bd_dma);
        rxq->bd_dma = 0;
        rxq->bd = NULL;
err_bd:
        return -ENOMEM;
}

static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        u32 rb_size;
        const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */

        if (trans_pcie->rx_buf_size_8k)
                rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
        else
                rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

        /* Stop Rx DMA */
        iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
        /* reset and flush pointers */
        iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
        iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
        iwl_write_direct32(trans, FH_RSCSR_CHNL0_RDPTR, 0);

        /* Reset driver's Rx queue write index */
        iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

        /* Tell device where to find RBD circular buffer in DRAM */
        iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
                           (u32)(rxq->bd_dma >> 8));

        /* Tell device where in DRAM to update its Rx status */
        iwl_write_direct32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
                           rxq->rb_stts_dma >> 4);

        /* Enable Rx DMA
         * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
         *   the credit mechanism in 5000 HW RX FIFO
         * Direct rx interrupts to hosts
         * Rx buffer size 4 or 8k
         * RB timeout 0x10
         * 256 RBDs
         */
        iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
                           FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
                           FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
                           FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
                           rb_size |
                           (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
                           (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

        /* Set interrupt coalescing timer to default (2048 usecs) */
        iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
}

static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
{
        int i;

        lockdep_assert_held(&rxq->lock);

        INIT_LIST_HEAD(&rxq->rx_free);
        INIT_LIST_HEAD(&rxq->rx_used);
        rxq->free_count = 0;

        for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
                list_add(&rxq->pool[i].list, &rxq->rx_used);
}

int iwl_pcie_rx_init(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
        int i, err;
        unsigned long flags;

        if (!rxq->bd) {
                err = iwl_pcie_rx_alloc(trans);
                if (err)
                        return err;
        }

        spin_lock_irqsave(&rxq->lock, flags);

        INIT_WORK(&trans_pcie->rx_replenish, iwl_pcie_rx_replenish_work);

        /* free all first - we might be reconfigured for a different size */
        iwl_pcie_rxq_free_rbs(trans);
        iwl_pcie_rx_init_rxb_lists(rxq);

        for (i = 0; i < RX_QUEUE_SIZE; i++)
                rxq->queue[i] = NULL;

        /* Set us up so that we have processed and used all buffers, but have
         * not restocked the Rx queue with fresh buffers */
        rxq->read = rxq->write = 0;
        rxq->write_actual = 0;
        memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
        spin_unlock_irqrestore(&rxq->lock, flags);

        iwl_pcie_rx_replenish(trans);

        iwl_pcie_rx_hw_init(trans, rxq);

        spin_lock_irqsave(&trans_pcie->irq_lock, flags);
        rxq->need_update = 1;
        iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
        spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

        return 0;
}

void iwl_pcie_rx_free(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
        unsigned long flags;

        /* if rxq->bd is NULL, it means that nothing has been allocated,
         * exit now */
        if (!rxq->bd) {
                IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
                return;
        }

        cancel_work_sync(&trans_pcie->rx_replenish);

        spin_lock_irqsave(&rxq->lock, flags);
        iwl_pcie_rxq_free_rbs(trans);
        spin_unlock_irqrestore(&rxq->lock, flags);

        dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE,
                          rxq->bd, rxq->bd_dma);
        rxq->bd_dma = 0;
        rxq->bd = NULL;

        if (rxq->rb_stts)
                dma_free_coherent(trans->dev,
                                  sizeof(struct iwl_rb_status),
                                  rxq->rb_stts, rxq->rb_stts_dma);
        else
                IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n");
        rxq->rb_stts_dma = 0;
        rxq->rb_stts = NULL;
}

static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
                                  struct iwl_rx_mem_buffer *rxb)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
        struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
        unsigned long flags;
        bool page_stolen = false;
        int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
        u32 offset = 0;

        if (WARN_ON(!rxb))
                return;

        dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);

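        /*
         * A single receive buffer can hold several packets laid out
         * back-to-back; each iteration below handles one packet and then
         * advances 'offset' to the next aligned boundary. For example
         * (hypothetical numbers, assuming FH_RSCSR_FRAME_ALIGN is 0x40):
         * a packet occupying 104 bytes including the status word advances
         * 'offset' by 128 bytes.
         */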
        while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
                struct iwl_rx_packet *pkt;
                struct iwl_device_cmd *cmd;
                u16 sequence;
                bool reclaim;
                int index, cmd_index, err, len;
                struct iwl_rx_cmd_buffer rxcb = {
                        ._offset = offset,
                        ._rx_page_order = trans_pcie->rx_page_order,
                        ._page = rxb->page,
                        ._page_stolen = false,
                        .truesize = max_len,
                };

                pkt = rxb_addr(&rxcb);

                if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID))
                        break;

                IWL_DEBUG_RX(trans, "cmd at offset %d: %s (0x%.2x)\n",
                             rxcb._offset,
                             get_cmd_string(trans_pcie, pkt->hdr.cmd),
                             pkt->hdr.cmd);

                len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
                len += sizeof(u32); /* account for status word */
                trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
                trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);

                /* Reclaim a command buffer only if this packet is a response
                 *   to a (driver-originated) command.
                 * If the packet (e.g. Rx frame) originated from uCode,
                 *   there is no command buffer to reclaim.
                 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
                 *   but apparently a few don't get set; catch them here. */
                reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
                if (reclaim) {
                        int i;

                        for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
                                if (trans_pcie->no_reclaim_cmds[i] ==
                                                        pkt->hdr.cmd) {
                                        reclaim = false;
                                        break;
                                }
                        }
                }

                sequence = le16_to_cpu(pkt->hdr.sequence);
                index = SEQ_TO_INDEX(sequence);
                cmd_index = get_cmd_index(&txq->q, index);

                if (reclaim)
                        cmd = txq->entries[cmd_index].cmd;
                else
                        cmd = NULL;

                err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd);

                if (reclaim) {
                        kfree(txq->entries[cmd_index].free_buf);
                        txq->entries[cmd_index].free_buf = NULL;
                }

                /*
                 * After here, we should always check rxcb._page_stolen,
                 * if it is true then one of the handlers took the page.
                 */

                if (reclaim) {
                        /* Invoke any callbacks, transfer the buffer to caller,
                         * and fire off the (possibly) blocking
                         * iwl_trans_send_cmd()
                         * as we reclaim the driver command queue */
                        if (!rxcb._page_stolen)
                                iwl_pcie_hcmd_complete(trans, &rxcb, err);
                        else
                                IWL_WARN(trans, "Claim null rxb?\n");
                }

                page_stolen |= rxcb._page_stolen;
                offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
        }

        /* page was stolen from us -- free our reference */
        if (page_stolen) {
                __free_pages(rxb->page, trans_pcie->rx_page_order);
                rxb->page = NULL;
        }

        /* Reuse the page if possible. For notification packets and
         * SKBs that fail to Rx correctly, add them back into the
         * rx_free list for reuse later. */
        spin_lock_irqsave(&rxq->lock, flags);
        if (rxb->page != NULL) {
                rxb->page_dma =
                        dma_map_page(trans->dev, rxb->page, 0,
                                     PAGE_SIZE << trans_pcie->rx_page_order,
                                     DMA_FROM_DEVICE);
                if (dma_mapping_error(trans->dev, rxb->page_dma)) {
                        /*
                         * free the page(s) as well to not break
                         * the invariant that the items on the used
                         * list have no page(s)
                         */
                        __free_pages(rxb->page, trans_pcie->rx_page_order);
                        rxb->page = NULL;
                        list_add_tail(&rxb->list, &rxq->rx_used);
                } else {
                        list_add_tail(&rxb->list, &rxq->rx_free);
                        rxq->free_count++;
                }
        } else
                list_add_tail(&rxb->list, &rxq->rx_used);
        spin_unlock_irqrestore(&rxq->lock, flags);
}

/*
 * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
 */
static void iwl_pcie_rx_handle(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
        u32 r, i;
        u8 fill_rx = 0;
        u32 count = 8;
        int total_empty;

        /* uCode's read index (stored in shared DRAM) indicates the last Rx
         * buffer that the driver may process (last buffer filled by ucode). */
        r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
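        /* The 0x0FFF mask above suggests that only the low 12 bits of
         * closed_rb_num carry the ring index (an assumption noted here
         * for clarity); the remaining bits of the 16-bit status field
         * are not index data. */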
        i = rxq->read;

        /* Rx interrupt, but nothing sent from uCode */
        if (i == r)
                IWL_DEBUG_RX(trans, "HW = SW = %d\n", r);

        /* calculate how many buffers need to be restocked after handling RX */
        total_empty = r - rxq->write_actual;
        if (total_empty < 0)
                total_empty += RX_QUEUE_SIZE;

        if (total_empty > (RX_QUEUE_SIZE / 2))
                fill_rx = 1;

        while (i != r) {
                struct iwl_rx_mem_buffer *rxb;

                rxb = rxq->queue[i];
                rxq->queue[i] = NULL;

                IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d (%p)\n",
                             r, i, rxb);
                iwl_pcie_rx_handle_rb(trans, rxb);

                i = (i + 1) & RX_QUEUE_MASK;
                /* If there are a lot of unused frames,
                 * restock the Rx queue so ucode won't assert. */
                if (fill_rx) {
                        count++;
                        if (count >= 8) {
                                rxq->read = i;
                                iwl_pcie_rx_replenish_now(trans);
                                count = 0;
                        }
                }
        }

        /* Backtrack one entry */
        rxq->read = i;
        if (fill_rx)
                iwl_pcie_rx_replenish_now(trans);
        else
                iwl_pcie_rxq_restock(trans);
}

/*
 * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card
 */
static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        /* W/A for WiFi/WiMAX coex when WiMAX owns the RF */
        if (trans->cfg->internal_wimax_coex &&
            (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
                             APMS_CLK_VAL_MRB_FUNC_MODE) ||
             (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
                            APMG_PS_CTRL_VAL_RESET_REQ))) {
                clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
                iwl_op_mode_wimax_active(trans->op_mode);
                wake_up(&trans_pcie->wait_command_queue);
                return;
        }

        iwl_pcie_dump_csr(trans);
        iwl_pcie_dump_fh(trans, NULL);

        set_bit(STATUS_FW_ERROR, &trans_pcie->status);
        clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
        wake_up(&trans_pcie->wait_command_queue);

        local_bh_disable();
        iwl_op_mode_nic_error(trans->op_mode);
        local_bh_enable();
}

irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
{
        struct iwl_trans *trans = dev_id;
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
        u32 inta = 0;
        u32 handled = 0;
        unsigned long flags;
        u32 i;

        lock_map_acquire(&trans->sync_cmd_lockdep_map);

        spin_lock_irqsave(&trans_pcie->irq_lock, flags);

        /* Ack/clear/reset pending uCode interrupts.
         * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS.
         */
        /* There is a hardware bug in the interrupt mask function that some
         * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
         * they are disabled in the CSR_INT_MASK register. Furthermore the
         * ICT interrupt handling mechanism has another bug that might cause
         * these unmasked interrupts to fail to be detected. We work around
         * the hardware bugs here by ACKing all the possible interrupts so
         * that interrupt coalescing can still be achieved.
         */
        iwl_write32(trans, CSR_INT,
                    trans_pcie->inta | ~trans_pcie->inta_mask);

        inta = trans_pcie->inta;

        if (iwl_have_debug_level(IWL_DL_ISR))
                IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
                              inta, iwl_read32(trans, CSR_INT_MASK));

        /* the interrupt was saved in the inta variable; now we can reset
         * trans_pcie->inta */
        trans_pcie->inta = 0;

        spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

        /* Now service all interrupt bits discovered above. */
        if (inta & CSR_INT_BIT_HW_ERR) {
                IWL_ERR(trans, "Hardware error detected. Restarting.\n");

                /* Tell the device to stop sending interrupts */
                iwl_disable_interrupts(trans);

                isr_stats->hw++;
                iwl_pcie_irq_handle_error(trans);

                handled |= CSR_INT_BIT_HW_ERR;

                goto out;
        }

        if (iwl_have_debug_level(IWL_DL_ISR)) {
                /* NIC fires this, but we don't use it, redundant with WAKEUP */
                if (inta & CSR_INT_BIT_SCD) {
                        IWL_DEBUG_ISR(trans,
                                      "Scheduler finished transmitting the frame/frames.\n");
                        isr_stats->sch++;
                }

                /* Alive notification via Rx interrupt will do the real work */
                if (inta & CSR_INT_BIT_ALIVE) {
                        IWL_DEBUG_ISR(trans, "Alive interrupt\n");
                        isr_stats->alive++;
                }
        }

        /* Safely ignore these bits for debug checks below */
        inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);

        /* HW RF KILL switch toggled */
        if (inta & CSR_INT_BIT_RF_KILL) {
                bool hw_rfkill;

                hw_rfkill = iwl_is_rfkill_set(trans);
                IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
                         hw_rfkill ? "disable radio" : "enable radio");

                isr_stats->rfkill++;

                iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
                if (hw_rfkill) {
                        set_bit(STATUS_RFKILL, &trans_pcie->status);
                        if (test_and_clear_bit(STATUS_HCMD_ACTIVE,
                                               &trans_pcie->status))
                                IWL_DEBUG_RF_KILL(trans,
                                                  "Rfkill while SYNC HCMD in flight\n");
                        wake_up(&trans_pcie->wait_command_queue);
                } else {
                        clear_bit(STATUS_RFKILL, &trans_pcie->status);
                }

                handled |= CSR_INT_BIT_RF_KILL;
        }

        /* Chip got too hot and stopped itself */
        if (inta & CSR_INT_BIT_CT_KILL) {
                IWL_ERR(trans, "Microcode CT kill error detected.\n");
                isr_stats->ctkill++;
                handled |= CSR_INT_BIT_CT_KILL;
        }

        /* Error detected by uCode */
        if (inta & CSR_INT_BIT_SW_ERR) {
                IWL_ERR(trans, "Microcode SW error detected. Restarting 0x%X.\n",
                        inta);
                isr_stats->sw++;
                iwl_pcie_irq_handle_error(trans);
                handled |= CSR_INT_BIT_SW_ERR;
        }

        /* uCode wakes up after power-down sleep */
        if (inta & CSR_INT_BIT_WAKEUP) {
                IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
                iwl_pcie_rxq_inc_wr_ptr(trans, &trans_pcie->rxq);
                for (i = 0; i < trans->cfg->base_params->num_of_queues; i++)
                        iwl_pcie_txq_inc_wr_ptr(trans, &trans_pcie->txq[i]);

                isr_stats->wakeup++;

                handled |= CSR_INT_BIT_WAKEUP;
        }

        /* All uCode command responses, including Tx command responses,
         * Rx "responses" (frame-received notification), and other
         * notifications from uCode come through here */
        if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
                    CSR_INT_BIT_RX_PERIODIC)) {
                IWL_DEBUG_ISR(trans, "Rx interrupt\n");
                if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
                        handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
                        iwl_write32(trans, CSR_FH_INT_STATUS,
                                    CSR_FH_INT_RX_MASK);
                }
                if (inta & CSR_INT_BIT_RX_PERIODIC) {
                        handled |= CSR_INT_BIT_RX_PERIODIC;
                        iwl_write32(trans,
                                    CSR_INT, CSR_INT_BIT_RX_PERIODIC);
                }
                /* Sending an RX interrupt requires many steps to be done in
                 * the device:
                 * 1- write interrupt to current index in ICT table.
                 * 2- dma RX frame.
                 * 3- update RX shared data to indicate last write index.
                 * 4- send interrupt.
                 * This could lead to an RX race: the driver could receive an
                 * RX interrupt before the shared data reflects the change;
                 * the periodic interrupt will detect any dangling Rx activity.
                 */

                /* Disable periodic interrupt; we use it as just a one-shot. */
                iwl_write8(trans, CSR_INT_PERIODIC_REG,
                           CSR_INT_PERIODIC_DIS);

                iwl_pcie_rx_handle(trans);

                /*
                 * Enable periodic interrupt in 8 msec only if we received
                 * real RX interrupt (instead of just periodic int), to catch
                 * any dangling Rx interrupt. If it was just the periodic
                 * interrupt, there was no dangling Rx activity, and no need
                 * to extend the periodic interrupt; one-shot is enough.
                 */
                if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
                        iwl_write8(trans, CSR_INT_PERIODIC_REG,
                                   CSR_INT_PERIODIC_ENA);

                isr_stats->rx++;
        }

        /* This "Tx" DMA channel is used only for loading uCode */
        if (inta & CSR_INT_BIT_FH_TX) {
                iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
                IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
                isr_stats->tx++;
                handled |= CSR_INT_BIT_FH_TX;
                /* Wake up uCode load routine, now that load is complete */
                trans_pcie->ucode_write_complete = true;
                wake_up(&trans_pcie->ucode_write_waitq);
        }

        if (inta & ~handled) {
                IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
                isr_stats->unhandled++;
        }

        if (inta & ~(trans_pcie->inta_mask)) {
                IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n",
                         inta & ~trans_pcie->inta_mask);
        }

        /* Re-enable all interrupts */
        /* only re-enable if disabled by irq */
        if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status))
                iwl_enable_interrupts(trans);
        /* Re-enable RF_KILL if it occurred */
        else if (handled & CSR_INT_BIT_RF_KILL)
                iwl_enable_rfkill_int(trans);

out:
        lock_map_release(&trans->sync_cmd_lockdep_map);
        return IRQ_HANDLED;
}

/******************************************************************************
 *
 * ICT functions
 *
 ******************************************************************************/

/* a device (PCI-E) page is 4096 bytes long */
#define ICT_SHIFT	12
#define ICT_SIZE	(1 << ICT_SHIFT)
#define ICT_COUNT	(ICT_SIZE / sizeof(u32))
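
/*
 * With ICT_SHIFT = 12 this works out to ICT_SIZE = 4096 bytes, i.e. one
 * device page, and ICT_COUNT = 4096 / sizeof(u32) = 1024 table entries.
 */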

/* Free dram table */
void iwl_pcie_free_ict(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        if (trans_pcie->ict_tbl) {
                dma_free_coherent(trans->dev, ICT_SIZE,
                                  trans_pcie->ict_tbl,
                                  trans_pcie->ict_tbl_dma);
                trans_pcie->ict_tbl = NULL;
                trans_pcie->ict_tbl_dma = 0;
        }
}

/*
 * Allocate the DRAM shared table; it is an aligned memory block of ICT_SIZE.
 * Also reset all data related to the ICT table interrupt.
 */
int iwl_pcie_alloc_ict(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        trans_pcie->ict_tbl =
                dma_alloc_coherent(trans->dev, ICT_SIZE,
                                   &trans_pcie->ict_tbl_dma,
                                   GFP_KERNEL);
        if (!trans_pcie->ict_tbl)
                return -ENOMEM;

        /* just an API sanity check ... it is guaranteed to be aligned */
        if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
                iwl_pcie_free_ict(trans);
                return -EINVAL;
        }

        IWL_DEBUG_ISR(trans, "ict dma addr %Lx\n",
                      (unsigned long long)trans_pcie->ict_tbl_dma);

        IWL_DEBUG_ISR(trans, "ict vir addr %p\n", trans_pcie->ict_tbl);

        /* reset table and index to all 0 */
        memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
        trans_pcie->ict_index = 0;

        /* add periodic RX interrupt */
        trans_pcie->inta_mask |= CSR_INT_BIT_RX_PERIODIC;
        return 0;
}

/*
 * The device is going up: inform it that it should use the ICT interrupt
 * table, and tell the driver to start using the ICT interrupt as well.
 */
void iwl_pcie_reset_ict(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        u32 val;
        unsigned long flags;

        if (!trans_pcie->ict_tbl)
                return;

        spin_lock_irqsave(&trans_pcie->irq_lock, flags);
        iwl_disable_interrupts(trans);

        memset(trans_pcie->ict_tbl, 0, ICT_SIZE);

        val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;

        val |= CSR_DRAM_INT_TBL_ENABLE;
        val |= CSR_DRAM_INIT_TBL_WRAP_CHECK;

        IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG = 0x%x\n", val);

        iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val);
        trans_pcie->use_ict = true;
        trans_pcie->ict_index = 0;
        iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
        iwl_enable_interrupts(trans);
        spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}

/* The device is going down: disable ICT interrupt usage */
void iwl_pcie_disable_ict(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        unsigned long flags;

        spin_lock_irqsave(&trans_pcie->irq_lock, flags);
        trans_pcie->use_ict = false;
        spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}

/* legacy (non-ICT) ISR. Assumes that trans_pcie->irq_lock is held */
static irqreturn_t iwl_pcie_isr(int irq, void *data)
{
        struct iwl_trans *trans = data;
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        u32 inta, inta_mask;

        lockdep_assert_held(&trans_pcie->irq_lock);

        trace_iwlwifi_dev_irq(trans->dev);

        /* Disable (but don't clear!) interrupts here to avoid
         * back-to-back ISRs and sporadic interrupts from our NIC.
         * If we have something to service, the irq thread will re-enable ints.
         * If we *don't* have something, we'll re-enable before leaving here. */
        inta_mask = iwl_read32(trans, CSR_INT_MASK);
        iwl_write32(trans, CSR_INT_MASK, 0x00000000);

        /* Discover which interrupts are active/pending */
        inta = iwl_read32(trans, CSR_INT);

        if (inta & (~inta_mask)) {
                IWL_DEBUG_ISR(trans,
                              "We got a masked interrupt (0x%08x)...Ack and ignore\n",
                              inta & (~inta_mask));
                iwl_write32(trans, CSR_INT, inta & (~inta_mask));
                inta &= inta_mask;
        }

        /* Ignore interrupt if there's nothing in NIC to service.
         * This may be due to IRQ shared with another device,
         * or due to sporadic interrupts thrown from our NIC. */
        if (!inta) {
                IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
                goto none;
        }

        if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
                /* Hardware disappeared. It might have already raised
                 * an interrupt */
                IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
                return IRQ_HANDLED;
        }

        if (iwl_have_debug_level(IWL_DL_ISR))
                IWL_DEBUG_ISR(trans,
                              "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
                              inta, inta_mask,
                              iwl_read32(trans, CSR_FH_INT_STATUS));

        trans_pcie->inta |= inta;
        /* the thread will service interrupts and re-enable them */
        if (likely(inta))
                return IRQ_WAKE_THREAD;
        else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
                 !trans_pcie->inta)
                iwl_enable_interrupts(trans);
        return IRQ_HANDLED;

none:
        /* re-enable interrupts here since we don't have anything to service. */
        /* only re-enable if disabled by this irq handler and no irq thread
         * was scheduled. */
        if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
            !trans_pcie->inta)
                iwl_enable_interrupts(trans);

        return IRQ_NONE;
}

/*
 * Interrupt handler using the ICT table. With this handler the driver stops
 * using the INTA register to learn about the device's interrupts, since
 * reading that register is expensive. Instead, the device writes interrupts
 * into the ICT DRAM table and increments its index, then fires an interrupt
 * to the driver. The driver ORs all ICT table entries from the current index
 * up to the first entry with a 0 value; the result is the interrupt to
 * service. The driver then sets those entries back to 0 and updates the
 * index.
 */
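/*
 * Worked example of the table walk above (hypothetical contents): if
 * ict_index is 5 and the table holds 0x0002 at index 5, 0x8000 at
 * index 6 and 0 at index 7, the handler ORs the entries into
 * val = 0x8002, zeroes indexes 5 and 6, and leaves ict_index at 7.
 */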
irqreturn_t iwl_pcie_isr_ict(int irq, void *data)
{
        struct iwl_trans *trans = data;
        struct iwl_trans_pcie *trans_pcie;
        u32 inta;
        u32 val = 0;
        u32 read;
        unsigned long flags;

        if (!trans)
                return IRQ_NONE;

        trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        spin_lock_irqsave(&trans_pcie->irq_lock, flags);

        /* DRAM interrupt table not set yet;
         * use the legacy interrupt.
         */
        if (unlikely(!trans_pcie->use_ict)) {
                irqreturn_t ret = iwl_pcie_isr(irq, data);
                spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
                return ret;
        }

        trace_iwlwifi_dev_irq(trans->dev);

        /* Disable (but don't clear!) interrupts here to avoid
         * back-to-back ISRs and sporadic interrupts from our NIC.
         * If we have something to service, the irq thread will re-enable ints.
         * If we *don't* have something, we'll re-enable before leaving here.
         */
        iwl_write32(trans, CSR_INT_MASK, 0x00000000);

        /* Ignore interrupt if there's nothing in NIC to service.
         * This may be due to IRQ shared with another device,
         * or due to sporadic interrupts thrown from our NIC. */
        read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
        trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
        if (!read) {
                IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
                goto none;
        }

        /*
         * Collect all entries up to the first 0, starting from ict_index;
         * note we already read at ict_index.
         */
        do {
                val |= read;
                IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
                              trans_pcie->ict_index, read);
                trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
                trans_pcie->ict_index =
                        iwl_queue_inc_wrap(trans_pcie->ict_index, ICT_COUNT);

                read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
                trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
                                           read);
        } while (read);

        /* We should not get this value, just ignore it. */
        if (val == 0xffffffff)
                val = 0;

        /*
         * This is a w/a for a h/w bug. The h/w bug may cause the Rx bit
         * (bit 15 before shifting it to 31) to clear when using interrupt
         * coalescing. Fortunately, bits 18 and 19 stay set when this happens
         * so we use them to decide on the real state of the Rx bit.
         * In other words, bit 15 is set if bit 18 or bit 19 are set.
         */
        if (val & 0xC0000)
                val |= 0x8000;

        inta = (0xff & val) | ((0xff00 & val) << 16);
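        /* The ICT entry packs the CSR_INT bits into 16: the low byte maps
         * to INTA bits 0-7 and the high byte to bits 24-31. For example, a
         * table value with bit 15 set (the Rx bit restored above) yields
         * inta bit 31, the FH Rx interrupt in CSR_INT. */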
        IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled(sw) 0x%08x ict 0x%08x\n",
                      inta, trans_pcie->inta_mask, val);
        if (iwl_have_debug_level(IWL_DL_ISR))
                IWL_DEBUG_ISR(trans, "enabled(hw) 0x%08x\n",
                              iwl_read32(trans, CSR_INT_MASK));

        inta &= trans_pcie->inta_mask;
        trans_pcie->inta |= inta;

        /* the irq thread will service interrupts and re-enable them */
        if (likely(inta)) {
                spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
                return IRQ_WAKE_THREAD;
        } else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
                   !trans_pcie->inta) {
                /* Re-enable interrupts here: they were disabled by this
                 * handler and no irq thread was woken to re-enable them
                 * on our behalf.
                 */
                iwl_enable_interrupts(trans);
        }

        spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
        return IRQ_HANDLED;

none:
        /* re-enable interrupts here since we don't have anything to service.
         * only re-enable if disabled by this irq handler.
         */
        if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
            !trans_pcie->inta)
                iwl_enable_interrupts(trans);

        spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
        return IRQ_NONE;
}