drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
/******************************************************************************
 *
 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/gfp.h>

/*TODO: Remove include to iwl-core.h*/
#include "iwl-core.h"
#include "iwl-io.h"
#include "iwl-helpers.h"
#include "iwl-trans-pcie-int.h"

/******************************************************************************
 *
 * RX path functions
 *
 ******************************************************************************/

/*
 * Rx theory of operation
 *
 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
 * each of which points to a Receive Buffer to be filled by the NIC. These get
 * used not only for Rx frames, but for any command response or notification
 * from the NIC. The driver and NIC manage the Rx buffers by means
 * of indexes into the circular buffer.
 *
 * Rx Queue Indexes
 * The host/firmware share two index registers for managing the Rx buffers.
 *
 * The READ index maps to the first position that the firmware may be writing
 * to -- the driver can read up to (but not including) this position and get
 * good data.
 * The READ index is managed by the firmware once the card is enabled.
 *
 * The WRITE index maps to the last position the driver has read from -- the
 * position preceding WRITE is the last slot the firmware can place a packet.
 *
 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
 * WRITE = READ.
 *
 * During initialization, the host sets up the READ queue position to the first
 * INDEX position, and WRITE to the last (READ - 1 wrapped)
 *
 * When the firmware places a packet in a buffer, it will advance the READ index
 * and fire the RX interrupt. The driver can then query the READ index and
 * process as many packets as possible, moving the WRITE index forward as it
 * resets the Rx queue buffers with new memory.
 *
 * The management in the driver is as follows:
 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
 *   iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
 *   to replenish the iwl->rxq->rx_free.
 * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the
 *   iwl->rxq is replenished and the READ INDEX is updated (updating the
 *   'processed' and 'read' driver indexes as well)
 * + A received packet is processed and handed to the kernel network stack,
 *   detached from the iwl->rxq. The driver 'processed' index is updated.
 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
 *   list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
 *   INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
 *   were enough free buffers and RX_STALLED is set it is cleared.
 *
 *
 * Driver sequence:
 *
 * iwl_rx_queue_alloc()   Allocates rx_free
 * iwl_rx_replenish()     Replenishes rx_free list from rx_used, and calls
 *                        iwl_rx_queue_restock
 * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx
 *                        queue, updates firmware pointers, and updates
 *                        the WRITE index. If insufficient rx_free buffers
 *                        are available, schedules iwl_rx_replenish
 *
 * -- enable interrupts --
 * ISR - iwl_rx()         Detach iwl_rx_mem_buffers from pool up to the
 *                        READ INDEX, detaching the SKB from the pool.
 *                        Moves the packet buffer from queue to rx_used.
 *                        Calls iwl_rx_queue_restock to refill any empty
 *                        slots.
 * ...
 *
 */

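/*
 * Illustrative sketch only (an editor's addition, not driver code): the
 * empty/full rules from the comment above, written out as predicates.
 * RX_QUEUE_MASK comes from the driver headers; the names here are made up.
 */
static inline bool example_rxq_is_empty(u32 read, u32 write)
{
	/* empty: WRITE sits one slot behind READ (with wrap-around) */
	return write == ((read - 1) & RX_QUEUE_MASK);
}

static inline bool example_rxq_is_full(u32 read, u32 write)
{
	/* full: WRITE has caught up with READ */
	return write == read;
}
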
/**
 * iwl_rx_queue_space - Return number of free slots available in queue.
 */
static int iwl_rx_queue_space(const struct iwl_rx_queue *q)
{
	int s = q->read - q->write;
	if (s <= 0)
		s += RX_QUEUE_SIZE;
	/* keep some buffer to not confuse full and empty queue */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}

/**
 * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
 */
void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
			struct iwl_rx_queue *q)
{
	unsigned long flags;
	u32 reg;

	spin_lock_irqsave(&q->lock, flags);

	if (q->need_update == 0)
		goto exit_unlock;

	if (hw_params(trans).shadow_reg_enable) {
		/* shadow register enabled */
		/* Device expects a multiple of 8 */
		q->write_actual = (q->write & ~0x7);
		iwl_write32(bus(trans), FH_RSCSR_CHNL0_WPTR, q->write_actual);
	} else {
		/* If power-saving is in use, make sure device is awake */
		if (test_bit(STATUS_POWER_PMI, &trans->shrd->status)) {
			reg = iwl_read32(bus(trans), CSR_UCODE_DRV_GP1);

			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
				IWL_DEBUG_INFO(trans,
					"Rx queue requesting wakeup,"
					" GP1 = 0x%x\n", reg);
				iwl_set_bit(bus(trans), CSR_GP_CNTRL,
					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
				goto exit_unlock;
			}

			q->write_actual = (q->write & ~0x7);
			iwl_write_direct32(bus(trans), FH_RSCSR_CHNL0_WPTR,
					q->write_actual);

		/* Else device is assumed to be awake */
		} else {
			/* Device expects a multiple of 8 */
			q->write_actual = (q->write & ~0x7);
			iwl_write_direct32(bus(trans), FH_RSCSR_CHNL0_WPTR,
				q->write_actual);
		}
	}
	q->need_update = 0;

 exit_unlock:
	spin_unlock_irqrestore(&q->lock, flags);
}

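/*
 * Illustrative sketch only (an editor's addition, not driver code): the
 * "multiple of 8" rule applied above. The device only ever sees the write
 * pointer advance in steps of eight buffers, e.g. write = 13 yields
 * 13 & ~0x7 = 8, while write = 16 is passed through unchanged.
 */
static inline u32 example_write_actual(u32 write)
{
	return write & ~0x7;
}
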
/**
 * iwlagn_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
 */
static inline __le32 iwlagn_dma_addr2rbd_ptr(dma_addr_t dma_addr)
{
	return cpu_to_le32((u32)(dma_addr >> 8));
}

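/*
 * Illustrative sketch only (an editor's addition, not driver code): the
 * shift above is lossless because receive buffers are 256-byte aligned
 * (the allocation path below asserts this), so the low 8 bits of the DMA
 * address are always zero. The inverse conversion:
 */
static inline dma_addr_t example_rbd_ptr2dma_addr(__le32 rbd)
{
	return ((dma_addr_t)le32_to_cpu(rbd)) << 8;
}
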
/**
 * iwlagn_rx_queue_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' index forward to catch up with 'processed', and
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
static void iwlagn_rx_queue_restock(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	struct list_head *element;
	struct iwl_rx_mem_buffer *rxb;
	unsigned long flags;

	spin_lock_irqsave(&rxq->lock, flags);
	while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
		/* The overwritten rxb must be a used one */
		rxb = rxq->queue[rxq->write];
		BUG_ON(rxb && rxb->page);

		/* Get next free Rx buffer, remove from free list */
		element = rxq->rx_free.next;
		rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
		list_del(element);

		/* Point to Rx buffer via next RBD in circular buffer */
		rxq->bd[rxq->write] = iwlagn_dma_addr2rbd_ptr(rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock_irqrestore(&rxq->lock, flags);
	/* If the pre-allocated buffer pool is dropping low, schedule to
	 * refill it */
	if (rxq->free_count <= RX_LOW_WATERMARK)
		queue_work(trans->shrd->workqueue, &trans_pcie->rx_replenish);


	/* If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8. */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock_irqsave(&rxq->lock, flags);
		rxq->need_update = 1;
		spin_unlock_irqrestore(&rxq->lock, flags);
		iwl_rx_queue_update_write_ptr(trans, rxq);
	}
}

/**
 * iwlagn_rx_replenish - Move all used packet from rx_used to rx_free
 *
 * When moving to rx_free an SKB is allocated for the slot.
 *
 * Also restock the Rx queue via iwl_rx_queue_restock.
 * This is called as a scheduled work item (except for during initialization)
 */
static void iwlagn_rx_allocate(struct iwl_trans *trans, gfp_t priority)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	struct list_head *element;
	struct iwl_rx_mem_buffer *rxb;
	struct page *page;
	unsigned long flags;
	gfp_t gfp_mask = priority;

	while (1) {
		spin_lock_irqsave(&rxq->lock, flags);
		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			return;
		}
		spin_unlock_irqrestore(&rxq->lock, flags);

		if (rxq->free_count > RX_LOW_WATERMARK)
			gfp_mask |= __GFP_NOWARN;

		if (hw_params(trans).rx_page_order > 0)
			gfp_mask |= __GFP_COMP;

		/* Alloc a new receive buffer */
		page = alloc_pages(gfp_mask,
				hw_params(trans).rx_page_order);
		if (!page) {
			if (net_ratelimit())
				IWL_DEBUG_INFO(trans, "alloc_pages failed, "
					   "order: %d\n",
					   hw_params(trans).rx_page_order);

			if ((rxq->free_count <= RX_LOW_WATERMARK) &&
			    net_ratelimit())
				IWL_CRIT(trans, "Failed to alloc_pages with %s."
					 "Only %u free buffers remaining.\n",
					 priority == GFP_ATOMIC ?
					 "GFP_ATOMIC" : "GFP_KERNEL",
					 rxq->free_count);
			/* We don't reschedule replenish work here -- we will
			 * call the restock method and if it still needs
			 * more buffers it will schedule replenish */
			return;
		}

		spin_lock_irqsave(&rxq->lock, flags);

		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			__free_pages(page, hw_params(trans).rx_page_order);
			return;
		}
		element = rxq->rx_used.next;
		rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
		list_del(element);

		spin_unlock_irqrestore(&rxq->lock, flags);

		BUG_ON(rxb->page);
		rxb->page = page;
		/* Get physical address of the RB */
		rxb->page_dma = dma_map_page(bus(trans)->dev, page, 0,
				PAGE_SIZE << hw_params(trans).rx_page_order,
				DMA_FROM_DEVICE);
		/* dma address must be no more than 36 bits */
		BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
		/* and also 256 byte aligned! */
		BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));

		spin_lock_irqsave(&rxq->lock, flags);

		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;

		spin_unlock_irqrestore(&rxq->lock, flags);
	}
}

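/*
 * Illustrative sketch only (an editor's addition, not driver code): the
 * allocation-mask policy used by iwlagn_rx_allocate() above, pulled out
 * on its own. Above the low watermark an allocation failure is tolerable,
 * so the warning is suppressed; multi-page buffers need __GFP_COMP.
 */
static inline gfp_t example_rx_gfp_mask(gfp_t priority, u32 free_count,
					u32 page_order)
{
	gfp_t mask = priority;	/* GFP_KERNEL from work, GFP_ATOMIC from irq */

	if (free_count > RX_LOW_WATERMARK)
		mask |= __GFP_NOWARN;
	if (page_order > 0)
		mask |= __GFP_COMP;
	return mask;
}
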
void iwlagn_rx_replenish(struct iwl_trans *trans)
{
	unsigned long flags;

	iwlagn_rx_allocate(trans, GFP_KERNEL);

	spin_lock_irqsave(&trans->shrd->lock, flags);
	iwlagn_rx_queue_restock(trans);
	spin_unlock_irqrestore(&trans->shrd->lock, flags);
}

static void iwlagn_rx_replenish_now(struct iwl_trans *trans)
{
	iwlagn_rx_allocate(trans, GFP_ATOMIC);

	iwlagn_rx_queue_restock(trans);
}

void iwl_bg_rx_replenish(struct work_struct *data)
{
	struct iwl_trans_pcie *trans_pcie =
	    container_of(data, struct iwl_trans_pcie, rx_replenish);
	struct iwl_trans *trans = trans_pcie->trans;

	if (test_bit(STATUS_EXIT_PENDING, &trans->shrd->status))
		return;

	mutex_lock(&trans->shrd->mutex);
	iwlagn_rx_replenish(trans);
	mutex_unlock(&trans->shrd->mutex);
}

/**
 * iwl_rx_handle - Main entry function for receiving responses from uCode
 *
 * Uses the priv->rx_handlers callback function array to invoke
 * the appropriate handlers, including command responses,
 * frame-received notifications, and other notifications.
 */
static void iwl_rx_handle(struct iwl_trans *trans)
{
	struct iwl_rx_mem_buffer *rxb;
	struct iwl_rx_packet *pkt;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	struct iwl_tx_queue *txq = &trans_pcie->txq[trans->shrd->cmd_queue];
	struct iwl_device_cmd *cmd;
	u32 r, i;
	int reclaim;
	unsigned long flags;
	u8 fill_rx = 0;
	u32 count = 8;
	int total_empty;
	int index, cmd_index;

	/* uCode's read index (stored in shared DRAM) indicates the last Rx
	 * buffer that the driver may process (last buffer filled by ucode). */
	r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
	i = rxq->read;

	/* Rx interrupt, but nothing sent from uCode */
	if (i == r)
		IWL_DEBUG_RX(trans, "r = %d, i = %d\n", r, i);

	/* calculate total frames need to be restock after handling RX */
	total_empty = r - rxq->write_actual;
	if (total_empty < 0)
		total_empty += RX_QUEUE_SIZE;

	if (total_empty > (RX_QUEUE_SIZE / 2))
		fill_rx = 1;

	while (i != r) {
		int len, err;
		u16 txq_id, sequence;

		rxb = rxq->queue[i];

		/* If an RXB doesn't have a Rx queue slot associated with it,
		 * then a bug has been introduced in the queue refilling
		 * routines -- catch it here */
		if (WARN_ON(rxb == NULL)) {
			i = (i + 1) & RX_QUEUE_MASK;
			continue;
		}

		rxq->queue[i] = NULL;

		dma_unmap_page(bus(trans)->dev, rxb->page_dma,
			       PAGE_SIZE << hw_params(trans).rx_page_order,
			       DMA_FROM_DEVICE);
		pkt = rxb_addr(rxb);

		IWL_DEBUG_RX(trans, "r = %d, i = %d, %s, 0x%02x\n", r,
			i, get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);

		len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
		len += sizeof(u32); /* account for status word */
		trace_iwlwifi_dev_rx(priv(trans), pkt, len);

		/* Reclaim a command buffer only if this packet is a response
		 *   to a (driver-originated) command.
		 * If the packet (e.g. Rx frame) originated from uCode,
		 *   there is no command buffer to reclaim.
		 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
		 *   but apparently a few don't get set; catch them here. */
		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
			(pkt->hdr.cmd != REPLY_RX_PHY_CMD) &&
			(pkt->hdr.cmd != REPLY_RX) &&
			(pkt->hdr.cmd != REPLY_RX_MPDU_CMD) &&
			(pkt->hdr.cmd != REPLY_COMPRESSED_BA) &&
			(pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
			(pkt->hdr.cmd != REPLY_TX);

		sequence = le16_to_cpu(pkt->hdr.sequence);
		/* The sequence also encodes the originating Tx queue;
		 * without this the WARN below would test an uninitialized
		 * txq_id. */
		txq_id = SEQ_TO_QUEUE(sequence);
		index = SEQ_TO_INDEX(sequence);
		cmd_index = get_cmd_index(&txq->q, index);

		if (reclaim)
			cmd = txq->cmd[cmd_index];
		else
			cmd = NULL;

		/* warn if this is cmd response / notification and the uCode
		 * didn't set the SEQ_RX_FRAME for a frame that is
		 * uCode-originated */
		WARN(txq_id == trans->shrd->cmd_queue && reclaim == false &&
		     (!(pkt->hdr.sequence & SEQ_RX_FRAME)),
		     "reclaim is false, SEQ_RX_FRAME unset: %s\n",
		     get_cmd_string(pkt->hdr.cmd));

		err = iwl_rx_dispatch(priv(trans), rxb, cmd);

		/*
		 * XXX: After here, we should always check rxb->page
		 * against NULL before touching it or its virtual
		 * memory (pkt). Because some rx_handler might have
		 * already taken or freed the pages.
		 */

		if (reclaim) {
			/* Invoke any callbacks, transfer the buffer to caller,
			 * and fire off the (possibly) blocking
			 * iwl_trans_send_cmd()
			 * as we reclaim the driver command queue */
			if (rxb->page)
				iwl_tx_cmd_complete(trans, rxb, err);
			else
				IWL_WARN(trans, "Claim null rxb?\n");
		}

		/* Reuse the page if possible. For notification packets and
		 * SKBs that fail to Rx correctly, add them back into the
		 * rx_free list for reuse later. */
		spin_lock_irqsave(&rxq->lock, flags);
		if (rxb->page != NULL) {
			rxb->page_dma = dma_map_page(bus(trans)->dev, rxb->page,
				0, PAGE_SIZE <<
				    hw_params(trans).rx_page_order,
				DMA_FROM_DEVICE);
			list_add_tail(&rxb->list, &rxq->rx_free);
			rxq->free_count++;
		} else
			list_add_tail(&rxb->list, &rxq->rx_used);

		spin_unlock_irqrestore(&rxq->lock, flags);

		i = (i + 1) & RX_QUEUE_MASK;
		/* If there are a lot of unused frames,
		 * restock the Rx queue so ucode won't assert. */
		if (fill_rx) {
			count++;
			if (count >= 8) {
				rxq->read = i;
				iwlagn_rx_replenish_now(trans);
				count = 0;
			}
		}
	}

	/* Backtrack one entry */
	rxq->read = i;
	if (fill_rx)
		iwlagn_rx_replenish_now(trans);
	else
		iwlagn_rx_queue_restock(trans);
}

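/*
 * Illustrative sketch only (an editor's addition, not driver code): how
 * iwl_rx_handle() above maps a response back to a command slot. The
 * sequence field of the packet header packs the originating Tx queue id
 * and the index within that queue; SEQ_TO_QUEUE()/SEQ_TO_INDEX() are the
 * driver's accessors for the two halves.
 */
static inline void example_decode_sequence(__le16 sequence,
					   u16 *txq_id, int *index)
{
	u16 seq = le16_to_cpu(sequence);

	*txq_id = SEQ_TO_QUEUE(seq);	/* which Tx queue sent the command */
	*index = SEQ_TO_INDEX(seq);	/* slot within that queue */
}
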
static const char * const desc_lookup_text[] = {
	"OK",
	"FAIL",
	"BAD_PARAM",
	"BAD_CHECKSUM",
	"NMI_INTERRUPT_WDG",
	"SYSASSERT",
	"FATAL_ERROR",
	"BAD_COMMAND",
	"HW_ERROR_TUNE_LOCK",
	"HW_ERROR_TEMPERATURE",
	"ILLEGAL_CHAN_FREQ",
	"VCC_NOT_STABLE",
	"FH_ERROR",
	"NMI_INTERRUPT_HOST",
	"NMI_INTERRUPT_ACTION_PT",
	"NMI_INTERRUPT_UNKNOWN",
	"UCODE_VERSION_MISMATCH",
	"HW_ERROR_ABS_LOCK",
	"HW_ERROR_CAL_LOCK_FAIL",
	"NMI_INTERRUPT_INST_ACTION_PT",
	"NMI_INTERRUPT_DATA_ACTION_PT",
	"NMI_TRM_HW_ER",
	"NMI_INTERRUPT_TRM",
	"NMI_INTERRUPT_BREAK_POINT",
	"DEBUG_0",
	"DEBUG_1",
	"DEBUG_2",
	"DEBUG_3",
};

static struct { char *name; u8 num; } advanced_lookup[] = {
	{ "NMI_INTERRUPT_WDG", 0x34 },
	{ "SYSASSERT", 0x35 },
	{ "UCODE_VERSION_MISMATCH", 0x37 },
	{ "BAD_COMMAND", 0x38 },
	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
	{ "FATAL_ERROR", 0x3D },
	{ "NMI_TRM_HW_ERR", 0x46 },
	{ "NMI_INTERRUPT_TRM", 0x4C },
	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
	{ "NMI_INTERRUPT_HOST", 0x66 },
	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
	{ "ADVANCED_SYSASSERT", 0 },
};

static const char *desc_lookup(u32 num)
{
	int i;
	int max = ARRAY_SIZE(desc_lookup_text);

	if (num < max)
		return desc_lookup_text[num];

	max = ARRAY_SIZE(advanced_lookup) - 1;
	for (i = 0; i < max; i++) {
		if (advanced_lookup[i].num == num)
			break;
	}
	return advanced_lookup[i].name;
}

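/*
 * Illustrative usage only (an editor's addition, not driver code):
 * desc_lookup(5) indexes the plain table and returns "SYSASSERT", while
 * desc_lookup(0x38) falls through to the advanced table and returns
 * "BAD_COMMAND"; any id not found there ends up as "ADVANCED_SYSASSERT",
 * the terminating entry.
 */
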
#define ERROR_START_OFFSET  (1 * sizeof(u32))
#define ERROR_ELEM_SIZE     (7 * sizeof(u32))

static void iwl_dump_nic_error_log(struct iwl_trans *trans)
{
	u32 base;
	struct iwl_error_event_table table;
	struct iwl_priv *priv = priv(trans);
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	base = priv->device_pointers.error_event_table;
	if (priv->ucode_type == IWL_UCODE_INIT) {
		if (!base)
			base = priv->init_errlog_ptr;
	} else {
		if (!base)
			base = priv->inst_errlog_ptr;
	}

	if (!iwlagn_hw_valid_rtc_data_addr(base)) {
		IWL_ERR(trans,
			"Not valid error log pointer 0x%08X for %s uCode\n",
			base,
			(priv->ucode_type == IWL_UCODE_INIT)
					? "Init" : "RT");
		return;
	}

	iwl_read_targ_mem_words(bus(priv), base, &table, sizeof(table));

	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		IWL_ERR(trans, "Start IWL Error Log Dump:\n");
		IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
			trans->shrd->status, table.valid);
	}

	trans_pcie->isr_stats.err_code = table.error_id;

	trace_iwlwifi_dev_ucode_error(priv, table.error_id, table.tsf_low,
				      table.data1, table.data2, table.line,
				      table.blink1, table.blink2, table.ilink1,
				      table.ilink2, table.bcon_time, table.gp1,
				      table.gp2, table.gp3, table.ucode_ver,
				      table.hw_ver, table.brd_ver);
	IWL_ERR(trans, "0x%08X | %-28s\n", table.error_id,
		desc_lookup(table.error_id));
	IWL_ERR(trans, "0x%08X | uPc\n", table.pc);
	IWL_ERR(trans, "0x%08X | branchlink1\n", table.blink1);
	IWL_ERR(trans, "0x%08X | branchlink2\n", table.blink2);
	IWL_ERR(trans, "0x%08X | interruptlink1\n", table.ilink1);
	IWL_ERR(trans, "0x%08X | interruptlink2\n", table.ilink2);
	IWL_ERR(trans, "0x%08X | data1\n", table.data1);
	IWL_ERR(trans, "0x%08X | data2\n", table.data2);
	IWL_ERR(trans, "0x%08X | line\n", table.line);
	IWL_ERR(trans, "0x%08X | beacon time\n", table.bcon_time);
	IWL_ERR(trans, "0x%08X | tsf low\n", table.tsf_low);
	IWL_ERR(trans, "0x%08X | tsf hi\n", table.tsf_hi);
	IWL_ERR(trans, "0x%08X | time gp1\n", table.gp1);
	IWL_ERR(trans, "0x%08X | time gp2\n", table.gp2);
	IWL_ERR(trans, "0x%08X | time gp3\n", table.gp3);
	IWL_ERR(trans, "0x%08X | uCode version\n", table.ucode_ver);
	IWL_ERR(trans, "0x%08X | hw version\n", table.hw_ver);
	IWL_ERR(trans, "0x%08X | board version\n", table.brd_ver);
	IWL_ERR(trans, "0x%08X | hcmd\n", table.hcmd);
}

/**
 * iwl_irq_handle_error - called for HW or SW error interrupt from card
 */
static void iwl_irq_handle_error(struct iwl_trans *trans)
{
	struct iwl_priv *priv = priv(trans);
	/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
	if (priv->cfg->internal_wimax_coex &&
	    (!(iwl_read_prph(bus(trans), APMG_CLK_CTRL_REG) &
			APMS_CLK_VAL_MRB_FUNC_MODE) ||
	     (iwl_read_prph(bus(trans), APMG_PS_CTRL_REG) &
			APMG_PS_CTRL_VAL_RESET_REQ))) {
		/*
		 * Keep the restart process from trying to send host
		 * commands by clearing the ready bit.
		 */
		clear_bit(STATUS_READY, &trans->shrd->status);
		clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
		wake_up(&priv->shrd->wait_command_queue);
		IWL_ERR(trans, "RF is used by WiMAX\n");
		return;
	}

	IWL_ERR(trans, "Loaded firmware version: %s\n",
		priv->hw->wiphy->fw_version);

	iwl_dump_nic_error_log(trans);
	iwl_dump_csr(trans);
	iwl_dump_fh(trans, NULL, false);
	iwl_dump_nic_event_log(trans, false, NULL, false);
#ifdef CONFIG_IWLWIFI_DEBUG
	if (iwl_get_debug_level(trans->shrd) & IWL_DL_FW_ERRORS)
		iwl_print_rx_config_cmd(priv(trans), IWL_RXON_CTX_BSS);
#endif

	iwlagn_fw_error(priv, false);
}

#define EVENT_START_OFFSET  (4 * sizeof(u32))

/**
 * iwl_print_event_log - Dump error event log to syslog
 */
static int iwl_print_event_log(struct iwl_trans *trans, u32 start_idx,
			       u32 num_events, u32 mode,
			       int pos, char **buf, size_t bufsz)
{
	u32 i;
	u32 base;       /* SRAM byte address of event log header */
	u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
	u32 ptr;        /* SRAM byte address of log data */
	u32 ev, time, data; /* event log data */
	unsigned long reg_flags;
	struct iwl_priv *priv = priv(trans);

	if (num_events == 0)
		return pos;

	base = priv->device_pointers.log_event_table;
	if (priv->ucode_type == IWL_UCODE_INIT) {
		if (!base)
			base = priv->init_evtlog_ptr;
	} else {
		if (!base)
			base = priv->inst_evtlog_ptr;
	}

	if (mode == 0)
		event_size = 2 * sizeof(u32);
	else
		event_size = 3 * sizeof(u32);

	ptr = base + EVENT_START_OFFSET + (start_idx * event_size);

	/* Make sure device is powered up for SRAM reads */
	spin_lock_irqsave(&bus(trans)->reg_lock, reg_flags);
	iwl_grab_nic_access(bus(trans));

	/* Set starting address; reads will auto-increment */
	iwl_write32(bus(trans), HBUS_TARG_MEM_RADDR, ptr);
	rmb();

	/* "time" is actually "data" for mode 0 (no timestamp).
	 * place event id # at far right for easier visual parsing. */
	for (i = 0; i < num_events; i++) {
		ev = iwl_read32(bus(trans), HBUS_TARG_MEM_RDAT);
		time = iwl_read32(bus(trans), HBUS_TARG_MEM_RDAT);
		if (mode == 0) {
			/* data, ev */
			if (bufsz) {
				pos += scnprintf(*buf + pos, bufsz - pos,
						"EVT_LOG:0x%08x:%04u\n",
						time, ev);
			} else {
				trace_iwlwifi_dev_ucode_event(priv, 0,
					time, ev);
				IWL_ERR(trans, "EVT_LOG:0x%08x:%04u\n",
					time, ev);
			}
		} else {
			data = iwl_read32(bus(trans), HBUS_TARG_MEM_RDAT);
			if (bufsz) {
				pos += scnprintf(*buf + pos, bufsz - pos,
						"EVT_LOGT:%010u:0x%08x:%04u\n",
						time, data, ev);
			} else {
				IWL_ERR(trans, "EVT_LOGT:%010u:0x%08x:%04u\n",
					time, data, ev);
				trace_iwlwifi_dev_ucode_event(priv, time,
					data, ev);
			}
		}
	}

	/* Allow device to power down */
	iwl_release_nic_access(bus(trans));
	spin_unlock_irqrestore(&bus(trans)->reg_lock, reg_flags);
	return pos;
}

/**
 * iwl_print_last_event_logs - Dump the newest # of event log to syslog
 */
static int iwl_print_last_event_logs(struct iwl_trans *trans, u32 capacity,
				    u32 num_wraps, u32 next_entry,
				    u32 size, u32 mode,
				    int pos, char **buf, size_t bufsz)
{
	/*
	 * display the newest DEFAULT_LOG_ENTRIES entries
	 * i.e the entries just before the next one that uCode would fill.
	 */
	if (num_wraps) {
		if (next_entry < size) {
			pos = iwl_print_event_log(trans,
						capacity - (size - next_entry),
						size - next_entry, mode,
						pos, buf, bufsz);
			pos = iwl_print_event_log(trans, 0,
						  next_entry, mode,
						  pos, buf, bufsz);
		} else
			pos = iwl_print_event_log(trans, next_entry - size,
						  size, mode, pos, buf, bufsz);
	} else {
		if (next_entry < size) {
			pos = iwl_print_event_log(trans, 0, next_entry,
						  mode, pos, buf, bufsz);
		} else {
			pos = iwl_print_event_log(trans, next_entry - size,
						  size, mode, pos, buf, bufsz);
		}
	}
	return pos;
}

#define DEFAULT_DUMP_EVENT_LOG_ENTRIES (20)

int iwl_dump_nic_event_log(struct iwl_trans *trans, bool full_log,
			    char **buf, bool display)
{
	u32 base;       /* SRAM byte address of event log header */
	u32 capacity;   /* event log capacity in # entries */
	u32 mode;       /* 0 - no timestamp, 1 - timestamp recorded */
	u32 num_wraps;  /* # times uCode wrapped to top of log */
	u32 next_entry; /* index of next entry to be written by uCode */
	u32 size;       /* # entries that we'll print */
	u32 logsize;
	int pos = 0;
	size_t bufsz = 0;
	struct iwl_priv *priv = priv(trans);

	base = priv->device_pointers.log_event_table;
	if (priv->ucode_type == IWL_UCODE_INIT) {
		logsize = priv->init_evtlog_size;
		if (!base)
			base = priv->init_evtlog_ptr;
	} else {
		logsize = priv->inst_evtlog_size;
		if (!base)
			base = priv->inst_evtlog_ptr;
	}

	if (!iwlagn_hw_valid_rtc_data_addr(base)) {
		IWL_ERR(trans,
			"Invalid event log pointer 0x%08X for %s uCode\n",
			base,
			(priv->ucode_type == IWL_UCODE_INIT)
					? "Init" : "RT");
		return -EINVAL;
	}

	/* event log header */
	capacity = iwl_read_targ_mem(bus(trans), base);
	mode = iwl_read_targ_mem(bus(trans), base + (1 * sizeof(u32)));
	num_wraps = iwl_read_targ_mem(bus(trans), base + (2 * sizeof(u32)));
	next_entry = iwl_read_targ_mem(bus(trans), base + (3 * sizeof(u32)));

	if (capacity > logsize) {
		IWL_ERR(trans, "Log capacity %d is bogus, limit to %d "
			"entries\n", capacity, logsize);
		capacity = logsize;
	}

	if (next_entry > logsize) {
		IWL_ERR(trans, "Log write index %d is bogus, limit to %d\n",
			next_entry, logsize);
		next_entry = logsize;
	}

	size = num_wraps ? capacity : next_entry;

	/* bail out if nothing in log */
	if (size == 0) {
		IWL_ERR(trans, "Start IWL Event Log Dump: nothing in log\n");
		return pos;
	}

#ifdef CONFIG_IWLWIFI_DEBUG
	if (!(iwl_get_debug_level(trans->shrd) & IWL_DL_FW_ERRORS) && !full_log)
		size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
			? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
#else
	size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
		? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
#endif
	IWL_ERR(trans, "Start IWL Event Log Dump: display last %u entries\n",
		size);

#ifdef CONFIG_IWLWIFI_DEBUG
	if (display) {
		if (full_log)
			bufsz = capacity * 48;
		else
			bufsz = size * 48;
		*buf = kmalloc(bufsz, GFP_KERNEL);
		if (!*buf)
			return -ENOMEM;
	}
	if ((iwl_get_debug_level(trans->shrd) & IWL_DL_FW_ERRORS) || full_log) {
		/*
		 * if uCode has wrapped back to top of log,
		 * start at the oldest entry,
		 * i.e the next one that uCode would fill.
		 */
		if (num_wraps)
			pos = iwl_print_event_log(trans, next_entry,
						capacity - next_entry, mode,
						pos, buf, bufsz);
		/* (then/else) start at top of log */
		pos = iwl_print_event_log(trans, 0,
					  next_entry, mode, pos, buf, bufsz);
	} else
		pos = iwl_print_last_event_logs(trans, capacity, num_wraps,
						next_entry, size, mode,
						pos, buf, bufsz);
#else
	pos = iwl_print_last_event_logs(trans, capacity, num_wraps,
					next_entry, size, mode,
					pos, buf, bufsz);
#endif
	return pos;
}

/* tasklet for iwlagn interrupt */
void iwl_irq_tasklet(struct iwl_trans *trans)
{
	u32 inta = 0;
	u32 handled = 0;
	unsigned long flags;
	u32 i;
#ifdef CONFIG_IWLWIFI_DEBUG
	u32 inta_mask;
#endif

	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;


	spin_lock_irqsave(&trans->shrd->lock, flags);

	/* Ack/clear/reset pending uCode interrupts.
	 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
	 */
	/* There is a hardware bug in the interrupt mask function that some
	 * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
	 * they are disabled in the CSR_INT_MASK register. Furthermore the
	 * ICT interrupt handling mechanism has another bug that might cause
	 * these unmasked interrupts to fail to be detected. We work around
	 * the hardware bugs here by ACKing all the possible interrupts so
	 * that interrupt coalescing can still be achieved.
	 */
	iwl_write32(bus(trans), CSR_INT,
		trans_pcie->inta | ~trans_pcie->inta_mask);

	inta = trans_pcie->inta;

#ifdef CONFIG_IWLWIFI_DEBUG
	if (iwl_get_debug_level(trans->shrd) & IWL_DL_ISR) {
		/* just for debug */
		inta_mask = iwl_read32(bus(trans), CSR_INT_MASK);
		IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n ",
				inta, inta_mask);
	}
#endif

	spin_unlock_irqrestore(&trans->shrd->lock, flags);

	/* saved interrupt in inta variable now we can reset trans_pcie->inta */
	trans_pcie->inta = 0;

	/* Now service all interrupt bits discovered above. */
	if (inta & CSR_INT_BIT_HW_ERR) {
		IWL_ERR(trans, "Hardware error detected. Restarting.\n");

		/* Tell the device to stop sending interrupts */
		iwl_disable_interrupts(trans);

		isr_stats->hw++;
		iwl_irq_handle_error(trans);

		handled |= CSR_INT_BIT_HW_ERR;

		return;
	}

#ifdef CONFIG_IWLWIFI_DEBUG
	if (iwl_get_debug_level(trans->shrd) & (IWL_DL_ISR)) {
		/* NIC fires this, but we don't use it, redundant with WAKEUP */
		if (inta & CSR_INT_BIT_SCD) {
			IWL_DEBUG_ISR(trans, "Scheduler finished to transmit "
				      "the frame/frames.\n");
			isr_stats->sch++;
		}

		/* Alive notification via Rx interrupt will do the real work */
		if (inta & CSR_INT_BIT_ALIVE) {
			IWL_DEBUG_ISR(trans, "Alive interrupt\n");
			isr_stats->alive++;
		}
	}
#endif
	/* Safely ignore these bits for debug checks below */
	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);

	/* HW RF KILL switch toggled */
	if (inta & CSR_INT_BIT_RF_KILL) {
		int hw_rf_kill = 0;
		if (!(iwl_read32(bus(trans), CSR_GP_CNTRL) &
				CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
			hw_rf_kill = 1;

		IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
				hw_rf_kill ? "disable radio" : "enable radio");

		isr_stats->rfkill++;

		/* driver only loads ucode once setting the interface up.
		 * the driver allows loading the ucode even if the radio
		 * is killed. Hence update the killswitch state here. The
		 * rfkill handler will care about restarting if needed.
		 */
		if (!test_bit(STATUS_ALIVE, &trans->shrd->status)) {
			if (hw_rf_kill)
				set_bit(STATUS_RF_KILL_HW,
					&trans->shrd->status);
			else
				clear_bit(STATUS_RF_KILL_HW,
					  &trans->shrd->status);
			iwl_set_hw_rfkill_state(priv(trans), hw_rf_kill);
		}

		handled |= CSR_INT_BIT_RF_KILL;
	}

	/* Chip got too hot and stopped itself */
	if (inta & CSR_INT_BIT_CT_KILL) {
		IWL_ERR(trans, "Microcode CT kill error detected.\n");
		isr_stats->ctkill++;
		handled |= CSR_INT_BIT_CT_KILL;
	}

	/* Error detected by uCode */
	if (inta & CSR_INT_BIT_SW_ERR) {
		IWL_ERR(trans, "Microcode SW error detected. "
			" Restarting 0x%X.\n", inta);
		isr_stats->sw++;
		iwl_irq_handle_error(trans);
		handled |= CSR_INT_BIT_SW_ERR;
	}

	/* uCode wakes up after power-down sleep */
	if (inta & CSR_INT_BIT_WAKEUP) {
		IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
		iwl_rx_queue_update_write_ptr(trans, &trans_pcie->rxq);
		for (i = 0; i < hw_params(trans).max_txq_num; i++)
			iwl_txq_update_write_ptr(trans,
						 &trans_pcie->txq[i]);

		isr_stats->wakeup++;

		handled |= CSR_INT_BIT_WAKEUP;
	}

	/* All uCode command responses, including Tx command responses,
	 * Rx "responses" (frame-received notification), and other
	 * notifications from uCode come through here*/
	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
		    CSR_INT_BIT_RX_PERIODIC)) {
		IWL_DEBUG_ISR(trans, "Rx interrupt\n");
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
			handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
			iwl_write32(bus(trans), CSR_FH_INT_STATUS,
					CSR_FH_INT_RX_MASK);
		}
		if (inta & CSR_INT_BIT_RX_PERIODIC) {
			handled |= CSR_INT_BIT_RX_PERIODIC;
			iwl_write32(bus(trans),
				CSR_INT, CSR_INT_BIT_RX_PERIODIC);
		}
		/* Sending RX interrupt requires many steps to be done in the
		 * device:
		 * 1- write interrupt to current index in ICT table.
		 * 2- dma RX frame.
		 * 3- update RX shared data to indicate last write index.
		 * 4- send interrupt.
		 * This could lead to RX race, driver could receive RX interrupt
		 * but the shared data changes do not reflect this;
		 * periodic interrupt will detect any dangling Rx activity.
		 */

		/* Disable periodic interrupt; we use it as just a one-shot. */
		iwl_write8(bus(trans), CSR_INT_PERIODIC_REG,
			    CSR_INT_PERIODIC_DIS);
		iwl_rx_handle(trans);

		/*
		 * Enable periodic interrupt in 8 msec only if we received
		 * real RX interrupt (instead of just periodic int), to catch
		 * any dangling Rx interrupt. If it was just the periodic
		 * interrupt, there was no dangling Rx activity, and no need
		 * to extend the periodic interrupt; one-shot is enough.
		 */
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
			iwl_write8(bus(trans), CSR_INT_PERIODIC_REG,
				    CSR_INT_PERIODIC_ENA);

		isr_stats->rx++;
	}

	/* This "Tx" DMA channel is used only for loading uCode */
	if (inta & CSR_INT_BIT_FH_TX) {
		iwl_write32(bus(trans), CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
		isr_stats->tx++;
		handled |= CSR_INT_BIT_FH_TX;
		/* Wake up uCode load routine, now that load is complete */
		priv(trans)->ucode_write_complete = 1;
		wake_up(&trans->shrd->wait_command_queue);
	}

	if (inta & ~handled) {
		IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
		isr_stats->unhandled++;
	}

	if (inta & ~(trans_pcie->inta_mask)) {
		IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n",
			 inta & ~trans_pcie->inta_mask);
	}

	/* Re-enable all interrupts */
	/* only Re-enable if disabled by irq */
	if (test_bit(STATUS_INT_ENABLED, &trans->shrd->status))
		iwl_enable_interrupts(trans);
	/* Re-enable RF_KILL if it occurred */
	else if (handled & CSR_INT_BIT_RF_KILL)
		iwl_enable_rfkill_int(priv(trans));
}

/******************************************************************************
 *
 * ICT functions
 *
 ******************************************************************************/
#define ICT_COUNT (PAGE_SIZE/sizeof(u32))

/* Free dram table */
void iwl_free_isr_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans_pcie->ict_tbl_vir) {
		dma_free_coherent(bus(trans)->dev,
				  (sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
				  trans_pcie->ict_tbl_vir,
				  trans_pcie->ict_tbl_dma);
		trans_pcie->ict_tbl_vir = NULL;
		memset(&trans_pcie->ict_tbl_dma, 0,
			sizeof(trans_pcie->ict_tbl_dma));
		memset(&trans_pcie->aligned_ict_tbl_dma, 0,
			sizeof(trans_pcie->aligned_ict_tbl_dma));
	}
}

/* allocate the dram shared table; it is PAGE_SIZE aligned.
 * also reset all data related to ICT table interrupt.
 */
int iwl_alloc_isr_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	/* allocate shared data table */
	trans_pcie->ict_tbl_vir =
		dma_alloc_coherent(bus(trans)->dev,
				   (sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
				   &trans_pcie->ict_tbl_dma, GFP_KERNEL);
	if (!trans_pcie->ict_tbl_vir)
		return -ENOMEM;

	/* align table to PAGE_SIZE boundary */
	trans_pcie->aligned_ict_tbl_dma =
		ALIGN(trans_pcie->ict_tbl_dma, PAGE_SIZE);

	IWL_DEBUG_ISR(trans, "ict dma addr %Lx dma aligned %Lx diff %d\n",
			   (unsigned long long)trans_pcie->ict_tbl_dma,
			   (unsigned long long)trans_pcie->aligned_ict_tbl_dma,
			   (int)(trans_pcie->aligned_ict_tbl_dma -
			   trans_pcie->ict_tbl_dma));

	trans_pcie->ict_tbl = trans_pcie->ict_tbl_vir +
			  (trans_pcie->aligned_ict_tbl_dma -
			  trans_pcie->ict_tbl_dma);

	IWL_DEBUG_ISR(trans, "ict vir addr %p vir aligned %p diff %d\n",
			     trans_pcie->ict_tbl, trans_pcie->ict_tbl_vir,
			(int)(trans_pcie->aligned_ict_tbl_dma -
			    trans_pcie->ict_tbl_dma));

	/* reset table and index to all 0 */
	memset(trans_pcie->ict_tbl_vir, 0,
		(sizeof(u32) * ICT_COUNT) + PAGE_SIZE);
	trans_pcie->ict_index = 0;

	/* add periodic RX interrupt */
	trans_pcie->inta_mask |= CSR_INT_BIT_RX_PERIODIC;
	return 0;
}

/* Device is going up, inform it about using ICT interrupt table,
 * also we need to tell the driver to start using ICT interrupt.
 */
int iwl_reset_ict(struct iwl_trans *trans)
{
	u32 val;
	unsigned long flags;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!trans_pcie->ict_tbl_vir)
		return 0;

	spin_lock_irqsave(&trans->shrd->lock, flags);
	iwl_disable_interrupts(trans);

	memset(&trans_pcie->ict_tbl[0], 0, sizeof(u32) * ICT_COUNT);

	val = trans_pcie->aligned_ict_tbl_dma >> PAGE_SHIFT;

	val |= CSR_DRAM_INT_TBL_ENABLE;
	val |= CSR_DRAM_INIT_TBL_WRAP_CHECK;

	IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%X "
			"aligned dma address %Lx\n",
			val,
			(unsigned long long)trans_pcie->aligned_ict_tbl_dma);

	iwl_write32(bus(trans), CSR_DRAM_INT_TBL_REG, val);
	trans_pcie->use_ict = true;
	trans_pcie->ict_index = 0;
	iwl_write32(bus(trans), CSR_INT, trans_pcie->inta_mask);
	iwl_enable_interrupts(trans);
	spin_unlock_irqrestore(&trans->shrd->lock, flags);

	return 0;
}

/* Device is going down, disable ict interrupt usage */
void iwl_disable_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	unsigned long flags;

	spin_lock_irqsave(&trans->shrd->lock, flags);
	trans_pcie->use_ict = false;
	spin_unlock_irqrestore(&trans->shrd->lock, flags);
}

static irqreturn_t iwl_isr(int irq, void *data)
{
	struct iwl_trans *trans = data;
	struct iwl_trans_pcie *trans_pcie;
	u32 inta, inta_mask;
	unsigned long flags;
#ifdef CONFIG_IWLWIFI_DEBUG
	u32 inta_fh;
#endif
	if (!trans)
		return IRQ_NONE;

	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock_irqsave(&trans->shrd->lock, flags);

	/* Disable (but don't clear!) interrupts here to avoid
	 * back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the tasklet will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here. */
	inta_mask = iwl_read32(bus(trans), CSR_INT_MASK);  /* just for debug */
	iwl_write32(bus(trans), CSR_INT_MASK, 0x00000000);

	/* Discover which interrupts are active/pending */
	inta = iwl_read32(bus(trans), CSR_INT);

	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	if (!inta) {
		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
		goto none;
	}

	if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
		/* Hardware disappeared. It might have already raised
		 * an interrupt */
		IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
		goto unplugged;
	}

#ifdef CONFIG_IWLWIFI_DEBUG
	if (iwl_get_debug_level(trans->shrd) & (IWL_DL_ISR)) {
		inta_fh = iwl_read32(bus(trans), CSR_FH_INT_STATUS);
		IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled 0x%08x, "
			      "fh 0x%08x\n", inta, inta_mask, inta_fh);
	}
#endif

	trans_pcie->inta |= inta;
	/* iwl_irq_tasklet() will service interrupts and re-enable them */
	if (likely(inta))
		tasklet_schedule(&trans_pcie->irq_tasklet);
	else if (test_bit(STATUS_INT_ENABLED, &trans->shrd->status) &&
			!trans_pcie->inta)
		iwl_enable_interrupts(trans);

 unplugged:
	spin_unlock_irqrestore(&trans->shrd->lock, flags);
	return IRQ_HANDLED;

 none:
	/* re-enable interrupts here since we don't have anything to service. */
	/* only Re-enable if disabled by irq and no tasklet was scheduled. */
	if (test_bit(STATUS_INT_ENABLED, &trans->shrd->status) &&
		!trans_pcie->inta)
		iwl_enable_interrupts(trans);

	spin_unlock_irqrestore(&trans->shrd->lock, flags);
	return IRQ_NONE;
}

/* Interrupt handler using the ICT table. With this handler the driver
 * stops using the INTA register to learn about the device's interrupts
 * (reading that register is expensive): the device instead writes each
 * interrupt into the ICT DRAM table, increments its index, and then
 * fires the interrupt. The driver ORs all ICT table entries from the
 * current index up to the first zero entry; the result is the interrupt
 * to service. The driver then clears the consumed entries and updates
 * the index.
 */
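/*
 * Illustrative sketch only (an editor's addition, not driver code): the
 * packed ICT entry format handled inside iwl_isr_ict() below. An entry
 * carries bits 0-7 and 16-23 of the CSR_INT-style status in packed form,
 * and the h/w-bug workaround re-derives the Rx bit from bits 18/19.
 */
static inline u32 example_ict_val_to_inta(u32 val)
{
	if (val & 0xC0000)	/* bits 18/19 set => Rx bit must be set */
		val |= 0x8000;
	return (0xff & val) | ((0xff00 & val) << 16);
}
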
irqreturn_t iwl_isr_ict(int irq, void *data)
{
	struct iwl_trans *trans = data;
	struct iwl_trans_pcie *trans_pcie;
	u32 inta, inta_mask;
	u32 val = 0;
	unsigned long flags;

	if (!trans)
		return IRQ_NONE;

	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* dram interrupt table not set yet,
	 * use legacy interrupt.
	 */
	if (!trans_pcie->use_ict)
		return iwl_isr(irq, data);

	spin_lock_irqsave(&trans->shrd->lock, flags);

	/* Disable (but don't clear!) interrupts here to avoid
	 * back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the tasklet will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here.
	 */
	inta_mask = iwl_read32(bus(trans), CSR_INT_MASK);  /* just for debug */
	iwl_write32(bus(trans), CSR_INT_MASK, 0x00000000);


	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	if (!trans_pcie->ict_tbl[trans_pcie->ict_index]) {
		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
		goto none;
	}

	/* read all entries that are not 0, starting with ict_index */
	while (trans_pcie->ict_tbl[trans_pcie->ict_index]) {

		val |= le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
		IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
				trans_pcie->ict_index,
				le32_to_cpu(
				  trans_pcie->ict_tbl[trans_pcie->ict_index]));
		trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
		trans_pcie->ict_index =
			iwl_queue_inc_wrap(trans_pcie->ict_index, ICT_COUNT);

	}

	/* We should not get this value, just ignore it. */
	if (val == 0xffffffff)
		val = 0;

	/*
	 * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
	 * (bit 15 before shifting it to 31) to clear when using interrupt
	 * coalescing. fortunately, bits 18 and 19 stay set when this happens
	 * so we use them to decide on the real state of the Rx bit.
	 * In other words, bit 15 is set if bit 18 or bit 19 are set.
	 */
	if (val & 0xC0000)
		val |= 0x8000;

	inta = (0xff & val) | ((0xff00 & val) << 16);
	IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled 0x%08x ict 0x%08x\n",
			inta, inta_mask, val);

	inta &= trans_pcie->inta_mask;
	trans_pcie->inta |= inta;

	/* iwl_irq_tasklet() will service interrupts and re-enable them */
	if (likely(inta))
		tasklet_schedule(&trans_pcie->irq_tasklet);
	else if (test_bit(STATUS_INT_ENABLED, &trans->shrd->status) &&
		 !trans_pcie->inta) {
		/* Interrupts were disabled by this handler but no tasklet
		 * was scheduled, so there is nothing left to re-enable them;
		 * do it here. Otherwise the tasklet will enable them.
		 */
		iwl_enable_interrupts(trans);
	}

	spin_unlock_irqrestore(&trans->shrd->lock, flags);
	return IRQ_HANDLED;

 none:
	/* re-enable interrupts here since we don't have anything to service.
	 * only Re-enable if disabled by irq.
	 */
	if (test_bit(STATUS_INT_ENABLED, &trans->shrd->status) &&
	    !trans_pcie->inta)
		iwl_enable_interrupts(trans);

	spin_unlock_irqrestore(&trans->shrd->lock, flags);
	return IRQ_NONE;
}