/******************************************************************************
 *
 * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/gfp.h>

#include "iwl-prph.h"
#include "iwl-io.h"
#include "iwl-trans-pcie-int.h"
#include "iwl-op-mode.h"

#ifdef CONFIG_IWLWIFI_IDI
#include "iwl-amfh.h"
#endif
/******************************************************************************
 *
 * RX path functions
 *
 ******************************************************************************/
/*
 * Rx theory of operation
 *
 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
 * each of which points to a Receive Buffer to be filled by the NIC. These get
 * used not only for Rx frames, but for any command response or notification
 * from the NIC. The driver and NIC manage the Rx buffers by means
 * of indexes into the circular buffer.
 *
 * The host/firmware share two index registers for managing the Rx buffers.
 *
 * The READ index maps to the first position that the firmware may be writing
 * to -- the driver can read up to (but not including) this position and get
 * good data.
 * The READ index is managed by the firmware once the card is enabled.
 *
 * The WRITE index maps to the last position the driver has read from -- the
 * position preceding WRITE is the last slot the firmware can place a packet.
 *
 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
 * WRITE = READ.
 *
 * During initialization, the host sets up the READ queue position to the first
 * INDEX position, and WRITE to the last (READ - 1 wrapped).
 *
 * When the firmware places a packet in a buffer, it will advance the READ
 * index and fire the RX interrupt. The driver can then query the READ index
 * and process as many packets as possible, moving the WRITE index forward as
 * it resets the Rx queue buffers with new memory.
 *
 * The management in the driver is as follows:
 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
 *   iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
 *   to replenish the iwl->rxq->rx_free.
 * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the
 *   iwl->rxq is replenished and the READ INDEX is updated (updating the
 *   'processed' and 'read' driver indexes as well).
 * + A received packet is processed and handed to the kernel network stack,
 *   detached from the iwl->rxq. The driver 'processed' index is updated.
 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
 *   list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
 *   INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
 *   were enough free buffers and RX_STALLED is set it is cleared.
 *
 * Driver sequence:
 *
 * iwl_rx_queue_alloc()   Allocates rx_free
 * iwl_rx_replenish()     Replenishes rx_free list from rx_used, and calls
 *                        iwl_rx_queue_restock
 * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx
 *                        queue, updates firmware pointers, and updates
 *                        the WRITE index. If insufficient rx_free buffers
 *                        are available, schedules iwl_rx_replenish
 *
 * -- enable interrupts --
 * ISR - iwl_rx()         Detach iwl_rx_mem_buffers from pool up to the
 *                        READ INDEX, detaching the SKB from the pool.
 *                        Moves the packet buffer from queue to rx_used.
 *                        Calls iwl_rx_queue_restock to refill any empty
 *                        slots.
 */
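/*
 * Illustrative sketch, not driver code: the index arithmetic described
 * above, assuming RX_QUEUE_SIZE is a power of two and RX_QUEUE_MASK ==
 * RX_QUEUE_SIZE - 1. Indexes advance with a masked increment; the ring is
 * empty when WRITE == READ - 1 (mod size) and full when WRITE == READ.
 */
#if 0
static void example_rx_ring_indexes(u32 read, u32 write)
{
	bool empty = (write == ((read - 1) & RX_QUEUE_MASK));
	bool full = (write == read);

	write = (write + 1) & RX_QUEUE_MASK;	/* advance with wrap */
	(void)empty; (void)full; (void)write;
}
#endif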
/**
 * iwl_rx_queue_space - Return number of free slots available in queue.
 */
static int iwl_rx_queue_space(const struct iwl_rx_queue *q)
{
	int s = q->read - q->write;

	if (s <= 0)
		s += RX_QUEUE_SIZE;
	/* keep some buffer to not confuse full and empty queue */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}
/**
 * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
 */
void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
				   struct iwl_rx_queue *q)
{
	unsigned long flags;
	u32 reg;

	spin_lock_irqsave(&q->lock, flags);

	if (q->need_update == 0)
		goto exit_unlock;

	if (trans->cfg->base_params->shadow_reg_enable) {
		/* shadow register enabled */
		/* Device expects a multiple of 8 */
		q->write_actual = (q->write & ~0x7);
		iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, q->write_actual);
	} else {
		struct iwl_trans_pcie *trans_pcie =
			IWL_TRANS_GET_PCIE_TRANS(trans);

		/* If power-saving is in use, make sure device is awake */
		if (test_bit(STATUS_TPOWER_PMI, &trans_pcie->status)) {
			reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
				IWL_DEBUG_INFO(trans,
					"Rx queue requesting wakeup,"
					" GP1 = 0x%x\n", reg);
				iwl_set_bit(trans, CSR_GP_CNTRL,
					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
				goto exit_unlock;
			}

			q->write_actual = (q->write & ~0x7);
			iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
					   q->write_actual);
		} else {
			/* Else device is assumed to be awake */
			/* Device expects a multiple of 8 */
			q->write_actual = (q->write & ~0x7);
			iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
					   q->write_actual);
		}
	}
	q->need_update = 0;

 exit_unlock:
	spin_unlock_irqrestore(&q->lock, flags);
}
/**
 * iwlagn_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
 */
static inline __le32 iwlagn_dma_addr2rbd_ptr(dma_addr_t dma_addr)
{
	return cpu_to_le32((u32)(dma_addr >> 8));
}
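/*
 * Illustrative sketch, not driver code: the encoding above relies on the
 * constraints enforced in iwlagn_rx_allocate() below -- page DMA addresses
 * are at most 36 bits wide and 256-byte aligned -- so the low 8 bits that
 * the shift discards are always zero and bits [35:8] fit in a u32.
 */
#if 0
static void example_rbd_encoding(void)
{
	dma_addr_t dma_addr = 0x8abcdef00ULL;	/* 36 bits, 256-byte aligned */
	__le32 rbd = iwlagn_dma_addr2rbd_ptr(dma_addr);

	WARN_ON(le32_to_cpu(rbd) != 0x08abcdef);	/* bits [35:8] */
}
#endif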
/**
 * iwlagn_rx_queue_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' index forward to catch up with 'processed', and
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
static void iwlagn_rx_queue_restock(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	struct list_head *element;
	struct iwl_rx_mem_buffer *rxb;
	unsigned long flags;

	spin_lock_irqsave(&rxq->lock, flags);
	while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
		/* The overwritten rxb must be a used one */
		rxb = rxq->queue[rxq->write];
		BUG_ON(rxb && rxb->page);

		/* Get next free Rx buffer, remove from free list */
		element = rxq->rx_free.next;
		rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
		list_del(element);

		/* Point to Rx buffer via next RBD in circular buffer */
		rxq->bd[rxq->write] = iwlagn_dma_addr2rbd_ptr(rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock_irqrestore(&rxq->lock, flags);
	/* If the pre-allocated buffer pool is dropping low, schedule to
	 * refill it */
	if (rxq->free_count <= RX_LOW_WATERMARK)
		schedule_work(&trans_pcie->rx_replenish);

	/* If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8. */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock_irqsave(&rxq->lock, flags);
		rxq->need_update = 1;
		spin_unlock_irqrestore(&rxq->lock, flags);
		iwl_rx_queue_update_write_ptr(trans, rxq);
	}
}
/**
 * iwlagn_rx_allocate - Move all used packets from rx_used to rx_free
 *
 * When moving to rx_free a page is allocated for the slot.
 *
 * Also restock the Rx queue via iwlagn_rx_queue_restock.
 * This is called as a scheduled work item (except for during initialization).
 */
static void iwlagn_rx_allocate(struct iwl_trans *trans, gfp_t priority)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	struct list_head *element;
	struct iwl_rx_mem_buffer *rxb;
	struct page *page;
	unsigned long flags;
	gfp_t gfp_mask = priority;

	while (1) {
		spin_lock_irqsave(&rxq->lock, flags);
		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			return;
		}
		spin_unlock_irqrestore(&rxq->lock, flags);

		if (rxq->free_count > RX_LOW_WATERMARK)
			gfp_mask |= __GFP_NOWARN;

		if (trans_pcie->rx_page_order > 0)
			gfp_mask |= __GFP_COMP;

		/* Alloc a new receive buffer */
		page = alloc_pages(gfp_mask,
				   trans_pcie->rx_page_order);
		if (!page) {
			if (net_ratelimit())
				IWL_DEBUG_INFO(trans, "alloc_pages failed, "
					       "order: %d\n",
					       trans_pcie->rx_page_order);

			if ((rxq->free_count <= RX_LOW_WATERMARK) &&
			    net_ratelimit())
				IWL_CRIT(trans, "Failed to alloc_pages with %s."
					 " Only %u free buffers remaining.\n",
					 priority == GFP_ATOMIC ?
					 "GFP_ATOMIC" : "GFP_KERNEL",
					 rxq->free_count);
			/* We don't reschedule replenish work here -- we will
			 * call the restock method and if it still needs
			 * more buffers it will schedule replenish */
			return;
		}

		spin_lock_irqsave(&rxq->lock, flags);

		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}
		element = rxq->rx_used.next;
		rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
		list_del(element);

		spin_unlock_irqrestore(&rxq->lock, flags);

		BUG_ON(rxb->page);
		rxb->page = page;
		/* Get physical address of the RB */
		rxb->page_dma = dma_map_page(trans->dev, page, 0,
				PAGE_SIZE << trans_pcie->rx_page_order,
				DMA_FROM_DEVICE);
		/* dma address must be no more than 36 bits */
		BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
		/* and also 256 byte aligned! */
		BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));

		spin_lock_irqsave(&rxq->lock, flags);

		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;

		spin_unlock_irqrestore(&rxq->lock, flags);
	}
}
void iwlagn_rx_replenish(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	iwlagn_rx_allocate(trans, GFP_KERNEL);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwlagn_rx_queue_restock(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}

static void iwlagn_rx_replenish_now(struct iwl_trans *trans)
{
	iwlagn_rx_allocate(trans, GFP_ATOMIC);

	iwlagn_rx_queue_restock(trans);
}
void iwl_bg_rx_replenish(struct work_struct *data)
{
	struct iwl_trans_pcie *trans_pcie =
		container_of(data, struct iwl_trans_pcie, rx_replenish);

	iwlagn_rx_replenish(trans_pcie->trans);
}
static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
				struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
	unsigned long flags;
	bool page_stolen = false;
	int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
	u32 offset = 0;

	if (WARN_ON(!rxb))
		return;

	dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);

	while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
		struct iwl_rx_packet *pkt;
		struct iwl_device_cmd *cmd;
		u16 sequence;
		bool reclaim;
		int index, cmd_index, err, len;
		struct iwl_rx_cmd_buffer rxcb = {
			._offset = offset,
			._page = rxb->page,
			._page_stolen = false,
			.truesize = max_len,
		};

		pkt = rxb_addr(&rxcb);

		if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID))
			break;

		IWL_DEBUG_RX(trans, "cmd at offset %d: %s (0x%.2x)\n",
			     rxcb._offset,
			     trans_pcie_get_cmd_string(trans_pcie, pkt->hdr.cmd),
			     pkt->hdr.cmd);

		len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
		len += sizeof(u32); /* account for status word */
		trace_iwlwifi_dev_rx(trans->dev, pkt, len);

		/* Reclaim a command buffer only if this packet is a response
		 *   to a (driver-originated) command.
		 * If the packet (e.g. Rx frame) originated from uCode,
		 *   there is no command buffer to reclaim.
		 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
		 *   but apparently a few don't get set; catch them here. */
		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
		if (reclaim) {
			int i;

			for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
				if (trans_pcie->no_reclaim_cmds[i] ==
							pkt->hdr.cmd) {
					reclaim = false;
					break;
				}
			}
		}

		sequence = le16_to_cpu(pkt->hdr.sequence);
		index = SEQ_TO_INDEX(sequence);
		cmd_index = get_cmd_index(&txq->q, index);

		if (reclaim)
			cmd = txq->entries[cmd_index].cmd;
		else
			cmd = NULL;

		err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd);

		/*
		 * After here, we should always check rxcb._page_stolen,
		 * if it is true then one of the handlers took the page.
		 */

		if (reclaim) {
			/* Invoke any callbacks, transfer the buffer to caller,
			 * and fire off the (possibly) blocking
			 * iwl_trans_send_cmd()
			 * as we reclaim the driver command queue */
			if (!rxcb._page_stolen)
				iwl_tx_cmd_complete(trans, &rxcb, err);
			else
				IWL_WARN(trans, "Claim null rxb?\n");
		}

		page_stolen |= rxcb._page_stolen;
		offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
	}

	/* page was stolen from us -- free our reference */
	if (page_stolen) {
		__free_pages(rxb->page, trans_pcie->rx_page_order);
		rxb->page = NULL;
	}

	/* Reuse the page if possible. For notification packets and
	 * SKBs that fail to Rx correctly, add them back into the
	 * rx_free list for reuse later. */
	spin_lock_irqsave(&rxq->lock, flags);
	if (rxb->page != NULL) {
		rxb->page_dma =
			dma_map_page(trans->dev, rxb->page, 0,
				     PAGE_SIZE << trans_pcie->rx_page_order,
				     DMA_FROM_DEVICE);
		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;
	} else
		list_add_tail(&rxb->list, &rxq->rx_used);
	spin_unlock_irqrestore(&rxq->lock, flags);
}
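/*
 * Illustrative sketch, not driver code: the packet walk performed above.
 * Each packet in a receive buffer is a status word plus an iwl_rx_packet,
 * padded to FH_RSCSR_FRAME_ALIGN, so the next packet starts at the aligned
 * offset just past the current one.
 */
#if 0
static u32 example_next_packet_offset(u32 offset, u32 len_n_flags)
{
	u32 len = len_n_flags & FH_RSCSR_FRAME_SIZE_MSK;

	len += sizeof(u32);	/* account for the status word */
	return offset + ALIGN(len, FH_RSCSR_FRAME_ALIGN);
}
#endif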
/**
 * iwl_rx_handle - Main entry function for receiving responses from uCode
 *
 * Uses the priv->rx_handlers callback function array to invoke
 * the appropriate handlers, including command responses,
 * frame-received notifications, and other notifications.
 */
static void iwl_rx_handle(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	u32 r, i;
	u8 fill_rx = 0;
	u32 count = 8;
	int total_empty;

	/* uCode's read index (stored in shared DRAM) indicates the last Rx
	 * buffer that the driver may process (last buffer filled by ucode). */
	r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
	i = rxq->read;

	/* Rx interrupt, but nothing sent from uCode */
	if (i == r)
		IWL_DEBUG_RX(trans, "r = %d, i = %d\n", r, i);

	/* calculate the number of frames that need to be restocked after
	 * handling RX */
	total_empty = r - rxq->write_actual;
	if (total_empty < 0)
		total_empty += RX_QUEUE_SIZE;

	if (total_empty > (RX_QUEUE_SIZE / 2))
		fill_rx = 1;

	while (i != r) {
		struct iwl_rx_mem_buffer *rxb;

		rxb = rxq->queue[i];
		rxq->queue[i] = NULL;

		IWL_DEBUG_RX(trans, "rxbuf: r = %d, i = %d (%p)\n", r, i, rxb);
		iwl_rx_handle_rxbuf(trans, rxb);

		i = (i + 1) & RX_QUEUE_MASK;
		/* If there are a lot of unused frames,
		 * restock the Rx queue so ucode won't assert. */
		if (fill_rx) {
			count++;
			if (count >= 8) {
				rxq->read = i;
				iwlagn_rx_replenish_now(trans);
				count = 0;
			}
		}
	}

	/* Backtrack one entry */
	rxq->read = i;
	if (fill_rx)
		iwlagn_rx_replenish_now(trans);
	else
		iwlagn_rx_queue_restock(trans);
}
/**
 * iwl_irq_handle_error - called for HW or SW error interrupt from card
 */
static void iwl_irq_handle_error(struct iwl_trans *trans)
{
	/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
	if (trans->cfg->internal_wimax_coex &&
	    (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
			     APMS_CLK_VAL_MRB_FUNC_MODE) ||
	     (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
			    APMG_PS_CTRL_VAL_RESET_REQ))) {
		struct iwl_trans_pcie *trans_pcie;

		trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
		clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
		iwl_op_mode_wimax_active(trans->op_mode);
		wake_up(&trans->wait_command_queue);
		return;
	}

	iwl_dump_csr(trans);
	iwl_dump_fh(trans, NULL, false);

	iwl_op_mode_nic_error(trans->op_mode);
}
/* tasklet for iwlagn interrupt */
void iwl_irq_tasklet(struct iwl_trans *trans)
{
	u32 inta = 0;
	u32 handled = 0;
	unsigned long flags;
	u32 i;
#ifdef CONFIG_IWLWIFI_DEBUG
	u32 inta_mask;
#endif

	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	/* Ack/clear/reset pending uCode interrupts.
	 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
	 * and will clear only when CSR_FH_INT_STATUS gets cleared. */
	/* There is a hardware bug in the interrupt mask function that some
	 * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
	 * they are disabled in the CSR_INT_MASK register. Furthermore the
	 * ICT interrupt handling mechanism has another bug that might cause
	 * these unmasked interrupts to fail to be detected. We work around
	 * the hardware bugs here by ACKing all the possible interrupts so
	 * that interrupt coalescing can still be achieved.
	 */
	iwl_write32(trans, CSR_INT,
		    trans_pcie->inta | ~trans_pcie->inta_mask);

	inta = trans_pcie->inta;

#ifdef CONFIG_IWLWIFI_DEBUG
	if (iwl_have_debug_level(IWL_DL_ISR)) {
		/* just for debug */
		inta_mask = iwl_read32(trans, CSR_INT_MASK);
		IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
			      inta, inta_mask);
	}
#endif

	/* saved interrupt in inta variable now we can reset trans_pcie->inta */
	trans_pcie->inta = 0;

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
	/* Now service all interrupt bits discovered above. */
	if (inta & CSR_INT_BIT_HW_ERR) {
		IWL_ERR(trans, "Hardware error detected. Restarting.\n");

		/* Tell the device to stop sending interrupts */
		iwl_disable_interrupts(trans);

		isr_stats->hw++;
		iwl_irq_handle_error(trans);

		handled |= CSR_INT_BIT_HW_ERR;

		return;
	}

#ifdef CONFIG_IWLWIFI_DEBUG
	if (iwl_have_debug_level(IWL_DL_ISR)) {
		/* NIC fires this, but we don't use it, redundant with WAKEUP */
		if (inta & CSR_INT_BIT_SCD) {
			IWL_DEBUG_ISR(trans, "Scheduler finished transmitting "
				      "the frame/frames.\n");
			isr_stats->sch++;
		}

		/* Alive notification via Rx interrupt will do the real work */
		if (inta & CSR_INT_BIT_ALIVE) {
			IWL_DEBUG_ISR(trans, "Alive interrupt\n");
			isr_stats->alive++;
		}
	}
#endif
	/* Safely ignore these bits for debug checks below */
	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
	/* HW RF KILL switch toggled */
	if (inta & CSR_INT_BIT_RF_KILL) {
		bool hw_rfkill;

		hw_rfkill = iwl_is_rfkill_set(trans);
		IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
			 hw_rfkill ? "disable radio" : "enable radio");

		isr_stats->rfkill++;

		iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);

		handled |= CSR_INT_BIT_RF_KILL;
	}

	/* Chip got too hot and stopped itself */
	if (inta & CSR_INT_BIT_CT_KILL) {
		IWL_ERR(trans, "Microcode CT kill error detected.\n");
		isr_stats->ctkill++;
		handled |= CSR_INT_BIT_CT_KILL;
	}

	/* Error detected by uCode */
	if (inta & CSR_INT_BIT_SW_ERR) {
		IWL_ERR(trans, "Microcode SW error detected. "
			"Restarting 0x%X.\n", inta);
		isr_stats->sw++;
		iwl_irq_handle_error(trans);
		handled |= CSR_INT_BIT_SW_ERR;
	}
	/* uCode wakes up after power-down sleep */
	if (inta & CSR_INT_BIT_WAKEUP) {
		IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
		iwl_rx_queue_update_write_ptr(trans, &trans_pcie->rxq);
		for (i = 0; i < trans->cfg->base_params->num_of_queues; i++)
			iwl_txq_update_write_ptr(trans,
						 &trans_pcie->txq[i]);

		isr_stats->wakeup++;

		handled |= CSR_INT_BIT_WAKEUP;
	}
	/* All uCode command responses, including Tx command responses,
	 * Rx "responses" (frame-received notification), and other
	 * notifications from uCode come through here. */
	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
		    CSR_INT_BIT_RX_PERIODIC)) {
		IWL_DEBUG_ISR(trans, "Rx interrupt\n");
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
			handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
			iwl_write32(trans, CSR_FH_INT_STATUS,
				    CSR_FH_INT_RX_MASK);
		}
		if (inta & CSR_INT_BIT_RX_PERIODIC) {
			handled |= CSR_INT_BIT_RX_PERIODIC;
			iwl_write32(trans,
				    CSR_INT, CSR_INT_BIT_RX_PERIODIC);
		}
		/* Sending an RX interrupt requires many steps to be done
		 * in the device:
		 * 1- write interrupt to current index in ICT table.
		 * 2- dma RX frame.
		 * 3- update RX shared data to indicate last write index.
		 * 4- send interrupt.
		 * This could lead to an RX race: the driver could receive
		 * the RX interrupt before the shared data reflects the
		 * changes; the periodic interrupt will detect any dangling
		 * Rx activity.
		 */

		/* Disable periodic interrupt; we use it as just a one-shot. */
		iwl_write8(trans, CSR_INT_PERIODIC_REG,
			   CSR_INT_PERIODIC_DIS);
#ifdef CONFIG_IWLWIFI_IDI
		iwl_amfh_rx_handler();
#else
		iwl_rx_handle(trans);
#endif
		/*
		 * Enable periodic interrupt in 8 msec only if we received
		 * a real RX interrupt (instead of just a periodic int), to
		 * catch any dangling Rx interrupt. If it was just the
		 * periodic interrupt, there was no dangling Rx activity,
		 * and no need to extend the periodic interrupt; one-shot
		 * is enough.
		 */
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
			iwl_write8(trans, CSR_INT_PERIODIC_REG,
				   CSR_INT_PERIODIC_ENA);

		isr_stats->rx++;
	}
741 /* This "Tx" DMA channel is used only for loading uCode */
742 if (inta
& CSR_INT_BIT_FH_TX
) {
743 iwl_write32(trans
, CSR_FH_INT_STATUS
, CSR_FH_INT_TX_MASK
);
744 IWL_DEBUG_ISR(trans
, "uCode load interrupt\n");
746 handled
|= CSR_INT_BIT_FH_TX
;
747 /* Wake up uCode load routine, now that load is complete */
748 trans_pcie
->ucode_write_complete
= true;
749 wake_up(&trans_pcie
->ucode_write_waitq
);
752 if (inta
& ~handled
) {
753 IWL_ERR(trans
, "Unhandled INTA bits 0x%08x\n", inta
& ~handled
);
754 isr_stats
->unhandled
++;
757 if (inta
& ~(trans_pcie
->inta_mask
)) {
758 IWL_WARN(trans
, "Disabled INTA bits 0x%08x were pending\n",
759 inta
& ~trans_pcie
->inta_mask
);
762 /* Re-enable all interrupts */
763 /* only Re-enable if disabled by irq */
764 if (test_bit(STATUS_INT_ENABLED
, &trans_pcie
->status
))
765 iwl_enable_interrupts(trans
);
766 /* Re-enable RF_KILL if it occurred */
767 else if (handled
& CSR_INT_BIT_RF_KILL
)
768 iwl_enable_rfkill_int(trans
);
/******************************************************************************
 *
 * ICT functions
 *
 ******************************************************************************/

/* a device (PCI-E) page is 4096 bytes long */
#define ICT_SHIFT	12
#define ICT_SIZE	(1 << ICT_SHIFT)
#define ICT_COUNT	(ICT_SIZE / sizeof(u32))
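/*
 * Illustrative sketch, not driver code: with the definitions above, the
 * ICT table is one 4096-byte device page holding ICT_COUNT = 1024
 * little-endian u32 entries, and its DMA base must be ICT_SIZE-aligned so
 * that (ict_tbl_dma >> ICT_SHIFT) can be programmed into the DRAM
 * interrupt table register.
 */
#if 0
static void example_ict_geometry(void)
{
	BUILD_BUG_ON(ICT_SIZE != 4096);
	BUILD_BUG_ON(ICT_COUNT != 1024);
}
#endif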
/* Free dram table */
void iwl_free_isr_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans_pcie->ict_tbl) {
		dma_free_coherent(trans->dev, ICT_SIZE,
				  trans_pcie->ict_tbl,
				  trans_pcie->ict_tbl_dma);
		trans_pcie->ict_tbl = NULL;
		trans_pcie->ict_tbl_dma = 0;
	}
}
/*
 * allocate dram shared table, it is an aligned memory
 * block of ICT_SIZE.
 * also reset all data related to ICT table interrupt.
 */
int iwl_alloc_isr_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->ict_tbl =
		dma_alloc_coherent(trans->dev, ICT_SIZE,
				   &trans_pcie->ict_tbl_dma,
				   GFP_KERNEL);
	if (!trans_pcie->ict_tbl)
		return -ENOMEM;

	/* just an API sanity check ... it is guaranteed to be aligned */
	if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
		iwl_free_isr_ict(trans);
		return -EINVAL;
	}

	IWL_DEBUG_ISR(trans, "ict dma addr %Lx\n",
		      (unsigned long long)trans_pcie->ict_tbl_dma);

	IWL_DEBUG_ISR(trans, "ict vir addr %p\n", trans_pcie->ict_tbl);

	/* reset table and index to all 0 */
	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
	trans_pcie->ict_index = 0;

	/* add periodic RX interrupt */
	trans_pcie->inta_mask |= CSR_INT_BIT_RX_PERIODIC;
	return 0;
}
/* Device is going up: inform it about using the ICT interrupt table,
 * also we need to tell the driver to start using ICT interrupt.
 */
void iwl_reset_ict(struct iwl_trans *trans)
{
	u32 val;
	unsigned long flags;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!trans_pcie->ict_tbl)
		return;

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_disable_interrupts(trans);

	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);

	val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;

	val |= CSR_DRAM_INT_TBL_ENABLE;
	val |= CSR_DRAM_INIT_TBL_WRAP_CHECK;

	IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG = 0x%x\n", val);

	iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val);
	trans_pcie->use_ict = true;
	trans_pcie->ict_index = 0;
	iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
	iwl_enable_interrupts(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}
/* Device is going down, disable ICT interrupt usage */
void iwl_disable_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	trans_pcie->use_ict = false;
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}
static irqreturn_t iwl_isr(int irq, void *data)
{
	struct iwl_trans *trans = data;
	struct iwl_trans_pcie *trans_pcie;
	u32 inta, inta_mask;
	unsigned long flags;
#ifdef CONFIG_IWLWIFI_DEBUG
	u32 inta_fh;
#endif

	if (!trans)
		return IRQ_NONE;

	trace_iwlwifi_dev_irq(trans->dev);

	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	/* Disable (but don't clear!) interrupts here to avoid
	 * back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the tasklet will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here. */
	inta_mask = iwl_read32(trans, CSR_INT_MASK);  /* just for debug */
	iwl_write32(trans, CSR_INT_MASK, 0x00000000);

	/* Discover which interrupts are active/pending */
	inta = iwl_read32(trans, CSR_INT);

	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	if (!inta) {
		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
		goto none;
	}

	if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
		/* Hardware disappeared. It might have already raised
		 * an interrupt */
		IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
		goto unplugged;
	}

#ifdef CONFIG_IWLWIFI_DEBUG
	if (iwl_have_debug_level(IWL_DL_ISR)) {
		inta_fh = iwl_read32(trans, CSR_FH_INT_STATUS);
		IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled 0x%08x, "
			      "fh 0x%08x\n", inta, inta_mask, inta_fh);
	}
#endif

	trans_pcie->inta |= inta;
	/* iwl_irq_tasklet() will service interrupts and re-enable them */
	if (likely(inta))
		tasklet_schedule(&trans_pcie->irq_tasklet);
	else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
		 !trans_pcie->inta)
		iwl_enable_interrupts(trans);

 unplugged:
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
	return IRQ_HANDLED;

 none:
	/* re-enable interrupts here since we don't have anything to service. */
	/* only re-enable if disabled by irq and no tasklet was scheduled. */
	if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
	    !trans_pcie->inta)
		iwl_enable_interrupts(trans);

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
	return IRQ_NONE;
}
/*
 * Interrupt handler using the ICT table. With this handler the driver stops
 * reading the INTA register directly, which is expensive. Instead, the
 * device writes interrupt causes into the ICT DRAM table, increments its
 * index and fires the interrupt. The driver ORs all ICT table entries from
 * the current index up to the first zero entry; the result is the interrupt
 * mask to service. The driver then sets the consumed entries back to 0 and
 * updates the index.
 */
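/*
 * Illustrative sketch, not driver code: the drain loop described above,
 * assuming a zeroed table of ICT_COUNT little-endian entries in which the
 * device never writes a zero value for a real interrupt.
 */
#if 0
static u32 example_ict_drain(__le32 *tbl, u32 *index)
{
	u32 val = 0;

	while (le32_to_cpu(tbl[*index])) {
		val |= le32_to_cpu(tbl[*index]);
		tbl[*index] = 0;			/* consume entry */
		*index = (*index + 1) % ICT_COUNT;	/* wrap */
	}
	return val;
}
#endif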
irqreturn_t iwl_isr_ict(int irq, void *data)
{
	struct iwl_trans *trans = data;
	struct iwl_trans_pcie *trans_pcie;
	u32 inta, inta_mask;
	u32 val = 0;
	u32 read;
	unsigned long flags;

	if (!trans)
		return IRQ_NONE;

	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* dram interrupt table not set yet,
	 * use legacy interrupt.
	 */
	if (!trans_pcie->use_ict)
		return iwl_isr(irq, data);

	trace_iwlwifi_dev_irq(trans->dev);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	/* Disable (but don't clear!) interrupts here to avoid
	 * back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the tasklet will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here.
	 */
	inta_mask = iwl_read32(trans, CSR_INT_MASK);  /* just for debug */
	iwl_write32(trans, CSR_INT_MASK, 0x00000000);

	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
	trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
	if (!read) {
		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
		goto none;
	}

	/*
	 * Collect all entries up to the first 0, starting from ict_index;
	 * note we already read at ict_index.
	 */
	do {
		val |= read;
		IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
			      trans_pcie->ict_index, read);
		trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
		trans_pcie->ict_index =
			iwl_queue_inc_wrap(trans_pcie->ict_index, ICT_COUNT);

		read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
		trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
					   read);
	} while (read);

	/* We should not get this value, just ignore it. */
	if (val == 0xffffffff)
		val = 0;

	/*
	 * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
	 * (bit 15 before shifting it to 31) to clear when using interrupt
	 * coalescing. fortunately, bits 18 and 19 stay set when this happens
	 * so we use them to decide on the real state of the Rx bit.
	 * In other words, bit 15 is set if bit 18 or bit 19 are set.
	 */
	if (val & 0xC0000)
		val |= 0x8000;

	inta = (0xff & val) | ((0xff00 & val) << 16);
	IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled 0x%08x ict 0x%08x\n",
		      inta, inta_mask, val);

	inta &= trans_pcie->inta_mask;
	trans_pcie->inta |= inta;

	/* iwl_irq_tasklet() will service interrupts and re-enable them */
	if (likely(inta))
		tasklet_schedule(&trans_pcie->irq_tasklet);
	else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
		 !trans_pcie->inta) {
		/* Interrupts were disabled by this handler and no tasklet
		 * was scheduled, so no one else will re-enable them; do it
		 * here.
		 */
		iwl_enable_interrupts(trans);
	}

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
	return IRQ_HANDLED;

 none:
	/* re-enable interrupts here since we don't have anything to service.
	 * only re-enable if disabled by irq.
	 */
	if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
	    !trans_pcie->inta)
		iwl_enable_interrupts(trans);

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
	return IRQ_NONE;
}