/******************************************************************************
 *
 * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/gfp.h>

#include "iwl-prph.h"
#include "iwl-io.h"
#include "internal.h"
#include "iwl-op-mode.h"

/******************************************************************************
 *
 * RX path functions
 *
 ******************************************************************************/
/*
 * Rx theory of operation
 *
 * The driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
 * each of which points to a Receive Buffer to be filled by the NIC. These get
 * used not only for Rx frames, but for any command response or notification
 * from the NIC. The driver and NIC manage the Rx buffers by means
 * of indexes into the circular buffer.
 *
 * Rx Queue Indexes
 * The host/firmware share two index registers for managing the Rx buffers.
 *
 * The READ index maps to the first position that the firmware may be writing
 * to -- the driver can read up to (but not including) this position and get
 * good data.
 * The READ index is managed by the firmware once the card is enabled.
 *
 * The WRITE index maps to the last position the driver has read from -- the
 * position preceding WRITE is the last slot the firmware can place a packet.
 *
 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
 * WRITE = READ.
 *
 * During initialization, the host sets up the READ queue position to the first
 * INDEX position, and WRITE to the last (READ - 1 wrapped).
 *
 * When the firmware places a packet in a buffer, it will advance the READ
 * index and fire the RX interrupt. The driver can then query the READ index
 * and process as many packets as possible, moving the WRITE index forward as
 * it resets the Rx queue buffers with new memory.
 *
 * The management in the driver is as follows:
 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
 *   iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
 *   to replenish iwl->rxq->rx_free.
 * + In iwl_pcie_rx_replenish (scheduled), if 'processed' != 'read' then
 *   iwl->rxq is replenished and the READ INDEX is updated (updating the
 *   'processed' and 'read' driver indexes as well).
 * + A received packet is processed and handed to the kernel network stack,
 *   detached from the iwl->rxq. The driver's 'processed' index is updated.
 * + The Host/Firmware iwl->rxq is replenished at irq thread time from the
 *   rx_free list. If there are no allocated buffers in iwl->rxq->rx_free,
 *   the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
 *   If there were enough free buffers and RX_STALLED is set, it is cleared.
 *
 *
 * Driver sequence:
 *
 * iwl_pcie_rx_alloc()        Allocates rx_free
 * iwl_pcie_rx_replenish()    Replenishes rx_free list from rx_used, and calls
 *                            iwl_pcie_rxq_restock
 * iwl_pcie_rxq_restock()     Moves available buffers from rx_free into Rx
 *                            queue, updates firmware pointers, and updates
 *                            the WRITE index. If insufficient rx_free buffers
 *                            are available, schedules iwl_pcie_rx_replenish
 *
 * -- enable interrupts --
 * irq thread - iwl_pcie_rx_handle()
 *                            Detaches iwl_rx_mem_buffers from the pool up to
 *                            the READ INDEX, detaching the SKB from the pool.
 *                            Moves the packet buffer from queue to rx_used.
 *                            Calls iwl_pcie_rxq_restock to refill any empty
 *                            slots.
 * ...
 *
 */
/*
 * iwl_rxq_space - Return number of free slots available in queue.
 */
static int iwl_rxq_space(const struct iwl_rxq *q)
{
	int s = q->read - q->write;
	if (s <= 0)
		s += RX_QUEUE_SIZE;
	/* keep some buffer to not confuse full and empty queue */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}
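/*
 * Illustrative example (RX_QUEUE_SIZE is 256): with q->read = 10 and
 * q->write = 250, s = 10 - 250 + 256 - 2 = 14 free slots. The "- 2"
 * guard above is what keeps a completely full queue distinguishable
 * from an empty one.
 */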
/*
 * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
 */
static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
{
	return cpu_to_le32((u32)(dma_addr >> 8));
}
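/*
 * Example (illustrative values): receive buffers are 256-byte aligned and
 * their DMA addresses fit in 36 bits (see the BUG_ON checks in
 * iwl_pcie_rxq_alloc_rbs), so a page_dma of 0x312345600 is stored in the
 * RBD as 0x312345600 >> 8 = 0x03123456, which fits the __le32 descriptor.
 */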
/*
 * iwl_pcie_rx_stop - stops the Rx DMA
 */
int iwl_pcie_rx_stop(struct iwl_trans *trans)
{
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
				   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
}
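/*
 * Note: the return value is taken directly from iwl_poll_direct_bit; a
 * negative value means the Rx DMA channel did not report idle within the
 * poll timeout (the 1000 passed above).
 */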
/*
 * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
 */
static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_rxq *q)
{
	unsigned long flags;
	u32 reg;

	spin_lock_irqsave(&q->lock, flags);

	if (q->need_update == 0)
		goto exit_unlock;

	if (trans->cfg->base_params->shadow_reg_enable) {
		/* shadow register enabled */
		/* Device expects a multiple of 8 */
		q->write_actual = (q->write & ~0x7);
		iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, q->write_actual);
	} else {
		struct iwl_trans_pcie *trans_pcie =
			IWL_TRANS_GET_PCIE_TRANS(trans);

		/* If power-saving is in use, make sure device is awake */
		if (test_bit(STATUS_TPOWER_PMI, &trans_pcie->status)) {
			reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
				IWL_DEBUG_INFO(trans,
					       "Rx queue requesting wakeup, GP1 = 0x%x\n",
					       reg);
				iwl_set_bit(trans, CSR_GP_CNTRL,
					    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
				goto exit_unlock;
			}

			q->write_actual = (q->write & ~0x7);
			iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
					   q->write_actual);

		/* Else device is assumed to be awake */
		} else {
			/* Device expects a multiple of 8 */
			q->write_actual = (q->write & ~0x7);
			iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
					   q->write_actual);
		}
	}
	q->need_update = 0;

 exit_unlock:
	spin_unlock_irqrestore(&q->lock, flags);
}
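/*
 * Illustrative note: because of the "& ~0x7" rounding above, a q->write of
 * 13 results in write_actual = 8 being handed to the device; slots 8..12
 * are only advertised once write crosses the next multiple of 8.
 */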
/*
 * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' index forward to catch up with 'processed', and
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct iwl_rx_mem_buffer *rxb;
	unsigned long flags;

	/*
	 * If the device isn't enabled - there is no need to try to add
	 * buffers. This can happen when we stop the device and still have
	 * an interrupt pending. We stop the APM before we sync the
	 * interrupts because we have to (see comment there). On the other
	 * hand, since the APM is stopped, we cannot access the HW (in
	 * particular not prph). So don't try to restock if the APM has
	 * already been stopped.
	 */
	if (!test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status))
		return;

	spin_lock_irqsave(&rxq->lock, flags);
	while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
		/* The overwritten rxb must be a used one */
		rxb = rxq->queue[rxq->write];
		BUG_ON(rxb && rxb->page);

		/* Get next free Rx buffer, remove from free list */
		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);

		/* Point to Rx buffer via next RBD in circular buffer */
		rxq->bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock_irqrestore(&rxq->lock, flags);
	/* If the pre-allocated buffer pool is dropping low, schedule to
	 * refill it */
	if (rxq->free_count <= RX_LOW_WATERMARK)
		schedule_work(&trans_pcie->rx_replenish);

	/* If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8. */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock_irqsave(&rxq->lock, flags);
		rxq->need_update = 1;
		spin_unlock_irqrestore(&rxq->lock, flags);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
	}
}
/*
 * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
 *
 * A used RBD is an Rx buffer that has been given to the stack. To use it again
 * a page must be allocated and the RBD must point to the page. This function
 * doesn't change the HW pointer but handles the list of pages that is used by
 * iwl_pcie_rxq_restock. The latter function will update the HW to use the
 * newly allocated buffers.
 */
static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct iwl_rx_mem_buffer *rxb;
	struct page *page;
	unsigned long flags;
	gfp_t gfp_mask = priority;

	while (1) {
		spin_lock_irqsave(&rxq->lock, flags);
		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			return;
		}
		spin_unlock_irqrestore(&rxq->lock, flags);

		if (rxq->free_count > RX_LOW_WATERMARK)
			gfp_mask |= __GFP_NOWARN;

		if (trans_pcie->rx_page_order > 0)
			gfp_mask |= __GFP_COMP;

		/* Alloc a new receive buffer */
		page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
		if (!page) {
			if (net_ratelimit())
				IWL_DEBUG_INFO(trans,
					       "alloc_pages failed, order: %d\n",
					       trans_pcie->rx_page_order);

			if ((rxq->free_count <= RX_LOW_WATERMARK) &&
			    net_ratelimit())
				IWL_CRIT(trans,
					 "Failed to alloc_pages with %s. Only %u free buffers remaining.\n",
					 priority == GFP_ATOMIC ?
					 "GFP_ATOMIC" : "GFP_KERNEL",
					 rxq->free_count);
			/* We don't reschedule replenish work here -- we will
			 * call the restock method and if it still needs
			 * more buffers it will schedule replenish */
			return;
		}

		spin_lock_irqsave(&rxq->lock, flags);

		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}
		rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		spin_unlock_irqrestore(&rxq->lock, flags);

		BUG_ON(rxb->page);
		rxb->page = page;
		/* Get physical address of the RB */
		rxb->page_dma =
			dma_map_page(trans->dev, page, 0,
				     PAGE_SIZE << trans_pcie->rx_page_order,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
			rxb->page = NULL;
			spin_lock_irqsave(&rxq->lock, flags);
			list_add(&rxb->list, &rxq->rx_used);
			spin_unlock_irqrestore(&rxq->lock, flags);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}
		/* dma address must be no more than 36 bits */
		BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
		/* and also 256 byte aligned! */
		BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));

		spin_lock_irqsave(&rxq->lock, flags);

		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;

		spin_unlock_irqrestore(&rxq->lock, flags);
	}
}
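/*
 * Note on 'priority': iwl_pcie_rx_replenish() passes GFP_KERNEL (worker
 * context, allowed to sleep), while iwl_pcie_rx_replenish_now() passes
 * GFP_ATOMIC because it is called from the interrupt handling path.
 */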
static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	int i;

	/* Fill the rx_used queue with _all_ of the Rx buffers */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		/* In the reset function, these buffers may have been allocated
		 * to an SKB, so we need to unmap and free potential storage */
		if (rxq->pool[i].page != NULL) {
			dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
				       PAGE_SIZE << trans_pcie->rx_page_order,
				       DMA_FROM_DEVICE);
			__free_pages(rxq->pool[i].page,
				     trans_pcie->rx_page_order);
			rxq->pool[i].page = NULL;
		}
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
	}
}
/*
 * iwl_pcie_rx_replenish - Move all used buffers from rx_used to rx_free
 *
 * When a buffer is moved to rx_free, a page is allocated for the slot.
 *
 * Also restock the Rx queue via iwl_pcie_rxq_restock.
 * This is called as a scheduled work item (except for during initialization)
 */
static void iwl_pcie_rx_replenish(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_pcie_rxq_restock(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}
static void iwl_pcie_rx_replenish_now(struct iwl_trans *trans)
{
	iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC);

	iwl_pcie_rxq_restock(trans);
}

static void iwl_pcie_rx_replenish_work(struct work_struct *data)
{
	struct iwl_trans_pcie *trans_pcie =
	    container_of(data, struct iwl_trans_pcie, rx_replenish);

	iwl_pcie_rx_replenish(trans_pcie->trans);
}
static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct device *dev = trans->dev;

	memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));

	spin_lock_init(&rxq->lock);

	if (WARN_ON(rxq->bd || rxq->rb_stts))
		return -EINVAL;

	/* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
	rxq->bd = dma_zalloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
				      &rxq->bd_dma, GFP_KERNEL);
	if (!rxq->bd)
		goto err_bd;

	/* Allocate the driver's pointer to receive buffer status */
	rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
					   &rxq->rb_stts_dma, GFP_KERNEL);
	if (!rxq->rb_stts)
		goto err_rb_stts;

	return 0;

err_rb_stts:
	dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	rxq->bd_dma = 0;
	rxq->bd = NULL;
err_bd:
	return -ENOMEM;
}
static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 rb_size;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */

	if (trans_pcie->rx_buf_size_8k)
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
	else
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

	/* Stop Rx DMA */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	/* reset and flush pointers */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RDPTR, 0);

	/* Reset driver's Rx queue write index */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
			   (u32)(rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
			   rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *   the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
			   FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
			   rb_size |
			   (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
			   (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
}
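/*
 * Note (assuming the 32-usec tick documented for CSR_INT_COALESCING): the
 * default of 2048 usecs corresponds to a register value of 2048 / 32 = 64
 * (0x40).
 */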
int iwl_pcie_rx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	int i, err;
	unsigned long flags;

	if (!rxq->bd) {
		err = iwl_pcie_rx_alloc(trans);
		if (err)
			return err;
	}

	spin_lock_irqsave(&rxq->lock, flags);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);

	INIT_WORK(&trans_pcie->rx_replenish,
		  iwl_pcie_rx_replenish_work);

	iwl_pcie_rxq_free_rbs(trans);

	for (i = 0; i < RX_QUEUE_SIZE; i++)
		rxq->queue[i] = NULL;

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	rxq->free_count = 0;
	memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
	spin_unlock_irqrestore(&rxq->lock, flags);

	iwl_pcie_rx_replenish(trans);

	iwl_pcie_rx_hw_init(trans, rxq);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	rxq->need_update = 1;
	iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	return 0;
}
void iwl_pcie_rx_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	unsigned long flags;

	/* if rxq->bd is NULL, nothing has been allocated; exit now */
	if (!rxq->bd) {
		IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
		return;
	}

	cancel_work_sync(&trans_pcie->rx_replenish);

	spin_lock_irqsave(&rxq->lock, flags);
	iwl_pcie_rxq_free_rbs(trans);
	spin_unlock_irqrestore(&rxq->lock, flags);

	dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	rxq->bd_dma = 0;
	rxq->bd = NULL;

	if (rxq->rb_stts)
		dma_free_coherent(trans->dev,
				  sizeof(struct iwl_rb_status),
				  rxq->rb_stts, rxq->rb_stts_dma);
	else
		IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n");
	rxq->rb_stts_dma = 0;
	rxq->rb_stts = NULL;
}
static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
				  struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
	unsigned long flags;
	bool page_stolen = false;
	int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
	u32 offset = 0;

	if (WARN_ON(!rxb))
		return;

	dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);

	while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
		struct iwl_rx_packet *pkt;
		struct iwl_device_cmd *cmd;
		u16 sequence;
		bool reclaim;
		int index, cmd_index, err, len;
		struct iwl_rx_cmd_buffer rxcb = {
			._offset = offset,
			._rx_page_order = trans_pcie->rx_page_order,
			._page = rxb->page,
			._page_stolen = false,
			.truesize = max_len,
		};

		pkt = rxb_addr(&rxcb);

		if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID))
			break;

		IWL_DEBUG_RX(trans, "cmd at offset %d: %s (0x%.2x)\n",
			     rxcb._offset, get_cmd_string(trans_pcie, pkt->hdr.cmd),
			     pkt->hdr.cmd);

		len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
		len += sizeof(u32); /* account for status word */
		trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
		trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);

		/* Reclaim a command buffer only if this packet is a response
		 *   to a (driver-originated) command.
		 * If the packet (e.g. Rx frame) originated from uCode,
		 *   there is no command buffer to reclaim.
		 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
		 *   but apparently a few don't get set; catch them here. */
		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
		if (reclaim) {
			int i;

			for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
				if (trans_pcie->no_reclaim_cmds[i] ==
							pkt->hdr.cmd) {
					reclaim = false;
					break;
				}
			}
		}

		sequence = le16_to_cpu(pkt->hdr.sequence);
		index = SEQ_TO_INDEX(sequence);
		cmd_index = get_cmd_index(&txq->q, index);

		if (reclaim)
			cmd = txq->entries[cmd_index].cmd;
		else
			cmd = NULL;

		err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd);

		if (reclaim) {
			kfree(txq->entries[cmd_index].free_buf);
			txq->entries[cmd_index].free_buf = NULL;
		}

		/*
		 * After here, we should always check rxcb._page_stolen,
		 * if it is true then one of the handlers took the page.
		 */

		if (reclaim) {
			/* Invoke any callbacks, transfer the buffer to caller,
			 * and fire off the (possibly) blocking
			 * iwl_trans_send_cmd()
			 * as we reclaim the driver command queue */
			if (!rxcb._page_stolen)
				iwl_pcie_hcmd_complete(trans, &rxcb, err);
			else
				IWL_WARN(trans, "Claim null rxb?\n");
		}

		page_stolen |= rxcb._page_stolen;
		offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
	}

	/* page was stolen from us -- free our reference */
	if (page_stolen) {
		__free_pages(rxb->page, trans_pcie->rx_page_order);
		rxb->page = NULL;
	}

	/* Reuse the page if possible. For notification packets and
	 * SKBs that fail to Rx correctly, add them back into the
	 * rx_free list for reuse later. */
	spin_lock_irqsave(&rxq->lock, flags);
	if (rxb->page != NULL) {
		rxb->page_dma =
			dma_map_page(trans->dev, rxb->page, 0,
				     PAGE_SIZE << trans_pcie->rx_page_order,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
			/*
			 * free the page(s) as well to not break
			 * the invariant that the items on the used
			 * list have no page(s)
			 */
			__free_pages(rxb->page, trans_pcie->rx_page_order);
			rxb->page = NULL;
			list_add_tail(&rxb->list, &rxq->rx_used);
		} else {
			list_add_tail(&rxb->list, &rxq->rx_free);
			rxq->free_count++;
		}
	} else
		list_add_tail(&rxb->list, &rxq->rx_used);
	spin_unlock_irqrestore(&rxq->lock, flags);
}
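/*
 * Note: a single receive buffer can carry several packets back to back;
 * the loop above walks them via len_n_flags, advancing 'offset' by the
 * frame length rounded up to FH_RSCSR_FRAME_ALIGN until it reaches an
 * invalid-frame marker or the end of the page.
 */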
/*
 * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
 */
static void iwl_pcie_rx_handle(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	u32 r, i;
	u8 fill_rx = 0;
	u32 count = 8;
	int total_empty;

	/* uCode's read index (stored in shared DRAM) indicates the last Rx
	 * buffer that the driver may process (last buffer filled by ucode). */
	r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
	i = rxq->read;

	/* Rx interrupt, but nothing sent from uCode */
	if (i == r)
		IWL_DEBUG_RX(trans, "HW = SW = %d\n", r);

	/* calculate total frames that need to be restocked after handling RX */
	total_empty = r - rxq->write_actual;
	if (total_empty < 0)
		total_empty += RX_QUEUE_SIZE;

	if (total_empty > (RX_QUEUE_SIZE / 2))
		fill_rx = 1;

	while (i != r) {
		struct iwl_rx_mem_buffer *rxb;

		rxb = rxq->queue[i];
		rxq->queue[i] = NULL;

		IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d (%p)\n",
			     r, i, rxb);
		iwl_pcie_rx_handle_rb(trans, rxb);

		i = (i + 1) & RX_QUEUE_MASK;
		/* If there are a lot of unused frames,
		 * restock the Rx queue so ucode won't assert. */
		if (fill_rx) {
			count++;
			if (count >= 8) {
				rxq->read = i;
				iwl_pcie_rx_replenish_now(trans);
				count = 0;
			}
		}
	}

	/* Backtrack one entry */
	rxq->read = i;
	if (fill_rx)
		iwl_pcie_rx_replenish_now(trans);
	else
		iwl_pcie_rxq_restock(trans);
}
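/*
 * Illustrative example (RX_QUEUE_SIZE is 256): if closed_rb_num yields
 * r = 10 while write_actual = 250, then total_empty = 10 - 250 + 256 = 16.
 * Since 16 <= 128, fill_rx stays 0 and the handler restocks once at the
 * end via iwl_pcie_rxq_restock() instead of doing an atomic replenish
 * every 8 handled buffers.
 */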
/*
 * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card
 */
static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
	if (trans->cfg->internal_wimax_coex &&
	    (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
			     APMS_CLK_VAL_MRB_FUNC_MODE) ||
	     (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
			    APMG_PS_CTRL_VAL_RESET_REQ))) {
		clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
		iwl_op_mode_wimax_active(trans->op_mode);
		wake_up(&trans_pcie->wait_command_queue);
		return;
	}

	iwl_pcie_dump_csr(trans);
	iwl_pcie_dump_fh(trans, NULL);

	set_bit(STATUS_FW_ERROR, &trans_pcie->status);
	clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
	wake_up(&trans_pcie->wait_command_queue);

	local_bh_disable();
	iwl_op_mode_nic_error(trans->op_mode);
	local_bh_enable();
}
irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
{
	struct iwl_trans *trans = dev_id;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
	u32 inta = 0;
	u32 handled = 0;
	unsigned long flags;
	u32 i;
#ifdef CONFIG_IWLWIFI_DEBUG
	u32 inta_mask;
#endif

	lock_map_acquire(&trans->sync_cmd_lockdep_map);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	/* Ack/clear/reset pending uCode interrupts.
	 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS.
	 */
	/* There is a hardware bug in the interrupt mask function that some
	 * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
	 * they are disabled in the CSR_INT_MASK register. Furthermore the
	 * ICT interrupt handling mechanism has another bug that might cause
	 * these unmasked interrupts to fail to be detected. We work around
	 * the hardware bugs here by ACKing all the possible interrupts so
	 * that interrupt coalescing can still be achieved.
	 */
	iwl_write32(trans, CSR_INT,
		    trans_pcie->inta | ~trans_pcie->inta_mask);

	inta = trans_pcie->inta;

#ifdef CONFIG_IWLWIFI_DEBUG
	if (iwl_have_debug_level(IWL_DL_ISR)) {
		/* just for debug */
		inta_mask = iwl_read32(trans, CSR_INT_MASK);
		IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
			      inta, inta_mask);
	}
#endif

	/* the interrupts are saved in inta; now we can reset trans_pcie->inta */
	trans_pcie->inta = 0;

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	/* Now service all interrupt bits discovered above. */
	if (inta & CSR_INT_BIT_HW_ERR) {
		IWL_ERR(trans, "Hardware error detected. Restarting.\n");

		/* Tell the device to stop sending interrupts */
		iwl_disable_interrupts(trans);

		isr_stats->hw++;
		iwl_pcie_irq_handle_error(trans);

		handled |= CSR_INT_BIT_HW_ERR;

		goto out;
	}

#ifdef CONFIG_IWLWIFI_DEBUG
	if (iwl_have_debug_level(IWL_DL_ISR)) {
		/* NIC fires this, but we don't use it, redundant with WAKEUP */
		if (inta & CSR_INT_BIT_SCD) {
			IWL_DEBUG_ISR(trans,
				      "Scheduler finished transmitting the frame/frames.\n");
			isr_stats->sch++;
		}

		/* Alive notification via Rx interrupt will do the real work */
		if (inta & CSR_INT_BIT_ALIVE) {
			IWL_DEBUG_ISR(trans, "Alive interrupt\n");
			isr_stats->alive++;
		}
	}
#endif
	/* Safely ignore these bits for debug checks below */
	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);

	/* HW RF KILL switch toggled */
	if (inta & CSR_INT_BIT_RF_KILL) {
		bool hw_rfkill;

		hw_rfkill = iwl_is_rfkill_set(trans);
		IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
			 hw_rfkill ? "disable radio" : "enable radio");

		isr_stats->rfkill++;

		iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
		if (hw_rfkill) {
			set_bit(STATUS_RFKILL, &trans_pcie->status);
			if (test_and_clear_bit(STATUS_HCMD_ACTIVE,
					       &trans_pcie->status))
				IWL_DEBUG_RF_KILL(trans,
						  "Rfkill while SYNC HCMD in flight\n");
			wake_up(&trans_pcie->wait_command_queue);
		} else {
			clear_bit(STATUS_RFKILL, &trans_pcie->status);
		}

		handled |= CSR_INT_BIT_RF_KILL;
	}

	/* Chip got too hot and stopped itself */
	if (inta & CSR_INT_BIT_CT_KILL) {
		IWL_ERR(trans, "Microcode CT kill error detected.\n");
		isr_stats->ctkill++;
		handled |= CSR_INT_BIT_CT_KILL;
	}

	/* Error detected by uCode */
	if (inta & CSR_INT_BIT_SW_ERR) {
		IWL_ERR(trans, "Microcode SW error detected. Restarting 0x%X.\n",
			inta);
		isr_stats->sw++;
		iwl_pcie_irq_handle_error(trans);
		handled |= CSR_INT_BIT_SW_ERR;
	}

	/* uCode wakes up after power-down sleep */
	if (inta & CSR_INT_BIT_WAKEUP) {
		IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
		iwl_pcie_rxq_inc_wr_ptr(trans, &trans_pcie->rxq);
		for (i = 0; i < trans->cfg->base_params->num_of_queues; i++)
			iwl_pcie_txq_inc_wr_ptr(trans, &trans_pcie->txq[i]);

		isr_stats->wakeup++;

		handled |= CSR_INT_BIT_WAKEUP;
	}

	/* All uCode command responses, including Tx command responses,
	 * Rx "responses" (frame-received notification), and other
	 * notifications from uCode come through here */
	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
		    CSR_INT_BIT_RX_PERIODIC)) {
		IWL_DEBUG_ISR(trans, "Rx interrupt\n");
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
			handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
			iwl_write32(trans, CSR_FH_INT_STATUS,
				    CSR_FH_INT_RX_MASK);
		}
		if (inta & CSR_INT_BIT_RX_PERIODIC) {
			handled |= CSR_INT_BIT_RX_PERIODIC;
			iwl_write32(trans,
				    CSR_INT, CSR_INT_BIT_RX_PERIODIC);
		}
		/* Sending an RX interrupt requires many steps to be done
		 * in the device:
		 * 1- write interrupt to current index in ICT table.
		 * 2- dma RX frame.
		 * 3- update RX shared data to indicate last write index.
		 * 4- send interrupt.
		 * This could lead to an RX race: the driver could receive
		 * the RX interrupt while the shared data does not yet
		 * reflect it; the periodic interrupt will detect any
		 * dangling Rx activity.
		 */

		/* Disable periodic interrupt; we use it as just a one-shot. */
		iwl_write8(trans, CSR_INT_PERIODIC_REG,
			   CSR_INT_PERIODIC_DIS);

		iwl_pcie_rx_handle(trans);

		/*
		 * Enable periodic interrupt in 8 msec only if we received
		 * real RX interrupt (instead of just periodic int), to catch
		 * any dangling Rx interrupt. If it was just the periodic
		 * interrupt, there was no dangling Rx activity, and no need
		 * to extend the periodic interrupt; one-shot is enough.
		 */
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
			iwl_write8(trans, CSR_INT_PERIODIC_REG,
				   CSR_INT_PERIODIC_ENA);

		isr_stats->rx++;
	}

	/* This "Tx" DMA channel is used only for loading uCode */
	if (inta & CSR_INT_BIT_FH_TX) {
		iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
		isr_stats->tx++;
		handled |= CSR_INT_BIT_FH_TX;
		/* Wake up uCode load routine, now that load is complete */
		trans_pcie->ucode_write_complete = true;
		wake_up(&trans_pcie->ucode_write_waitq);
	}

	if (inta & ~handled) {
		IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
		isr_stats->unhandled++;
	}

	if (inta & ~(trans_pcie->inta_mask)) {
		IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n",
			 inta & ~trans_pcie->inta_mask);
	}

	/* Re-enable all interrupts */
	/* only re-enable if disabled by irq */
	if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status))
		iwl_enable_interrupts(trans);
	/* Re-enable RF_KILL if it occurred */
	else if (handled & CSR_INT_BIT_RF_KILL)
		iwl_enable_rfkill_int(trans);

out:
	lock_map_release(&trans->sync_cmd_lockdep_map);
	return IRQ_HANDLED;
}
/******************************************************************************
 *
 * ICT functions
 *
 ******************************************************************************/

/* a device (PCI-E) page is 4096 bytes long */
#define ICT_SHIFT	12
#define ICT_SIZE	(1 << ICT_SHIFT)
#define ICT_COUNT	(ICT_SIZE / sizeof(u32))
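/*
 * Derived geometry: ICT_SIZE = 1 << 12 = 4096 bytes (one PCI-E page),
 * holding ICT_COUNT = 4096 / 4 = 1024 32-bit entries. The table must be
 * 4096-byte aligned because only ict_tbl_dma >> ICT_SHIFT is programmed
 * into the device (see iwl_pcie_reset_ict below).
 */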
/* Free dram table */
void iwl_pcie_free_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans_pcie->ict_tbl) {
		dma_free_coherent(trans->dev, ICT_SIZE,
				  trans_pcie->ict_tbl,
				  trans_pcie->ict_tbl_dma);
		trans_pcie->ict_tbl = NULL;
		trans_pcie->ict_tbl_dma = 0;
	}
}

/*
 * Allocate the DRAM shared table: an aligned memory block of ICT_SIZE.
 * Also reset all data related to the ICT table interrupt.
 */
int iwl_pcie_alloc_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->ict_tbl =
		dma_alloc_coherent(trans->dev, ICT_SIZE,
				   &trans_pcie->ict_tbl_dma,
				   GFP_KERNEL);
	if (!trans_pcie->ict_tbl)
		return -ENOMEM;

	/* just an API sanity check ... it is guaranteed to be aligned */
	if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
		iwl_pcie_free_ict(trans);
		return -EINVAL;
	}

	IWL_DEBUG_ISR(trans, "ict dma addr %Lx\n",
		      (unsigned long long)trans_pcie->ict_tbl_dma);

	IWL_DEBUG_ISR(trans, "ict vir addr %p\n", trans_pcie->ict_tbl);

	/* reset table and index to all 0 */
	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
	trans_pcie->ict_index = 0;

	/* add periodic RX interrupt */
	trans_pcie->inta_mask |= CSR_INT_BIT_RX_PERIODIC;
	return 0;
}
/* Device is going up - inform it that it should use the ICT interrupt
 * table, and tell the driver to start using ICT interrupts.
 */
void iwl_pcie_reset_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 val;
	unsigned long flags;

	if (!trans_pcie->ict_tbl)
		return;

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_disable_interrupts(trans);

	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);

	val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;

	val |= CSR_DRAM_INT_TBL_ENABLE;
	val |= CSR_DRAM_INIT_TBL_WRAP_CHECK;

	IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG = 0x%x\n", val);

	iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val);
	trans_pcie->use_ict = true;
	trans_pcie->ict_index = 0;
	iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
	iwl_enable_interrupts(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}
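/*
 * Illustrative example: with a properly aligned table at ict_tbl_dma =
 * 0x1f3a8000, the value programmed above is 0x1f3a8 (the address >> 12)
 * OR'ed with the enable and wrap-check bits.
 */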
/* Device is going down - disable ICT interrupt usage */
void iwl_pcie_disable_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	trans_pcie->use_ict = false;
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}
/* legacy (non-ICT) ISR. Assumes that trans_pcie->irq_lock is held */
static irqreturn_t iwl_pcie_isr(int irq, void *data)
{
	struct iwl_trans *trans = data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 inta, inta_mask;
#ifdef CONFIG_IWLWIFI_DEBUG
	u32 inta_fh;
#endif

	lockdep_assert_held(&trans_pcie->irq_lock);

	trace_iwlwifi_dev_irq(trans->dev);

	/* Disable (but don't clear!) interrupts here to avoid
	 * back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the irq thread will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here. */
	inta_mask = iwl_read32(trans, CSR_INT_MASK);
	iwl_write32(trans, CSR_INT_MASK, 0x00000000);

	/* Discover which interrupts are active/pending */
	inta = iwl_read32(trans, CSR_INT);

	if (inta & (~inta_mask)) {
		IWL_DEBUG_ISR(trans,
			      "We got a masked interrupt (0x%08x)...Ack and ignore\n",
			      inta & (~inta_mask));
		iwl_write32(trans, CSR_INT, inta & (~inta_mask));
		inta &= inta_mask;
	}

	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	if (!inta) {
		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
		goto none;
	}

	if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
		/* Hardware disappeared. It might have already raised
		 * an interrupt */
		IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
		return IRQ_HANDLED;
	}

#ifdef CONFIG_IWLWIFI_DEBUG
	if (iwl_have_debug_level(IWL_DL_ISR)) {
		inta_fh = iwl_read32(trans, CSR_FH_INT_STATUS);
		IWL_DEBUG_ISR(trans,
			      "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
			      inta, inta_mask, inta_fh);
	}
#endif

	trans_pcie->inta |= inta;
	/* the irq thread will service interrupts and re-enable them */
	if (likely(inta))
		return IRQ_WAKE_THREAD;
	else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
		 !trans_pcie->inta)
		iwl_enable_interrupts(trans);
	return IRQ_HANDLED;

none:
	/* re-enable interrupts here since we don't have anything to service. */
	/* only re-enable if disabled by irq and no irq thread was scheduled. */
	if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
	    !trans_pcie->inta)
		iwl_enable_interrupts(trans);

	return IRQ_NONE;
}
/*
 * Interrupt handler using the ICT table. With this handler, the driver stops
 * using the INTA register to learn about the device's interrupts (reading
 * that register is expensive). Instead, the device writes interrupts into the
 * ICT table in DRAM, increments its index, and then fires an interrupt. The
 * driver ORs all the ICT table entries from the current index up to the first
 * entry with value 0; the result is the interrupt mask to service. The driver
 * then sets the consumed entries back to 0 and updates the index.
 */
irqreturn_t iwl_pcie_isr_ict(int irq, void *data)
{
	struct iwl_trans *trans = data;
	struct iwl_trans_pcie *trans_pcie;
	u32 inta, inta_mask;
	u32 val = 0;
	u32 read;
	unsigned long flags;

	if (!trans)
		return IRQ_NONE;

	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	/* dram interrupt table not set yet,
	 * use legacy interrupt.
	 */
	if (unlikely(!trans_pcie->use_ict)) {
		irqreturn_t ret = iwl_pcie_isr(irq, data);
		spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
		return ret;
	}

	trace_iwlwifi_dev_irq(trans->dev);

	/* Disable (but don't clear!) interrupts here to avoid
	 * back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the irq thread will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here.
	 */
	inta_mask = iwl_read32(trans, CSR_INT_MASK);
	iwl_write32(trans, CSR_INT_MASK, 0x00000000);

	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
	trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
	if (!read) {
		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
		goto none;
	}

	/*
	 * Collect all entries up to the first 0, starting from ict_index;
	 * note we already read at ict_index.
	 */
	do {
		val |= read;
		IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
			      trans_pcie->ict_index, read);
		trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
		trans_pcie->ict_index =
			iwl_queue_inc_wrap(trans_pcie->ict_index, ICT_COUNT);

		read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
		trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
					   read);
	} while (read);

	/* We should not get this value, just ignore it. */
	if (val == 0xffffffff)
		val = 0;

	/*
	 * This is a w/a for a h/w bug. The h/w bug may cause the Rx bit
	 * (bit 15 before shifting it to 31) to clear when using interrupt
	 * coalescing. Fortunately, bits 18 and 19 stay set when this happens
	 * so we use them to decide on the real state of the Rx bit.
	 * In other words, bit 15 is set if bit 18 or bit 19 are set.
	 */
	if (val & 0xC0000)
		val |= 0x8000;
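	/*
	 * Bit reassembly example (illustrative value): the ICT entry packs
	 * the interesting bytes of CSR_INT, so val = 0x00008042 yields
	 * inta = 0x42 | (0x8000 << 16) = 0x80000042 below.
	 */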
	inta = (0xff & val) | ((0xff00 & val) << 16);
	IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled 0x%08x ict 0x%08x\n",
		      inta, inta_mask, val);

	inta &= trans_pcie->inta_mask;
	trans_pcie->inta |= inta;

	/* the irq thread will service interrupts and re-enable them */
	if (likely(inta)) {
		spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
		return IRQ_WAKE_THREAD;
	} else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
		   !trans_pcie->inta) {
		/* Interrupts were disabled by this handler and no irq
		 * thread was woken, so it is safe to re-enable them here;
		 * otherwise the irq thread re-enables them when it is done.
		 */
		iwl_enable_interrupts(trans);
	}

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
	return IRQ_HANDLED;

none:
	/* re-enable interrupts here since we don't have anything to service.
	 * only re-enable if disabled by irq.
	 */
	if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
	    !trans_pcie->inta)
		iwl_enable_interrupts(trans);

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
	return IRQ_NONE;
}