ab697a9f EG |
1 | /****************************************************************************** |
2 | * | |
51368bf7 | 3 | * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. |
26d535ae | 4 | * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH |
bce97731 | 5 | * Copyright(c) 2016 Intel Deutschland GmbH |
ab697a9f EG |
6 | * |
7 | * Portions of this file are derived from the ipw3945 project, as well | |
8 | * as portions of the ieee80211 subsystem header files. | |
9 | * | |
10 | * This program is free software; you can redistribute it and/or modify it | |
11 | * under the terms of version 2 of the GNU General Public License as | |
12 | * published by the Free Software Foundation. | |
13 | * | |
14 | * This program is distributed in the hope that it will be useful, but WITHOUT | |
15 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
16 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
17 | * more details. | |
18 | * | |
19 | * You should have received a copy of the GNU General Public License along with | |
20 | * this program; if not, write to the Free Software Foundation, Inc., | |
21 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA | |
22 | * | |
23 | * The full GNU General Public License is included in this distribution in the | |
24 | * file called LICENSE. | |
25 | * | |
26 | * Contact Information: | |
d01c5366 | 27 | * Intel Linux Wireless <linuxwifi@intel.com> |
ab697a9f EG |
28 | * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 |
29 | * | |
30 | *****************************************************************************/ | |
31 | #include <linux/sched.h> | |
32 | #include <linux/wait.h> | |
1a361cd8 | 33 | #include <linux/gfp.h> |
ab697a9f | 34 | |
1b29dc94 | 35 | #include "iwl-prph.h" |
ab697a9f | 36 | #include "iwl-io.h" |
6468a01a | 37 | #include "internal.h" |
db70f290 | 38 | #include "iwl-op-mode.h" |
ab697a9f EG |
39 | |
40 | /****************************************************************************** | |
41 | * | |
42 | * RX path functions | |
43 | * | |
44 | ******************************************************************************/ | |
45 | ||
46 | /* | |
47 | * Rx theory of operation | |
48 | * | |
49 | * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs), | |
50 | * each of which points to a Receive Buffer to be filled by the NIC. These get |
51 | * used not only for Rx frames, but for any command response or notification | |
52 | * from the NIC. The driver and NIC manage the Rx buffers by means | |
53 | * of indexes into the circular buffer. | |
54 | * | |
55 | * Rx Queue Indexes | |
56 | * The host/firmware share two index registers for managing the Rx buffers. | |
57 | * | |
58 | * The READ index maps to the first position that the firmware may be writing | |
59 | * to -- the driver can read up to (but not including) this position and get | |
60 | * good data. | |
61 | * The READ index is managed by the firmware once the card is enabled. | |
62 | * | |
63 | * The WRITE index maps to the last position the driver has read from -- the | |
64 | * position preceding WRITE is the last slot in which the firmware may place a packet. |
65 | * | |
66 | * The queue is empty (no good data) if WRITE = READ - 1, and is full if | |
67 | * WRITE = READ. | |
68 | * | |
69 | * During initialization, the host sets up the READ queue position to the first | |
70 | * INDEX position, and WRITE to the last (READ - 1 wrapped) | |
71 | * | |
72 | * When the firmware places a packet in a buffer, it will advance the READ index | |
73 | * and fire the RX interrupt. The driver can then query the READ index and | |
74 | * process as many packets as possible, moving the WRITE index forward as it | |
75 | * resets the Rx queue buffers with new memory. | |
76 | * | |
77 | * The management in the driver is as follows: | |
26d535ae SS |
78 | * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free. |
79 | * When the interrupt handler is called, the request is processed. | |
80 | * The page is either stolen - transferred to the upper layer | |
81 | * or reused - added immediately to the iwl->rxq->rx_free list. | |
82 | * + When the page is stolen - the driver updates the matching queue's used | |
83 | * count, detaches the RBD and transfers it to the queue used list. | |
84 | * When there are two used RBDs - they are transferred to the allocator empty | |
85 | * list. Work is then scheduled for the allocator to start allocating | |
86 | * eight buffers. | |
87 | * When another 6 used RBDs accumulate - they are transferred to the allocator |
88 | * empty list and the driver tries to claim the pre-allocated buffers and |
89 | * add them to iwl->rxq->rx_free. If it fails - it keeps trying to claim them |
90 | * until they are ready. |
91 | * When there are 8+ buffers in the free list - either from allocation or from |
92 | * 8 reused unstolen pages - restock is called to update the FW and indexes. |
93 | * + In order to make sure the allocator always has RBDs to use for allocation, |
94 | * the allocator keeps an initial pool of size num_queues*(8-2) - the |
95 | * maximum number of RBDs missing per allocation request (a request is posted |
96 | * with 2 empty RBDs; there is no guarantee when the other 6 RBDs are supplied). |
97 | * The queues supply the recycling of the rest of the RBDs. |
ab697a9f EG |
98 | * + A received packet is processed and handed to the kernel network stack, |
99 | * detached from the iwl->rxq. The driver 'processed' index is updated. | |
26d535ae | 100 | * + If there are no allocated buffers in iwl->rxq->rx_free, |
2bfb5092 JB |
101 | * the READ INDEX is not incremented and iwl->status(RX_STALLED) is set. |
102 | * If there were enough free buffers and RX_STALLED is set, it is cleared. |
ab697a9f EG |
103 | * |
104 | * | |
105 | * Driver sequence: | |
106 | * | |
990aa6d7 EG |
107 | * iwl_rxq_alloc() Allocates rx_free |
108 | * iwl_pcie_rx_replenish() Replenishes rx_free list from rx_used, and calls | |
26d535ae SS |
109 | * iwl_pcie_rxq_restock. |
110 | * Used only during initialization. | |
990aa6d7 | 111 | * iwl_pcie_rxq_restock() Moves available buffers from rx_free into Rx |
ab697a9f | 112 | * queue, updates firmware pointers, and updates |
26d535ae SS |
113 | * the WRITE index. |
114 | * iwl_pcie_rx_allocator() Background work for allocating pages. | |
ab697a9f EG |
115 | * |
116 | * -- enable interrupts -- | |
990aa6d7 | 117 | * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the |
ab697a9f EG |
118 | * READ INDEX, detaching the SKB from the pool. |
119 | * Moves the packet buffer from queue to rx_used. | |
26d535ae | 120 | * Posts and claims requests to the allocator. |
990aa6d7 | 121 | * Calls iwl_pcie_rxq_restock to refill any empty |
ab697a9f | 122 | * slots. |
26d535ae SS |
123 | * |
124 | * RBD life-cycle: | |
125 | * | |
126 | * Init: | |
127 | * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue | |
128 | * | |
129 | * Regular Receive interrupt: | |
130 | * Page Stolen: | |
131 | * rxq.queue -> rxq.rx_used -> allocator.rbd_empty -> | |
132 | * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue | |
133 | * Page not Stolen: | |
134 | * rxq.queue -> rxq.rx_free -> rxq.queue | |
ab697a9f EG |
135 | * ... |
136 | * | |
137 | */ | |
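/*
 * Editor's note: an illustrative, self-contained sketch (not driver code) of
 * the index arithmetic described above, assuming a power-of-2 queue size
 * such as RX_QUEUE_SIZE == 256. Masking by (size - 1) is equivalent to
 * modulo and is well defined even when read < write.
 */
#define DEMO_QUEUE_SIZE 256

static inline u32 demo_rxq_space(u32 read, u32 write)
{
	/* one slot always stays unused to tell a full ring from an empty one */
	return (read - write - 1) & (DEMO_QUEUE_SIZE - 1);
}

/*
 * Worked example: with read == 0 and write == 255, space is 0 - every slot
 * the driver may stock is stocked (WRITE == READ - 1, wrapped). With
 * read == write, space is 255, the maximum.
 */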
138 | ||
990aa6d7 EG |
139 | /* |
140 | * iwl_rxq_space - Return number of free slots available in queue. | |
ab697a9f | 141 | */ |
fecba09e | 142 | static int iwl_rxq_space(const struct iwl_rxq *rxq) |
ab697a9f | 143 | { |
96a6497b SS |
144 | /* Make sure rx queue size is a power of 2 */ |
145 | WARN_ON(rxq->queue_size & (rxq->queue_size - 1)); | |
fecba09e | 146 | |
351746c9 IY |
147 | /* |
148 | * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity | |
149 | * between empty and completely full queues. | |
150 | * The following is equivalent to modulo by RX_QUEUE_SIZE and is well | |
151 | * defined for negative dividends. | |
152 | */ | |
96a6497b | 153 | return (rxq->read - rxq->write - 1) & (rxq->queue_size - 1); |
ab697a9f EG |
154 | } |
155 | ||
9805c446 EG |
156 | /* |
157 | * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr | |
158 | */ | |
159 | static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr) | |
160 | { | |
161 | return cpu_to_le32((u32)(dma_addr >> 8)); | |
162 | } | |
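/*
 * Editor's note: a sketch (not in the driver) of the inverse conversion, to
 * make the packing explicit: receive buffers are at least 256-byte aligned,
 * so the low 8 bits of the DMA address are always zero and a 36-bit address
 * fits the 32-bit RBD word after the shift above.
 */
static inline dma_addr_t demo_rbd_ptr2dma_addr(__le32 rbd_ptr)
{
	return ((dma_addr_t)le32_to_cpu(rbd_ptr)) << 8;
}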
163 | ||
dfcfeef9 SS |
164 | static void iwl_pcie_write_prph_64_no_grab(struct iwl_trans *trans, u64 ofs, |
165 | u64 val) | |
96a6497b | 166 | { |
dfcfeef9 SS |
167 | iwl_write_prph_no_grab(trans, ofs, val & 0xffffffff); |
168 | iwl_write_prph_no_grab(trans, ofs + 4, val >> 32); | |
96a6497b SS |
169 | } |
170 | ||
49bd072d EG |
171 | /* |
172 | * iwl_pcie_rx_stop - stops the Rx DMA | |
173 | */ | |
9805c446 EG |
174 | int iwl_pcie_rx_stop(struct iwl_trans *trans) |
175 | { | |
9805c446 EG |
176 | iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); |
177 | return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG, | |
178 | FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000); | |
179 | } | |
180 | ||
990aa6d7 EG |
181 | /* |
182 | * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue | |
ab697a9f | 183 | */ |
78485054 SS |
184 | static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans, |
185 | struct iwl_rxq *rxq) | |
ab697a9f | 186 | { |
ab697a9f EG |
187 | u32 reg; |
188 | ||
5d63f926 | 189 | lockdep_assert_held(&rxq->lock); |
ab697a9f | 190 | |
5045388c EP |
191 | /* |
192 | * explicitly wake up the NIC if: | |
193 | * 1. shadow registers aren't enabled | |
194 | * 2. there is a chance that the NIC is asleep | |
195 | */ | |
196 | if (!trans->cfg->base_params->shadow_reg_enable && | |
197 | test_bit(STATUS_TPOWER_PMI, &trans->status)) { | |
198 | reg = iwl_read32(trans, CSR_UCODE_DRV_GP1); | |
199 | ||
200 | if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { | |
201 | IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n", | |
202 | reg); | |
203 | iwl_set_bit(trans, CSR_GP_CNTRL, | |
204 | CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); | |
5d63f926 JB |
205 | rxq->need_update = true; |
206 | return; | |
ab697a9f EG |
207 | } |
208 | } | |
5045388c EP |
209 | |
210 | rxq->write_actual = round_down(rxq->write, 8); | |
96a6497b | 211 | if (trans->cfg->mq_rx_supported) |
1554ed20 SS |
212 | iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id), |
213 | rxq->write_actual); | |
1316d595 SS |
214 | else |
215 | iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual); | |
5d63f926 JB |
216 | } |
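/*
 * Editor's note: worked example of the round_down(rxq->write, 8) above.
 * The device consumes the write pointer in multiples of 8, so with
 * rxq->write == 13 the value handed to the HW is 8; the remaining 5
 * restocked buffers become visible once write reaches 16.
 */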
217 | ||
218 | static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans) | |
219 | { | |
220 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | |
78485054 | 221 | int i; |
5d63f926 | 222 | |
78485054 SS |
223 | for (i = 0; i < trans->num_rx_queues; i++) { |
224 | struct iwl_rxq *rxq = &trans_pcie->rxq[i]; | |
ab697a9f | 225 | |
78485054 SS |
226 | if (!rxq->need_update) |
227 | continue; | |
228 | spin_lock(&rxq->lock); | |
229 | iwl_pcie_rxq_inc_wr_ptr(trans, rxq); | |
230 | rxq->need_update = false; | |
231 | spin_unlock(&rxq->lock); | |
232 | } | |
ab697a9f EG |
233 | } |
234 | ||
e0e168dc GG |
235 | /* |
236 | * iwl_pcie_rxq_mq_restock - restock implementation for multi-queue rx | |
237 | */ | |
96a6497b SS |
238 | static void iwl_pcie_rxq_mq_restock(struct iwl_trans *trans, |
239 | struct iwl_rxq *rxq) | |
240 | { | |
241 | struct iwl_rx_mem_buffer *rxb; | |
242 | ||
243 | /* | |
244 | * If the device isn't enabled - no need to try to add buffers... | |
245 | * This can happen when we stop the device and still have an interrupt | |
246 | * pending. We stop the APM before we sync the interrupts because we | |
247 | * have to (see comment there). On the other hand, since the APM is | |
248 | * stopped, we cannot access the HW (in particular not prph). | |
249 | * So don't try to restock if the APM has been already stopped. | |
250 | */ | |
251 | if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status)) | |
252 | return; | |
253 | ||
254 | spin_lock(&rxq->lock); | |
255 | while (rxq->free_count) { | |
256 | __le64 *bd = (__le64 *)rxq->bd; | |
257 | ||
258 | /* Get next free Rx buffer, remove from free list */ | |
259 | rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer, | |
260 | list); | |
261 | list_del(&rxb->list); | |
262 | ||
263 | /* The first 12 bits are expected to be empty */ |
264 | WARN_ON(rxb->page_dma & DMA_BIT_MASK(12)); | |
265 | /* Point to Rx buffer via next RBD in circular buffer */ | |
266 | bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid); | |
267 | rxq->write = (rxq->write + 1) & MQ_RX_TABLE_MASK; | |
268 | rxq->free_count--; | |
269 | } | |
270 | spin_unlock(&rxq->lock); | |
271 | ||
272 | /* | |
273 | * If we've added more space for the firmware to place data, tell it. | |
274 | * Increment device's write pointer in multiples of 8. | |
275 | */ | |
276 | if (rxq->write_actual != (rxq->write & ~0x7)) { | |
277 | spin_lock(&rxq->lock); | |
278 | iwl_pcie_rxq_inc_wr_ptr(trans, rxq); | |
279 | spin_unlock(&rxq->lock); | |
280 | } | |
281 | } | |
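/*
 * Editor's note: an illustrative helper (not in the driver) that makes the
 * 64-bit bd[] packing above explicit. The page DMA address is 4 KB aligned
 * (hence the WARN_ON on the low 12 bits), so those bits are free to carry
 * the buffer's vid, which the HW later echoes back through the used-BD ring.
 */
static inline __le64 demo_pack_mq_rbd(dma_addr_t page_dma, u16 vid)
{
	return cpu_to_le64(page_dma | (vid & 0x0FFF));
}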
282 | ||
990aa6d7 | 283 | /* |
e0e168dc | 284 | * iwl_pcie_rxq_sq_restock - restock implementation for single queue rx |
ab697a9f | 285 | */ |
e0e168dc GG |
286 | static void iwl_pcie_rxq_sq_restock(struct iwl_trans *trans, |
287 | struct iwl_rxq *rxq) | |
ab697a9f | 288 | { |
ab697a9f | 289 | struct iwl_rx_mem_buffer *rxb; |
ab697a9f | 290 | |
7439046d EG |
291 | /* |
292 | * If the device isn't enabled - no need to try to add buffers... |
293 | * This can happen when we stop the device and still have an interrupt | |
2bfb5092 JB |
294 | * pending. We stop the APM before we sync the interrupts because we |
295 | * have to (see comment there). On the other hand, since the APM is | |
296 | * stopped, we cannot access the HW (in particular not prph). | |
7439046d EG |
297 | * So don't try to restock if the APM has been already stopped. |
298 | */ | |
eb7ff77e | 299 | if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status)) |
7439046d EG |
300 | return; |
301 | ||
51232f7e | 302 | spin_lock(&rxq->lock); |
990aa6d7 | 303 | while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) { |
96a6497b | 304 | __le32 *bd = (__le32 *)rxq->bd; |
ab697a9f EG |
305 | /* The overwritten rxb must be a used one */ |
306 | rxb = rxq->queue[rxq->write]; | |
307 | BUG_ON(rxb && rxb->page); | |
308 | ||
309 | /* Get next free Rx buffer, remove from free list */ | |
e2b1930e JB |
310 | rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer, |
311 | list); | |
312 | list_del(&rxb->list); | |
ab697a9f EG |
313 | |
314 | /* Point to Rx buffer via next RBD in circular buffer */ | |
96a6497b | 315 | bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma); |
ab697a9f EG |
316 | rxq->queue[rxq->write] = rxb; |
317 | rxq->write = (rxq->write + 1) & RX_QUEUE_MASK; | |
318 | rxq->free_count--; | |
319 | } | |
51232f7e | 320 | spin_unlock(&rxq->lock); |
ab697a9f | 321 | |
ab697a9f EG |
322 | /* If we've added more space for the firmware to place data, tell it. |
323 | * Increment device's write pointer in multiples of 8. */ | |
324 | if (rxq->write_actual != (rxq->write & ~0x7)) { | |
51232f7e | 325 | spin_lock(&rxq->lock); |
78485054 | 326 | iwl_pcie_rxq_inc_wr_ptr(trans, rxq); |
51232f7e | 327 | spin_unlock(&rxq->lock); |
ab697a9f EG |
328 | } |
329 | } | |
330 | ||
e0e168dc GG |
331 | /* |
332 | * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool | |
333 | * | |
334 | * If there are slots in the RX queue that need to be restocked, | |
335 | * and we have free pre-allocated buffers, fill the ranks as much | |
336 | * as we can, pulling from rx_free. | |
337 | * | |
338 | * This moves the 'write' index forward to catch up with 'processed', and | |
339 | * also updates the memory address in the firmware to reference the new | |
340 | * target buffer. | |
341 | */ | |
342 | static | |
343 | void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq) | |
344 | { | |
345 | if (trans->cfg->mq_rx_supported) | |
346 | iwl_pcie_rxq_mq_restock(trans, rxq); | |
347 | else | |
348 | iwl_pcie_rxq_sq_restock(trans, rxq); | |
349 | } | |
350 | ||
26d535ae SS |
351 | /* |
352 | * iwl_pcie_rx_alloc_page - allocates and returns a page. | |
353 | * | |
354 | */ | |
355 | static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans, | |
356 | gfp_t priority) | |
357 | { | |
358 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | |
26d535ae SS |
359 | struct page *page; |
360 | gfp_t gfp_mask = priority; | |
361 | ||
26d535ae SS |
362 | if (trans_pcie->rx_page_order > 0) |
363 | gfp_mask |= __GFP_COMP; | |
364 | ||
365 | /* Alloc a new receive buffer */ | |
366 | page = alloc_pages(gfp_mask, trans_pcie->rx_page_order); | |
367 | if (!page) { | |
368 | if (net_ratelimit()) | |
369 | IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n", | |
370 | trans_pcie->rx_page_order); | |
78485054 SS |
371 | /* |
372 | * Issue an error if we don't have enough pre-allocated | |
373 | * buffers. | |
26d535ae | 374 | */ |
78485054 | 375 | if (!(gfp_mask & __GFP_NOWARN) && net_ratelimit()) |
26d535ae | 376 | IWL_CRIT(trans, |
78485054 | 377 | "Failed to alloc_pages\n"); |
26d535ae SS |
378 | return NULL; |
379 | } | |
380 | return page; | |
381 | } | |
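/*
 * Editor's note: __GFP_COMP is needed above because, for 8K and 12K RBs,
 * rx_page_order is non-zero and the RB spans several physical pages;
 * allocating them as one compound page lets the rest of the driver treat
 * the buffer as a single struct page.
 */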
382 | ||
358a46d4 | 383 | /* |
9805c446 | 384 | * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD |
ab697a9f | 385 | * |
358a46d4 EG |
386 | * A used RBD is an Rx buffer that has been given to the stack. To use it again |
387 | * a page must be allocated and the RBD must point to the page. This function | |
388 | * doesn't change the HW pointer but handles the list of pages that is used by | |
990aa6d7 | 389 | * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly |
358a46d4 | 390 | * allocated buffers. |
ab697a9f | 391 | */ |
78485054 SS |
392 | static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority, |
393 | struct iwl_rxq *rxq) | |
ab697a9f | 394 | { |
20d3b647 | 395 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
ab697a9f EG |
396 | struct iwl_rx_mem_buffer *rxb; |
397 | struct page *page; | |
ab697a9f EG |
398 | |
399 | while (1) { | |
51232f7e | 400 | spin_lock(&rxq->lock); |
ab697a9f | 401 | if (list_empty(&rxq->rx_used)) { |
51232f7e | 402 | spin_unlock(&rxq->lock); |
ab697a9f EG |
403 | return; |
404 | } | |
51232f7e | 405 | spin_unlock(&rxq->lock); |
ab697a9f | 406 | |
ab697a9f | 407 | /* Alloc a new receive buffer */ |
26d535ae SS |
408 | page = iwl_pcie_rx_alloc_page(trans, priority); |
409 | if (!page) | |
ab697a9f | 410 | return; |
ab697a9f | 411 | |
51232f7e | 412 | spin_lock(&rxq->lock); |
ab697a9f EG |
413 | |
414 | if (list_empty(&rxq->rx_used)) { | |
51232f7e | 415 | spin_unlock(&rxq->lock); |
b2cf410c | 416 | __free_pages(page, trans_pcie->rx_page_order); |
ab697a9f EG |
417 | return; |
418 | } | |
e2b1930e JB |
419 | rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer, |
420 | list); | |
421 | list_del(&rxb->list); | |
51232f7e | 422 | spin_unlock(&rxq->lock); |
ab697a9f EG |
423 | |
424 | BUG_ON(rxb->page); | |
425 | rxb->page = page; | |
426 | /* Get physical address of the RB */ | |
20d3b647 JB |
427 | rxb->page_dma = |
428 | dma_map_page(trans->dev, page, 0, | |
429 | PAGE_SIZE << trans_pcie->rx_page_order, | |
430 | DMA_FROM_DEVICE); | |
7c341582 JB |
431 | if (dma_mapping_error(trans->dev, rxb->page_dma)) { |
432 | rxb->page = NULL; | |
51232f7e | 433 | spin_lock(&rxq->lock); |
7c341582 | 434 | list_add(&rxb->list, &rxq->rx_used); |
51232f7e | 435 | spin_unlock(&rxq->lock); |
7c341582 JB |
436 | __free_pages(page, trans_pcie->rx_page_order); |
437 | return; | |
438 | } | |
ab697a9f | 439 | |
51232f7e | 440 | spin_lock(&rxq->lock); |
ab697a9f EG |
441 | |
442 | list_add_tail(&rxb->list, &rxq->rx_free); | |
443 | rxq->free_count++; | |
444 | ||
51232f7e | 445 | spin_unlock(&rxq->lock); |
ab697a9f EG |
446 | } |
447 | } | |
448 | ||
78485054 | 449 | static void iwl_pcie_free_rbs_pool(struct iwl_trans *trans) |
9805c446 EG |
450 | { |
451 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | |
9805c446 EG |
452 | int i; |
453 | ||
7b542436 | 454 | for (i = 0; i < RX_POOL_SIZE; i++) { |
78485054 | 455 | if (!trans_pcie->rx_pool[i].page) |
c7df1f4b | 456 | continue; |
78485054 | 457 | dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma, |
c7df1f4b JB |
458 | PAGE_SIZE << trans_pcie->rx_page_order, |
459 | DMA_FROM_DEVICE); | |
78485054 SS |
460 | __free_pages(trans_pcie->rx_pool[i].page, |
461 | trans_pcie->rx_page_order); | |
462 | trans_pcie->rx_pool[i].page = NULL; | |
9805c446 EG |
463 | } |
464 | } | |
465 | ||
26d535ae SS |
466 | /* |
467 | * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues | |
468 | * | |
469 | * Allocates 8 pages for each received request. |
470 | * Called as a scheduled work item. | |
471 | */ | |
472 | static void iwl_pcie_rx_allocator(struct iwl_trans *trans) | |
473 | { | |
474 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | |
475 | struct iwl_rb_allocator *rba = &trans_pcie->rba; | |
476 | struct list_head local_empty; | |
477 | int pending = atomic_xchg(&rba->req_pending, 0); | |
478 | ||
479 | IWL_DEBUG_RX(trans, "Pending allocation requests = %d\n", pending); | |
480 | ||
481 | /* If we were scheduled - there is at least one request */ | |
482 | spin_lock(&rba->lock); | |
483 | /* swap out the rba->rbd_empty to a local list */ | |
484 | list_replace_init(&rba->rbd_empty, &local_empty); | |
485 | spin_unlock(&rba->lock); | |
486 | ||
487 | while (pending) { | |
488 | int i; | |
489 | struct list_head local_allocated; | |
78485054 SS |
490 | gfp_t gfp_mask = GFP_KERNEL; |
491 | ||
492 | /* Do not post a warning if there are only a few requests */ | |
493 | if (pending < RX_PENDING_WATERMARK) | |
494 | gfp_mask |= __GFP_NOWARN; | |
26d535ae SS |
495 | |
496 | INIT_LIST_HEAD(&local_allocated); | |
497 | ||
498 | for (i = 0; i < RX_CLAIM_REQ_ALLOC;) { | |
499 | struct iwl_rx_mem_buffer *rxb; | |
500 | struct page *page; | |
501 | ||
502 | /* List should never be empty - each reused RBD is | |
503 | * returned to the list, and initial pool covers any | |
504 | * possible gap between the time the page is allocated | |
505 | * and the time the RBD is added. |
506 | */ | |
507 | BUG_ON(list_empty(&local_empty)); | |
508 | /* Get the first rxb from the rbd list */ | |
509 | rxb = list_first_entry(&local_empty, | |
510 | struct iwl_rx_mem_buffer, list); | |
511 | BUG_ON(rxb->page); | |
512 | ||
513 | /* Alloc a new receive buffer */ | |
78485054 | 514 | page = iwl_pcie_rx_alloc_page(trans, gfp_mask); |
26d535ae SS |
515 | if (!page) |
516 | continue; | |
517 | rxb->page = page; | |
518 | ||
519 | /* Get physical address of the RB */ | |
520 | rxb->page_dma = dma_map_page(trans->dev, page, 0, | |
521 | PAGE_SIZE << trans_pcie->rx_page_order, | |
522 | DMA_FROM_DEVICE); | |
523 | if (dma_mapping_error(trans->dev, rxb->page_dma)) { | |
524 | rxb->page = NULL; | |
525 | __free_pages(page, trans_pcie->rx_page_order); | |
526 | continue; | |
527 | } | |
26d535ae SS |
528 | |
529 | /* move the allocated entry to the out list */ | |
530 | list_move(&rxb->list, &local_allocated); | |
531 | i++; | |
532 | } | |
533 | ||
534 | pending--; | |
535 | if (!pending) { | |
536 | pending = atomic_xchg(&rba->req_pending, 0); | |
537 | IWL_DEBUG_RX(trans, | |
538 | "Pending allocation requests = %d\n", | |
539 | pending); | |
540 | } | |
541 | ||
542 | spin_lock(&rba->lock); | |
543 | /* add the allocated rbds to the allocator allocated list */ | |
544 | list_splice_tail(&local_allocated, &rba->rbd_allocated); | |
545 | /* get more empty RBDs for current pending requests */ | |
546 | list_splice_tail_init(&rba->rbd_empty, &local_empty); | |
547 | spin_unlock(&rba->lock); | |
548 | ||
549 | atomic_inc(&rba->req_ready); | |
550 | } | |
551 | ||
552 | spin_lock(&rba->lock); | |
553 | /* return unused rbds to the allocator empty list */ | |
554 | list_splice_tail(&local_empty, &rba->rbd_empty); | |
555 | spin_unlock(&rba->lock); | |
556 | } | |
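/*
 * Editor's note: rba->req_pending and rba->req_ready form a simple
 * producer/consumer handshake. The RX path atomically bumps req_pending and
 * schedules this work item; the work item swaps the pending count to zero,
 * allocates RX_CLAIM_REQ_ALLOC pages per request, and bumps req_ready once
 * per completed batch, which iwl_pcie_rx_allocator_get() then consumes one
 * credit at a time.
 */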
557 | ||
558 | /* | |
d56daea4 | 559 | * iwl_pcie_rx_allocator_get - returns the pre-allocated pages |
26d535ae SS |
560 | * |
561 | * Called by the queue when it has posted an allocation request and |
562 | * has freed 8 RBDs in order to restock itself. |
d56daea4 SS |
563 | * This function directly moves the allocated RBs to the queue's ownership |
564 | * and updates the relevant counters. | |
26d535ae | 565 | */ |
d56daea4 SS |
566 | static void iwl_pcie_rx_allocator_get(struct iwl_trans *trans, |
567 | struct iwl_rxq *rxq) | |
26d535ae SS |
568 | { |
569 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | |
570 | struct iwl_rb_allocator *rba = &trans_pcie->rba; | |
571 | int i; | |
572 | ||
d56daea4 SS |
573 | lockdep_assert_held(&rxq->lock); |
574 | ||
26d535ae SS |
575 | /* |
576 | * atomic_dec_if_positive returns req_ready - 1 for any scenario. | |
577 | * If req_ready is 0 atomic_dec_if_positive will return -1 and this | |
d56daea4 | 578 | * function will return early, as there are no ready requests. |
26d535ae SS |
579 | * atomic_dec_if_positive will perform the *actual* decrement only if |
580 | * req_ready > 0, i.e. - there are ready requests and the function | |
581 | * hands one request to the caller. | |
582 | */ | |
583 | if (atomic_dec_if_positive(&rba->req_ready) < 0) | |
d56daea4 | 584 | return; |
26d535ae SS |
585 | |
586 | spin_lock(&rba->lock); | |
587 | for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) { | |
588 | /* Get next free Rx buffer, remove it from free list */ | |
d56daea4 SS |
589 | struct iwl_rx_mem_buffer *rxb = |
590 | list_first_entry(&rba->rbd_allocated, | |
591 | struct iwl_rx_mem_buffer, list); | |
592 | ||
593 | list_move(&rxb->list, &rxq->rx_free); | |
26d535ae SS |
594 | } |
595 | spin_unlock(&rba->lock); | |
596 | ||
d56daea4 SS |
597 | rxq->used_count -= RX_CLAIM_REQ_ALLOC; |
598 | rxq->free_count += RX_CLAIM_REQ_ALLOC; | |
26d535ae SS |
599 | } |
600 | ||
601 | static void iwl_pcie_rx_allocator_work(struct work_struct *data) | |
ab697a9f | 602 | { |
26d535ae SS |
603 | struct iwl_rb_allocator *rba_p = |
604 | container_of(data, struct iwl_rb_allocator, rx_alloc); | |
5a878bf6 | 605 | struct iwl_trans_pcie *trans_pcie = |
26d535ae | 606 | container_of(rba_p, struct iwl_trans_pcie, rba); |
ab697a9f | 607 | |
26d535ae | 608 | iwl_pcie_rx_allocator(trans_pcie->trans); |
ab697a9f EG |
609 | } |
610 | ||
9805c446 EG |
611 | static int iwl_pcie_rx_alloc(struct iwl_trans *trans) |
612 | { | |
613 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | |
26d535ae | 614 | struct iwl_rb_allocator *rba = &trans_pcie->rba; |
9805c446 | 615 | struct device *dev = trans->dev; |
78485054 | 616 | int i; |
96a6497b SS |
617 | int free_size = trans->cfg->mq_rx_supported ? sizeof(__le64) : |
618 | sizeof(__le32); | |
9805c446 | 619 | |
78485054 SS |
620 | if (WARN_ON(trans_pcie->rxq)) |
621 | return -EINVAL; | |
622 | ||
623 | trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq), | |
624 | GFP_KERNEL); | |
625 | if (!trans_pcie->rxq) | |
626 | return -EINVAL; | |
9805c446 | 627 | |
26d535ae | 628 | spin_lock_init(&rba->lock); |
9805c446 | 629 | |
78485054 SS |
630 | for (i = 0; i < trans->num_rx_queues; i++) { |
631 | struct iwl_rxq *rxq = &trans_pcie->rxq[i]; | |
9805c446 | 632 | |
78485054 | 633 | spin_lock_init(&rxq->lock); |
96a6497b SS |
634 | if (trans->cfg->mq_rx_supported) |
635 | rxq->queue_size = MQ_RX_TABLE_SIZE; | |
636 | else | |
637 | rxq->queue_size = RX_QUEUE_SIZE; | |
638 | ||
78485054 SS |
639 | /* |
640 | * Allocate the circular buffer of Read Buffer Descriptors | |
641 | * (RBDs) | |
642 | */ | |
643 | rxq->bd = dma_zalloc_coherent(dev, | |
96a6497b SS |
644 | free_size * rxq->queue_size, |
645 | &rxq->bd_dma, GFP_KERNEL); | |
78485054 SS |
646 | if (!rxq->bd) |
647 | goto err; | |
9805c446 | 648 | |
96a6497b SS |
649 | if (trans->cfg->mq_rx_supported) { |
650 | rxq->used_bd = dma_zalloc_coherent(dev, | |
651 | sizeof(__le32) * | |
652 | rxq->queue_size, | |
653 | &rxq->used_bd_dma, | |
654 | GFP_KERNEL); | |
655 | if (!rxq->used_bd) | |
656 | goto err; | |
657 | } | |
9805c446 | 658 | |
78485054 SS |
659 | /* Allocate the driver's pointer to receive buffer status */ |
660 | rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts), | |
661 | &rxq->rb_stts_dma, | |
662 | GFP_KERNEL); | |
663 | if (!rxq->rb_stts) | |
664 | goto err; | |
665 | } | |
9805c446 EG |
666 | return 0; |
667 | ||
78485054 SS |
668 | err: |
669 | for (i = 0; i < trans->num_rx_queues; i++) { | |
670 | struct iwl_rxq *rxq = &trans_pcie->rxq[i]; | |
671 | ||
672 | if (rxq->bd) | |
96a6497b | 673 | dma_free_coherent(dev, free_size * rxq->queue_size, |
78485054 SS |
674 | rxq->bd, rxq->bd_dma); |
675 | rxq->bd_dma = 0; | |
676 | rxq->bd = NULL; | |
677 | ||
678 | if (rxq->rb_stts) | |
679 | dma_free_coherent(trans->dev, | |
680 | sizeof(struct iwl_rb_status), | |
681 | rxq->rb_stts, rxq->rb_stts_dma); | |
96a6497b SS |
682 | |
683 | if (rxq->used_bd) | |
684 | dma_free_coherent(dev, sizeof(__le32) * rxq->queue_size, | |
685 | rxq->used_bd, rxq->used_bd_dma); | |
686 | rxq->used_bd_dma = 0; | |
687 | rxq->used_bd = NULL; | |
78485054 SS |
688 | } |
689 | kfree(trans_pcie->rxq); | |
96a6497b | 690 | |
9805c446 | 691 | return -ENOMEM; |
ab697a9f EG |
692 | } |
693 | ||
9805c446 EG |
694 | static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq) |
695 | { | |
696 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | |
697 | u32 rb_size; | |
dfcfeef9 | 698 | unsigned long flags; |
9805c446 EG |
699 | const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */ |
700 | ||
6c4fbcbc EG |
701 | switch (trans_pcie->rx_buf_size) { |
702 | case IWL_AMSDU_4K: | |
703 | rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K; | |
704 | break; | |
705 | case IWL_AMSDU_8K: | |
9805c446 | 706 | rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K; |
6c4fbcbc EG |
707 | break; |
708 | case IWL_AMSDU_12K: | |
709 | rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K; | |
710 | break; | |
711 | default: | |
712 | WARN_ON(1); | |
9805c446 | 713 | rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K; |
6c4fbcbc | 714 | } |
9805c446 | 715 | |
dfcfeef9 SS |
716 | if (!iwl_trans_grab_nic_access(trans, &flags)) |
717 | return; | |
718 | ||
9805c446 | 719 | /* Stop Rx DMA */ |
dfcfeef9 | 720 | iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); |
ddaf5a5b | 721 | /* reset and flush pointers */ |
dfcfeef9 SS |
722 | iwl_write32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0); |
723 | iwl_write32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0); | |
724 | iwl_write32(trans, FH_RSCSR_CHNL0_RDPTR, 0); | |
9805c446 EG |
725 | |
726 | /* Reset driver's Rx queue write index */ | |
dfcfeef9 | 727 | iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0); |
9805c446 EG |
728 | |
729 | /* Tell device where to find RBD circular buffer in DRAM */ | |
dfcfeef9 SS |
730 | iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG, |
731 | (u32)(rxq->bd_dma >> 8)); | |
9805c446 EG |
732 | |
733 | /* Tell device where in DRAM to update its Rx status */ | |
dfcfeef9 SS |
734 | iwl_write32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG, |
735 | rxq->rb_stts_dma >> 4); | |
9805c446 EG |
736 | |
737 | /* Enable Rx DMA | |
738 | * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in | |
739 | * the credit mechanism in 5000 HW RX FIFO | |
740 | * Direct rx interrupts to hosts | |
6c4fbcbc | 741 | * Rx buffer size 4k, 8k or 12k |
9805c446 EG |
742 | * RB timeout 0x10 |
743 | * 256 RBDs | |
744 | */ | |
dfcfeef9 SS |
745 | iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, |
746 | FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL | | |
747 | FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY | | |
748 | FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL | | |
749 | rb_size | | |
750 | (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) | | |
751 | (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS)); | |
752 | ||
753 | iwl_trans_release_nic_access(trans, &flags); | |
9805c446 EG |
754 | |
755 | /* Set interrupt coalescing timer to default (2048 usecs) */ | |
756 | iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF); | |
6960a059 EG |
757 | |
758 | /* W/A for interrupt coalescing bug in 7260 and 3160 */ | |
759 | if (trans->cfg->host_interrupt_operation_mode) | |
760 | iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE); | |
9805c446 EG |
761 | } |
762 | ||
1316d595 SS |
763 | void iwl_pcie_enable_rx_wake(struct iwl_trans *trans, bool enable) |
764 | { | |
765 | /* | |
766 | * Turn on the chicken-bits that cause MAC wakeup for RX-related | |
767 | * values. | |
768 | * This costs some power, but is needed for the W/A 9000 integrated A-step |
769 | * bug where shadow registers are not in the retention list and their | |
770 | * value is lost when NIC powers down | |
771 | */ | |
772 | if (trans->cfg->integrated) { | |
773 | iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, | |
774 | CSR_MAC_SHADOW_REG_CTRL_RX_WAKE); | |
775 | iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTL2, | |
776 | CSR_MAC_SHADOW_REG_CTL2_RX_WAKE); | |
777 | } | |
778 | } | |
779 | ||
bce97731 | 780 | static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans) |
c7df1f4b | 781 | { |
96a6497b SS |
782 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
783 | u32 rb_size, enabled = 0; | |
dfcfeef9 | 784 | unsigned long flags; |
96a6497b | 785 | int i; |
c7df1f4b | 786 | |
96a6497b SS |
787 | switch (trans_pcie->rx_buf_size) { |
788 | case IWL_AMSDU_4K: | |
789 | rb_size = RFH_RXF_DMA_RB_SIZE_4K; | |
790 | break; | |
791 | case IWL_AMSDU_8K: | |
792 | rb_size = RFH_RXF_DMA_RB_SIZE_8K; | |
793 | break; | |
794 | case IWL_AMSDU_12K: | |
795 | rb_size = RFH_RXF_DMA_RB_SIZE_12K; | |
796 | break; | |
797 | default: | |
798 | WARN_ON(1); | |
799 | rb_size = RFH_RXF_DMA_RB_SIZE_4K; | |
800 | } | |
c7df1f4b | 801 | |
dfcfeef9 SS |
802 | if (!iwl_trans_grab_nic_access(trans, &flags)) |
803 | return; | |
804 | ||
96a6497b | 805 | /* Stop Rx DMA */ |
dfcfeef9 | 806 | iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG, 0); |
96a6497b | 807 | /* disable free and used rx queue operation */ |
dfcfeef9 | 808 | iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, 0); |
26d535ae | 809 | |
96a6497b SS |
810 | for (i = 0; i < trans->num_rx_queues; i++) { |
811 | /* Tell device where to find RBD free table in DRAM */ | |
dfcfeef9 SS |
812 | iwl_pcie_write_prph_64_no_grab(trans, |
813 | RFH_Q_FRBDCB_BA_LSB(i), | |
814 | trans_pcie->rxq[i].bd_dma); | |
96a6497b | 815 | /* Tell device where to find RBD used table in DRAM */ |
dfcfeef9 SS |
816 | iwl_pcie_write_prph_64_no_grab(trans, |
817 | RFH_Q_URBDCB_BA_LSB(i), | |
818 | trans_pcie->rxq[i].used_bd_dma); | |
96a6497b | 819 | /* Tell device where in DRAM to update its Rx status */ |
dfcfeef9 SS |
820 | iwl_pcie_write_prph_64_no_grab(trans, |
821 | RFH_Q_URBD_STTS_WPTR_LSB(i), | |
822 | trans_pcie->rxq[i].rb_stts_dma); | |
96a6497b | 823 | /* Reset device index tables */ |
dfcfeef9 SS |
824 | iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_WIDX(i), 0); |
825 | iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_RIDX(i), 0); | |
826 | iwl_write_prph_no_grab(trans, RFH_Q_URBDCB_WIDX(i), 0); | |
96a6497b SS |
827 | |
828 | enabled |= BIT(i) | BIT(i + 16); | |
829 | } | |
26d535ae | 830 | |
96a6497b SS |
831 | /* restock default queue */ |
832 | iwl_pcie_rxq_mq_restock(trans, &trans_pcie->rxq[0]); | |
833 | ||
834 | /* | |
835 | * Enable Rx DMA | |
836 | * Single frame mode | |
837 | * Rx buffer size 4k, 8k or 12k |
838 | * Min RB size 4 or 8 | |
88076015 | 839 | * Drop frames that exceed RB size |
96a6497b SS |
840 | * 512 RBDs |
841 | */ | |
dfcfeef9 SS |
842 | iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG, |
843 | RFH_DMA_EN_ENABLE_VAL | | |
844 | rb_size | RFH_RXF_DMA_SINGLE_FRAME_MASK | | |
845 | RFH_RXF_DMA_MIN_RB_4_8 | | |
846 | RFH_RXF_DMA_DROP_TOO_LARGE_MASK | | |
847 | RFH_RXF_DMA_RBDCB_SIZE_512); | |
96a6497b | 848 | |
88076015 SS |
849 | /* |
850 | * Activate DMA snooping. | |
e5f91d91 | 851 | * Set RX DMA chunk size to 64B |
88076015 SS |
852 | * Default queue is 0 |
853 | */ | |
dfcfeef9 SS |
854 | iwl_write_prph_no_grab(trans, RFH_GEN_CFG, RFH_GEN_CFG_RFH_DMA_SNOOP | |
855 | (DEFAULT_RXQ_NUM << | |
856 | RFH_GEN_CFG_DEFAULT_RXQ_NUM_POS) | | |
857 | RFH_GEN_CFG_SERVICE_DMA_SNOOP); | |
88076015 | 858 | /* Enable the relevant rx queues */ |
dfcfeef9 SS |
859 | iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, enabled); |
860 | ||
861 | iwl_trans_release_nic_access(trans, &flags); | |
26d535ae | 862 | |
96a6497b SS |
863 | /* Set interrupt coalescing timer to default (2048 usecs) */ |
864 | iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF); | |
1316d595 SS |
865 | |
866 | iwl_pcie_enable_rx_wake(trans, true); | |
26d535ae SS |
867 | } |
868 | ||
96a6497b | 869 | static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq) |
26d535ae | 870 | { |
96a6497b | 871 | lockdep_assert_held(&rxq->lock); |
26d535ae | 872 | |
96a6497b SS |
873 | INIT_LIST_HEAD(&rxq->rx_free); |
874 | INIT_LIST_HEAD(&rxq->rx_used); | |
875 | rxq->free_count = 0; | |
876 | rxq->used_count = 0; | |
26d535ae SS |
877 | } |
878 | ||
bce97731 SS |
879 | static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget) |
880 | { | |
881 | WARN_ON(1); | |
882 | return 0; | |
883 | } | |
884 | ||
9805c446 EG |
885 | int iwl_pcie_rx_init(struct iwl_trans *trans) |
886 | { | |
887 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | |
78485054 | 888 | struct iwl_rxq *def_rxq; |
26d535ae | 889 | struct iwl_rb_allocator *rba = &trans_pcie->rba; |
7b542436 | 890 | int i, err, queue_size, allocator_pool_size, num_alloc; |
9805c446 | 891 | |
78485054 | 892 | if (!trans_pcie->rxq) { |
9805c446 EG |
893 | err = iwl_pcie_rx_alloc(trans); |
894 | if (err) | |
895 | return err; | |
896 | } | |
78485054 | 897 | def_rxq = trans_pcie->rxq; |
26d535ae SS |
898 | if (!rba->alloc_wq) |
899 | rba->alloc_wq = alloc_workqueue("rb_allocator", | |
900 | WQ_HIGHPRI | WQ_UNBOUND, 1); | |
901 | INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work); | |
902 | ||
903 | spin_lock(&rba->lock); | |
904 | atomic_set(&rba->req_pending, 0); | |
905 | atomic_set(&rba->req_ready, 0); | |
96a6497b SS |
906 | INIT_LIST_HEAD(&rba->rbd_allocated); |
907 | INIT_LIST_HEAD(&rba->rbd_empty); | |
26d535ae | 908 | spin_unlock(&rba->lock); |
9805c446 | 909 | |
c7df1f4b | 910 | /* free all first - we might be reconfigured for a different size */ |
78485054 | 911 | iwl_pcie_free_rbs_pool(trans); |
9805c446 EG |
912 | |
913 | for (i = 0; i < RX_QUEUE_SIZE; i++) | |
78485054 | 914 | def_rxq->queue[i] = NULL; |
9805c446 | 915 | |
78485054 SS |
916 | for (i = 0; i < trans->num_rx_queues; i++) { |
917 | struct iwl_rxq *rxq = &trans_pcie->rxq[i]; | |
918 | ||
96a6497b SS |
919 | rxq->id = i; |
920 | ||
78485054 SS |
921 | spin_lock(&rxq->lock); |
922 | /* | |
923 | * Set read write pointer to reflect that we have processed | |
924 | * and used all buffers, but have not restocked the Rx queue | |
925 | * with fresh buffers | |
926 | */ | |
927 | rxq->read = 0; | |
928 | rxq->write = 0; | |
929 | rxq->write_actual = 0; | |
930 | memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts)); | |
9805c446 | 931 | |
78485054 SS |
932 | iwl_pcie_rx_init_rxb_lists(rxq); |
933 | ||
bce97731 SS |
934 | if (!rxq->napi.poll) |
935 | netif_napi_add(&trans_pcie->napi_dev, &rxq->napi, | |
936 | iwl_pcie_dummy_napi_poll, 64); | |
937 | ||
78485054 SS |
938 | spin_unlock(&rxq->lock); |
939 | } | |
9805c446 | 940 | |
96a6497b | 941 | /* move the pool to the default queue and allocator ownerships */ |
7b542436 SS |
942 | queue_size = trans->cfg->mq_rx_supported ? |
943 | MQ_RX_NUM_RBDS : RX_QUEUE_SIZE; | |
96a6497b SS |
944 | allocator_pool_size = trans->num_rx_queues * |
945 | (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC); | |
7b542436 | 946 | num_alloc = queue_size + allocator_pool_size; |
43146925 SS |
947 | BUILD_BUG_ON(ARRAY_SIZE(trans_pcie->global_table) != |
948 | ARRAY_SIZE(trans_pcie->rx_pool)); | |
7b542436 | 949 | for (i = 0; i < num_alloc; i++) { |
96a6497b SS |
950 | struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i]; |
951 | ||
952 | if (i < allocator_pool_size) | |
953 | list_add(&rxb->list, &rba->rbd_empty); | |
954 | else | |
955 | list_add(&rxb->list, &def_rxq->rx_used); | |
956 | trans_pcie->global_table[i] = rxb; | |
957 | rxb->vid = (u16)i; | |
958 | } | |
9805c446 | 959 | |
78485054 | 960 | iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL, def_rxq); |
96a6497b | 961 | if (trans->cfg->mq_rx_supported) { |
bce97731 | 962 | iwl_pcie_rx_mq_hw_init(trans); |
96a6497b | 963 | } else { |
e0e168dc | 964 | iwl_pcie_rxq_sq_restock(trans, def_rxq); |
96a6497b SS |
965 | iwl_pcie_rx_hw_init(trans, def_rxq); |
966 | } | |
78485054 SS |
967 | |
968 | spin_lock(&def_rxq->lock); | |
969 | iwl_pcie_rxq_inc_wr_ptr(trans, def_rxq); | |
970 | spin_unlock(&def_rxq->lock); | |
9805c446 EG |
971 | |
972 | return 0; | |
973 | } | |
974 | ||
975 | void iwl_pcie_rx_free(struct iwl_trans *trans) | |
976 | { | |
977 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | |
26d535ae | 978 | struct iwl_rb_allocator *rba = &trans_pcie->rba; |
96a6497b SS |
979 | int free_size = trans->cfg->mq_rx_supported ? sizeof(__le64) : |
980 | sizeof(__le32); | |
78485054 | 981 | int i; |
9805c446 | 982 | |
78485054 SS |
983 | /* |
984 | * if rxq is NULL, it means that nothing has been allocated, | |
985 | * exit now | |
986 | */ | |
987 | if (!trans_pcie->rxq) { | |
9805c446 EG |
988 | IWL_DEBUG_INFO(trans, "Free NULL rx context\n"); |
989 | return; | |
990 | } | |
991 | ||
26d535ae SS |
992 | cancel_work_sync(&rba->rx_alloc); |
993 | if (rba->alloc_wq) { | |
994 | destroy_workqueue(rba->alloc_wq); | |
995 | rba->alloc_wq = NULL; | |
996 | } | |
997 | ||
78485054 SS |
998 | iwl_pcie_free_rbs_pool(trans); |
999 | ||
1000 | for (i = 0; i < trans->num_rx_queues; i++) { | |
1001 | struct iwl_rxq *rxq = &trans_pcie->rxq[i]; | |
1002 | ||
1003 | if (rxq->bd) | |
1004 | dma_free_coherent(trans->dev, | |
96a6497b | 1005 | free_size * rxq->queue_size, |
78485054 SS |
1006 | rxq->bd, rxq->bd_dma); |
1007 | rxq->bd_dma = 0; | |
1008 | rxq->bd = NULL; | |
1009 | ||
1010 | if (rxq->rb_stts) | |
1011 | dma_free_coherent(trans->dev, | |
1012 | sizeof(struct iwl_rb_status), | |
1013 | rxq->rb_stts, rxq->rb_stts_dma); | |
1014 | else | |
1015 | IWL_DEBUG_INFO(trans, | |
1016 | "Free rxq->rb_stts which is NULL\n"); | |
9805c446 | 1017 | |
96a6497b SS |
1018 | if (rxq->used_bd) |
1019 | dma_free_coherent(trans->dev, | |
1020 | sizeof(__le32) * rxq->queue_size, | |
1021 | rxq->used_bd, rxq->used_bd_dma); | |
1022 | rxq->used_bd_dma = 0; | |
1023 | rxq->used_bd = NULL; | |
bce97731 SS |
1024 | |
1025 | if (rxq->napi.poll) | |
1026 | netif_napi_del(&rxq->napi); | |
96a6497b | 1027 | } |
78485054 | 1028 | kfree(trans_pcie->rxq); |
9805c446 EG |
1029 | } |
1030 | ||
26d535ae SS |
1031 | /* |
1032 | * iwl_pcie_rx_reuse_rbd - Recycle used RBDs | |
1033 | * | |
1034 | * Called when an RBD can be reused. The RBD is transferred to the allocator. |
1035 | * When there are 2 empty RBDs - a request for allocation is posted. |
1036 | */ | |
1037 | static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans, | |
1038 | struct iwl_rx_mem_buffer *rxb, | |
1039 | struct iwl_rxq *rxq, bool emergency) | |
1040 | { | |
1041 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | |
1042 | struct iwl_rb_allocator *rba = &trans_pcie->rba; | |
1043 | ||
1044 | /* Move the RBD to the used list, will be moved to allocator in batches | |
1045 | * before claiming or posting a request */ |
1046 | list_add_tail(&rxb->list, &rxq->rx_used); | |
1047 | ||
1048 | if (unlikely(emergency)) | |
1049 | return; | |
1050 | ||
1051 | /* Count the allocator owned RBDs */ | |
1052 | rxq->used_count++; | |
1053 | ||
1054 | /* If we have RX_POST_REQ_ALLOC new released rx buffers - | |
1055 | * issue a request for allocator. Modulo RX_CLAIM_REQ_ALLOC is | |
1056 | * used for the case where we failed to claim RX_CLAIM_REQ_ALLOC |
1057 | * buffers earlier but still need to post another request. |
1058 | */ | |
1059 | if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) { | |
1060 | /* Move the 2 RBDs to the allocator ownership. | |
1061 | * Allocator has another 6 from the pool for the request completion */ |
1062 | spin_lock(&rba->lock); | |
1063 | list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty); | |
1064 | spin_unlock(&rba->lock); | |
1065 | ||
1066 | atomic_inc(&rba->req_pending); | |
1067 | queue_work(rba->alloc_wq, &rba->rx_alloc); | |
1068 | } | |
1069 | } | |
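/*
 * Editor's note: worked example of the posting rule above, assuming
 * RX_POST_REQ_ALLOC == 2 and RX_CLAIM_REQ_ALLOC == 8 (the "2 posted + 6
 * from the initial pool" scheme described at the top of this file). A
 * request is posted whenever used_count % 8 == 2, i.e. at used_count
 * 2, 10, 18, ...; each later claim consumes 8 used RBDs, so roughly one
 * request stays outstanding per 8 recycled buffers.
 */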
1070 | ||
9805c446 | 1071 | static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, |
78485054 | 1072 | struct iwl_rxq *rxq, |
26d535ae SS |
1073 | struct iwl_rx_mem_buffer *rxb, |
1074 | bool emergency) | |
df2f3216 JB |
1075 | { |
1076 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | |
990aa6d7 | 1077 | struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue]; |
0c19744c | 1078 | bool page_stolen = false; |
b2cf410c | 1079 | int max_len = PAGE_SIZE << trans_pcie->rx_page_order; |
0c19744c | 1080 | u32 offset = 0; |
df2f3216 JB |
1081 | |
1082 | if (WARN_ON(!rxb)) | |
1083 | return; | |
1084 | ||
0c19744c JB |
1085 | dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE); |
1086 | ||
1087 | while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) { | |
1088 | struct iwl_rx_packet *pkt; | |
0c19744c JB |
1089 | u16 sequence; |
1090 | bool reclaim; | |
f7e6469f | 1091 | int index, cmd_index, len; |
0c19744c JB |
1092 | struct iwl_rx_cmd_buffer rxcb = { |
1093 | ._offset = offset, | |
d13f1862 | 1094 | ._rx_page_order = trans_pcie->rx_page_order, |
0c19744c JB |
1095 | ._page = rxb->page, |
1096 | ._page_stolen = false, | |
0d6c4a2e | 1097 | .truesize = max_len, |
0c19744c JB |
1098 | }; |
1099 | ||
1100 | pkt = rxb_addr(&rxcb); | |
1101 | ||
1102 | if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID)) | |
1103 | break; | |
1104 | ||
9243efcc LK |
1105 | IWL_DEBUG_RX(trans, |
1106 | "cmd at offset %d: %s (0x%.2x, seq 0x%x)\n", | |
1107 | rxcb._offset, | |
39bdb17e SD |
1108 | iwl_get_cmd_string(trans, |
1109 | iwl_cmd_id(pkt->hdr.cmd, | |
1110 | pkt->hdr.group_id, | |
1111 | 0)), | |
9243efcc | 1112 | pkt->hdr.cmd, le16_to_cpu(pkt->hdr.sequence)); |
0c19744c | 1113 | |
65b30348 | 1114 | len = iwl_rx_packet_len(pkt); |
0c19744c | 1115 | len += sizeof(u32); /* account for status word */ |
f042c2eb JB |
1116 | trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len); |
1117 | trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len); | |
0c19744c JB |
1118 | |
1119 | /* Reclaim a command buffer only if this packet is a response | |
1120 | * to a (driver-originated) command. | |
1121 | * If the packet (e.g. Rx frame) originated from uCode, | |
1122 | * there is no command buffer to reclaim. | |
1123 | * Ucode should set SEQ_RX_FRAME bit if ucode-originated, | |
1124 | * but apparently a few don't get set; catch them here. */ | |
1125 | reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME); | |
1126 | if (reclaim) { | |
1127 | int i; | |
1128 | ||
1129 | for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) { | |
1130 | if (trans_pcie->no_reclaim_cmds[i] == | |
1131 | pkt->hdr.cmd) { | |
1132 | reclaim = false; | |
1133 | break; | |
1134 | } | |
d663ee73 JB |
1135 | } |
1136 | } | |
df2f3216 | 1137 | |
0c19744c JB |
1138 | sequence = le16_to_cpu(pkt->hdr.sequence); |
1139 | index = SEQ_TO_INDEX(sequence); | |
1140 | cmd_index = get_cmd_index(&txq->q, index); | |
1141 | ||
bce97731 SS |
1142 | if (rxq->id == 0) |
1143 | iwl_op_mode_rx(trans->op_mode, &rxq->napi, | |
1144 | &rxcb); | |
1145 | else | |
1146 | iwl_op_mode_rx_rss(trans->op_mode, &rxq->napi, | |
1147 | &rxcb, rxq->id); | |
0c19744c | 1148 | |
96791422 | 1149 | if (reclaim) { |
5d4185ae | 1150 | kzfree(txq->entries[cmd_index].free_buf); |
f4feb8ac | 1151 | txq->entries[cmd_index].free_buf = NULL; |
96791422 EG |
1152 | } |
1153 | ||
0c19744c JB |
1154 | /* |
1155 | * After here, we should always check rxcb._page_stolen, | |
1156 | * if it is true then one of the handlers took the page. | |
1157 | */ | |
1158 | ||
1159 | if (reclaim) { | |
1160 | /* Invoke any callbacks, transfer the buffer to caller, | |
1161 | * and fire off the (possibly) blocking | |
1162 | * iwl_trans_send_cmd() | |
1163 | * as we reclaim the driver command queue */ | |
1164 | if (!rxcb._page_stolen) | |
f7e6469f | 1165 | iwl_pcie_hcmd_complete(trans, &rxcb); |
0c19744c JB |
1166 | else |
1167 | IWL_WARN(trans, "Claim null rxb?\n"); | |
1168 | } | |
1169 | ||
1170 | page_stolen |= rxcb._page_stolen; | |
1171 | offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN); | |
df2f3216 JB |
1172 | } |
1173 | ||
0c19744c JB |
1174 | /* page was stolen from us -- free our reference */ |
1175 | if (page_stolen) { | |
b2cf410c | 1176 | __free_pages(rxb->page, trans_pcie->rx_page_order); |
df2f3216 | 1177 | rxb->page = NULL; |
0c19744c | 1178 | } |
df2f3216 JB |
1179 | |
1180 | /* Reuse the page if possible. For notification packets and | |
1181 | * SKBs that fail to Rx correctly, add them back into the | |
1182 | * rx_free list for reuse later. */ | |
df2f3216 JB |
1183 | if (rxb->page != NULL) { |
1184 | rxb->page_dma = | |
1185 | dma_map_page(trans->dev, rxb->page, 0, | |
20d3b647 JB |
1186 | PAGE_SIZE << trans_pcie->rx_page_order, |
1187 | DMA_FROM_DEVICE); | |
7c341582 JB |
1188 | if (dma_mapping_error(trans->dev, rxb->page_dma)) { |
1189 | /* | |
1190 | * free the page(s) as well to not break | |
1191 | * the invariant that the items on the used | |
1192 | * list have no page(s) | |
1193 | */ | |
1194 | __free_pages(rxb->page, trans_pcie->rx_page_order); | |
1195 | rxb->page = NULL; | |
26d535ae | 1196 | iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency); |
7c341582 JB |
1197 | } else { |
1198 | list_add_tail(&rxb->list, &rxq->rx_free); | |
1199 | rxq->free_count++; | |
1200 | } | |
df2f3216 | 1201 | } else |
26d535ae | 1202 | iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency); |
df2f3216 JB |
1203 | } |
1204 | ||
990aa6d7 EG |
1205 | /* |
1206 | * iwl_pcie_rx_handle - Main entry function for receiving responses from fw | |
ab697a9f | 1207 | */ |
2e5d4a8f | 1208 | static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue) |
ab697a9f | 1209 | { |
df2f3216 | 1210 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
2e5d4a8f | 1211 | struct iwl_rxq *rxq = &trans_pcie->rxq[queue]; |
d56daea4 | 1212 | u32 r, i, count = 0; |
26d535ae | 1213 | bool emergency = false; |
ab697a9f | 1214 | |
f14d6b39 JB |
1215 | restart: |
1216 | spin_lock(&rxq->lock); | |
ab697a9f EG |
1217 | /* uCode's read index (stored in shared DRAM) indicates the last Rx |
1218 | * buffer that the driver may process (last buffer filled by ucode). */ | |
52e2a99e | 1219 | r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF; |
ab697a9f EG |
1220 | i = rxq->read; |
1221 | ||
5eae443e SS |
1222 | /* W/A 9000 device step A0 wrap-around bug */ |
1223 | r &= (rxq->queue_size - 1); | |
1224 | ||
ab697a9f EG |
1225 | /* Rx interrupt, but nothing sent from uCode */ |
1226 | if (i == r) | |
5eae443e | 1227 | IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r); |
ab697a9f | 1228 | |
ab697a9f | 1229 | while (i != r) { |
48a2d66f | 1230 | struct iwl_rx_mem_buffer *rxb; |
ab697a9f | 1231 | |
96a6497b | 1232 | if (unlikely(rxq->used_count == rxq->queue_size / 2)) |
26d535ae SS |
1233 | emergency = true; |
1234 | ||
96a6497b SS |
1235 | if (trans->cfg->mq_rx_supported) { |
1236 | /* | |
1237 | * used_bd is 32 bits wide, but only the low 12 bits are |
1238 | * used to retrieve the vid |
1239 | */ | |
5eae443e | 1240 | u16 vid = le32_to_cpu(rxq->used_bd[i]) & 0x0FFF; |
96a6497b | 1241 | |
5eae443e SS |
1242 | if (WARN(vid >= ARRAY_SIZE(trans_pcie->global_table), |
1243 | "Invalid rxb index from HW %u\n", (u32)vid)) | |
1244 | goto out; | |
96a6497b SS |
1245 | rxb = trans_pcie->global_table[vid]; |
1246 | } else { | |
1247 | rxb = rxq->queue[i]; | |
1248 | rxq->queue[i] = NULL; | |
1249 | } | |
ab697a9f | 1250 | |
5eae443e | 1251 | IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i); |
78485054 | 1252 | iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency); |
ab697a9f | 1253 | |
96a6497b | 1254 | i = (i + 1) & (rxq->queue_size - 1); |
26d535ae | 1255 | |
d56daea4 SS |
1256 | /* |
1257 | * If we have RX_CLAIM_REQ_ALLOC released rx buffers - | |
1258 | * try to claim the pre-allocated buffers from the allocator. | |
1259 | * If not ready - will try to reclaim next time. | |
1260 | * There is no need to reschedule work - allocator exits only | |
1261 | * on success | |
1262 | */ | |
1263 | if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) | |
1264 | iwl_pcie_rx_allocator_get(trans, rxq); | |
1265 | ||
1266 | if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && !emergency) { | |
26d535ae | 1267 | struct iwl_rb_allocator *rba = &trans_pcie->rba; |
26d535ae | 1268 | |
d56daea4 SS |
1269 | /* Add the remaining empty RBDs for allocator use */ |
1270 | spin_lock(&rba->lock); | |
1271 | list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty); | |
1272 | spin_unlock(&rba->lock); | |
1273 | } else if (emergency) { | |
255ba065 | 1274 | count++; |
26d535ae | 1275 | if (count == 8) { |
255ba065 | 1276 | count = 0; |
96a6497b | 1277 | if (rxq->used_count < rxq->queue_size / 3) |
26d535ae | 1278 | emergency = false; |
e0e168dc GG |
1279 | |
1280 | rxq->read = i; | |
26d535ae | 1281 | spin_unlock(&rxq->lock); |
78485054 | 1282 | iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq); |
96a6497b | 1283 | iwl_pcie_rxq_restock(trans, rxq); |
e0e168dc GG |
1284 | goto restart; |
1285 | } | |
26d535ae | 1286 | } |
ab697a9f | 1287 | } |
5eae443e | 1288 | out: |
ab697a9f EG |
1289 | /* Backtrack one entry */ |
1290 | rxq->read = i; | |
f14d6b39 JB |
1291 | spin_unlock(&rxq->lock); |
1292 | ||
26d535ae SS |
1293 | /* |
1294 | * handle a case where, in emergency mode, there are some unallocated RBDs. |
1295 | * those RBDs are in the used list, but are not tracked by the queue's |
1296 | * used_count, which counts allocator-owned RBDs. |
1297 | * unallocated emergency RBDs must be allocated on exit, otherwise | |
1298 | * when called again the function may not be in emergency mode and | |
1299 | * they will be handed to the allocator with no tracking in the RBD | |
1300 | * allocator counters, which will lead to them never being claimed back | |
1301 | * by the queue. | |
1302 | * by allocating them here, they are now in the queue free list, and | |
1303 | * will be restocked by the next call of iwl_pcie_rxq_restock. | |
1304 | */ | |
1305 | if (unlikely(emergency && count)) | |
78485054 | 1306 | iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq); |
255ba065 | 1307 | |
bce97731 SS |
1308 | if (rxq->napi.poll) |
1309 | napi_gro_flush(&rxq->napi, false); | |
e0e168dc GG |
1310 | |
1311 | iwl_pcie_rxq_restock(trans, rxq); | |
ab697a9f EG |
1312 | } |
1313 | ||
2e5d4a8f HD |
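/*
 * Editor's note: entry->entry holds the index of this MSI-X vector within
 * the msix_entries[] array, so stepping the pointer back by that many
 * entries yields &msix_entries[0], from which container_of() recovers the
 * owning iwl_trans_pcie.
 */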
1314 | static struct iwl_trans_pcie *iwl_pcie_get_trans_pcie(struct msix_entry *entry) |
1315 | { | |
1316 | u8 queue = entry->entry; | |
1317 | struct msix_entry *entries = entry - queue; | |
1318 | ||
1319 | return container_of(entries, struct iwl_trans_pcie, msix_entries[0]); | |
1320 | } | |
1321 | ||
1322 | static inline void iwl_pcie_clear_irq(struct iwl_trans *trans, | |
1323 | struct msix_entry *entry) | |
1324 | { | |
1325 | /* | |
1326 | * Before sending the interrupt the HW disables it to prevent | |
1327 | * a nested interrupt. This is done by writing 1 to the corresponding | |
1328 | * bit in the mask register. After handling the interrupt, it should be | |
1329 | * re-enabled by clearing this bit. This register is defined as | |
1330 | * write 1 clear (W1C) register, meaning that it is cleared |
1331 | * by writing 1 to the bit. |
1332 | */ | |
7ef3dd26 | 1333 | iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry)); |
2e5d4a8f HD |
1334 | } |
1335 | ||
1336 | /* | |
1337 | * iwl_pcie_irq_rx_msix_handler - Main entry function for receiving responses from fw |
1338 | * This interrupt handler should be used with RSS queue only. | |
1339 | */ | |
1340 | irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id) | |
1341 | { | |
1342 | struct msix_entry *entry = dev_id; | |
1343 | struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry); | |
1344 | struct iwl_trans *trans = trans_pcie->trans; | |
1345 | ||
5eae443e SS |
1346 | if (WARN_ON(entry->entry >= trans->num_rx_queues)) |
1347 | return IRQ_NONE; | |
1348 | ||
2e5d4a8f HD |
1349 | lock_map_acquire(&trans->sync_cmd_lockdep_map); |
1350 | ||
1351 | local_bh_disable(); | |
1352 | iwl_pcie_rx_handle(trans, entry->entry); | |
1353 | local_bh_enable(); | |
1354 | ||
1355 | iwl_pcie_clear_irq(trans, entry); | |
1356 | ||
1357 | lock_map_release(&trans->sync_cmd_lockdep_map); | |
1358 | ||
1359 | return IRQ_HANDLED; | |
1360 | } | |
1361 | ||
990aa6d7 EG |
1362 | /* |
1363 | * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card | |
7ff94706 | 1364 | */ |
990aa6d7 | 1365 | static void iwl_pcie_irq_handle_error(struct iwl_trans *trans) |
7ff94706 | 1366 | { |
f946b529 | 1367 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
1103323c | 1368 | int i; |
f946b529 | 1369 | |
7ff94706 | 1370 | /* W/A for WiFi/WiMAX coex and WiMAX own the RF */ |
035f7ff2 | 1371 | if (trans->cfg->internal_wimax_coex && |
95411d04 | 1372 | !trans->cfg->apmg_not_supported && |
1042db2a | 1373 | (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) & |
20d3b647 | 1374 | APMS_CLK_VAL_MRB_FUNC_MODE) || |
1042db2a | 1375 | (iwl_read_prph(trans, APMG_PS_CTRL_REG) & |
20d3b647 | 1376 | APMG_PS_CTRL_VAL_RESET_REQ))) { |
eb7ff77e | 1377 | clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); |
8a8bbdb4 | 1378 | iwl_op_mode_wimax_active(trans->op_mode); |
f946b529 | 1379 | wake_up(&trans_pcie->wait_command_queue); |
7ff94706 EG |
1380 | return; |
1381 | } | |
1382 | ||
990aa6d7 | 1383 | iwl_pcie_dump_csr(trans); |
313b0a29 | 1384 | iwl_dump_fh(trans, NULL); |
7ff94706 | 1385 | |
2bfb5092 | 1386 | local_bh_disable(); |
2a988e98 AN |
1387 | /* The STATUS_FW_ERROR bit is set in this function. This must happen |
1388 | * before we wake up the command caller, to ensure a proper cleanup. */ | |
1389 | iwl_trans_fw_error(trans); | |
2bfb5092 | 1390 | local_bh_enable(); |
2a988e98 | 1391 | |
1103323c EG |
1392 | for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) |
1393 | del_timer(&trans_pcie->txq[i].stuck_timer); | |
1394 | ||
2a988e98 AN |
1395 | clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); |
1396 | wake_up(&trans_pcie->wait_command_queue); | |
7ff94706 EG |
1397 | } |
1398 | ||
7117c000 | 1399 | static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans) |
fc84472b | 1400 | { |
fc84472b EG |
1401 | u32 inta; |
1402 | ||
46e81af9 | 1403 | lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock); |
fc84472b EG |
1404 | |
1405 | trace_iwlwifi_dev_irq(trans->dev); | |
1406 | ||
1407 | /* Discover which interrupts are active/pending */ | |
1408 | inta = iwl_read32(trans, CSR_INT); | |
1409 | ||
fc84472b | 1410 | /* the thread will service interrupts and re-enable them */ |
fe523dc9 | 1411 | return inta; |
fc84472b EG |
1412 | } |
1413 | ||
1414 | /* a device (PCI-E) page is 4096 bytes long */ | |
1415 | #define ICT_SHIFT 12 | |
1416 | #define ICT_SIZE (1 << ICT_SHIFT) | |
1417 | #define ICT_COUNT (ICT_SIZE / sizeof(u32)) | |
1418 | ||
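With a 4096-byte page and 32-bit entries, these macros give ICT_SIZE = 4096 and ICT_COUNT = 1024; because ICT_COUNT is a power of two, the index wrap later in this file can use a mask instead of a modulo. A standalone sketch:

#include <assert.h>

#define ICT_SHIFT 12
#define ICT_SIZE (1 << ICT_SHIFT)			/* 4096 bytes */
#define ICT_COUNT (ICT_SIZE / sizeof(unsigned int))	/* 1024 entries */

int main(void)
{
	unsigned int idx = ICT_COUNT - 1;	/* last slot */

	/* Power-of-two wrap: the mask replaces '% ICT_COUNT'. */
	idx = (idx + 1) & (ICT_COUNT - 1);
	assert(idx == 0);
	return 0;
}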
1419 | /* Interrupt handling using the ICT table. With this mechanism the
1420 | * driver stops reading the expensive INTA register to discover the
1421 | * device's interrupts. Instead, the device writes its interrupt
1422 | * causes into the ICT table in DRAM, increments the index, and fires
1423 | * an interrupt to the driver. The driver ORs all ICT entries from
1424 | * the current index up to the first zero entry; the result is the
1425 | * set of interrupts to service. It then zeroes the consumed entries
1426 | * and updates the index. */
7117c000 | 1427 | static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans) |
fc84472b EG |
1428 | { |
1429 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | |
fc84472b EG |
1430 | u32 inta; |
1431 | u32 val = 0; | |
1432 | u32 read; | |
1433 | ||
fc84472b EG |
1434 | trace_iwlwifi_dev_irq(trans->dev); |
1435 | ||
1436 | /* Ignore interrupt if there's nothing in NIC to service. | |
1437 | * This may be due to IRQ shared with another device, | |
1438 | * or due to sporadic interrupts thrown from our NIC. */ | |
1439 | read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]); | |
1440 | trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read); | |
7ba1faa4 EG |
1441 | if (!read) |
1442 | return 0; | |
fc84472b EG |
1443 | |
1444 | /* | |
1445 | * Collect all entries up to the first 0, starting from ict_index; | |
1446 | * note we already read at ict_index. | |
1447 | */ | |
1448 | do { | |
1449 | val |= read; | |
1450 | IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n", | |
1451 | trans_pcie->ict_index, read); | |
1452 | trans_pcie->ict_tbl[trans_pcie->ict_index] = 0; | |
1453 | trans_pcie->ict_index = | |
83f32a4b | 1454 | ((trans_pcie->ict_index + 1) & (ICT_COUNT - 1)); |
fc84472b EG |
1455 | |
1456 | read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]); | |
1457 | trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, | |
1458 | read); | |
1459 | } while (read); | |
1460 | ||
1461 | /* We should not get this value, just ignore it. */ | |
1462 | if (val == 0xffffffff) | |
1463 | val = 0; | |
1464 | ||
1465 | /* | |
1466 | * This is a w/a for a h/w bug: the bug may cause the Rx bit
1467 | * (bit 15 before shifting it to 31) to clear when using interrupt
1468 | * coalescing. Fortunately, bits 18 and 19 stay set when this happens,
1469 | * so we use them to decide on the real state of the Rx bit.
1470 | * In other words, bit 15 is set if bit 18 or bit 19 is set.
1471 | */ | |
1472 | if (val & 0xC0000) | |
1473 | val |= 0x8000; | |
1474 | ||
1475 | inta = (0xff & val) | ((0xff00 & val) << 16); | |
fe523dc9 | 1476 | return inta; |
fc84472b EG |
1477 | } |
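The last statement above folds the 16 ICT bits back into the 32-bit CSR_INT layout: the low byte keeps bits 0-7 and the high byte is shifted up to bits 24-31. A worked example, outside the driver:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t val = 0x8002;	/* ICT value: bits 1 and 15 set */
	uint32_t inta = (0xff & val) | ((0xff00 & val) << 16);

	/* Bit 1 stays bit 1; bit 15 lands on bit 31 (CSR_INT layout). */
	assert(inta == 0x80000002);
	return 0;
}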
1478 | ||
2bfb5092 | 1479 | irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id) |
ab697a9f | 1480 | { |
2bfb5092 | 1481 | struct iwl_trans *trans = dev_id; |
20d3b647 JB |
1482 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
1483 | struct isr_statistics *isr_stats = &trans_pcie->isr_stats; | |
ab697a9f EG |
1484 | u32 inta = 0; |
1485 | u32 handled = 0; | |
ab697a9f | 1486 | |
2bfb5092 JB |
1487 | lock_map_acquire(&trans->sync_cmd_lockdep_map); |
1488 | ||
7b70bd63 | 1489 | spin_lock(&trans_pcie->irq_lock); |
ab697a9f | 1490 | |
0fec9542 EG |
1491 | /* If the DRAM interrupt table is not set yet,
1492 | * use the legacy INTA register.
1493 | */
1494 | if (likely(trans_pcie->use_ict)) | |
7117c000 | 1495 | inta = iwl_pcie_int_cause_ict(trans); |
0fec9542 | 1496 | else |
7117c000 | 1497 | inta = iwl_pcie_int_cause_non_ict(trans); |
0fec9542 | 1498 | |
7ba1faa4 EG |
1499 | if (iwl_have_debug_level(IWL_DL_ISR)) { |
1500 | IWL_DEBUG_ISR(trans, | |
1501 | "ISR inta 0x%08x, enabled 0x%08x(sw), enabled(hw) 0x%08x, fh 0x%08x\n", | |
1502 | inta, trans_pcie->inta_mask, | |
1503 | iwl_read32(trans, CSR_INT_MASK), | |
1504 | iwl_read32(trans, CSR_FH_INT_STATUS)); | |
1505 | if (inta & (~trans_pcie->inta_mask)) | |
1506 | IWL_DEBUG_ISR(trans, | |
1507 | "We got a masked interrupt (0x%08x)\n", | |
1508 | inta & (~trans_pcie->inta_mask)); | |
1509 | } | |
1510 | ||
1511 | inta &= trans_pcie->inta_mask; | |
1512 | ||
1513 | /* | |
1514 | * Ignore interrupt if there's nothing in NIC to service. | |
1515 | * This may be due to IRQ shared with another device, | |
1516 | * or due to sporadic interrupts thrown from our NIC. | |
1517 | */ | |
7117c000 | 1518 | if (unlikely(!inta)) { |
7ba1faa4 EG |
1519 | IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n"); |
1520 | /* | |
1521 | * Re-enable interrupts here since we don't | |
1522 | * have anything to service | |
1523 | */ | |
1524 | if (test_bit(STATUS_INT_ENABLED, &trans->status)) | |
1525 | iwl_enable_interrupts(trans); | |
7b70bd63 | 1526 | spin_unlock(&trans_pcie->irq_lock); |
7117c000 EG |
1527 | lock_map_release(&trans->sync_cmd_lockdep_map); |
1528 | return IRQ_NONE; | |
1529 | } | |
1530 | ||
7ba1faa4 EG |
1531 | if (unlikely(inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0)) { |
1532 | /* | |
1533 | * Hardware disappeared. It might have | |
1534 | * already raised an interrupt. | |
1535 | */ | |
1536 | IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta); | |
7b70bd63 | 1537 | spin_unlock(&trans_pcie->irq_lock); |
7117c000 | 1538 | goto out; |
a0f337cc EG |
1539 | } |
1540 | ||
ab697a9f EG |
1541 | /* Ack/clear/reset pending uCode interrupts. |
1542 | * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS, | |
1543 | */ | |
1544 | /* There is a hardware bug in the interrupt mask function: some
1545 | * interrupts (e.g. CSR_INT_BIT_SCD) can still be generated even if
1546 | * they are disabled in the CSR_INT_MASK register. Furthermore, the
1547 | * ICT interrupt handling mechanism has another bug that might cause
1548 | * these unmasked interrupts to go undetected. We work around the
1549 | * hardware bugs here by ACKing all the possible interrupts so that
1550 | * interrupt coalescing can still be achieved.
1551 | */ | |
7117c000 | 1552 | iwl_write32(trans, CSR_INT, inta | ~trans_pcie->inta_mask); |
ab697a9f | 1553 | |
51cd53ad | 1554 | if (iwl_have_debug_level(IWL_DL_ISR)) |
0ca24daf | 1555 | IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n", |
51cd53ad | 1556 | inta, iwl_read32(trans, CSR_INT_MASK)); |
ab697a9f | 1557 | |
7b70bd63 | 1558 | spin_unlock(&trans_pcie->irq_lock); |
b49ba04a | 1559 | |
ab697a9f EG |
1560 | /* Now service all interrupt bits discovered above. */ |
1561 | if (inta & CSR_INT_BIT_HW_ERR) { | |
0c325769 | 1562 | IWL_ERR(trans, "Hardware error detected. Restarting.\n"); |
ab697a9f EG |
1563 | |
1564 | /* Tell the device to stop sending interrupts */ | |
0c325769 | 1565 | iwl_disable_interrupts(trans); |
ab697a9f | 1566 | |
1f7b6172 | 1567 | isr_stats->hw++; |
990aa6d7 | 1568 | iwl_pcie_irq_handle_error(trans); |
ab697a9f EG |
1569 | |
1570 | handled |= CSR_INT_BIT_HW_ERR; | |
1571 | ||
2bfb5092 | 1572 | goto out; |
ab697a9f EG |
1573 | } |
1574 | ||
a8bceb39 | 1575 | if (iwl_have_debug_level(IWL_DL_ISR)) { |
ab697a9f EG |
1576 | /* NIC fires this, but we don't use it, redundant with WAKEUP */ |
1577 | if (inta & CSR_INT_BIT_SCD) { | |
51cd53ad JB |
1578 | IWL_DEBUG_ISR(trans, |
1579 | "Scheduler finished to transmit the frame/frames.\n"); | |
1f7b6172 | 1580 | isr_stats->sch++; |
ab697a9f EG |
1581 | } |
1582 | ||
1583 | /* Alive notification via Rx interrupt will do the real work */ | |
1584 | if (inta & CSR_INT_BIT_ALIVE) { | |
0c325769 | 1585 | IWL_DEBUG_ISR(trans, "Alive interrupt\n"); |
1f7b6172 | 1586 | isr_stats->alive++; |
ab697a9f EG |
1587 | } |
1588 | } | |
51cd53ad | 1589 | |
ab697a9f EG |
1590 | /* Safely ignore these bits for debug checks below */ |
1591 | inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE); | |
1592 | ||
1593 | /* HW RF KILL switch toggled */ | |
1594 | if (inta & CSR_INT_BIT_RF_KILL) { | |
c9eec95c | 1595 | bool hw_rfkill; |
ab697a9f | 1596 | |
8d425517 | 1597 | hw_rfkill = iwl_is_rfkill_set(trans); |
0c325769 | 1598 | IWL_WARN(trans, "RF_KILL bit toggled to %s.\n", |
20d3b647 | 1599 | hw_rfkill ? "disable radio" : "enable radio"); |
ab697a9f | 1600 | |
1f7b6172 | 1601 | isr_stats->rfkill++; |
ab697a9f | 1602 | |
fa9f3281 | 1603 | mutex_lock(&trans_pcie->mutex); |
14cfca71 | 1604 | iwl_trans_pcie_rf_kill(trans, hw_rfkill); |
fa9f3281 | 1605 | mutex_unlock(&trans_pcie->mutex); |
f946b529 | 1606 | if (hw_rfkill) { |
eb7ff77e AN |
1607 | set_bit(STATUS_RFKILL, &trans->status); |
1608 | if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE, | |
1609 | &trans->status)) | |
f946b529 EG |
1610 | IWL_DEBUG_RF_KILL(trans, |
1611 | "Rfkill while SYNC HCMD in flight\n"); | |
1612 | wake_up(&trans_pcie->wait_command_queue); | |
1613 | } else { | |
eb7ff77e | 1614 | clear_bit(STATUS_RFKILL, &trans->status); |
f946b529 | 1615 | } |
ab697a9f EG |
1616 | |
1617 | handled |= CSR_INT_BIT_RF_KILL; | |
1618 | } | |
1619 | ||
1620 | /* Chip got too hot and stopped itself */ | |
1621 | if (inta & CSR_INT_BIT_CT_KILL) { | |
0c325769 | 1622 | IWL_ERR(trans, "Microcode CT kill error detected.\n"); |
1f7b6172 | 1623 | isr_stats->ctkill++; |
ab697a9f EG |
1624 | handled |= CSR_INT_BIT_CT_KILL; |
1625 | } | |
1626 | ||
1627 | /* Error detected by uCode */ | |
1628 | if (inta & CSR_INT_BIT_SW_ERR) { | |
0c325769 | 1629 | IWL_ERR(trans, "Microcode SW error detected. " |
ab697a9f | 1630 | " Restarting 0x%X.\n", inta); |
1f7b6172 | 1631 | isr_stats->sw++; |
990aa6d7 | 1632 | iwl_pcie_irq_handle_error(trans); |
ab697a9f EG |
1633 | handled |= CSR_INT_BIT_SW_ERR; |
1634 | } | |
1635 | ||
1636 | /* uCode wakes up after power-down sleep */ | |
1637 | if (inta & CSR_INT_BIT_WAKEUP) { | |
0c325769 | 1638 | IWL_DEBUG_ISR(trans, "Wakeup interrupt\n"); |
5d63f926 | 1639 | iwl_pcie_rxq_check_wrptr(trans); |
ea68f460 | 1640 | iwl_pcie_txq_check_wrptrs(trans); |
ab697a9f | 1641 | |
1f7b6172 | 1642 | isr_stats->wakeup++; |
ab697a9f EG |
1643 | |
1644 | handled |= CSR_INT_BIT_WAKEUP; | |
1645 | } | |
1646 | ||
1647 | /* All uCode command responses, including Tx command responses, | |
1648 | * Rx "responses" (frame-received notification), and other | |
1649 | * notifications from uCode come through here. */
1650 | if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX | | |
20d3b647 | 1651 | CSR_INT_BIT_RX_PERIODIC)) { |
0c325769 | 1652 | IWL_DEBUG_ISR(trans, "Rx interrupt\n"); |
ab697a9f EG |
1653 | if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) { |
1654 | handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX); | |
1042db2a | 1655 | iwl_write32(trans, CSR_FH_INT_STATUS, |
ab697a9f EG |
1656 | CSR_FH_INT_RX_MASK); |
1657 | } | |
1658 | if (inta & CSR_INT_BIT_RX_PERIODIC) { | |
1659 | handled |= CSR_INT_BIT_RX_PERIODIC; | |
1042db2a | 1660 | iwl_write32(trans, |
0c325769 | 1661 | CSR_INT, CSR_INT_BIT_RX_PERIODIC); |
ab697a9f EG |
1662 | } |
1663 | /* Sending an RX interrupt requires several steps to be done in
1664 | * the device:
1665 | * 1- write the interrupt to the current index in the ICT table.
1666 | * 2- DMA the RX frame.
1667 | * 3- update the RX shared data to indicate the last write index.
1668 | * 4- send the interrupt.
1669 | * This can lead to an RX race: the driver may receive an RX
1670 | * interrupt before the shared data reflects these changes;
1671 | * the periodic interrupt will detect any dangling Rx activity.
1672 | */ | |
1673 | ||
1674 | /* Disable periodic interrupt; we use it as just a one-shot. */ | |
1042db2a | 1675 | iwl_write8(trans, CSR_INT_PERIODIC_REG, |
ab697a9f | 1676 | CSR_INT_PERIODIC_DIS); |
6379103e | 1677 | |
ab697a9f EG |
1678 | /* |
1679 | * Enable periodic interrupt in 8 msec only if we received | |
1680 | * real RX interrupt (instead of just periodic int), to catch | |
1681 | * any dangling Rx interrupt. If it was just the periodic | |
1682 | * interrupt, there was no dangling Rx activity, and no need | |
1683 | * to extend the periodic interrupt; one-shot is enough. | |
1684 | */ | |
1685 | if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) | |
1042db2a | 1686 | iwl_write8(trans, CSR_INT_PERIODIC_REG, |
20d3b647 | 1687 | CSR_INT_PERIODIC_ENA); |
ab697a9f | 1688 | |
1f7b6172 | 1689 | isr_stats->rx++; |
f14d6b39 JB |
1690 | |
1691 | local_bh_disable(); | |
2e5d4a8f | 1692 | iwl_pcie_rx_handle(trans, 0); |
f14d6b39 | 1693 | local_bh_enable(); |
ab697a9f EG |
1694 | } |
1695 | ||
1696 | /* This "Tx" DMA channel is used only for loading uCode */ | |
1697 | if (inta & CSR_INT_BIT_FH_TX) { | |
1042db2a | 1698 | iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK); |
0c325769 | 1699 | IWL_DEBUG_ISR(trans, "uCode load interrupt\n"); |
1f7b6172 | 1700 | isr_stats->tx++; |
ab697a9f EG |
1701 | handled |= CSR_INT_BIT_FH_TX; |
1702 | /* Wake up uCode load routine, now that load is complete */ | |
13df1aab JB |
1703 | trans_pcie->ucode_write_complete = true; |
1704 | wake_up(&trans_pcie->ucode_write_waitq); | |
ab697a9f EG |
1705 | } |
1706 | ||
1707 | if (inta & ~handled) { | |
0c325769 | 1708 | IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled); |
1f7b6172 | 1709 | isr_stats->unhandled++; |
ab697a9f EG |
1710 | } |
1711 | ||
0c325769 EG |
1712 | if (inta & ~(trans_pcie->inta_mask)) { |
1713 | IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n", | |
1714 | inta & ~trans_pcie->inta_mask); | |
ab697a9f EG |
1715 | } |
1716 | ||
a6bd005f EG |
1717 | /* we are loading the firmware, enable FH_TX interrupt only */ |
1718 | if (handled & CSR_INT_BIT_FH_TX) | |
1719 | iwl_enable_fw_load_int(trans); | |
1720 | /* Re-enable all interrupts only if they were disabled by the irq handler */
1721 | else if (test_bit(STATUS_INT_ENABLED, &trans->status)) | |
0c325769 | 1722 | iwl_enable_interrupts(trans); |
ab697a9f | 1723 | /* Re-enable RF_KILL if it occurred */ |
8722c899 SG |
1724 | else if (handled & CSR_INT_BIT_RF_KILL) |
1725 | iwl_enable_rfkill_int(trans); | |
2bfb5092 JB |
1726 | |
1727 | out: | |
1728 | lock_map_release(&trans->sync_cmd_lockdep_map); | |
1729 | return IRQ_HANDLED; | |
ab697a9f EG |
1730 | } |
1731 | ||
1a361cd8 EG |
1732 | /****************************************************************************** |
1733 | * | |
1734 | * ICT functions | |
1735 | * | |
1736 | ******************************************************************************/ | |
10667136 | 1737 | |
1a361cd8 | 1738 | /* Free dram table */ |
990aa6d7 | 1739 | void iwl_pcie_free_ict(struct iwl_trans *trans) |
1a361cd8 | 1740 | { |
20d3b647 | 1741 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
0c325769 | 1742 | |
10667136 | 1743 | if (trans_pcie->ict_tbl) { |
1042db2a | 1744 | dma_free_coherent(trans->dev, ICT_SIZE, |
10667136 | 1745 | trans_pcie->ict_tbl, |
0c325769 | 1746 | trans_pcie->ict_tbl_dma); |
10667136 JB |
1747 | trans_pcie->ict_tbl = NULL; |
1748 | trans_pcie->ict_tbl_dma = 0; | |
1a361cd8 EG |
1749 | } |
1750 | } | |
1751 | ||
10667136 JB |
1752 | /* |
1753 | * allocate dram shared table, it is an aligned memory | |
1754 | * block of ICT_SIZE. | |
1a361cd8 EG |
1755 | * also reset all data related to ICT table interrupt. |
1756 | */ | |
990aa6d7 | 1757 | int iwl_pcie_alloc_ict(struct iwl_trans *trans) |
1a361cd8 | 1758 | { |
20d3b647 | 1759 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
1a361cd8 | 1760 | |
10667136 | 1761 | trans_pcie->ict_tbl = |
eef31718 | 1762 | dma_zalloc_coherent(trans->dev, ICT_SIZE, |
10667136 JB |
1763 | &trans_pcie->ict_tbl_dma, |
1764 | GFP_KERNEL); | |
1765 | if (!trans_pcie->ict_tbl) | |
1a361cd8 EG |
1766 | return -ENOMEM; |
1767 | ||
10667136 JB |
1768 | /* just an API sanity check ... it is guaranteed to be aligned */ |
1769 | if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) { | |
990aa6d7 | 1770 | iwl_pcie_free_ict(trans); |
10667136 JB |
1771 | return -EINVAL; |
1772 | } | |
1a361cd8 | 1773 | |
1a361cd8 EG |
1774 | return 0; |
1775 | } | |
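The WARN_ON above exploits ICT_SIZE being a power of two: ANDing the DMA address with (ICT_SIZE - 1) isolates its low 12 bits, which must all be zero for a 4096-byte-aligned buffer. A standalone sketch of the same test:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define ICT_SIZE 4096u	/* must be a power of two for this to work */

/* True if addr is ICT_SIZE-aligned (all low 12 bits clear). */
static bool ict_aligned(uint64_t addr)
{
	return (addr & (ICT_SIZE - 1)) == 0;
}

int main(void)
{
	assert(ict_aligned(0x1f3000));
	assert(!ict_aligned(0x1f3010));
	return 0;
}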
1776 | ||
1777 | /* The device is going up: inform it that it will use the ICT
1778 | * interrupt table, and tell the driver to start using ICT interrupts.
1779 | */ | |
990aa6d7 | 1780 | void iwl_pcie_reset_ict(struct iwl_trans *trans) |
1a361cd8 | 1781 | { |
20d3b647 | 1782 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
1a361cd8 | 1783 | u32 val; |
1a361cd8 | 1784 | |
10667136 | 1785 | if (!trans_pcie->ict_tbl) |
ed6a3803 | 1786 | return; |
1a361cd8 | 1787 | |
7b70bd63 | 1788 | spin_lock(&trans_pcie->irq_lock); |
0c325769 | 1789 | iwl_disable_interrupts(trans); |
1a361cd8 | 1790 | |
10667136 | 1791 | memset(trans_pcie->ict_tbl, 0, ICT_SIZE); |
1a361cd8 | 1792 | |
10667136 | 1793 | val = trans_pcie->ict_tbl_dma >> ICT_SHIFT; |
1a361cd8 | 1794 | |
18f5a374 EP |
1795 | val |= CSR_DRAM_INT_TBL_ENABLE | |
1796 | CSR_DRAM_INIT_TBL_WRAP_CHECK | | |
1797 | CSR_DRAM_INIT_TBL_WRITE_POINTER; | |
1a361cd8 | 1798 | |
10667136 | 1799 | IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val); |
1a361cd8 | 1800 | |
1042db2a | 1801 | iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val); |
0c325769 EG |
1802 | trans_pcie->use_ict = true; |
1803 | trans_pcie->ict_index = 0; | |
1042db2a | 1804 | iwl_write32(trans, CSR_INT, trans_pcie->inta_mask); |
0c325769 | 1805 | iwl_enable_interrupts(trans); |
7b70bd63 | 1806 | spin_unlock(&trans_pcie->irq_lock); |
1a361cd8 EG |
1807 | } |
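Because the table is 4096-byte aligned, its DMA address fits in CSR_DRAM_INT_TBL_REG after dropping the low 12 bits (the >> ICT_SHIFT above), leaving the high bits free for the control flags. A hedged sketch with an illustrative address and placeholder flag values (the real encodings are the CSR_DRAM_* definitions):

#include <assert.h>
#include <stdint.h>

#define ICT_SHIFT 12
/* Placeholder flag bits for illustration only. */
#define TBL_ENABLE	(1u << 31)
#define TBL_WRAP_CHECK	(1u << 27)

int main(void)
{
	uint64_t dma = 0x1f3000;	/* 4 KiB-aligned example address */
	uint32_t val = (uint32_t)(dma >> ICT_SHIFT) |
		       TBL_ENABLE | TBL_WRAP_CHECK;

	assert((val & 0xfffff) == 0x1f3);	/* address field intact */
	return 0;
}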
1808 | ||
1809 | /* The device is going down: disable ICT interrupt usage */
990aa6d7 | 1810 | void iwl_pcie_disable_ict(struct iwl_trans *trans) |
1a361cd8 | 1811 | { |
20d3b647 | 1812 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
1a361cd8 | 1813 | |
7b70bd63 | 1814 | spin_lock(&trans_pcie->irq_lock); |
0c325769 | 1815 | trans_pcie->use_ict = false; |
7b70bd63 | 1816 | spin_unlock(&trans_pcie->irq_lock); |
1a361cd8 EG |
1817 | } |
1818 | ||
85bf9da1 EG |
1819 | irqreturn_t iwl_pcie_isr(int irq, void *data) |
1820 | { | |
1821 | struct iwl_trans *trans = data; | |
1822 | ||
1823 | if (!trans) | |
1824 | return IRQ_NONE; | |
1825 | ||
1826 | /* Disable (but don't clear!) interrupts here to avoid | |
1827 | * back-to-back ISRs and sporadic interrupts from our NIC. | |
1828 | * If we have something to service, the threaded handler will re-enable ints.
1829 | * If we *don't* have something, we'll re-enable before leaving here. | |
1830 | */ | |
1831 | iwl_write32(trans, CSR_INT_MASK, 0x00000000); | |
1832 | ||
a0f337cc | 1833 | return IRQ_WAKE_THREAD; |
85bf9da1 | 1834 | } |
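iwl_pcie_isr() only masks the device and returns IRQ_WAKE_THREAD; the real work happens in iwl_pcie_irq_handler(), which runs in thread context. A sketch of how such a hard/threaded handler pair is typically registered with request_threaded_irq(); the IRQF_SHARED flag and name string here are assumptions, and the driver's actual call site lives in the transport setup code, not in this file:

/* Illustrative registration sketch; assumes <linux/interrupt.h> and
 * <linux/pci.h> are available in this translation unit. */
static int iwl_pcie_request_irq_sketch(struct pci_dev *pdev,
				       struct iwl_trans *trans)
{
	/* Hard handler (iwl_pcie_isr) runs in IRQ context and defers;
	 * the threaded handler (iwl_pcie_irq_handler) does the work. */
	return request_threaded_irq(pdev->irq, iwl_pcie_isr,
				    iwl_pcie_irq_handler, IRQF_SHARED,
				    "iwlwifi", trans);
}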
2e5d4a8f HD |
1835 | |
1836 | irqreturn_t iwl_pcie_msix_isr(int irq, void *data) | |
1837 | { | |
1838 | return IRQ_WAKE_THREAD; | |
1839 | } | |
1840 | ||
1841 | irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id) | |
1842 | { | |
1843 | struct msix_entry *entry = dev_id; | |
1844 | struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry); | |
1845 | struct iwl_trans *trans = trans_pcie->trans; | |
46167a8f | 1846 | struct isr_statistics *isr_stats = &trans_pcie->isr_stats; |
2e5d4a8f HD |
1847 | u32 inta_fh, inta_hw; |
1848 | ||
1849 | lock_map_acquire(&trans->sync_cmd_lockdep_map); | |
1850 | ||
1851 | spin_lock(&trans_pcie->irq_lock); | |
7ef3dd26 HD |
1852 | inta_fh = iwl_read32(trans, CSR_MSIX_FH_INT_CAUSES_AD); |
1853 | inta_hw = iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD); | |
2e5d4a8f HD |
1854 | /* |
1855 | * Clear the cause registers to avoid handling the same cause twice.
1856 | */ | |
7ef3dd26 HD |
1857 | iwl_write32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh); |
1858 | iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw); | |
2e5d4a8f HD |
1859 | spin_unlock(&trans_pcie->irq_lock); |
1860 | ||
1861 | if (unlikely(!(inta_fh | inta_hw))) { | |
1862 | IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n"); | |
1863 | lock_map_release(&trans->sync_cmd_lockdep_map); | |
1864 | return IRQ_NONE; | |
1865 | } | |
1866 | ||
1867 | if (iwl_have_debug_level(IWL_DL_ISR)) | |
1868 | IWL_DEBUG_ISR(trans, "ISR inta_fh 0x%08x, enabled 0x%08x\n", | |
1869 | inta_fh, | |
1870 | iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD)); | |
1871 | ||
1872 | /* This "Tx" DMA channel is used only for loading uCode */ | |
1873 | if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM) { | |
1874 | IWL_DEBUG_ISR(trans, "uCode load interrupt\n"); | |
1875 | isr_stats->tx++; | |
1876 | /* | |
1877 | * Wake up uCode load routine, | |
1878 | * now that load is complete | |
1879 | */ | |
1880 | trans_pcie->ucode_write_complete = true; | |
1881 | wake_up(&trans_pcie->ucode_write_waitq); | |
1882 | } | |
1883 | ||
1884 | /* Error detected by uCode */ | |
1885 | if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) || | |
1886 | (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR)) { | |
1887 | IWL_ERR(trans, | |
1888 | "Microcode SW error detected. Restarting 0x%X.\n", | |
1889 | inta_fh); | |
1890 | isr_stats->sw++; | |
1891 | iwl_pcie_irq_handle_error(trans); | |
1892 | } | |
1893 | ||
1894 | /* After checking FH register check HW register */ | |
1895 | if (iwl_have_debug_level(IWL_DL_ISR)) | |
1896 | IWL_DEBUG_ISR(trans, | |
1897 | "ISR inta_hw 0x%08x, enabled 0x%08x\n", | |
1898 | inta_hw, | |
1899 | iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD)); | |
1900 | ||
1901 | /* Alive notification via Rx interrupt will do the real work */ | |
1902 | if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) { | |
1903 | IWL_DEBUG_ISR(trans, "Alive interrupt\n"); | |
1904 | isr_stats->alive++; | |
1905 | } | |
1906 | ||
1907 | /* uCode wakes up after power-down sleep */ | |
1908 | if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) { | |
1909 | IWL_DEBUG_ISR(trans, "Wakeup interrupt\n"); | |
1910 | iwl_pcie_rxq_check_wrptr(trans); | |
1911 | iwl_pcie_txq_check_wrptrs(trans); | |
1912 | ||
1913 | isr_stats->wakeup++; | |
1914 | } | |
1915 | ||
1916 | /* Chip got too hot and stopped itself */ | |
1917 | if (inta_hw & MSIX_HW_INT_CAUSES_REG_CT_KILL) { | |
1918 | IWL_ERR(trans, "Microcode CT kill error detected.\n"); | |
1919 | isr_stats->ctkill++; | |
1920 | } | |
1921 | ||
1922 | /* HW RF KILL switch toggled */ | |
1923 | if (inta_hw & MSIX_HW_INT_CAUSES_REG_RF_KILL) { | |
1924 | bool hw_rfkill; | |
1925 | ||
1926 | hw_rfkill = iwl_is_rfkill_set(trans); | |
1927 | IWL_WARN(trans, "RF_KILL bit toggled to %s.\n", | |
1928 | hw_rfkill ? "disable radio" : "enable radio"); | |
1929 | ||
1930 | isr_stats->rfkill++; | |
1931 | ||
1932 | mutex_lock(&trans_pcie->mutex); | |
1933 | iwl_trans_pcie_rf_kill(trans, hw_rfkill); | |
1934 | mutex_unlock(&trans_pcie->mutex); | |
1935 | if (hw_rfkill) { | |
1936 | set_bit(STATUS_RFKILL, &trans->status); | |
1937 | if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE, | |
1938 | &trans->status)) | |
1939 | IWL_DEBUG_RF_KILL(trans, | |
1940 | "Rfkill while SYNC HCMD in flight\n"); | |
1941 | wake_up(&trans_pcie->wait_command_queue); | |
1942 | } else { | |
1943 | clear_bit(STATUS_RFKILL, &trans->status); | |
1944 | } | |
1945 | } | |
1946 | ||
1947 | if (inta_hw & MSIX_HW_INT_CAUSES_REG_HW_ERR) { | |
1948 | IWL_ERR(trans, | |
1949 | "Hardware error detected. Restarting.\n"); | |
1950 | ||
1951 | isr_stats->hw++; | |
1952 | iwl_pcie_irq_handle_error(trans); | |
1953 | } | |
1954 | ||
1955 | iwl_pcie_clear_irq(trans, entry); | |
1956 | ||
1957 | lock_map_release(&trans->sync_cmd_lockdep_map); | |
1958 | ||
1959 | return IRQ_HANDLED; | |
1960 | } |