/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2008 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
#include "nic.h"
#include "regs.h"
#include "io.h"
#include "workarounds.h"

/**************************************************************************
 *
 * Configurable values
 *
 **************************************************************************
 */

/* This is set to 16 for a good reason. In summary, if larger than
 * 16, the descriptor cache holds more than a default socket
 * buffer's worth of packets (for UDP we can only have at most one
 * socket buffer's worth outstanding). This combined with the fact
 * that we only get 1 TX event per descriptor cache means the NIC
 * goes idle.
 */
#define TX_DC_ENTRIES 16
#define TX_DC_ENTRIES_ORDER 1

#define RX_DC_ENTRIES 64
#define RX_DC_ENTRIES_ORDER 3

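/* Note: the hardware takes these cache sizes as a power-of-two order
 * with entries = 8 << order, so TX_DC_ENTRIES_ORDER 1 gives 16 entries
 * and RX_DC_ENTRIES_ORDER 3 gives 64. The BUILD_BUG_ON()s in
 * efx_nic_init_common() check this relationship before the orders are
 * written to FR_AZ_TX_DC_CFG and FR_AZ_RX_DC_CFG.
 */
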
/* RX FIFO XOFF watermark
 *
 * When the amount of the RX FIFO used increases past this watermark,
 * send XOFF. Only used if RX flow control is enabled (ethtool -A).
 * This also has an effect on RX/TX arbitration.
 */
int efx_nic_rx_xoff_thresh = -1;
module_param_named(rx_xoff_thresh_bytes, efx_nic_rx_xoff_thresh, int, 0644);
MODULE_PARM_DESC(rx_xoff_thresh_bytes, "RX fifo XOFF threshold");

/* RX FIFO XON watermark
 *
 * When the amount of the RX FIFO used decreases below this watermark,
 * send XON. Only used if TX flow control is enabled (ethtool -A).
 * This also has an effect on RX/TX arbitration.
 */
int efx_nic_rx_xon_thresh = -1;
module_param_named(rx_xon_thresh_bytes, efx_nic_rx_xon_thresh, int, 0644);
MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
/* If EFX_MAX_INT_ERRORS internal errors occur within
 * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
 * disable it.
 */
#define EFX_INT_ERROR_EXPIRE 3600
#define EFX_MAX_INT_ERRORS 5

/* We poll for events every FLUSH_INTERVAL ms, and check FLUSH_POLL_COUNT times
 */
#define EFX_FLUSH_INTERVAL 10
#define EFX_FLUSH_POLL_COUNT 100

/* Size and alignment of special buffers (4KB) */
#define EFX_BUF_SIZE 4096

/* Depth of RX flush request fifo */
#define EFX_RX_FLUSH_COUNT 4

/**************************************************************************
 *
 * Solarstorm hardware access
 *
 **************************************************************************/

static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
                                     unsigned int index)
{
        efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
                        value, index);
}

/* Read the current event from the event queue */
static inline efx_qword_t *efx_event(struct efx_channel *channel,
                                     unsigned int index)
{
        return (((efx_qword_t *) (channel->eventq.addr)) + index);
}

/* See if an event is present
 *
 * We check both the high and low dword of the event for all ones. We
 * wrote all ones when we cleared the event, and no valid event can
 * have all ones in either its high or low dwords. This approach is
 * robust against reordering.
 *
 * Note that using a single 64-bit comparison is incorrect; even
 * though the CPU read will be atomic, the DMA write may not be.
 */
static inline int efx_event_present(efx_qword_t *event)
{
        return (!(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
                  EFX_DWORD_IS_ALL_ONES(event->dword[1])));
}
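
/* A worked example of the sentinel scheme: queue memory is memset()
 * to 0xff at initialisation and each consumed event is overwritten
 * with all ones again, so a slot reads back as ffffffff:ffffffff until
 * the NIC DMAs a real event over it. Checking the two dwords
 * separately means a partially visible DMA write (one dword landed,
 * the other still all ones) is still reported as "no event present".
 */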

static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
                                     const efx_oword_t *mask)
{
        return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
                ((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
}

int efx_nic_test_registers(struct efx_nic *efx,
                           const struct efx_nic_register_test *regs,
                           size_t n_regs)
{
        unsigned address = 0, i, j;
        efx_oword_t mask, imask, original, reg, buf;

        /* Falcon should be in loopback to isolate the XMAC from the PHY */
        WARN_ON(!LOOPBACK_INTERNAL(efx));

        for (i = 0; i < n_regs; ++i) {
                address = regs[i].address;
                mask = imask = regs[i].mask;
                EFX_INVERT_OWORD(imask);

                efx_reado(efx, &original, address);

                /* bit sweep on and off */
                for (j = 0; j < 128; j++) {
                        if (!EFX_EXTRACT_OWORD32(mask, j, j))
                                continue;

                        /* Test this testable bit can be set in isolation */
                        EFX_AND_OWORD(reg, original, mask);
                        EFX_SET_OWORD32(reg, j, j, 1);

                        efx_writeo(efx, &reg, address);
                        efx_reado(efx, &buf, address);

                        if (efx_masked_compare_oword(&reg, &buf, &mask))
                                goto fail;

                        /* Test this testable bit can be cleared in isolation */
                        EFX_OR_OWORD(reg, original, mask);
                        EFX_SET_OWORD32(reg, j, j, 0);

                        efx_writeo(efx, &reg, address);
                        efx_reado(efx, &buf, address);

                        if (efx_masked_compare_oword(&reg, &buf, &mask))
                                goto fail;
                }

                efx_writeo(efx, &original, address);
        }

        return 0;

fail:
        EFX_ERR(efx, "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
                " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
                EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
        return -EIO;
}
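
/* The test above is a walking-ones/walking-zeros sweep: for each bit
 * position covered by the register's mask it checks that the bit can
 * be set and cleared in isolation and reads back as written (compared
 * through the mask so unmaskable bits are ignored), then restores the
 * original register contents.
 */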

/**************************************************************************
 *
 * Special buffer handling
 * Special buffers are used for event queues and the TX and RX
 * descriptor rings.
 *
 *************************************************************************/

/*
 * Initialise a special buffer
 *
 * This will define a buffer (previously allocated via
 * efx_alloc_special_buffer()) in the buffer table, allowing
 * it to be used for event queues, descriptor rings etc.
 */
static void
efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
        efx_qword_t buf_desc;
        int index;
        dma_addr_t dma_addr;
        int i;

        EFX_BUG_ON_PARANOID(!buffer->addr);

        /* Write buffer descriptors to NIC */
        for (i = 0; i < buffer->entries; i++) {
                index = buffer->index + i;
                dma_addr = buffer->dma_addr + (i * 4096);
                EFX_LOG(efx, "mapping special buffer %d at %llx\n",
                        index, (unsigned long long)dma_addr);
                EFX_POPULATE_QWORD_3(buf_desc,
                                     FRF_AZ_BUF_ADR_REGION, 0,
                                     FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
                                     FRF_AZ_BUF_OWNER_ID_FBUF, 0);
                efx_write_buf_tbl(efx, &buf_desc, index);
        }
}
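
/* Worked example: an 8KB descriptor ring spans two EFX_BUF_SIZE (4KB)
 * pages and therefore two consecutive buffer table entries; each entry
 * holds the page number of its 4KB-aligned DMA address (dma_addr >> 12).
 */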

/* Unmaps a buffer and clears the buffer table entries */
static void
efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
        efx_oword_t buf_tbl_upd;
        unsigned int start = buffer->index;
        unsigned int end = (buffer->index + buffer->entries - 1);

        if (!buffer->entries)
                return;

        EFX_LOG(efx, "unmapping special buffers %d-%d\n",
                buffer->index, buffer->index + buffer->entries - 1);

        EFX_POPULATE_OWORD_4(buf_tbl_upd,
                             FRF_AZ_BUF_UPD_CMD, 0,
                             FRF_AZ_BUF_CLR_CMD, 1,
                             FRF_AZ_BUF_CLR_END_ID, end,
                             FRF_AZ_BUF_CLR_START_ID, start);
        efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
}

/*
 * Allocate a new special buffer
 *
 * This allocates memory for a new buffer, clears it and allocates a
 * new buffer ID range. It does not write into the buffer table.
 *
 * This call will allocate 4KB buffers, since 8KB buffers can't be
 * used for event queues and descriptor rings.
 */
static int efx_alloc_special_buffer(struct efx_nic *efx,
                                    struct efx_special_buffer *buffer,
                                    unsigned int len)
{
        len = ALIGN(len, EFX_BUF_SIZE);

        buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
                                            &buffer->dma_addr);
        if (!buffer->addr)
                return -ENOMEM;
        buffer->len = len;
        buffer->entries = len / EFX_BUF_SIZE;
        BUG_ON(buffer->dma_addr & (EFX_BUF_SIZE - 1));

        /* All zeros is a potentially valid event so memset to 0xff */
        memset(buffer->addr, 0xff, len);

        /* Select new buffer ID */
        buffer->index = efx->next_buffer_table;
        efx->next_buffer_table += buffer->entries;

        EFX_LOG(efx, "allocating special buffers %d-%d at %llx+%x "
                "(virt %p phys %llx)\n", buffer->index,
                buffer->index + buffer->entries - 1,
                (u64)buffer->dma_addr, len,
                buffer->addr, (u64)virt_to_phys(buffer->addr));

        return 0;
}
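
/* Buffer IDs are handed out monotonically from efx->next_buffer_table.
 * Note that efx_free_special_buffer() below releases the DMA memory
 * but does not hand the ID range back; nothing in this file ever
 * decrements next_buffer_table.
 */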

static void
efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
        if (!buffer->addr)
                return;

        EFX_LOG(efx, "deallocating special buffers %d-%d at %llx+%x "
                "(virt %p phys %llx)\n", buffer->index,
                buffer->index + buffer->entries - 1,
                (u64)buffer->dma_addr, buffer->len,
                buffer->addr, (u64)virt_to_phys(buffer->addr));

        pci_free_consistent(efx->pci_dev, buffer->len, buffer->addr,
                            buffer->dma_addr);
        buffer->addr = NULL;
        buffer->entries = 0;
}

/**************************************************************************
 *
 * Generic buffer handling
 * These buffers are used for interrupt status and MAC stats
 *
 **************************************************************************/

int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
                         unsigned int len)
{
        buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
                                            &buffer->dma_addr);
        if (!buffer->addr)
                return -ENOMEM;
        buffer->len = len;
        memset(buffer->addr, 0, len);
        return 0;
}

void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
{
        if (buffer->addr) {
                pci_free_consistent(efx->pci_dev, buffer->len,
                                    buffer->addr, buffer->dma_addr);
                buffer->addr = NULL;
        }
}

/**************************************************************************
 *
 * TX path
 *
 **************************************************************************/

/* Returns a pointer to the specified transmit descriptor in the TX
 * descriptor queue belonging to the specified channel.
 */
static inline efx_qword_t *
efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
{
        return (((efx_qword_t *) (tx_queue->txd.addr)) + index);
}

/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
static inline void efx_notify_tx_desc(struct efx_tx_queue *tx_queue)
{
        unsigned write_ptr;
        efx_dword_t reg;

        write_ptr = tx_queue->write_count & EFX_TXQ_MASK;
        EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
        efx_writed_page(tx_queue->efx, &reg,
                        FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
}


/* For each entry inserted into the software descriptor ring, create a
 * descriptor in the hardware TX descriptor ring (in host memory), and
 * write a doorbell.
 */
void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
{
        struct efx_tx_buffer *buffer;
        efx_qword_t *txd;
        unsigned write_ptr;

        BUG_ON(tx_queue->write_count == tx_queue->insert_count);

        do {
                write_ptr = tx_queue->write_count & EFX_TXQ_MASK;
                buffer = &tx_queue->buffer[write_ptr];
                txd = efx_tx_desc(tx_queue, write_ptr);
                ++tx_queue->write_count;

                /* Create TX descriptor ring entry */
                EFX_POPULATE_QWORD_4(*txd,
                                     FSF_AZ_TX_KER_CONT, buffer->continuation,
                                     FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
                                     FSF_AZ_TX_KER_BUF_REGION, 0,
                                     FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
        } while (tx_queue->write_count != tx_queue->insert_count);

        wmb(); /* Ensure descriptors are written before they are fetched */
        efx_notify_tx_desc(tx_queue);
}
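
/* The ring indices here are free-running counters masked down to a
 * slot number, which relies on the ring size being a power of two with
 * EFX_TXQ_MASK == EFX_TXQ_SIZE - 1 (the BUILD_BUG_ON() in
 * efx_nic_probe_tx() below enforces this). For example, with a
 * hypothetical EFX_TXQ_SIZE of 1024, write_count 1025 maps to slot 1.
 */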

/* Allocate hardware resources for a TX queue */
int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
{
        struct efx_nic *efx = tx_queue->efx;
        BUILD_BUG_ON(EFX_TXQ_SIZE < 512 || EFX_TXQ_SIZE > 4096 ||
                     EFX_TXQ_SIZE & EFX_TXQ_MASK);
        return efx_alloc_special_buffer(efx, &tx_queue->txd,
                                        EFX_TXQ_SIZE * sizeof(efx_qword_t));
}

void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
{
        efx_oword_t tx_desc_ptr;
        struct efx_nic *efx = tx_queue->efx;

        tx_queue->flushed = FLUSH_NONE;

        /* Pin TX descriptor ring */
        efx_init_special_buffer(efx, &tx_queue->txd);

        /* Push TX descriptor ring to card */
        EFX_POPULATE_OWORD_10(tx_desc_ptr,
                              FRF_AZ_TX_DESCQ_EN, 1,
                              FRF_AZ_TX_ISCSI_DDIG_EN, 0,
                              FRF_AZ_TX_ISCSI_HDIG_EN, 0,
                              FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
                              FRF_AZ_TX_DESCQ_EVQ_ID,
                              tx_queue->channel->channel,
                              FRF_AZ_TX_DESCQ_OWNER_ID, 0,
                              FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
                              FRF_AZ_TX_DESCQ_SIZE,
                              __ffs(tx_queue->txd.entries),
                              FRF_AZ_TX_DESCQ_TYPE, 0,
                              FRF_BZ_TX_NON_IP_DROP_DIS, 1);

        if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
                int csum = tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM;
                EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
                EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_TCP_CHKSM_DIS,
                                    !csum);
        }

        efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
                         tx_queue->queue);

        if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
                efx_oword_t reg;

                /* Only 128 bits in this register */
                BUILD_BUG_ON(EFX_TX_QUEUE_COUNT >= 128);

                efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
                if (tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM)
                        clear_bit_le(tx_queue->queue, (void *)&reg);
                else
                        set_bit_le(tx_queue->queue, (void *)&reg);
                efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
        }
}
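
/* Checksum offload is configured differently per hardware revision:
 * on Falcon B0 and later the IP/TCP checksum-disable flags live in the
 * per-queue entry of the descriptor pointer table written above, while
 * A1 has a single global FR_AA_TX_CHKSM_CFG register holding one
 * disable bit per TX queue.
 */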

static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)
{
        struct efx_nic *efx = tx_queue->efx;
        efx_oword_t tx_flush_descq;

        tx_queue->flushed = FLUSH_PENDING;

        /* Post a flush command */
        EFX_POPULATE_OWORD_2(tx_flush_descq,
                             FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
                             FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
        efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
}

void efx_nic_fini_tx(struct efx_tx_queue *tx_queue)
{
        struct efx_nic *efx = tx_queue->efx;
        efx_oword_t tx_desc_ptr;

        /* The queue should have been flushed */
        WARN_ON(tx_queue->flushed != FLUSH_DONE);

        /* Remove TX descriptor ring from card */
        EFX_ZERO_OWORD(tx_desc_ptr);
        efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
                         tx_queue->queue);

        /* Unpin TX descriptor ring */
        efx_fini_special_buffer(efx, &tx_queue->txd);
}

/* Free buffers backing TX queue */
void efx_nic_remove_tx(struct efx_tx_queue *tx_queue)
{
        efx_free_special_buffer(tx_queue->efx, &tx_queue->txd);
}

/**************************************************************************
 *
 * RX path
 *
 **************************************************************************/

/* Returns a pointer to the specified descriptor in the RX descriptor queue */
static inline efx_qword_t *
efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
{
        return (((efx_qword_t *) (rx_queue->rxd.addr)) + index);
}

/* This creates an entry in the RX descriptor queue */
static inline void
efx_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
{
        struct efx_rx_buffer *rx_buf;
        efx_qword_t *rxd;

        rxd = efx_rx_desc(rx_queue, index);
        rx_buf = efx_rx_buffer(rx_queue, index);
        EFX_POPULATE_QWORD_3(*rxd,
                             FSF_AZ_RX_KER_BUF_SIZE,
                             rx_buf->len -
                             rx_queue->efx->type->rx_buffer_padding,
                             FSF_AZ_RX_KER_BUF_REGION, 0,
                             FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
}

/* This writes to the RX_DESC_WPTR register for the specified receive
 * descriptor ring.
 */
void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
{
        efx_dword_t reg;
        unsigned write_ptr;

        while (rx_queue->notified_count != rx_queue->added_count) {
                efx_build_rx_desc(rx_queue,
                                  rx_queue->notified_count &
                                  EFX_RXQ_MASK);
                ++rx_queue->notified_count;
        }

        wmb();
        write_ptr = rx_queue->added_count & EFX_RXQ_MASK;
        EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
        efx_writed_page(rx_queue->efx, &reg,
                        FR_AZ_RX_DESC_UPD_DWORD_P0, rx_queue->queue);
}
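
/* Note the lazy-build pattern: RX descriptors are not written out as
 * buffers are added, but in a batch here, followed by a wmb() so the
 * NIC cannot fetch a descriptor before it is visible in host memory,
 * and finally one RX_DESC_WPTR doorbell covering the whole batch.
 */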

int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
{
        struct efx_nic *efx = rx_queue->efx;
        BUILD_BUG_ON(EFX_RXQ_SIZE < 512 || EFX_RXQ_SIZE > 4096 ||
                     EFX_RXQ_SIZE & EFX_RXQ_MASK);
        return efx_alloc_special_buffer(efx, &rx_queue->rxd,
                                        EFX_RXQ_SIZE * sizeof(efx_qword_t));
}

void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
{
        efx_oword_t rx_desc_ptr;
        struct efx_nic *efx = rx_queue->efx;
        bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
        bool iscsi_digest_en = is_b0;

        EFX_LOG(efx, "RX queue %d ring in special buffers %d-%d\n",
                rx_queue->queue, rx_queue->rxd.index,
                rx_queue->rxd.index + rx_queue->rxd.entries - 1);

        rx_queue->flushed = FLUSH_NONE;

        /* Pin RX descriptor ring */
        efx_init_special_buffer(efx, &rx_queue->rxd);

        /* Push RX descriptor ring to card */
        EFX_POPULATE_OWORD_10(rx_desc_ptr,
                              FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
                              FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
                              FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
                              FRF_AZ_RX_DESCQ_EVQ_ID,
                              rx_queue->channel->channel,
                              FRF_AZ_RX_DESCQ_OWNER_ID, 0,
                              FRF_AZ_RX_DESCQ_LABEL, rx_queue->queue,
                              FRF_AZ_RX_DESCQ_SIZE,
                              __ffs(rx_queue->rxd.entries),
                              FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
                              /* For >=B0 this is scatter so disable */
                              FRF_AZ_RX_DESCQ_JUMBO, !is_b0,
                              FRF_AZ_RX_DESCQ_EN, 1);
        efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
                         rx_queue->queue);
}

static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue)
{
        struct efx_nic *efx = rx_queue->efx;
        efx_oword_t rx_flush_descq;

        rx_queue->flushed = FLUSH_PENDING;

        /* Post a flush command */
        EFX_POPULATE_OWORD_2(rx_flush_descq,
                             FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
                             FRF_AZ_RX_FLUSH_DESCQ, rx_queue->queue);
        efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
}

void efx_nic_fini_rx(struct efx_rx_queue *rx_queue)
{
        efx_oword_t rx_desc_ptr;
        struct efx_nic *efx = rx_queue->efx;

        /* The queue should already have been flushed */
        WARN_ON(rx_queue->flushed != FLUSH_DONE);

        /* Remove RX descriptor ring from card */
        EFX_ZERO_OWORD(rx_desc_ptr);
        efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
                         rx_queue->queue);

        /* Unpin RX descriptor ring */
        efx_fini_special_buffer(efx, &rx_queue->rxd);
}

/* Free buffers backing RX queue */
void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
{
        efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
}

/**************************************************************************
 *
 * Event queue processing
 * Event queues are processed by per-channel tasklets.
 *
 **************************************************************************/

/* Update a channel's event queue's read pointer (RPTR) register
 *
 * This writes the EVQ_RPTR_REG register for the specified channel's
 * event queue.
 *
 * Note that EVQ_RPTR_REG contains the index of the "last read" event,
 * whereas channel->eventq_read_ptr contains the index of the "next to
 * read" event.
 */
void efx_nic_eventq_read_ack(struct efx_channel *channel)
{
        efx_dword_t reg;
        struct efx_nic *efx = channel->efx;

        EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR, channel->eventq_read_ptr);
        efx_writed_table(efx, &reg, efx->type->evq_rptr_tbl_base,
                         channel->channel);
}

/* Use HW to insert a SW defined event */
void efx_generate_event(struct efx_channel *channel, efx_qword_t *event)
{
        efx_oword_t drv_ev_reg;

        BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
                     FRF_AZ_DRV_EV_DATA_WIDTH != 64);
        drv_ev_reg.u32[0] = event->u32[0];
        drv_ev_reg.u32[1] = event->u32[1];
        drv_ev_reg.u32[2] = 0;
        drv_ev_reg.u32[3] = 0;
        EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, channel->channel);
        efx_writeo(channel->efx, &drv_ev_reg, FR_AZ_DRV_EV);
}

/* Handle a transmit completion event
 *
 * The NIC batches TX completion events; the message we receive is of
 * the form "complete all TX events up to this index".
 */
static void
efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
{
        unsigned int tx_ev_desc_ptr;
        unsigned int tx_ev_q_label;
        struct efx_tx_queue *tx_queue;
        struct efx_nic *efx = channel->efx;

        if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
                /* Transmit completion */
                tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
                tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
                tx_queue = &efx->tx_queue[tx_ev_q_label];
                channel->irq_mod_score +=
                        (tx_ev_desc_ptr - tx_queue->read_count) &
                        EFX_TXQ_MASK;
                efx_xmit_done(tx_queue, tx_ev_desc_ptr);
        } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
                /* Rewrite the FIFO write pointer */
                tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
                tx_queue = &efx->tx_queue[tx_ev_q_label];

                if (efx_dev_registered(efx))
                        netif_tx_lock(efx->net_dev);
                efx_notify_tx_desc(tx_queue);
                if (efx_dev_registered(efx))
                        netif_tx_unlock(efx->net_dev);
        } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) &&
                   EFX_WORKAROUND_10727(efx)) {
                efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
        } else {
                EFX_ERR(efx, "channel %d unexpected TX event "
                        EFX_QWORD_FMT"\n", channel->channel,
                        EFX_QWORD_VAL(*event));
        }
}
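
/* The irq_mod_score update above accumulates work done per event (the
 * number of descriptors completed, computed with the same
 * counter-difference-and-mask idiom as the ring indices). Together
 * with the fixed increment of 2 per RX packet in efx_handle_rx_event()
 * below, this gives the interrupt moderation logic a measure of how
 * busy the channel is.
 */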

/* Detect errors included in the rx_ev_pkt_ok bit. */
static void efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
                                 const efx_qword_t *event,
                                 bool *rx_ev_pkt_ok,
                                 bool *discard)
{
        struct efx_nic *efx = rx_queue->efx;
        bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
        bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
        bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
        bool rx_ev_other_err, rx_ev_pause_frm;
        bool rx_ev_hdr_type, rx_ev_mcast_pkt;
        unsigned rx_ev_pkt_type;

        rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
        rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
        rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
        rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
        rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
                                                 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
        rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
                                                  FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
        rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
                                                   FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
        rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
        rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
        rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ?
                          0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
        rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);

        /* Every error apart from tobe_disc and pause_frm */
        rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
                           rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
                           rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);

        /* Count errors that are not in MAC stats. Ignore expected
         * checksum errors during self-test. */
        if (rx_ev_frm_trunc)
                ++rx_queue->channel->n_rx_frm_trunc;
        else if (rx_ev_tobe_disc)
                ++rx_queue->channel->n_rx_tobe_disc;
        else if (!efx->loopback_selftest) {
                if (rx_ev_ip_hdr_chksum_err)
                        ++rx_queue->channel->n_rx_ip_hdr_chksum_err;
                else if (rx_ev_tcp_udp_chksum_err)
                        ++rx_queue->channel->n_rx_tcp_udp_chksum_err;
        }

        /* The frame must be discarded if any of these are true. */
        *discard = (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
                    rx_ev_tobe_disc | rx_ev_pause_frm);

        /* TOBE_DISC is expected on unicast mismatches; don't print out an
         * error message. FRM_TRUNC indicates RXDP dropped the packet due
         * to a FIFO overflow.
         */
#ifdef EFX_ENABLE_DEBUG
        if (rx_ev_other_err) {
                EFX_INFO_RL(efx, " RX queue %d unexpected RX event "
                            EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
                            rx_queue->queue, EFX_QWORD_VAL(*event),
                            rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
                            rx_ev_ip_hdr_chksum_err ?
                            " [IP_HDR_CHKSUM_ERR]" : "",
                            rx_ev_tcp_udp_chksum_err ?
                            " [TCP_UDP_CHKSUM_ERR]" : "",
                            rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
                            rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
                            rx_ev_drib_nib ? " [DRIB_NIB]" : "",
                            rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
                            rx_ev_pause_frm ? " [PAUSE]" : "");
        }
#endif
}

/* Handle receive events that are not in-order. */
static void
efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
{
        struct efx_nic *efx = rx_queue->efx;
        unsigned expected, dropped;

        expected = rx_queue->removed_count & EFX_RXQ_MASK;
        dropped = (index - expected) & EFX_RXQ_MASK;
        EFX_INFO(efx, "dropped %d events (index=%d expected=%d)\n",
                 dropped, index, expected);

        efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
                           RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
}

/* Handle a packet received event
 *
 * The NIC gives a "discard" flag if it's a unicast packet with the
 * wrong destination address.
 * Also "is multicast" and "matches multicast filter" flags can be used to
 * discard non-matching multicast packets.
 */
static void
efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
{
        unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
        unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
        unsigned expected_ptr;
        bool rx_ev_pkt_ok, discard = false, checksummed;
        struct efx_rx_queue *rx_queue;
        struct efx_nic *efx = channel->efx;

        /* Basic packet information */
        rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
        rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
        rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
        WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT));
        WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP) != 1);
        WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
                channel->channel);

        rx_queue = &efx->rx_queue[channel->channel];

        rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
        expected_ptr = rx_queue->removed_count & EFX_RXQ_MASK;
        if (unlikely(rx_ev_desc_ptr != expected_ptr))
                efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);

        if (likely(rx_ev_pkt_ok)) {
                /* If packet is marked as OK and packet type is TCP/IP or
                 * UDP/IP, then we can rely on the hardware checksum.
                 */
                checksummed =
                        likely(efx->rx_checksum_enabled) &&
                        (rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP ||
                         rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP);
        } else {
                efx_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok, &discard);
                checksummed = false;
        }

        /* Detect multicast packets that didn't match the filter */
        rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
        if (rx_ev_mcast_pkt) {
                unsigned int rx_ev_mcast_hash_match =
                        EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);

                if (unlikely(!rx_ev_mcast_hash_match)) {
                        ++channel->n_rx_mcast_mismatch;
                        discard = true;
                }
        }

        channel->irq_mod_score += 2;

        /* Handle received packet */
        efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt,
                      checksummed, discard);
}

/* Global events are basically PHY events */
static void
efx_handle_global_event(struct efx_channel *channel, efx_qword_t *event)
{
        struct efx_nic *efx = channel->efx;
        bool handled = false;

        if (EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_G_PHY0_INTR) ||
            EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XG_PHY0_INTR) ||
            EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XFP_PHY0_INTR)) {
                /* Ignored */
                handled = true;
        }

        if ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) &&
            EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_XG_MGT_INTR)) {
                efx->xmac_poll_required = true;
                handled = true;
        }

        if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ?
            EFX_QWORD_FIELD(*event, FSF_AA_GLB_EV_RX_RECOVERY) :
            EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_RX_RECOVERY)) {
                EFX_ERR(efx, "channel %d seen global RX_RESET "
                        "event. Resetting.\n", channel->channel);

                atomic_inc(&efx->rx_reset);
                efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ?
                                   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
                handled = true;
        }

        if (!handled)
                EFX_ERR(efx, "channel %d unknown global event "
                        EFX_QWORD_FMT "\n", channel->channel,
                        EFX_QWORD_VAL(*event));
}

static void
efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
{
        struct efx_nic *efx = channel->efx;
        unsigned int ev_sub_code;
        unsigned int ev_sub_data;

        ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
        ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);

        switch (ev_sub_code) {
        case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
                EFX_TRACE(efx, "channel %d TXQ %d flushed\n",
                          channel->channel, ev_sub_data);
                break;
        case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
                EFX_TRACE(efx, "channel %d RXQ %d flushed\n",
                          channel->channel, ev_sub_data);
                break;
        case FSE_AZ_EVQ_INIT_DONE_EV:
                EFX_LOG(efx, "channel %d EVQ %d initialised\n",
                        channel->channel, ev_sub_data);
                break;
        case FSE_AZ_SRM_UPD_DONE_EV:
                EFX_TRACE(efx, "channel %d SRAM update done\n",
                          channel->channel);
                break;
        case FSE_AZ_WAKE_UP_EV:
                EFX_TRACE(efx, "channel %d RXQ %d wakeup event\n",
                          channel->channel, ev_sub_data);
                break;
        case FSE_AZ_TIMER_EV:
                EFX_TRACE(efx, "channel %d RX queue %d timer expired\n",
                          channel->channel, ev_sub_data);
                break;
        case FSE_AA_RX_RECOVER_EV:
                EFX_ERR(efx, "channel %d seen DRIVER RX_RESET event. "
                        "Resetting.\n", channel->channel);
                atomic_inc(&efx->rx_reset);
                efx_schedule_reset(efx,
                                   EFX_WORKAROUND_6555(efx) ?
                                   RESET_TYPE_RX_RECOVERY :
                                   RESET_TYPE_DISABLE);
                break;
        case FSE_BZ_RX_DSC_ERROR_EV:
                EFX_ERR(efx, "RX DMA Q %d reports descriptor fetch error."
                        " RX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
                efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
                break;
        case FSE_BZ_TX_DSC_ERROR_EV:
                EFX_ERR(efx, "TX DMA Q %d reports descriptor fetch error."
                        " TX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
                efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
                break;
        default:
                EFX_TRACE(efx, "channel %d unknown driver event code %d "
                          "data %04x\n", channel->channel, ev_sub_code,
                          ev_sub_data);
                break;
        }
}

int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota)
{
        unsigned int read_ptr;
        efx_qword_t event, *p_event;
        int ev_code;
        int rx_packets = 0;

        read_ptr = channel->eventq_read_ptr;

        do {
                p_event = efx_event(channel, read_ptr);
                event = *p_event;

                if (!efx_event_present(&event))
                        /* End of events */
                        break;

                EFX_TRACE(channel->efx, "channel %d event is "EFX_QWORD_FMT"\n",
                          channel->channel, EFX_QWORD_VAL(event));

                /* Clear this event by marking it all ones */
                EFX_SET_QWORD(*p_event);

                ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);

                switch (ev_code) {
                case FSE_AZ_EV_CODE_RX_EV:
                        efx_handle_rx_event(channel, &event);
                        ++rx_packets;
                        break;
                case FSE_AZ_EV_CODE_TX_EV:
                        efx_handle_tx_event(channel, &event);
                        break;
                case FSE_AZ_EV_CODE_DRV_GEN_EV:
                        channel->eventq_magic = EFX_QWORD_FIELD(
                                event, FSF_AZ_DRV_GEN_EV_MAGIC);
                        EFX_LOG(channel->efx, "channel %d received generated "
                                "event "EFX_QWORD_FMT"\n", channel->channel,
                                EFX_QWORD_VAL(event));
                        break;
                case FSE_AZ_EV_CODE_GLOBAL_EV:
                        efx_handle_global_event(channel, &event);
                        break;
                case FSE_AZ_EV_CODE_DRIVER_EV:
                        efx_handle_driver_event(channel, &event);
                        break;
                default:
                        EFX_ERR(channel->efx, "channel %d unknown event type %d"
                                " (data " EFX_QWORD_FMT ")\n", channel->channel,
                                ev_code, EFX_QWORD_VAL(event));
                }

                /* Increment read pointer */
                read_ptr = (read_ptr + 1) & EFX_EVQ_MASK;

        } while (rx_packets < rx_quota);

        channel->eventq_read_ptr = read_ptr;
        return rx_packets;
}
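
/* The loop above is the consumer side of the event ring: copy the
 * event out, immediately re-poison the slot with all ones so that
 * efx_event_present() treats it as empty, dispatch on FSF_AZ_EV_CODE,
 * and stop at the first empty slot or once rx_quota RX events have
 * been handled. Only RX events count against the quota; the caller is
 * expected to acknowledge progress afterwards via
 * efx_nic_eventq_read_ack().
 */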


/* Allocate buffer table entries for event queue */
int efx_nic_probe_eventq(struct efx_channel *channel)
{
        struct efx_nic *efx = channel->efx;
        BUILD_BUG_ON(EFX_EVQ_SIZE < 512 || EFX_EVQ_SIZE > 32768 ||
                     EFX_EVQ_SIZE & EFX_EVQ_MASK);
        return efx_alloc_special_buffer(efx, &channel->eventq,
                                        EFX_EVQ_SIZE * sizeof(efx_qword_t));
}

void efx_nic_init_eventq(struct efx_channel *channel)
{
        efx_oword_t evq_ptr;
        struct efx_nic *efx = channel->efx;

        EFX_LOG(efx, "channel %d event queue in special buffers %d-%d\n",
                channel->channel, channel->eventq.index,
                channel->eventq.index + channel->eventq.entries - 1);

        /* Pin event queue buffer */
        efx_init_special_buffer(efx, &channel->eventq);

        /* Fill event queue with all ones (i.e. empty events) */
        memset(channel->eventq.addr, 0xff, channel->eventq.len);

        /* Push event queue to card */
        EFX_POPULATE_OWORD_3(evq_ptr,
                             FRF_AZ_EVQ_EN, 1,
                             FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
                             FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
        efx_writeo_table(efx, &evq_ptr, efx->type->evq_ptr_tbl_base,
                         channel->channel);

        efx->type->push_irq_moderation(channel);
}
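
/* __ffs() serves as a cheap log2() when writing FRF_AZ_EVQ_SIZE:
 * eventq.entries (the number of 4KB pages backing the queue) is a
 * power of two, so it has exactly one bit set and the index of that
 * bit is log2(entries).
 */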

void efx_nic_fini_eventq(struct efx_channel *channel)
{
        efx_oword_t eventq_ptr;
        struct efx_nic *efx = channel->efx;

        /* Remove event queue from card */
        EFX_ZERO_OWORD(eventq_ptr);
        efx_writeo_table(efx, &eventq_ptr, efx->type->evq_ptr_tbl_base,
                         channel->channel);

        /* Unpin event queue */
        efx_fini_special_buffer(efx, &channel->eventq);
}

/* Free buffers backing event queue */
void efx_nic_remove_eventq(struct efx_channel *channel)
{
        efx_free_special_buffer(channel->efx, &channel->eventq);
}


/* Generates a test event on the event queue. A subsequent call to
 * process_eventq() should pick up the event and place the value of
 * "magic" into channel->eventq_magic.
 */
void efx_nic_generate_test_event(struct efx_channel *channel, unsigned int magic)
{
        efx_qword_t test_event;

        EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE,
                             FSE_AZ_EV_CODE_DRV_GEN_EV,
                             FSF_AZ_DRV_GEN_EV_MAGIC, magic);
        efx_generate_event(channel, &test_event);
}

/**************************************************************************
 *
 * Flush handling
 *
 **************************************************************************/


static void efx_poll_flush_events(struct efx_nic *efx)
{
        struct efx_channel *channel = &efx->channel[0];
        struct efx_tx_queue *tx_queue;
        struct efx_rx_queue *rx_queue;
        unsigned int read_ptr = channel->eventq_read_ptr;
        unsigned int end_ptr = (read_ptr - 1) & EFX_EVQ_MASK;

        do {
                efx_qword_t *event = efx_event(channel, read_ptr);
                int ev_code, ev_sub_code, ev_queue;
                bool ev_failed;

                if (!efx_event_present(event))
                        break;

                ev_code = EFX_QWORD_FIELD(*event, FSF_AZ_EV_CODE);
                ev_sub_code = EFX_QWORD_FIELD(*event,
                                              FSF_AZ_DRIVER_EV_SUBCODE);
                if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
                    ev_sub_code == FSE_AZ_TX_DESCQ_FLS_DONE_EV) {
                        ev_queue = EFX_QWORD_FIELD(*event,
                                                   FSF_AZ_DRIVER_EV_SUBDATA);
                        if (ev_queue < EFX_TX_QUEUE_COUNT) {
                                tx_queue = efx->tx_queue + ev_queue;
                                tx_queue->flushed = FLUSH_DONE;
                        }
                } else if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
                           ev_sub_code == FSE_AZ_RX_DESCQ_FLS_DONE_EV) {
                        ev_queue = EFX_QWORD_FIELD(
                                *event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
                        ev_failed = EFX_QWORD_FIELD(
                                *event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
                        if (ev_queue < efx->n_rx_queues) {
                                rx_queue = efx->rx_queue + ev_queue;
                                rx_queue->flushed =
                                        ev_failed ? FLUSH_FAILED : FLUSH_DONE;
                        }
                }

                /* We're about to destroy the queue anyway, so
                 * it's ok to throw away every non-flush event */
                EFX_SET_QWORD(*event);

                read_ptr = (read_ptr + 1) & EFX_EVQ_MASK;
        } while (read_ptr != end_ptr);

        channel->eventq_read_ptr = read_ptr;
}
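
/* Since end_ptr is (read_ptr - 1) masked, the scan above is bounded to
 * at most one full trip around the event ring even if every slot holds
 * an event; in practice it stops at the first empty slot.
 */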

/* Handle tx and rx flushes at the same time, since they run in
 * parallel in the hardware and there's no reason for us to
 * serialise them */
int efx_nic_flush_queues(struct efx_nic *efx)
{
        struct efx_rx_queue *rx_queue;
        struct efx_tx_queue *tx_queue;
        int i, tx_pending, rx_pending;

        /* If necessary prepare the hardware for flushing */
        efx->type->prepare_flush(efx);

        /* Flush all tx queues in parallel */
        efx_for_each_tx_queue(tx_queue, efx)
                efx_flush_tx_queue(tx_queue);

        /* The hardware supports four concurrent rx flushes, each of which may
         * need to be retried if there is an outstanding descriptor fetch */
        for (i = 0; i < EFX_FLUSH_POLL_COUNT; ++i) {
                rx_pending = tx_pending = 0;
                efx_for_each_rx_queue(rx_queue, efx) {
                        if (rx_queue->flushed == FLUSH_PENDING)
                                ++rx_pending;
                }
                efx_for_each_rx_queue(rx_queue, efx) {
                        if (rx_pending == EFX_RX_FLUSH_COUNT)
                                break;
                        if (rx_queue->flushed == FLUSH_FAILED ||
                            rx_queue->flushed == FLUSH_NONE) {
                                efx_flush_rx_queue(rx_queue);
                                ++rx_pending;
                        }
                }
                efx_for_each_tx_queue(tx_queue, efx) {
                        if (tx_queue->flushed != FLUSH_DONE)
                                ++tx_pending;
                }

                if (rx_pending == 0 && tx_pending == 0)
                        return 0;

                msleep(EFX_FLUSH_INTERVAL);
                efx_poll_flush_events(efx);
        }

        /* Mark the queues as all flushed. We're going to return failure
         * leading to a reset, or fake up success anyway */
        efx_for_each_tx_queue(tx_queue, efx) {
                if (tx_queue->flushed != FLUSH_DONE)
                        EFX_ERR(efx, "tx queue %d flush command timed out\n",
                                tx_queue->queue);
                tx_queue->flushed = FLUSH_DONE;
        }
        efx_for_each_rx_queue(rx_queue, efx) {
                if (rx_queue->flushed != FLUSH_DONE)
                        EFX_ERR(efx, "rx queue %d flush command timed out\n",
                                rx_queue->queue);
                rx_queue->flushed = FLUSH_DONE;
        }

        if (EFX_WORKAROUND_7803(efx))
                return 0;

        return -ETIMEDOUT;
}
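
/* The flush protocol in summary: all TX flushes are posted up front,
 * RX flushes are throttled to at most EFX_RX_FLUSH_COUNT (4)
 * outstanding requests with failed queues re-posted, and completions
 * arrive as DRIVER_EV *_DESCQ_FLS_DONE events collected by
 * efx_poll_flush_events(). The poll loop gives up after
 * EFX_FLUSH_POLL_COUNT * EFX_FLUSH_INTERVAL ms, i.e. about a second.
 */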

/**************************************************************************
 *
 * Hardware interrupts
 * The hardware interrupt handler does very little work; all the event
 * queue processing is carried out by per-channel tasklets.
 *
 **************************************************************************/

/* Enable/disable/generate interrupts */
static inline void efx_nic_interrupts(struct efx_nic *efx,
                                      bool enabled, bool force)
{
        efx_oword_t int_en_reg_ker;

        EFX_POPULATE_OWORD_2(int_en_reg_ker,
                             FRF_AZ_KER_INT_KER, force,
                             FRF_AZ_DRV_INT_EN_KER, enabled);
        efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
}

void efx_nic_enable_interrupts(struct efx_nic *efx)
{
        struct efx_channel *channel;

        EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
        wmb(); /* Ensure interrupt vector is clear before interrupts enabled */

        /* Enable interrupts */
        efx_nic_interrupts(efx, true, false);

        /* Force processing of all the channels to get the EVQ RPTRs up to
           date */
        efx_for_each_channel(channel, efx)
                efx_schedule_channel(channel);
}

void efx_nic_disable_interrupts(struct efx_nic *efx)
{
        /* Disable interrupts */
        efx_nic_interrupts(efx, false, false);
}

/* Generate a test interrupt
 * Interrupts must already have been enabled, otherwise nasty things
 * may happen.
 */
void efx_nic_generate_interrupt(struct efx_nic *efx)
{
        efx_nic_interrupts(efx, true, true);
}

/* Process a fatal interrupt
 * Disable bus mastering ASAP and schedule a reset
 */
irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx)
{
        struct falcon_nic_data *nic_data = efx->nic_data;
        efx_oword_t *int_ker = efx->irq_status.addr;
        efx_oword_t fatal_intr;
        int error, mem_perr;

        efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
        error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);

        EFX_ERR(efx, "SYSTEM ERROR " EFX_OWORD_FMT " status "
                EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
                EFX_OWORD_VAL(fatal_intr),
                error ? "disabling bus mastering" : "no recognised error");
        if (error == 0)
                goto out;

        /* If this is a memory parity error dump which blocks are offending */
        mem_perr = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER);
        if (mem_perr) {
                efx_oword_t reg;
                efx_reado(efx, &reg, FR_AZ_MEM_STAT);
                EFX_ERR(efx, "SYSTEM ERROR: memory parity error "
                        EFX_OWORD_FMT "\n", EFX_OWORD_VAL(reg));
        }

        /* Disable both devices */
        pci_clear_master(efx->pci_dev);
        if (efx_nic_is_dual_func(efx))
                pci_clear_master(nic_data->pci_dev2);
        efx_nic_disable_interrupts(efx);

        /* Count errors and reset or disable the NIC accordingly */
        if (efx->int_error_count == 0 ||
            time_after(jiffies, efx->int_error_expire)) {
                efx->int_error_count = 0;
                efx->int_error_expire =
                        jiffies + EFX_INT_ERROR_EXPIRE * HZ;
        }
        if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
                EFX_ERR(efx, "SYSTEM ERROR - reset scheduled\n");
                efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
        } else {
                EFX_ERR(efx, "SYSTEM ERROR - max number of errors seen. "
                        "NIC will be disabled\n");
                efx_schedule_reset(efx, RESET_TYPE_DISABLE);
        }
out:
        return IRQ_HANDLED;
}
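
/* The rate limiting above is a sliding window: the error count resets
 * once the previous window (EFX_INT_ERROR_EXPIRE seconds, an hour by
 * default) has expired, so it takes EFX_MAX_INT_ERRORS (5) fatal
 * interrupts within one window to escalate from scheduling a reset to
 * disabling the NIC outright.
 */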

/* Handle a legacy interrupt
 * Acknowledges the interrupt and schedules event queue processing.
 */
static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
{
        struct efx_nic *efx = dev_id;
        efx_oword_t *int_ker = efx->irq_status.addr;
        irqreturn_t result = IRQ_NONE;
        struct efx_channel *channel;
        efx_dword_t reg;
        u32 queues;
        int syserr;

        /* Read the ISR which also ACKs the interrupts */
        efx_readd(efx, &reg, FR_BZ_INT_ISR0);
        queues = EFX_EXTRACT_DWORD(reg, 0, 31);

        /* Check to see if we have a serious error condition */
        syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
        if (unlikely(syserr))
                return efx_nic_fatal_interrupt(efx);

        /* Schedule processing of any interrupting queues */
        efx_for_each_channel(channel, efx) {
                if ((queues & 1) ||
                    efx_event_present(
                            efx_event(channel, channel->eventq_read_ptr))) {
                        efx_schedule_channel(channel);
                        result = IRQ_HANDLED;
                }
                queues >>= 1;
        }

        if (result == IRQ_HANDLED) {
                efx->last_irq_cpu = raw_smp_processor_id();
                EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
                          irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
        }

        return result;
}

/* Handle an MSI interrupt
 *
 * Handle an MSI hardware interrupt. This routine schedules event
 * queue processing. No interrupt acknowledgement cycle is necessary.
 * Also, we never need to check that the interrupt is for us, since
 * MSI interrupts cannot be shared.
 */
static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)
{
        struct efx_channel *channel = dev_id;
        struct efx_nic *efx = channel->efx;
        efx_oword_t *int_ker = efx->irq_status.addr;
        int syserr;

        efx->last_irq_cpu = raw_smp_processor_id();
        EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
                  irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));

        /* Check to see if we have a serious error condition */
        syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
        if (unlikely(syserr))
                return efx_nic_fatal_interrupt(efx);

        /* Schedule processing of the channel */
        efx_schedule_channel(channel);

        return IRQ_HANDLED;
}


/* Setup RSS indirection table.
 * This maps from the hash value of the packet to RXQ
 */
static void efx_setup_rss_indir_table(struct efx_nic *efx)
{
        int i = 0;
        unsigned long offset;
        efx_dword_t dword;

        if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
                return;

        for (offset = FR_BZ_RX_INDIRECTION_TBL;
             offset < FR_BZ_RX_INDIRECTION_TBL + 0x800;
             offset += 0x10) {
                EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
                                     i % efx->n_rx_queues);
                efx_writed(efx, &dword, offset);
                i++;
        }
}
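
/* The loop above covers 0x800 bytes of register space in 0x10-byte
 * steps, i.e. 128 indirection table entries, filled round-robin with
 * i % efx->n_rx_queues so RSS hash values spread evenly across the
 * active RX queues.
 */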

/* Hook interrupt handler(s)
 * Try MSI and then legacy interrupts.
 */
int efx_nic_init_interrupt(struct efx_nic *efx)
{
        struct efx_channel *channel;
        int rc;

        if (!EFX_INT_MODE_USE_MSI(efx)) {
                irq_handler_t handler;
                if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
                        handler = efx_legacy_interrupt;
                else
                        handler = falcon_legacy_interrupt_a1;

                rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED,
                                 efx->name, efx);
                if (rc) {
                        EFX_ERR(efx, "failed to hook legacy IRQ %d\n",
                                efx->pci_dev->irq);
                        goto fail1;
                }
                return 0;
        }

        /* Hook MSI or MSI-X interrupt */
        efx_for_each_channel(channel, efx) {
                rc = request_irq(channel->irq, efx_msi_interrupt,
                                 IRQF_PROBE_SHARED, /* Not shared */
                                 channel->name, channel);
                if (rc) {
                        EFX_ERR(efx, "failed to hook IRQ %d\n", channel->irq);
                        goto fail2;
                }
        }

        return 0;

fail2:
        efx_for_each_channel(channel, efx)
                free_irq(channel->irq, channel);
fail1:
        return rc;
}

void efx_nic_fini_interrupt(struct efx_nic *efx)
{
        struct efx_channel *channel;
        efx_oword_t reg;

        /* Disable MSI/MSI-X interrupts */
        efx_for_each_channel(channel, efx) {
                if (channel->irq)
                        free_irq(channel->irq, channel);
        }

        /* ACK legacy interrupt */
        if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
                efx_reado(efx, &reg, FR_BZ_INT_ISR0);
        else
                falcon_irq_ack_a1(efx);

        /* Disable legacy interrupt */
        if (efx->legacy_irq)
                free_irq(efx->legacy_irq, efx);
}

u32 efx_nic_fpga_ver(struct efx_nic *efx)
{
        efx_oword_t altera_build;
        efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
        return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
}

void efx_nic_init_common(struct efx_nic *efx)
{
        efx_oword_t temp;

        /* Set positions of descriptor caches in SRAM. */
        EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR,
                             efx->type->tx_dc_base / 8);
        efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
        EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR,
                             efx->type->rx_dc_base / 8);
        efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);

        /* Set TX descriptor cache size. */
        BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
        EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
        efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);

        /* Set RX descriptor cache size. Set low watermark to size-8, as
         * this allows most efficient prefetching.
         */
        BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
        EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
        efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
        EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
        efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);

        /* Program INT_KER address */
        EFX_POPULATE_OWORD_2(temp,
                             FRF_AZ_NORM_INT_VEC_DIS_KER,
                             EFX_INT_MODE_USE_MSI(efx),
                             FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
        efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);

        /* Enable all the genuinely fatal interrupts. (They are still
         * masked by the overall interrupt mask, controlled by
         * falcon_interrupts()).
         *
         * Note: All other fatal interrupts are enabled
         */
        EFX_POPULATE_OWORD_3(temp,
                             FRF_AZ_ILL_ADR_INT_KER_EN, 1,
                             FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
                             FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
        EFX_INVERT_OWORD(temp);
        efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);

        efx_setup_rss_indir_table(efx);

        /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
         * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
         */
        efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
        EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
        EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
        EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
        EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 0);
        EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
        /* Enable SW_EV to inherit in char driver - assume harmless here */
        EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
        /* Prefetch threshold 2 => fetch when descriptor cache half empty */
        EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
        /* Squash TX of packets of 16 bytes or less */
        if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
                EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
        efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
}