/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2009 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 ****************************************************************************/

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
#include "nic.h"
#include "regs.h"
#include "io.h"
#include "workarounds.h"
/**************************************************************************
 *
 * Configurable values
 *
 **************************************************************************/

/* This is set to 16 for a good reason.  In summary, if larger than
 * 16, the descriptor cache holds more than a default socket
 * buffer's worth of packets (for UDP we can only have at most one
 * socket buffer's worth outstanding).  This combined with the fact
 * that we only get 1 TX event per descriptor cache means the NIC
 * goes idle.
 */
#define TX_DC_ENTRIES 16
#define TX_DC_ENTRIES_ORDER 1

#define RX_DC_ENTRIES 64
#define RX_DC_ENTRIES_ORDER 3
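
/* In both cases the *_ORDER value encodes the cache size as
 * entries = 8 << order, e.g. 8 << 1 = 16 TX entries and 8 << 3 = 64 RX
 * entries.  This is the relation checked by the BUILD_BUG_ON()s in
 * efx_nic_init_common(), and it is the order, not the raw entry count,
 * that is written to the FRF_AZ_TX_DC_SIZE/FRF_AZ_RX_DC_SIZE fields.
 */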
/* RX FIFO XOFF watermark
 *
 * When the amount of the RX FIFO used increases past this
 * watermark send XOFF. Only used if RX flow control is enabled (ethtool -A)
 * This also has an effect on RX/TX arbitration
 */
int efx_nic_rx_xoff_thresh = -1;
module_param_named(rx_xoff_thresh_bytes, efx_nic_rx_xoff_thresh, int, 0644);
MODULE_PARM_DESC(rx_xoff_thresh_bytes, "RX fifo XOFF threshold");

/* RX FIFO XON watermark
 *
 * When the amount of the RX FIFO used decreases below this
 * watermark send XON. Only used if TX flow control is enabled (ethtool -A)
 * This also has an effect on RX/TX arbitration
 */
int efx_nic_rx_xon_thresh = -1;
module_param_named(rx_xon_thresh_bytes, efx_nic_rx_xon_thresh, int, 0644);
MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
/* If EFX_MAX_INT_ERRORS internal errors occur within
 * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
 * disable it.
 */
#define EFX_INT_ERROR_EXPIRE 3600
#define EFX_MAX_INT_ERRORS 5

/* We poll for events every FLUSH_INTERVAL ms, and check FLUSH_POLL_COUNT times
 */
#define EFX_FLUSH_INTERVAL 10
#define EFX_FLUSH_POLL_COUNT 100

/* Size and alignment of special buffers (4KB) */
#define EFX_BUF_SIZE 4096

/* Depth of RX flush request fifo */
#define EFX_RX_FLUSH_COUNT 4

/* Generated event code for efx_generate_test_event() */
#define EFX_CHANNEL_MAGIC_TEST(_channel)	\
	(0x00010100 + (_channel)->channel)

/* Generated event code for efx_generate_fill_event() */
#define EFX_CHANNEL_MAGIC_FILL(_channel)	\
	(0x00010200 + (_channel)->channel)
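
/* A worked example of the encoding: for channel 3, a test event carries
 * magic 0x00010100 + 3 = 0x00010103 and a fill event 0x00010200 + 3 =
 * 0x00010203, so efx_handle_generated_event() can recover both the
 * purpose and the originating channel from the single magic field.
 */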
/**************************************************************************
 *
 * Solarstorm hardware access
 *
 **************************************************************************/

static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
				     unsigned int index)
{
	efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
			value, index);
}

/* Read the current event from the event queue */
static inline efx_qword_t *efx_event(struct efx_channel *channel,
				     unsigned int index)
{
	return ((efx_qword_t *) (channel->eventq.addr)) + index;
}

/* See if an event is present
 *
 * We check both the high and low dword of the event for all ones.  We
 * wrote all ones when we cleared the event, and no valid event can
 * have all ones in either its high or low dwords.  This approach is
 * robust against reordering.
 *
 * Note that using a single 64-bit comparison is incorrect; even
 * though the CPU read will be atomic, the DMA write may not be.
 */
static inline int efx_event_present(efx_qword_t *event)
{
	return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
		 EFX_DWORD_IS_ALL_ONES(event->dword[1]));
}
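
/* To illustrate the hazard named above: a 64-bit load racing with the
 * NIC's DMA write could observe a torn value such as
 * { dword[0] = new event data, dword[1] = 0xffffffff }, which is neither
 * all-ones nor a complete event.  Checking each dword separately means a
 * torn read is still classified as "not present" and simply retried on
 * the next poll.
 */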
static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
				     const efx_oword_t *mask)
{
	return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
		((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
}

int efx_nic_test_registers(struct efx_nic *efx,
			   const struct efx_nic_register_test *regs,
			   size_t n_regs)
{
	unsigned address = 0, i, j;
	efx_oword_t mask, imask, original, reg, buf;

	/* Falcon should be in loopback to isolate the XMAC from the PHY */
	WARN_ON(!LOOPBACK_INTERNAL(efx));

	for (i = 0; i < n_regs; ++i) {
		address = regs[i].address;
		mask = imask = regs[i].mask;
		EFX_INVERT_OWORD(imask);

		efx_reado(efx, &original, address);

		/* bit sweep on and off */
		for (j = 0; j < 128; j++) {
			if (!EFX_EXTRACT_OWORD32(mask, j, j))
				continue;

			/* Test this testable bit can be set in isolation */
			EFX_AND_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 1);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;

			/* Test this testable bit can be cleared in isolation */
			EFX_OR_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 0);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;
		}

		efx_writeo(efx, &original, address);
	}

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev,
		  "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
		  " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
		  EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
	return -EIO;
}
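
/* A minimal sketch of how a caller might drive this (the real test
 * tables live in the per-NIC code; the { address, mask } layout of
 * struct efx_nic_register_test and the specific mask value here are
 * illustrative assumptions, not taken from this file):
 *
 *	static const struct efx_nic_register_test test_regs[] = {
 *		{ FR_AZ_RX_CFG,
 *		  EFX_OWORD32(0x0003ffff, 0x00000000, 0x00000000,
 *			      0x00000000) },
 *	};
 *	rc = efx_nic_test_registers(efx, test_regs, ARRAY_SIZE(test_regs));
 *
 * Only bits set in the mask are swept; all other bits are preserved by
 * masking against the original register contents.
 */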
/**************************************************************************
 *
 * Special buffer handling
 * Special buffers are used for event queues and the TX and RX
 * descriptor rings.
 *
 *************************************************************************/

/*
 * Initialise a special buffer
 *
 * This will define a buffer (previously allocated via
 * efx_alloc_special_buffer()) in the buffer table, allowing
 * it to be used for event queues, descriptor rings etc.
 */
static void
efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	efx_qword_t buf_desc;
	int index;
	dma_addr_t dma_addr;
	int i;

	EFX_BUG_ON_PARANOID(!buffer->addr);

	/* Write buffer descriptors to NIC */
	for (i = 0; i < buffer->entries; i++) {
		index = buffer->index + i;
		dma_addr = buffer->dma_addr + (i * 4096);
		netif_dbg(efx, probe, efx->net_dev,
			  "mapping special buffer %d at %llx\n",
			  index, (unsigned long long)dma_addr);
		EFX_POPULATE_QWORD_3(buf_desc,
				     FRF_AZ_BUF_ADR_REGION, 0,
				     FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
				     FRF_AZ_BUF_OWNER_ID_FBUF, 0);
		efx_write_buf_tbl(efx, &buf_desc, index);
	}
}
/* Unmaps a buffer and clears the buffer table entries */
static void
efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	efx_oword_t buf_tbl_upd;
	unsigned int start = buffer->index;
	unsigned int end = (buffer->index + buffer->entries - 1);

	if (!buffer->entries)
		return;

	netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
		  buffer->index, buffer->index + buffer->entries - 1);

	EFX_POPULATE_OWORD_4(buf_tbl_upd,
			     FRF_AZ_BUF_UPD_CMD, 0,
			     FRF_AZ_BUF_CLR_CMD, 1,
			     FRF_AZ_BUF_CLR_END_ID, end,
			     FRF_AZ_BUF_CLR_START_ID, start);
	efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
}
/*
 * Allocate a new special buffer
 *
 * This allocates memory for a new buffer, clears it and allocates a
 * new buffer ID range.  It does not write into the buffer table.
 *
 * This call will allocate 4KB buffers, since 8KB buffers can't be
 * used for event queues and descriptor rings.
 */
static int efx_alloc_special_buffer(struct efx_nic *efx,
				    struct efx_special_buffer *buffer,
				    unsigned int len)
{
	len = ALIGN(len, EFX_BUF_SIZE);

	buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
					  &buffer->dma_addr, GFP_KERNEL);
	if (!buffer->addr)
		return -ENOMEM;
	buffer->len = len;
	buffer->entries = len / EFX_BUF_SIZE;
	BUG_ON(buffer->dma_addr & (EFX_BUF_SIZE - 1));

	/* All zeros is a potentially valid event so memset to 0xff */
	memset(buffer->addr, 0xff, len);

	/* Select new buffer ID */
	buffer->index = efx->next_buffer_table;
	efx->next_buffer_table += buffer->entries;

	netif_dbg(efx, probe, efx->net_dev,
		  "allocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->dma_addr, len,
		  buffer->addr, (u64)virt_to_phys(buffer->addr));

	return 0;
}
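
/* Typical lifecycle of a special buffer, as used by the TX, RX and
 * event-queue code below (a sketch of the call pattern, not a new API):
 *
 *	probe:	efx_alloc_special_buffer()  - reserve IDs and DMA memory
 *	init:	efx_init_special_buffer()   - program the buffer table
 *	fini:	efx_fini_special_buffer()   - clear the buffer table
 *	remove:	efx_free_special_buffer()   - release the DMA memory
 */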
static void
efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	if (!buffer->addr)
		return;

	netif_dbg(efx, hw, efx->net_dev,
		  "deallocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->dma_addr, buffer->len,
		  buffer->addr, (u64)virt_to_phys(buffer->addr));

	dma_free_coherent(&efx->pci_dev->dev, buffer->len, buffer->addr,
			  buffer->dma_addr);
	buffer->addr = NULL;
	buffer->entries = 0;
}
/**************************************************************************
 *
 * Generic buffer handling
 * These buffers are used for interrupt status and MAC stats
 *
 **************************************************************************/

int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
			 unsigned int len)
{
	buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
					    &buffer->dma_addr);
	if (!buffer->addr)
		return -ENOMEM;
	buffer->len = len;
	memset(buffer->addr, 0, len);
	return 0;
}

void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
{
	if (buffer->addr) {
		pci_free_consistent(efx->pci_dev, buffer->len,
				    buffer->addr, buffer->dma_addr);
		buffer->addr = NULL;
	}
}
/**************************************************************************
 *
 * TX path
 *
 **************************************************************************/

/* Returns a pointer to the specified transmit descriptor in the TX
 * descriptor queue belonging to the specified channel.
 */
static inline efx_qword_t *
efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
{
	return ((efx_qword_t *) (tx_queue->txd.addr)) + index;
}

/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
static inline void efx_notify_tx_desc(struct efx_tx_queue *tx_queue)
{
	unsigned write_ptr;
	efx_dword_t reg;

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(tx_queue->efx, &reg,
			FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
}
/* For each entry inserted into the software descriptor ring, create a
 * descriptor in the hardware TX descriptor ring (in host memory), and
 * write a doorbell.
 */
void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;
	efx_qword_t *txd;
	unsigned write_ptr;

	BUG_ON(tx_queue->write_count == tx_queue->insert_count);

	do {
		write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[write_ptr];
		txd = efx_tx_desc(tx_queue, write_ptr);
		++tx_queue->write_count;

		/* Create TX descriptor ring entry */
		EFX_POPULATE_QWORD_4(*txd,
				     FSF_AZ_TX_KER_CONT, buffer->continuation,
				     FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
				     FSF_AZ_TX_KER_BUF_REGION, 0,
				     FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
	} while (tx_queue->write_count != tx_queue->insert_count);

	wmb(); /* Ensure descriptors are written before they are fetched */
	efx_notify_tx_desc(tx_queue);
}
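
/* The ring arithmetic above relies on the ring size being a power of
 * two: write_count increments without bound and ptr_mask = size - 1, so
 * with a 512-entry ring, for example, a write_count of 515 maps to slot
 * 515 & 511 = 3.  The same masking idiom is used for the RX and event
 * rings below.
 */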
/* Allocate hardware resources for a TX queue */
int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned entries;

	entries = tx_queue->ptr_mask + 1;
	return efx_alloc_special_buffer(efx, &tx_queue->txd,
					entries * sizeof(efx_qword_t));
}

void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
{
	efx_oword_t tx_desc_ptr;
	struct efx_nic *efx = tx_queue->efx;

	tx_queue->flushed = FLUSH_NONE;

	/* Pin TX descriptor ring */
	efx_init_special_buffer(efx, &tx_queue->txd);

	/* Push TX descriptor ring to card */
	EFX_POPULATE_OWORD_10(tx_desc_ptr,
			      FRF_AZ_TX_DESCQ_EN, 1,
			      FRF_AZ_TX_ISCSI_DDIG_EN, 0,
			      FRF_AZ_TX_ISCSI_HDIG_EN, 0,
			      FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
			      FRF_AZ_TX_DESCQ_EVQ_ID,
			      tx_queue->channel->channel,
			      FRF_AZ_TX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
			      FRF_AZ_TX_DESCQ_SIZE,
			      __ffs(tx_queue->txd.entries),
			      FRF_AZ_TX_DESCQ_TYPE, 0,
			      FRF_BZ_TX_NON_IP_DROP_DIS, 1);

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
		EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
		EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_TCP_CHKSM_DIS,
				    !csum);
	}

	efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
		efx_oword_t reg;

		/* Only 128 bits in this register */
		BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);

		efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
		if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
			clear_bit_le(tx_queue->queue, (void *)&reg);
		else
			set_bit_le(tx_queue->queue, (void *)&reg);
		efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
	}
}
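
/* Note that txd.entries counts the 4KB buffer-table pages backing the
 * ring (it is set to len / EFX_BUF_SIZE in efx_alloc_special_buffer()),
 * not descriptors, so FRF_AZ_TX_DESCQ_SIZE receives the log2 page count:
 * for example a 1024-descriptor ring of 8-byte descriptors spans
 * 8KB = 2 pages and __ffs(2) = 1.  The event queue size field below is
 * encoded the same way.
 */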
static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_flush_descq;

	tx_queue->flushed = FLUSH_PENDING;

	/* Post a flush command */
	EFX_POPULATE_OWORD_2(tx_flush_descq,
			     FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
	efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
}

void efx_nic_fini_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_desc_ptr;

	/* The queue should have been flushed */
	WARN_ON(tx_queue->flushed != FLUSH_DONE);

	/* Remove TX descriptor ring from card */
	EFX_ZERO_OWORD(tx_desc_ptr);
	efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	/* Unpin TX descriptor ring */
	efx_fini_special_buffer(efx, &tx_queue->txd);
}

/* Free buffers backing TX queue */
void efx_nic_remove_tx(struct efx_tx_queue *tx_queue)
{
	efx_free_special_buffer(tx_queue->efx, &tx_queue->txd);
}
/**************************************************************************
 *
 * RX path
 *
 **************************************************************************/

/* Returns a pointer to the specified descriptor in the RX descriptor queue */
static inline efx_qword_t *
efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
{
	return ((efx_qword_t *) (rx_queue->rxd.addr)) + index;
}

/* This creates an entry in the RX descriptor queue */
static inline void
efx_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
{
	struct efx_rx_buffer *rx_buf;
	efx_qword_t *rxd;

	rxd = efx_rx_desc(rx_queue, index);
	rx_buf = efx_rx_buffer(rx_queue, index);
	EFX_POPULATE_QWORD_3(*rxd,
			     FSF_AZ_RX_KER_BUF_SIZE,
			     rx_buf->len -
			     rx_queue->efx->type->rx_buffer_padding,
			     FSF_AZ_RX_KER_BUF_REGION, 0,
			     FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
}
/* This writes to the RX_DESC_WPTR register for the specified receive
 * descriptor queue.
 */
void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_dword_t reg;
	unsigned write_ptr;

	while (rx_queue->notified_count != rx_queue->added_count) {
		efx_build_rx_desc(
			rx_queue,
			rx_queue->notified_count & rx_queue->ptr_mask);
		++rx_queue->notified_count;
	}

	wmb();
	write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
			efx_rx_queue_index(rx_queue));
}
int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned entries;

	entries = rx_queue->ptr_mask + 1;
	return efx_alloc_special_buffer(efx, &rx_queue->rxd,
					entries * sizeof(efx_qword_t));
}

void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;
	bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
	bool iscsi_digest_en = is_b0;

	netif_dbg(efx, hw, efx->net_dev,
		  "RX queue %d ring in special buffers %d-%d\n",
		  efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
		  rx_queue->rxd.index + rx_queue->rxd.entries - 1);

	rx_queue->flushed = FLUSH_NONE;

	/* Pin RX descriptor ring */
	efx_init_special_buffer(efx, &rx_queue->rxd);

	/* Push RX descriptor ring to card */
	EFX_POPULATE_OWORD_10(rx_desc_ptr,
			      FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
			      FRF_AZ_RX_DESCQ_EVQ_ID,
			      efx_rx_queue_channel(rx_queue)->channel,
			      FRF_AZ_RX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_RX_DESCQ_LABEL,
			      efx_rx_queue_index(rx_queue),
			      FRF_AZ_RX_DESCQ_SIZE,
			      __ffs(rx_queue->rxd.entries),
			      FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
			      /* For >=B0 this is scatter so disable */
			      FRF_AZ_RX_DESCQ_JUMBO, !is_b0,
			      FRF_AZ_RX_DESCQ_EN, 1);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 efx_rx_queue_index(rx_queue));
}
static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_oword_t rx_flush_descq;

	rx_queue->flushed = FLUSH_PENDING;

	/* Post a flush command */
	EFX_POPULATE_OWORD_2(rx_flush_descq,
			     FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_RX_FLUSH_DESCQ,
			     efx_rx_queue_index(rx_queue));
	efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
}

void efx_nic_fini_rx(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;

	/* The queue should already have been flushed */
	WARN_ON(rx_queue->flushed != FLUSH_DONE);

	/* Remove RX descriptor ring from card */
	EFX_ZERO_OWORD(rx_desc_ptr);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 efx_rx_queue_index(rx_queue));

	/* Unpin RX descriptor ring */
	efx_fini_special_buffer(efx, &rx_queue->rxd);
}

/* Free buffers backing RX queue */
void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
{
	efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
}
/**************************************************************************
 *
 * Event queue processing
 * Event queues are processed by per-channel tasklets.
 *
 **************************************************************************/

/* Update a channel's event queue's read pointer (RPTR) register
 *
 * This writes the EVQ_RPTR_REG register for the specified channel's
 * event queue.
 */
void efx_nic_eventq_read_ack(struct efx_channel *channel)
{
	efx_dword_t reg;
	struct efx_nic *efx = channel->efx;

	EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR, channel->eventq_read_ptr);
	efx_writed_table(efx, &reg, efx->type->evq_rptr_tbl_base,
			 channel->channel);
}
/* Use HW to insert a SW defined event */
static void efx_generate_event(struct efx_channel *channel, efx_qword_t *event)
{
	efx_oword_t drv_ev_reg;

	BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
		     FRF_AZ_DRV_EV_DATA_WIDTH != 64);
	drv_ev_reg.u32[0] = event->u32[0];
	drv_ev_reg.u32[1] = event->u32[1];
	drv_ev_reg.u32[2] = 0;
	drv_ev_reg.u32[3] = 0;
	EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, channel->channel);
	efx_writeo(channel->efx, &drv_ev_reg, FR_AZ_DRV_EV);
}
/* Handle a transmit completion event
 *
 * The NIC batches TX completion events; the message we receive is of
 * the form "complete all TX events up to this index".
 */
static int
efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
{
	unsigned int tx_ev_desc_ptr;
	unsigned int tx_ev_q_label;
	struct efx_tx_queue *tx_queue;
	struct efx_nic *efx = channel->efx;
	int tx_packets = 0;

	if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
		/* Transmit completion */
		tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = efx_channel_get_tx_queue(
			channel, tx_ev_q_label % EFX_TXQ_TYPES);
		tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
			      tx_queue->ptr_mask);
		channel->irq_mod_score += tx_packets;
		efx_xmit_done(tx_queue, tx_ev_desc_ptr);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
		/* Rewrite the FIFO write pointer */
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = efx_channel_get_tx_queue(
			channel, tx_ev_q_label % EFX_TXQ_TYPES);

		if (efx_dev_registered(efx))
			netif_tx_lock(efx->net_dev);
		efx_notify_tx_desc(tx_queue);
		if (efx_dev_registered(efx))
			netif_tx_unlock(efx->net_dev);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) &&
		   EFX_WORKAROUND_10727(efx)) {
		efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
	} else {
		netif_err(efx, tx_err, efx->net_dev,
			  "channel %d unexpected TX event "
			  EFX_QWORD_FMT"\n", channel->channel,
			  EFX_QWORD_VAL(*event));
	}

	return tx_packets;
}
/* Detect errors included in the rx_evt_pkt_ok bit. */
static void efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
				 const efx_qword_t *event,
				 bool *rx_ev_pkt_ok,
				 bool *discard)
{
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_nic *efx = rx_queue->efx;
	bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
	bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
	bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
	bool rx_ev_other_err, rx_ev_pause_frm;
	bool rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned rx_ev_pkt_type;

	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
	rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
	rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
						 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
	rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
						  FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
	rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
						   FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
	rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
	rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
	rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ?
			  0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
	rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);

	/* Every error apart from tobe_disc and pause_frm */
	rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
			   rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
			   rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);

	/* Count errors that are not in MAC stats.  Ignore expected
	 * checksum errors during self-test. */
	if (rx_ev_frm_trunc)
		++channel->n_rx_frm_trunc;
	else if (rx_ev_tobe_disc)
		++channel->n_rx_tobe_disc;
	else if (!efx->loopback_selftest) {
		if (rx_ev_ip_hdr_chksum_err)
			++channel->n_rx_ip_hdr_chksum_err;
		else if (rx_ev_tcp_udp_chksum_err)
			++channel->n_rx_tcp_udp_chksum_err;
	}

	/* The frame must be discarded if any of these are true. */
	*discard = (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
		    rx_ev_tobe_disc | rx_ev_pause_frm);

	/* TOBE_DISC is expected on unicast mismatches; don't print out an
	 * error message.  FRM_TRUNC indicates RXDP dropped the packet due
	 * to a FIFO overflow.
	 */
#ifdef EFX_ENABLE_DEBUG
	if (rx_ev_other_err && net_ratelimit()) {
		netif_dbg(efx, rx_err, efx->net_dev,
			  " RX queue %d unexpected RX event "
			  EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
			  efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event),
			  rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
			  rx_ev_ip_hdr_chksum_err ?
			  " [IP_HDR_CHKSUM_ERR]" : "",
			  rx_ev_tcp_udp_chksum_err ?
			  " [TCP_UDP_CHKSUM_ERR]" : "",
			  rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
			  rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
			  rx_ev_drib_nib ? " [DRIB_NIB]" : "",
			  rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
			  rx_ev_pause_frm ? " [PAUSE]" : "");
	}
#endif
}
/* Handle receive events that are not in-order. */
static void
efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned expected, dropped;

	expected = rx_queue->removed_count & rx_queue->ptr_mask;
	dropped = (index - expected) & rx_queue->ptr_mask;
	netif_info(efx, rx_err, efx->net_dev,
		   "dropped %d events (index=%d expected=%d)\n",
		   dropped, index, expected);

	efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
			   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
}
/* Handle a packet received event
 *
 * The NIC gives a "discard" flag if it's a unicast packet with the
 * wrong destination address.
 * Also "is multicast" and "matches multicast filter" flags can be used to
 * discard non-matching multicast packets.
 */
static void
efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
{
	unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
	unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned expected_ptr;
	bool rx_ev_pkt_ok, discard = false, checksummed;
	struct efx_rx_queue *rx_queue;
	struct efx_nic *efx = channel->efx;

	/* Basic packet information */
	rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
	rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT));
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP) != 1);
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
		channel->channel);

	rx_queue = efx_channel_get_rx_queue(channel);

	rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
	expected_ptr = rx_queue->removed_count & rx_queue->ptr_mask;
	if (unlikely(rx_ev_desc_ptr != expected_ptr))
		efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);

	if (likely(rx_ev_pkt_ok)) {
		/* If packet is marked as OK and packet type is TCP/IP or
		 * UDP/IP, then we can rely on the hardware checksum.
		 */
		checksummed =
			likely(efx->rx_checksum_enabled) &&
			(rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP ||
			 rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP);
	} else {
		efx_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok, &discard);
		checksummed = false;
	}

	/* Detect multicast packets that didn't match the filter */
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	if (rx_ev_mcast_pkt) {
		unsigned int rx_ev_mcast_hash_match =
			EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);

		if (unlikely(!rx_ev_mcast_hash_match)) {
			++channel->n_rx_mcast_mismatch;
			discard = true;
		}
	}

	channel->irq_mod_score += 2;

	/* Handle received packet */
	efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt,
		      checksummed, discard);
}
static void
efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	unsigned code;

	code = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
	if (code == EFX_CHANNEL_MAGIC_TEST(channel))
		++channel->magic_count;
	else if (code == EFX_CHANNEL_MAGIC_FILL(channel))
		/* The queue must be empty, so we won't receive any rx
		 * events, so efx_process_channel() won't refill the
		 * queue.  Refill it here */
		efx_fast_push_rx_descriptors(efx_channel_get_rx_queue(channel));
	else
		netif_dbg(efx, hw, efx->net_dev, "channel %d received "
			  "generated event "EFX_QWORD_FMT"\n",
			  channel->channel, EFX_QWORD_VAL(*event));
}
/* Global events are basically PHY events */
static void
efx_handle_global_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	bool handled = false;

	if (EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_G_PHY0_INTR) ||
	    EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XG_PHY0_INTR) ||
	    EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XFP_PHY0_INTR)) {
		/* Ignored */
		handled = true;
	}

	if ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) &&
	    EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_XG_MGT_INTR)) {
		efx->xmac_poll_required = true;
		handled = true;
	}

	if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ?
	    EFX_QWORD_FIELD(*event, FSF_AA_GLB_EV_RX_RECOVERY) :
	    EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_RX_RECOVERY)) {
		netif_err(efx, rx_err, efx->net_dev,
			  "channel %d seen global RX_RESET event. Resetting.\n",
			  channel->channel);

		atomic_inc(&efx->rx_reset);
		efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ?
				   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
		handled = true;
	}

	if (!handled)
		netif_err(efx, hw, efx->net_dev,
			  "channel %d unknown global event "
			  EFX_QWORD_FMT"\n", channel->channel,
			  EFX_QWORD_VAL(*event));
}
static void
efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	unsigned int ev_sub_code;
	unsigned int ev_sub_data;

	ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
	ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);

	switch (ev_sub_code) {
	case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AZ_EVQ_INIT_DONE_EV:
		netif_dbg(efx, hw, efx->net_dev,
			  "channel %d EVQ %d initialised\n",
			  channel->channel, ev_sub_data);
		break;
	case FSE_AZ_SRM_UPD_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d SRAM update done\n", channel->channel);
		break;
	case FSE_AZ_WAKE_UP_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d RXQ %d wakeup event\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AZ_TIMER_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d RX queue %d timer expired\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AA_RX_RECOVER_EV:
		netif_err(efx, rx_err, efx->net_dev,
			  "channel %d seen DRIVER RX_RESET event. "
			  "Resetting.\n", channel->channel);
		atomic_inc(&efx->rx_reset);
		efx_schedule_reset(efx,
				   EFX_WORKAROUND_6555(efx) ?
				   RESET_TYPE_RX_RECOVERY :
				   RESET_TYPE_DISABLE);
		break;
	case FSE_BZ_RX_DSC_ERROR_EV:
		netif_err(efx, rx_err, efx->net_dev,
			  "RX DMA Q %d reports descriptor fetch error."
			  " RX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
		efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
		break;
	case FSE_BZ_TX_DSC_ERROR_EV:
		netif_err(efx, tx_err, efx->net_dev,
			  "TX DMA Q %d reports descriptor fetch error."
			  " TX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
		efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
		break;
	default:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d unknown driver event code %d "
			   "data %04x\n", channel->channel, ev_sub_code,
			   ev_sub_data);
		break;
	}
}
int efx_nic_process_eventq(struct efx_channel *channel, int budget)
{
	struct efx_nic *efx = channel->efx;
	unsigned int read_ptr;
	efx_qword_t event, *p_event;
	int ev_code;
	int tx_packets = 0;
	int spent = 0;

	read_ptr = channel->eventq_read_ptr;

	for (;;) {
		p_event = efx_event(channel, read_ptr);
		event = *p_event;

		if (!efx_event_present(&event))
			/* End of events */
			break;

		netif_vdbg(channel->efx, intr, channel->efx->net_dev,
			   "channel %d event is "EFX_QWORD_FMT"\n",
			   channel->channel, EFX_QWORD_VAL(event));

		/* Clear this event by marking it all ones */
		EFX_SET_QWORD(*p_event);

		/* Increment read pointer */
		read_ptr = (read_ptr + 1) & channel->eventq_mask;

		ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);

		switch (ev_code) {
		case FSE_AZ_EV_CODE_RX_EV:
			efx_handle_rx_event(channel, &event);
			if (++spent == budget)
				goto out;
			break;
		case FSE_AZ_EV_CODE_TX_EV:
			tx_packets += efx_handle_tx_event(channel, &event);
			if (tx_packets > efx->txq_entries) {
				spent = budget;
				goto out;
			}
			break;
		case FSE_AZ_EV_CODE_DRV_GEN_EV:
			efx_handle_generated_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_GLOBAL_EV:
			efx_handle_global_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_DRIVER_EV:
			efx_handle_driver_event(channel, &event);
			break;
		case FSE_CZ_EV_CODE_MCDI_EV:
			efx_mcdi_process_event(channel, &event);
			break;
		default:
			netif_err(channel->efx, hw, channel->efx->net_dev,
				  "channel %d unknown event type %d (data "
				  EFX_QWORD_FMT ")\n", channel->channel,
				  ev_code, EFX_QWORD_VAL(event));
		}
	}

out:
	channel->eventq_read_ptr = read_ptr;
	return spent;
}
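
/* A sketch of how this is driven (the actual caller is the NAPI poll
 * handler in efx.c; details abbreviated and assumed here):
 *
 *	spent = efx_nic_process_eventq(channel, budget);
 *	if (spent < budget) {
 *		napi_complete(napi);
 *		efx_nic_eventq_read_ack(channel);	// re-arm the IRQ
 *	}
 *
 * Returning exactly "budget" tells NAPI to poll again without
 * re-arming the interrupt.
 */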
/* Allocate buffer table entries for event queue */
int efx_nic_probe_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	unsigned entries;

	entries = channel->eventq_mask + 1;
	return efx_alloc_special_buffer(efx, &channel->eventq,
					entries * sizeof(efx_qword_t));
}

void efx_nic_init_eventq(struct efx_channel *channel)
{
	efx_oword_t reg;
	struct efx_nic *efx = channel->efx;

	netif_dbg(efx, hw, efx->net_dev,
		  "channel %d event queue in special buffers %d-%d\n",
		  channel->channel, channel->eventq.index,
		  channel->eventq.index + channel->eventq.entries - 1);

	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
		EFX_POPULATE_OWORD_3(reg,
				     FRF_CZ_TIMER_Q_EN, 1,
				     FRF_CZ_HOST_NOTIFY_MODE, 0,
				     FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
		efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
	}

	/* Pin event queue buffer */
	efx_init_special_buffer(efx, &channel->eventq);

	/* Fill event queue with all ones (i.e. empty events) */
	memset(channel->eventq.addr, 0xff, channel->eventq.len);

	/* Push event queue to card */
	EFX_POPULATE_OWORD_3(reg,
			     FRF_AZ_EVQ_EN, 1,
			     FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
			     FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);

	efx->type->push_irq_moderation(channel);
}
void efx_nic_fini_eventq(struct efx_channel *channel)
{
	efx_oword_t reg;
	struct efx_nic *efx = channel->efx;

	/* Remove event queue from card */
	EFX_ZERO_OWORD(reg);
	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);
	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
		efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);

	/* Unpin event queue */
	efx_fini_special_buffer(efx, &channel->eventq);
}

/* Free buffers backing event queue */
void efx_nic_remove_eventq(struct efx_channel *channel)
{
	efx_free_special_buffer(channel->efx, &channel->eventq);
}
void efx_nic_generate_test_event(struct efx_channel *channel)
{
	unsigned int magic = EFX_CHANNEL_MAGIC_TEST(channel);
	efx_qword_t test_event;

	EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE,
			     FSE_AZ_EV_CODE_DRV_GEN_EV,
			     FSF_AZ_DRV_GEN_EV_MAGIC, magic);
	efx_generate_event(channel, &test_event);
}

void efx_nic_generate_fill_event(struct efx_channel *channel)
{
	unsigned int magic = EFX_CHANNEL_MAGIC_FILL(channel);
	efx_qword_t test_event;

	EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE,
			     FSE_AZ_EV_CODE_DRV_GEN_EV,
			     FSF_AZ_DRV_GEN_EV_MAGIC, magic);
	efx_generate_event(channel, &test_event);
}
/**************************************************************************
 *
 * Flush handling
 *
 **************************************************************************/

static void efx_poll_flush_events(struct efx_nic *efx)
{
	struct efx_channel *channel = efx_get_channel(efx, 0);
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	unsigned int read_ptr = channel->eventq_read_ptr;
	unsigned int end_ptr = (read_ptr - 1) & channel->eventq_mask;

	do {
		efx_qword_t *event = efx_event(channel, read_ptr);
		int ev_code, ev_sub_code, ev_queue;
		bool ev_failed;

		if (!efx_event_present(event))
			break;

		ev_code = EFX_QWORD_FIELD(*event, FSF_AZ_EV_CODE);
		ev_sub_code = EFX_QWORD_FIELD(*event,
					      FSF_AZ_DRIVER_EV_SUBCODE);
		if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
		    ev_sub_code == FSE_AZ_TX_DESCQ_FLS_DONE_EV) {
			ev_queue = EFX_QWORD_FIELD(*event,
						   FSF_AZ_DRIVER_EV_SUBDATA);
			if (ev_queue < EFX_TXQ_TYPES * efx->n_tx_channels) {
				tx_queue = efx_get_tx_queue(
					efx, ev_queue / EFX_TXQ_TYPES,
					ev_queue % EFX_TXQ_TYPES);
				tx_queue->flushed = FLUSH_DONE;
			}
		} else if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
			   ev_sub_code == FSE_AZ_RX_DESCQ_FLS_DONE_EV) {
			ev_queue = EFX_QWORD_FIELD(
				*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
			ev_failed = EFX_QWORD_FIELD(
				*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
			if (ev_queue < efx->n_rx_channels) {
				rx_queue = efx_get_rx_queue(efx, ev_queue);
				rx_queue->flushed =
					ev_failed ? FLUSH_FAILED : FLUSH_DONE;
			}
		}

		/* We're about to destroy the queue anyway, so
		 * it's ok to throw away every non-flush event */
		EFX_SET_QWORD(*event);

		read_ptr = (read_ptr + 1) & channel->eventq_mask;
	} while (read_ptr != end_ptr);

	channel->eventq_read_ptr = read_ptr;
}
/* Handle tx and rx flushes at the same time, since they run in
 * parallel in the hardware and there's no reason for us to
 * serialise them */
int efx_nic_flush_queues(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	int i, tx_pending, rx_pending;

	/* If necessary prepare the hardware for flushing */
	efx->type->prepare_flush(efx);

	/* Flush all tx queues in parallel */
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel)
			efx_flush_tx_queue(tx_queue);
	}

	/* The hardware supports four concurrent rx flushes, each of which may
	 * need to be retried if there is an outstanding descriptor fetch */
	for (i = 0; i < EFX_FLUSH_POLL_COUNT; ++i) {
		rx_pending = tx_pending = 0;
		efx_for_each_channel(channel, efx) {
			efx_for_each_channel_rx_queue(rx_queue, channel) {
				if (rx_queue->flushed == FLUSH_PENDING)
					++rx_pending;
			}
		}
		efx_for_each_channel(channel, efx) {
			efx_for_each_channel_rx_queue(rx_queue, channel) {
				if (rx_pending == EFX_RX_FLUSH_COUNT)
					break;
				if (rx_queue->flushed == FLUSH_FAILED ||
				    rx_queue->flushed == FLUSH_NONE) {
					efx_flush_rx_queue(rx_queue);
					++rx_pending;
				}
			}
			efx_for_each_channel_tx_queue(tx_queue, channel) {
				if (tx_queue->flushed != FLUSH_DONE)
					++tx_pending;
			}
		}

		if (rx_pending == 0 && tx_pending == 0)
			return 0;

		msleep(EFX_FLUSH_INTERVAL);
		efx_poll_flush_events(efx);
	}

	/* Mark the queues as all flushed.  We're going to return failure
	 * leading to a reset, or fake up success anyway */
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			if (tx_queue->flushed != FLUSH_DONE)
				netif_err(efx, hw, efx->net_dev,
					  "tx queue %d flush command timed out\n",
					  tx_queue->queue);
			tx_queue->flushed = FLUSH_DONE;
		}
		efx_for_each_channel_rx_queue(rx_queue, channel) {
			if (rx_queue->flushed != FLUSH_DONE)
				netif_err(efx, hw, efx->net_dev,
					  "rx queue %d flush command timed out\n",
					  efx_rx_queue_index(rx_queue));
			rx_queue->flushed = FLUSH_DONE;
		}
	}

	return -ETIMEDOUT;
}
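
/* In the worst case the loop above waits EFX_FLUSH_POLL_COUNT *
 * EFX_FLUSH_INTERVAL = 100 * 10ms = 1s before giving up; the timeout
 * return then drives a full reset via the caller, so faking FLUSH_DONE
 * on the stragglers is safe.
 */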
/**************************************************************************
 *
 * Hardware interrupts
 * The hardware interrupt handler does very little work; all the event
 * queue processing is carried out by per-channel tasklets.
 *
 **************************************************************************/

/* Enable/disable/generate interrupts */
static inline void efx_nic_interrupts(struct efx_nic *efx,
				      bool enabled, bool force)
{
	efx_oword_t int_en_reg_ker;

	EFX_POPULATE_OWORD_3(int_en_reg_ker,
			     FRF_AZ_KER_INT_LEVE_SEL, efx->fatal_irq_level,
			     FRF_AZ_KER_INT_KER, force,
			     FRF_AZ_DRV_INT_EN_KER, enabled);
	efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
}

void efx_nic_enable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
	wmb(); /* Ensure interrupt vector is clear before interrupts enabled */

	/* Enable interrupts */
	efx_nic_interrupts(efx, true, false);

	/* Force processing of all the channels to get the EVQ RPTRs up to
	 * date */
	efx_for_each_channel(channel, efx)
		efx_schedule_channel(channel);
}

void efx_nic_disable_interrupts(struct efx_nic *efx)
{
	/* Disable interrupts */
	efx_nic_interrupts(efx, false, false);
}

/* Generate a test interrupt
 * Interrupt must already have been enabled, otherwise nasty things
 * may happen.
 */
void efx_nic_generate_interrupt(struct efx_nic *efx)
{
	efx_nic_interrupts(efx, true, true);
}
/* Process a fatal interrupt
 * Disable bus mastering ASAP and schedule a reset
 */
irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t *int_ker = efx->irq_status.addr;
	efx_oword_t fatal_intr;
	int error, mem_perr;

	efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
	error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);

	netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status "
		  EFX_OWORD_FMT": %s\n", EFX_OWORD_VAL(*int_ker),
		  EFX_OWORD_VAL(fatal_intr),
		  error ? "disabling bus mastering" : "no recognised error");

	/* If this is a memory parity error dump which blocks are offending */
	mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
		    EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
	if (mem_perr) {
		efx_oword_t reg;
		efx_reado(efx, &reg, FR_AZ_MEM_STAT);
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n",
			  EFX_OWORD_VAL(reg));
	}

	/* Disable both devices */
	pci_clear_master(efx->pci_dev);
	if (efx_nic_is_dual_func(efx))
		pci_clear_master(nic_data->pci_dev2);
	efx_nic_disable_interrupts(efx);

	/* Count errors and reset or disable the NIC accordingly */
	if (efx->int_error_count == 0 ||
	    time_after(jiffies, efx->int_error_expire)) {
		efx->int_error_count = 0;
		efx->int_error_expire =
			jiffies + EFX_INT_ERROR_EXPIRE * HZ;
	}
	if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR - reset scheduled\n");
		efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
	} else {
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR - max number of errors seen."
			  "NIC will be disabled\n");
		efx_schedule_reset(efx, RESET_TYPE_DISABLE);
	}

	return IRQ_HANDLED;
}
/* Handle a legacy interrupt
 * Acknowledges the interrupt and schedule event queue processing.
 */
static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
{
	struct efx_nic *efx = dev_id;
	efx_oword_t *int_ker = efx->irq_status.addr;
	irqreturn_t result = IRQ_NONE;
	struct efx_channel *channel;
	efx_dword_t reg;
	u32 queues;
	int syserr;

	/* Could this be ours?  If interrupts are disabled then the
	 * channel state may not be valid.
	 */
	if (!efx->legacy_irq_enabled)
		return result;

	/* Read the ISR which also ACKs the interrupts */
	efx_readd(efx, &reg, FR_BZ_INT_ISR0);
	queues = EFX_EXTRACT_DWORD(reg, 0, 31);

	/* Check to see if we have a serious error condition */
	if (queues & (1U << efx->fatal_irq_level)) {
		syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
		if (unlikely(syserr))
			return efx_nic_fatal_interrupt(efx);
	}

	if (queues != 0) {
		if (EFX_WORKAROUND_15783(efx))
			efx->irq_zero_count = 0;

		/* Schedule processing of any interrupting queues */
		efx_for_each_channel(channel, efx) {
			if (queues & 1)
				efx_schedule_channel(channel);
			queues >>= 1;
		}
		result = IRQ_HANDLED;

	} else if (EFX_WORKAROUND_15783(efx)) {
		efx_qword_t *event;

		/* We can't return IRQ_HANDLED more than once on seeing ISR=0
		 * because this might be a shared interrupt. */
		if (efx->irq_zero_count++ == 0)
			result = IRQ_HANDLED;

		/* Ensure we schedule or rearm all event queues */
		efx_for_each_channel(channel, efx) {
			event = efx_event(channel, channel->eventq_read_ptr);
			if (efx_event_present(event))
				efx_schedule_channel(channel);
			else
				efx_nic_eventq_read_ack(channel);
		}
	}

	if (result == IRQ_HANDLED) {
		efx->last_irq_cpu = raw_smp_processor_id();
		netif_vdbg(efx, intr, efx->net_dev,
			   "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
			   irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
	}

	return result;
}
/* Handle an MSI interrupt
 *
 * Handle an MSI hardware interrupt.  This routine schedules event
 * queue processing.  No interrupt acknowledgement cycle is necessary.
 * Also, we never need to check that the interrupt is for us, since
 * MSI interrupts cannot be shared.
 */
static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)
{
	struct efx_channel *channel = *(struct efx_channel **)dev_id;
	struct efx_nic *efx = channel->efx;
	efx_oword_t *int_ker = efx->irq_status.addr;
	int syserr;

	efx->last_irq_cpu = raw_smp_processor_id();
	netif_vdbg(efx, intr, efx->net_dev,
		   "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
		   irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));

	/* Check to see if we have a serious error condition */
	if (channel->channel == efx->fatal_irq_level) {
		syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
		if (unlikely(syserr))
			return efx_nic_fatal_interrupt(efx);
	}

	/* Schedule processing of the channel */
	efx_schedule_channel(channel);

	return IRQ_HANDLED;
}
/* Setup RSS indirection table.
 * This maps from the hash value of the packet to RXQ
 */
void efx_nic_push_rx_indir_table(struct efx_nic *efx)
{
	size_t i = 0;
	efx_dword_t dword;

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
		return;

	BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
		     FR_BZ_RX_INDIRECTION_TBL_ROWS);

	for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
		EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
				     efx->rx_indir_table[i]);
		efx_writed_table(efx, &dword, FR_BZ_RX_INDIRECTION_TBL, i);
	}
}
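
/* Each row of the FR_BZ_RX_INDIRECTION_TBL_ROWS-entry table names an RX
 * queue; the low bits of the packet's RSS hash index into it.  As a
 * sketch of how a caller might spread load evenly over n_rx_channels
 * queues before pushing the table:
 *
 *	efx->rx_indir_table[i] = i % efx->n_rx_channels;
 */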
/* Hook interrupt handler(s)
 * Try MSI and then legacy interrupts.
 */
int efx_nic_init_interrupt(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	if (!EFX_INT_MODE_USE_MSI(efx)) {
		irq_handler_t handler;
		if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
			handler = efx_legacy_interrupt;
		else
			handler = falcon_legacy_interrupt_a1;

		rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED,
				 efx->name, efx);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to hook legacy IRQ %d\n",
				  efx->pci_dev->irq);
			goto fail1;
		}
		return 0;
	}

	/* Hook MSI or MSI-X interrupt */
	efx_for_each_channel(channel, efx) {
		rc = request_irq(channel->irq, efx_msi_interrupt,
				 IRQF_PROBE_SHARED, /* Not shared */
				 efx->channel_name[channel->channel],
				 &efx->channel[channel->channel]);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to hook IRQ %d\n", channel->irq);
			goto fail2;
		}
	}

	return 0;

 fail2:
	efx_for_each_channel(channel, efx)
		free_irq(channel->irq, &efx->channel[channel->channel]);
 fail1:
	return rc;
}

void efx_nic_fini_interrupt(struct efx_nic *efx)
{
	struct efx_channel *channel;
	efx_oword_t reg;

	/* Disable MSI/MSI-X interrupts */
	efx_for_each_channel(channel, efx) {
		if (channel->irq)
			free_irq(channel->irq,
				 &efx->channel[channel->channel]);
	}

	/* ACK legacy interrupt */
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
		efx_reado(efx, &reg, FR_BZ_INT_ISR0);
	else
		falcon_irq_ack_a1(efx);

	/* Disable legacy interrupt */
	if (efx->legacy_irq)
		free_irq(efx->legacy_irq, efx);
}
u32 efx_nic_fpga_ver(struct efx_nic *efx)
{
	efx_oword_t altera_build;
	efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
	return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
}
void efx_nic_init_common(struct efx_nic *efx)
{
	efx_oword_t temp;

	/* Set positions of descriptor caches in SRAM. */
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR,
			     efx->type->tx_dc_base / 8);
	efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR,
			     efx->type->rx_dc_base / 8);
	efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);

	/* Set TX descriptor cache size. */
	BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
	efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);

	/* Set RX descriptor cache size.  Set low watermark to size-8, as
	 * this allows most efficient prefetching.
	 */
	BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
	efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
	efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);

	/* Program INT_KER address */
	EFX_POPULATE_OWORD_2(temp,
			     FRF_AZ_NORM_INT_VEC_DIS_KER,
			     EFX_INT_MODE_USE_MSI(efx),
			     FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
	efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);

	if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
		/* Use an interrupt level unused by event queues */
		efx->fatal_irq_level = 0x1f;
	else
		/* Use a valid MSI-X vector */
		efx->fatal_irq_level = 0;

	/* Enable all the genuinely fatal interrupts.  (They are still
	 * masked by the overall interrupt mask, controlled by
	 * falcon_interrupts()).
	 *
	 * Note: All other fatal interrupts are enabled
	 */
	EFX_POPULATE_OWORD_3(temp,
			     FRF_AZ_ILL_ADR_INT_KER_EN, 1,
			     FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
			     FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
		EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
	EFX_INVERT_OWORD(temp);
	efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);

	efx_nic_push_rx_indir_table(efx);

	/* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
	 * controlled by the RX FIFO fill level.  Set arbitration to one pkt/Q.
	 */
	efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 0);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
	/* Enable SW_EV to inherit in char driver - assume harmless here */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
	/* Prefetch threshold 2 => fetch when descriptor cache half empty */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
	/* Disable hardware watchdog which can misfire */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
	/* Squash TX of packets of 16 bytes or less */
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
		EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
	efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
}
#define REGISTER_REVISION_A	1
#define REGISTER_REVISION_B	2
#define REGISTER_REVISION_C	3
#define REGISTER_REVISION_Z	3	/* latest revision */

struct efx_nic_reg {
	u32 offset:24;
	u32 min_revision:2, max_revision:2;
};

#define REGISTER(name, min_rev, max_rev) {				\
	FR_ ## min_rev ## max_rev ## _ ## name,				\
	REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev	\
}
#define REGISTER_AA(name) REGISTER(name, A, A)
#define REGISTER_AB(name) REGISTER(name, A, B)
#define REGISTER_AZ(name) REGISTER(name, A, Z)
#define REGISTER_BB(name) REGISTER(name, B, B)
#define REGISTER_BZ(name) REGISTER(name, B, Z)
#define REGISTER_CZ(name) REGISTER(name, C, Z)
static const struct efx_nic_reg efx_nic_regs[] = {
	REGISTER_AZ(ADR_REGION),
	REGISTER_AZ(INT_EN_KER),
	REGISTER_BZ(INT_EN_CHAR),
	REGISTER_AZ(INT_ADR_KER),
	REGISTER_BZ(INT_ADR_CHAR),
	/* INT_ACK_KER is WO */
	/* INT_ISR0 is RC */
	REGISTER_AZ(HW_INIT),
	REGISTER_CZ(USR_EV_CFG),
	REGISTER_AB(EE_SPI_HCMD),
	REGISTER_AB(EE_SPI_HADR),
	REGISTER_AB(EE_SPI_HDATA),
	REGISTER_AB(EE_BASE_PAGE),
	REGISTER_AB(EE_VPD_CFG0),
	/* EE_VPD_SW_CNTL and EE_VPD_SW_DATA are not used */
	/* PMBX_DBG_IADDR and PBMX_DBG_IDATA are indirect */
	/* PCIE_CORE_INDIRECT is indirect */
	REGISTER_AB(NIC_STAT),
	REGISTER_AB(GPIO_CTL),
	REGISTER_AB(GLB_CTL),
	/* FATAL_INTR_KER and FATAL_INTR_CHAR are partly RC */
	REGISTER_BZ(DP_CTRL),
	REGISTER_AZ(MEM_STAT),
	REGISTER_AZ(CS_DEBUG),
	REGISTER_AZ(ALTERA_BUILD),
	REGISTER_AZ(CSR_SPARE),
	REGISTER_AB(PCIE_SD_CTL0123),
	REGISTER_AB(PCIE_SD_CTL45),
	REGISTER_AB(PCIE_PCS_CTL_STAT),
	/* DEBUG_DATA_OUT is not used */
	REGISTER_AZ(EVQ_CTL),
	REGISTER_AZ(EVQ_CNT1),
	REGISTER_AZ(EVQ_CNT2),
	REGISTER_AZ(BUF_TBL_CFG),
	REGISTER_AZ(SRM_RX_DC_CFG),
	REGISTER_AZ(SRM_TX_DC_CFG),
	REGISTER_AZ(SRM_CFG),
	/* BUF_TBL_UPD is WO */
	REGISTER_AZ(SRM_UPD_EVQ),
	REGISTER_AZ(SRAM_PARITY),
	REGISTER_AZ(RX_CFG),
	REGISTER_BZ(RX_FILTER_CTL),
	/* RX_FLUSH_DESCQ is WO */
	REGISTER_AZ(RX_DC_CFG),
	REGISTER_AZ(RX_DC_PF_WM),
	REGISTER_BZ(RX_RSS_TKEY),
	/* RX_NODESC_DROP is RC */
	REGISTER_AA(RX_SELF_RST),
	/* RX_DEBUG, RX_PUSH_DROP are not used */
	REGISTER_CZ(RX_RSS_IPV6_REG1),
	REGISTER_CZ(RX_RSS_IPV6_REG2),
	REGISTER_CZ(RX_RSS_IPV6_REG3),
	/* TX_FLUSH_DESCQ is WO */
	REGISTER_AZ(TX_DC_CFG),
	REGISTER_AA(TX_CHKSM_CFG),
	REGISTER_AZ(TX_CFG),
	/* TX_PUSH_DROP is not used */
	REGISTER_AZ(TX_RESERVED),
	REGISTER_BZ(TX_PACE),
	/* TX_PACE_DROP_QID is RC */
	REGISTER_BB(TX_VLAN),
	REGISTER_BZ(TX_IPFIL_PORTEN),
	REGISTER_AB(MD_TXD),
	REGISTER_AB(MD_RXD),
	REGISTER_AB(MD_PHY_ADR),
	REGISTER_AB(MAC_STAT_DMA),
	REGISTER_AB(MAC_CTRL),
	REGISTER_BB(GEN_MODE),
	REGISTER_AB(MAC_MC_HASH_REG0),
	REGISTER_AB(MAC_MC_HASH_REG1),
	REGISTER_AB(GM_CFG1),
	REGISTER_AB(GM_CFG2),
	/* GM_IPG and GM_HD are not used */
	REGISTER_AB(GM_MAX_FLEN),
	/* GM_TEST is not used */
	REGISTER_AB(GM_ADR1),
	REGISTER_AB(GM_ADR2),
	REGISTER_AB(GMF_CFG0),
	REGISTER_AB(GMF_CFG1),
	REGISTER_AB(GMF_CFG2),
	REGISTER_AB(GMF_CFG3),
	REGISTER_AB(GMF_CFG4),
	REGISTER_AB(GMF_CFG5),
	REGISTER_BB(TX_SRC_MAC_CTL),
	REGISTER_AB(XM_ADR_LO),
	REGISTER_AB(XM_ADR_HI),
	REGISTER_AB(XM_GLB_CFG),
	REGISTER_AB(XM_TX_CFG),
	REGISTER_AB(XM_RX_CFG),
	REGISTER_AB(XM_MGT_INT_MASK),
	REGISTER_AB(XM_PAUSE_TIME),
	REGISTER_AB(XM_TX_PARAM),
	REGISTER_AB(XM_RX_PARAM),
	/* XM_MGT_INT_MSK (note no 'A') is RC */
	REGISTER_AB(XX_PWR_RST),
	REGISTER_AB(XX_SD_CTL),
	REGISTER_AB(XX_TXDRV_CTL),
	/* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */
	/* XX_CORE_STAT is partly RC */
};
struct efx_nic_reg_table {
	u32 offset:24;
	u32 min_revision:2, max_revision:2;
	u32 step:6, rows:21;
};

#define REGISTER_TABLE_DIMENSIONS(_, offset, min_rev, max_rev, step, rows) { \
	offset,								\
	REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev,	\
	step, rows							\
}
#define REGISTER_TABLE(name, min_rev, max_rev)				\
	REGISTER_TABLE_DIMENSIONS(					\
		name, FR_ ## min_rev ## max_rev ## _ ## name,		\
		min_rev, max_rev,					\
		FR_ ## min_rev ## max_rev ## _ ## name ## _STEP,	\
		FR_ ## min_rev ## max_rev ## _ ## name ## _ROWS)
#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, A, A)
#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, A, Z)
#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, B, B)
#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, B, Z)
#define REGISTER_TABLE_BB_CZ(name)					\
	REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, B, B,		\
				  FR_BZ_ ## name ## _STEP,		\
				  FR_BB_ ## name ## _ROWS),		\
	REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, C, Z,		\
				  FR_BZ_ ## name ## _STEP,		\
				  FR_CZ_ ## name ## _ROWS)
#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, C, Z)
static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
	/* DRIVER is not used */
	/* EVQ_RPTR, TIMER_COMMAND, USR_EV and {RX,TX}_DESC_UPD are WO */
	REGISTER_TABLE_BB(TX_IPFIL_TBL),
	REGISTER_TABLE_BB(TX_SRC_MAC_TBL),
	REGISTER_TABLE_AA(RX_DESC_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(RX_DESC_PTR_TBL),
	REGISTER_TABLE_AA(TX_DESC_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL),
	REGISTER_TABLE_AA(EVQ_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL),
	/* We can't reasonably read all of the buffer table (up to 8MB!).
	 * However this driver will only use a few entries.  Reading
	 * 1K entries allows for some expansion of queue count and
	 * size before we need to change the version. */
	REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER,
				  A, A, 8, 1024),
	REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL,
				  B, Z, 8, 1024),
	REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0),
	REGISTER_TABLE_BB_CZ(TIMER_TBL),
	REGISTER_TABLE_BB_CZ(TX_PACE_TBL),
	REGISTER_TABLE_BZ(RX_INDIRECTION_TBL),
	/* TX_FILTER_TBL0 is huge and not used by this driver */
	REGISTER_TABLE_CZ(TX_MAC_FILTER_TBL0),
	REGISTER_TABLE_CZ(MC_TREG_SMEM),
	/* MSIX_PBA_TABLE is not mapped */
	/* SRM_DBG is not mapped (and is redundant with BUF_FLL_TBL) */
	REGISTER_TABLE_BZ(RX_FILTER_TBL0),
};
size_t efx_nic_get_regs_len(struct efx_nic *efx)
{
	const struct efx_nic_reg *reg;
	const struct efx_nic_reg_table *table;
	size_t len = 0;

	for (reg = efx_nic_regs;
	     reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
	     reg++)
		if (efx->type->revision >= reg->min_revision &&
		    efx->type->revision <= reg->max_revision)
			len += sizeof(efx_oword_t);

	for (table = efx_nic_reg_tables;
	     table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
	     table++)
		if (efx->type->revision >= table->min_revision &&
		    efx->type->revision <= table->max_revision)
			len += table->rows * min_t(size_t, table->step, 16);

	return len;
}

void efx_nic_get_regs(struct efx_nic *efx, void *buf)
{
	const struct efx_nic_reg *reg;
	const struct efx_nic_reg_table *table;

	for (reg = efx_nic_regs;
	     reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
	     reg++) {
		if (efx->type->revision >= reg->min_revision &&
		    efx->type->revision <= reg->max_revision) {
			efx_reado(efx, (efx_oword_t *)buf, reg->offset);
			buf += sizeof(efx_oword_t);
		}
	}

	for (table = efx_nic_reg_tables;
	     table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
	     table++) {
		size_t size, i;

		if (!(efx->type->revision >= table->min_revision &&
		      efx->type->revision <= table->max_revision))
			continue;

		size = min_t(size_t, table->step, 16);

		for (i = 0; i < table->rows; i++) {
			switch (table->step) {
			case 4: /* 32-bit register or SRAM */
				efx_readd_table(efx, buf, table->offset, i);
				break;
			case 8: /* 64-bit SRAM */
				efx_sram_readq(efx,
					       efx->membase + table->offset,
					       buf, i);
				break;
			case 16: /* 128-bit register */
				efx_reado_table(efx, buf, table->offset, i);
				break;
			case 32: /* 128-bit register, interleaved */
				efx_reado_table(efx, buf, table->offset,
						2 * i);
				break;
			default:
				WARN_ON(1);
				return;
			}
			buf += size;
		}
	}
}