/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2011 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
#include "nic.h"
#include "regs.h"
#include "io.h"
#include "workarounds.h"
/**************************************************************************
 *
 * Configurable values
 *
 **************************************************************************
 */

/* This is set to 16 for a good reason.  In summary, if larger than
 * 16, the descriptor cache holds more than a default socket
 * buffer's worth of packets (for UDP we can only have at most one
 * socket buffer's worth outstanding).  This combined with the fact
 * that we only get 1 TX event per descriptor cache means the NIC
 * goes idle.
 */
#define TX_DC_ENTRIES 16
#define TX_DC_ENTRIES_ORDER 1

#define RX_DC_ENTRIES 64
#define RX_DC_ENTRIES_ORDER 3

/* If EFX_MAX_INT_ERRORS internal errors occur within
 * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
 * disable it.
 */
#define EFX_INT_ERROR_EXPIRE 3600
#define EFX_MAX_INT_ERRORS 5

/* Depth of RX flush request fifo */
#define EFX_RX_FLUSH_COUNT 4
/* Driver generated events */
#define _EFX_CHANNEL_MAGIC_TEST		0x000101
#define _EFX_CHANNEL_MAGIC_FILL		0x000102
#define _EFX_CHANNEL_MAGIC_RX_DRAIN	0x000103
#define _EFX_CHANNEL_MAGIC_TX_DRAIN	0x000104

#define _EFX_CHANNEL_MAGIC(_code, _data)	((_code) << 8 | (_data))
#define _EFX_CHANNEL_MAGIC_CODE(_magic)		((_magic) >> 8)

#define EFX_CHANNEL_MAGIC_TEST(_channel)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, (_channel)->channel)
#define EFX_CHANNEL_MAGIC_FILL(_rx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_FILL,			\
			   efx_rx_queue_index(_rx_queue))
#define EFX_CHANNEL_MAGIC_RX_DRAIN(_rx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_RX_DRAIN,			\
			   efx_rx_queue_index(_rx_queue))
#define EFX_CHANNEL_MAGIC_TX_DRAIN(_tx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN,			\
			   (_tx_queue)->queue)
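
/* Worked example of the encoding above (illustrative, not part of the
 * original source): a test event for channel 2 is
 * _EFX_CHANNEL_MAGIC(0x000101, 2) = (0x000101 << 8) | 2 = 0x010102,
 * and the receive side recovers the code as
 * _EFX_CHANNEL_MAGIC_CODE(0x010102) = 0x010102 >> 8 = 0x000101.
 * The low 8 bits carry the queue/channel index, so this scheme assumes
 * fewer than 256 queues per magic code.
 */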

static void efx_magic_event(struct efx_channel *channel, u32 magic);

/**************************************************************************
 *
 * Solarstorm hardware access
 *
 **************************************************************************/

static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
				     unsigned int index)
{
	efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
			value, index);
}

/* Read the current event from the event queue */
static inline efx_qword_t *efx_event(struct efx_channel *channel,
				     unsigned int index)
{
	return ((efx_qword_t *) (channel->eventq.addr)) +
		(index & channel->eventq_mask);
}

/* See if an event is present
 *
 * We check both the high and low dword of the event for all ones.  We
 * wrote all ones when we cleared the event, and no valid event can
 * have all ones in either its high or low dwords.  This approach is
 * robust against reordering.
 *
 * Note that using a single 64-bit comparison is incorrect; even
 * though the CPU read will be atomic, the DMA write may not be.
 */
static inline int efx_event_present(efx_qword_t *event)
{
	return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
		 EFX_DWORD_IS_ALL_ONES(event->dword[1]));
}

static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
				     const efx_oword_t *mask)
{
	return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
		((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
}

int efx_nic_test_registers(struct efx_nic *efx,
			   const struct efx_nic_register_test *regs,
			   size_t n_regs)
{
	unsigned address = 0, i, j;
	efx_oword_t mask, imask, original, reg, buf;

	for (i = 0; i < n_regs; ++i) {
		address = regs[i].address;
		mask = imask = regs[i].mask;
		EFX_INVERT_OWORD(imask);

		efx_reado(efx, &original, address);

		/* bit sweep on and off */
		for (j = 0; j < 128; j++) {
			if (!EFX_EXTRACT_OWORD32(mask, j, j))
				continue;

			/* Test this testable bit can be set in isolation */
			EFX_AND_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 1);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;

			/* Test this testable bit can be cleared in isolation */
			EFX_OR_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 0);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;
		}

		efx_writeo(efx, &original, address);
	}

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev,
		  "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
		  " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
		  EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
	return -EIO;
}
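
/* Illustrative caller sketch (names and values for illustration only):
 * per-NIC code passes a table of { address, mask } pairs and this
 * function sweeps only the bits set in each mask, e.g.
 *
 *	static const struct efx_nic_register_test my_register_tests[] = {
 *		{ FR_AZ_ADR_REGION, EFX_OWORD32(0x0003FFFF, 0x0003FFFF,
 *						0x0003FFFF, 0x0003FFFF) },
 *	};
 *	rc = efx_nic_test_registers(efx, my_register_tests,
 *				    ARRAY_SIZE(my_register_tests));
 *
 * Bits outside the mask are preserved by ANDing/ORing with the
 * original register contents before each write.
 */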

/**************************************************************************
 *
 * Special buffer handling
 * Special buffers are used for event queues and the TX and RX
 * descriptor rings.
 *
 *************************************************************************/

/*
 * Initialise a special buffer
 *
 * This will define a buffer (previously allocated via
 * efx_alloc_special_buffer()) in the buffer table, allowing
 * it to be used for event queues, descriptor rings etc.
 */
static void
efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	efx_qword_t buf_desc;
	unsigned int index;
	dma_addr_t dma_addr;
	int i;

	EFX_BUG_ON_PARANOID(!buffer->addr);

	/* Write buffer descriptors to NIC */
	for (i = 0; i < buffer->entries; i++) {
		index = buffer->index + i;
		dma_addr = buffer->dma_addr + (i * EFX_BUF_SIZE);
		netif_dbg(efx, probe, efx->net_dev,
			  "mapping special buffer %d at %llx\n",
			  index, (unsigned long long)dma_addr);
		EFX_POPULATE_QWORD_3(buf_desc,
				     FRF_AZ_BUF_ADR_REGION, 0,
				     FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
				     FRF_AZ_BUF_OWNER_ID_FBUF, 0);
		efx_write_buf_tbl(efx, &buf_desc, index);
	}
}
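
/* Note on the descriptor layout above: EFX_BUF_SIZE is 4KB, and
 * FRF_AZ_BUF_ADR_FBUF takes a 4KB-aligned page number, hence
 * dma_addr >> 12.  For example (illustrative), a buffer at DMA
 * address 0x12345000 is programmed as page number 0x12345.
 */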

/* Unmaps a buffer and clears the buffer table entries */
static void
efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	efx_oword_t buf_tbl_upd;
	unsigned int start = buffer->index;
	unsigned int end = (buffer->index + buffer->entries - 1);

	if (!buffer->entries)
		return;

	netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
		  buffer->index, buffer->index + buffer->entries - 1);

	EFX_POPULATE_OWORD_4(buf_tbl_upd,
			     FRF_AZ_BUF_UPD_CMD, 0,
			     FRF_AZ_BUF_CLR_CMD, 1,
			     FRF_AZ_BUF_CLR_END_ID, end,
			     FRF_AZ_BUF_CLR_START_ID, start);
	efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
}

/*
 * Allocate a new special buffer
 *
 * This allocates memory for a new buffer, clears it and allocates a
 * new buffer ID range.  It does not write into the buffer table.
 *
 * This call will allocate 4KB buffers, since 8KB buffers can't be
 * used for event queues and descriptor rings.
 */
static int efx_alloc_special_buffer(struct efx_nic *efx,
				    struct efx_special_buffer *buffer,
				    unsigned int len)
{
	len = ALIGN(len, EFX_BUF_SIZE);

	buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
					  &buffer->dma_addr, GFP_KERNEL);
	if (!buffer->addr)
		return -ENOMEM;
	buffer->len = len;
	buffer->entries = len / EFX_BUF_SIZE;
	BUG_ON(buffer->dma_addr & (EFX_BUF_SIZE - 1));

	/* Select new buffer ID */
	buffer->index = efx->next_buffer_table;
	efx->next_buffer_table += buffer->entries;
#ifdef CONFIG_SFC_SRIOV
	BUG_ON(efx_sriov_enabled(efx) &&
	       efx->vf_buftbl_base < efx->next_buffer_table);
#endif

	netif_dbg(efx, probe, efx->net_dev,
		  "allocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->dma_addr, len,
		  buffer->addr, (u64)virt_to_phys(buffer->addr));

	return 0;
}
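
/* Sizing example (follows from the code above; figures illustrative):
 * a 512-entry descriptor ring needs 512 * sizeof(efx_qword_t) = 4096
 * bytes, which ALIGN() leaves as exactly one EFX_BUF_SIZE page and so
 * one buffer table entry; a 4096-entry ring needs 32KB and eight
 * entries.
 */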

static void
efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	if (!buffer->addr)
		return;

	netif_dbg(efx, hw, efx->net_dev,
		  "deallocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->dma_addr, buffer->len,
		  buffer->addr, (u64)virt_to_phys(buffer->addr));

	dma_free_coherent(&efx->pci_dev->dev, buffer->len, buffer->addr,
			  buffer->dma_addr);
	buffer->addr = NULL;
	buffer->entries = 0;
}

/**************************************************************************
 *
 * Generic buffer handling
 * These buffers are used for interrupt status, MAC stats, etc.
 *
 **************************************************************************/

int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
			 unsigned int len)
{
	buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
					  &buffer->dma_addr,
					  GFP_ATOMIC | __GFP_ZERO);
	if (!buffer->addr)
		return -ENOMEM;
	buffer->len = len;
	return 0;
}

void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
{
	if (buffer->addr) {
		dma_free_coherent(&efx->pci_dev->dev, buffer->len,
				  buffer->addr, buffer->dma_addr);
		buffer->addr = NULL;
	}
}

/**************************************************************************
 *
 * TX path
 *
 **************************************************************************/

/* Returns a pointer to the specified transmit descriptor in the TX
 * descriptor queue belonging to the specified channel.
 */
static inline efx_qword_t *
efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
{
	return ((efx_qword_t *) (tx_queue->txd.addr)) + index;
}

/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
static inline void efx_notify_tx_desc(struct efx_tx_queue *tx_queue)
{
	unsigned write_ptr;
	efx_dword_t reg;

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(tx_queue->efx, &reg,
			FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
}

/* Write pointer and first descriptor for TX descriptor ring */
static inline void efx_push_tx_desc(struct efx_tx_queue *tx_queue,
				    const efx_qword_t *txd)
{
	unsigned write_ptr;
	efx_oword_t reg;

	BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
	BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
			     FRF_AZ_TX_DESC_WPTR, write_ptr);
	reg.qword[0] = *txd;
	efx_writeo_page(tx_queue->efx, &reg,
			FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
}

static inline bool
efx_may_push_tx_desc(struct efx_tx_queue *tx_queue, unsigned int write_count)
{
	unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);

	if (empty_read_count == 0)
		return false;

	tx_queue->empty_read_count = 0;
	return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0
		&& tx_queue->write_count - write_count == 1;
}
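
/* How the test above works (explanatory note): empty_read_count holds
 * the read count captured when the queue last went empty, with
 * EFX_EMPTY_COUNT_VALID set, and is zeroed again once the queue is
 * written.  XORing it with the old write count and masking off the
 * valid flag is therefore zero only if nothing was queued after the
 * queue emptied, and the second clause restricts the push to exactly
 * one new descriptor, which is the most a single push can carry.
 */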

/* For each entry inserted into the software descriptor ring, create a
 * descriptor in the hardware TX descriptor ring (in host memory), and
 * then the write pointer in the hardware TX descriptor ring register.
 */
void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;
	efx_qword_t *txd;
	unsigned write_ptr;
	unsigned old_write_count = tx_queue->write_count;

	BUG_ON(tx_queue->write_count == tx_queue->insert_count);

	do {
		write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[write_ptr];
		txd = efx_tx_desc(tx_queue, write_ptr);
		++tx_queue->write_count;

		/* Create TX descriptor ring entry */
		BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
		EFX_POPULATE_QWORD_4(*txd,
				     FSF_AZ_TX_KER_CONT,
				     buffer->flags & EFX_TX_BUF_CONT,
				     FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
				     FSF_AZ_TX_KER_BUF_REGION, 0,
				     FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
	} while (tx_queue->write_count != tx_queue->insert_count);

	wmb(); /* Ensure descriptors are written before they are fetched */

	if (efx_may_push_tx_desc(tx_queue, old_write_count)) {
		txd = efx_tx_desc(tx_queue,
				  old_write_count & tx_queue->ptr_mask);
		efx_push_tx_desc(tx_queue, txd);
		++tx_queue->pushes;
	} else {
		efx_notify_tx_desc(tx_queue);
	}
}

/* Allocate hardware resources for a TX queue */
int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned entries;

	entries = tx_queue->ptr_mask + 1;
	return efx_alloc_special_buffer(efx, &tx_queue->txd,
					entries * sizeof(efx_qword_t));
}

void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t reg;

	/* Pin TX descriptor ring */
	efx_init_special_buffer(efx, &tx_queue->txd);

	/* Push TX descriptor ring to card */
	EFX_POPULATE_OWORD_10(reg,
			      FRF_AZ_TX_DESCQ_EN, 1,
			      FRF_AZ_TX_ISCSI_DDIG_EN, 0,
			      FRF_AZ_TX_ISCSI_HDIG_EN, 0,
			      FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
			      FRF_AZ_TX_DESCQ_EVQ_ID,
			      tx_queue->channel->channel,
			      FRF_AZ_TX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
			      FRF_AZ_TX_DESCQ_SIZE,
			      __ffs(tx_queue->txd.entries),
			      FRF_AZ_TX_DESCQ_TYPE, 0,
			      FRF_BZ_TX_NON_IP_DROP_DIS, 1);

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
				    !csum);
	}

	efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
		/* Only 128 bits in this register */
		BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);

		efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
		if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
			__clear_bit_le(tx_queue->queue, &reg);
		else
			__set_bit_le(tx_queue->queue, &reg);
		efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
	}

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		EFX_POPULATE_OWORD_1(reg,
				     FRF_BZ_TX_PACE,
				     (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
				     FFE_BZ_TX_PACE_OFF :
				     FFE_BZ_TX_PACE_RESERVED);
		efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
				 tx_queue->queue);
	}
}

static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_flush_descq;

	WARN_ON(atomic_read(&tx_queue->flush_outstanding));
	atomic_set(&tx_queue->flush_outstanding, 1);

	EFX_POPULATE_OWORD_2(tx_flush_descq,
			     FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
	efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
}

void efx_nic_fini_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_desc_ptr;

	/* Remove TX descriptor ring from card */
	EFX_ZERO_OWORD(tx_desc_ptr);
	efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	/* Unpin TX descriptor ring */
	efx_fini_special_buffer(efx, &tx_queue->txd);
}

/* Free buffers backing TX queue */
void efx_nic_remove_tx(struct efx_tx_queue *tx_queue)
{
	efx_free_special_buffer(tx_queue->efx, &tx_queue->txd);
}

/**************************************************************************
 *
 * RX path
 *
 **************************************************************************/

/* Returns a pointer to the specified descriptor in the RX descriptor queue */
static inline efx_qword_t *
efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
{
	return ((efx_qword_t *) (rx_queue->rxd.addr)) + index;
}

/* This creates an entry in the RX descriptor queue */
static inline void
efx_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
{
	struct efx_rx_buffer *rx_buf;
	efx_qword_t *rxd;

	rxd = efx_rx_desc(rx_queue, index);
	rx_buf = efx_rx_buffer(rx_queue, index);
	EFX_POPULATE_QWORD_3(*rxd,
			     FSF_AZ_RX_KER_BUF_SIZE,
			     rx_buf->len -
			     rx_queue->efx->type->rx_buffer_padding,
			     FSF_AZ_RX_KER_BUF_REGION, 0,
			     FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
}

/* This writes to the RX_DESC_WPTR register for the specified receive
 * descriptor queue.
 */
void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_dword_t reg;
	unsigned write_ptr;

	while (rx_queue->notified_count != rx_queue->added_count) {
		efx_build_rx_desc(
			rx_queue,
			rx_queue->notified_count & rx_queue->ptr_mask);
		++rx_queue->notified_count;
	}

	wmb();
	write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
			efx_rx_queue_index(rx_queue));
}

int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned entries;

	entries = rx_queue->ptr_mask + 1;
	return efx_alloc_special_buffer(efx, &rx_queue->rxd,
					entries * sizeof(efx_qword_t));
}

void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;
	bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
	bool iscsi_digest_en = is_b0;
	bool jumbo_en;

	/* For kernel-mode queues in Falcon A1, the JUMBO flag enables
	 * DMA to continue after a PCIe page boundary (and scattering
	 * is not possible).  In Falcon B0 and Siena, it enables
	 * scatter.
	 */
	jumbo_en = !is_b0 || efx->rx_scatter;

	netif_dbg(efx, hw, efx->net_dev,
		  "RX queue %d ring in special buffers %d-%d\n",
		  efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
		  rx_queue->rxd.index + rx_queue->rxd.entries - 1);

	rx_queue->scatter_n = 0;

	/* Pin RX descriptor ring */
	efx_init_special_buffer(efx, &rx_queue->rxd);

	/* Push RX descriptor ring to card */
	EFX_POPULATE_OWORD_10(rx_desc_ptr,
			      FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
			      FRF_AZ_RX_DESCQ_EVQ_ID,
			      efx_rx_queue_channel(rx_queue)->channel,
			      FRF_AZ_RX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_RX_DESCQ_LABEL,
			      efx_rx_queue_index(rx_queue),
			      FRF_AZ_RX_DESCQ_SIZE,
			      __ffs(rx_queue->rxd.entries),
			      FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
			      FRF_AZ_RX_DESCQ_JUMBO, jumbo_en,
			      FRF_AZ_RX_DESCQ_EN, 1);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 efx_rx_queue_index(rx_queue));
}

static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_oword_t rx_flush_descq;

	EFX_POPULATE_OWORD_2(rx_flush_descq,
			     FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_RX_FLUSH_DESCQ,
			     efx_rx_queue_index(rx_queue));
	efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
}

void efx_nic_fini_rx(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;

	/* Remove RX descriptor ring from card */
	EFX_ZERO_OWORD(rx_desc_ptr);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 efx_rx_queue_index(rx_queue));

	/* Unpin RX descriptor ring */
	efx_fini_special_buffer(efx, &rx_queue->rxd);
}

/* Free buffers backing RX queue */
void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
{
	efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
}

/**************************************************************************
 *
 * Flush handling
 *
 **************************************************************************/

/* efx_nic_flush_queues() must be woken up when all flushes are completed,
 * or more RX flushes can be kicked off.
 */
static bool efx_flush_wake(struct efx_nic *efx)
{
	/* Ensure that all updates are visible to efx_nic_flush_queues() */
	smp_mb();

	return (atomic_read(&efx->drain_pending) == 0 ||
		(atomic_read(&efx->rxq_flush_outstanding) < EFX_RX_FLUSH_COUNT
		 && atomic_read(&efx->rxq_flush_pending) > 0));
}

static bool efx_check_tx_flush_complete(struct efx_nic *efx)
{
	bool i = true;
	efx_oword_t txd_ptr_tbl;
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;

	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			efx_reado_table(efx, &txd_ptr_tbl,
					FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue);
			if (EFX_OWORD_FIELD(txd_ptr_tbl,
					    FRF_AZ_TX_DESCQ_FLUSH) ||
			    EFX_OWORD_FIELD(txd_ptr_tbl,
					    FRF_AZ_TX_DESCQ_EN)) {
				netif_dbg(efx, hw, efx->net_dev,
					  "flush did not complete on TXQ %d\n",
					  tx_queue->queue);
				i = false;
			} else if (atomic_cmpxchg(&tx_queue->flush_outstanding,
						  1, 0)) {
				/* The flush is complete, but we didn't
				 * receive a flush completion event
				 */
				netif_dbg(efx, hw, efx->net_dev,
					  "flush complete on TXQ %d, so drain "
					  "the queue\n", tx_queue->queue);
				/* Don't need to increment drain_pending as it
				 * has already been incremented for the queues
				 * which did not drain
				 */
				efx_magic_event(channel,
						EFX_CHANNEL_MAGIC_TX_DRAIN(
							tx_queue));
			}
		}
	}

	return i;
}

/* Flush all the transmit queues, and continue flushing receive queues until
 * they're all flushed. Wait for the DRAIN events to be received so that there
 * are no more RX and TX events left on any channel. */
int efx_nic_flush_queues(struct efx_nic *efx)
{
	unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	int rc = 0;

	efx->type->prepare_flush(efx);

	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			atomic_inc(&efx->drain_pending);
			efx_flush_tx_queue(tx_queue);
		}
		efx_for_each_channel_rx_queue(rx_queue, channel) {
			atomic_inc(&efx->drain_pending);
			rx_queue->flush_pending = true;
			atomic_inc(&efx->rxq_flush_pending);
		}
	}

	while (timeout && atomic_read(&efx->drain_pending) > 0) {
		/* If SRIOV is enabled, then offload receive queue flushing to
		 * the firmware (though we will still have to poll for
		 * completion). If that fails, fall back to the old scheme.
		 */
		if (efx_sriov_enabled(efx)) {
			rc = efx_mcdi_flush_rxqs(efx);
			if (!rc)
				goto wait;
		}

		/* The hardware supports four concurrent rx flushes, each of
		 * which may need to be retried if there is an outstanding
		 * descriptor fetch
		 */
		efx_for_each_channel(channel, efx) {
			efx_for_each_channel_rx_queue(rx_queue, channel) {
				if (atomic_read(&efx->rxq_flush_outstanding) >=
				    EFX_RX_FLUSH_COUNT)
					break;

				if (rx_queue->flush_pending) {
					rx_queue->flush_pending = false;
					atomic_dec(&efx->rxq_flush_pending);
					atomic_inc(&efx->rxq_flush_outstanding);
					efx_flush_rx_queue(rx_queue);
				}
			}
		}

wait:
		timeout = wait_event_timeout(efx->flush_wq, efx_flush_wake(efx),
					     timeout);
	}

	if (atomic_read(&efx->drain_pending) &&
	    !efx_check_tx_flush_complete(efx)) {
		netif_err(efx, hw, efx->net_dev, "failed to flush %d queues "
			  "(rx %d+%d)\n", atomic_read(&efx->drain_pending),
			  atomic_read(&efx->rxq_flush_outstanding),
			  atomic_read(&efx->rxq_flush_pending));
		rc = -ETIMEDOUT;

		atomic_set(&efx->drain_pending, 0);
		atomic_set(&efx->rxq_flush_pending, 0);
		atomic_set(&efx->rxq_flush_outstanding, 0);
	}

	efx->type->finish_flush(efx);

	return rc;
}

/**************************************************************************
 *
 * Event queue processing
 * Event queues are processed by per-channel tasklets.
 *
 **************************************************************************/

/* Update a channel's event queue's read pointer (RPTR) register
 *
 * This writes the EVQ_RPTR_REG register for the specified channel's
 * event queue.
 */
void efx_nic_eventq_read_ack(struct efx_channel *channel)
{
	efx_dword_t reg;
	struct efx_nic *efx = channel->efx;

	EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
			     channel->eventq_read_ptr & channel->eventq_mask);

	/* For Falcon A1, EVQ_RPTR_KER is documented as having a step size
	 * of 4 bytes, but it is really 16 bytes just like later revisions.
	 */
	efx_writed(efx, &reg,
		   efx->type->evq_rptr_tbl_base +
		   FR_BZ_EVQ_RPTR_STEP * channel->channel);
}

/* Use HW to insert a SW defined event */
void efx_generate_event(struct efx_nic *efx, unsigned int evq,
			efx_qword_t *event)
{
	efx_oword_t drv_ev_reg;

	BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
		     FRF_AZ_DRV_EV_DATA_WIDTH != 64);
	drv_ev_reg.u32[0] = event->u32[0];
	drv_ev_reg.u32[1] = event->u32[1];
	drv_ev_reg.u32[2] = 0;
	drv_ev_reg.u32[3] = 0;
	EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, evq);
	efx_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV);
}

static void efx_magic_event(struct efx_channel *channel, u32 magic)
{
	efx_qword_t event;

	EFX_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE,
			     FSE_AZ_EV_CODE_DRV_GEN_EV,
			     FSF_AZ_DRV_GEN_EV_MAGIC, magic);
	efx_generate_event(channel->efx, channel->channel, &event);
}
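
/* Driver-generated ("magic") events make a full round trip through
 * the hardware: efx_generate_event() writes the event to FR_AZ_DRV_EV
 * and the NIC delivers it on the target channel's event queue, where
 * efx_handle_generated_event() decodes the magic value.  Self-tests
 * and queue draining therefore observe the same ordering as real
 * hardware events.
 */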

/* Handle a transmit completion event
 *
 * The NIC batches TX completion events; the message we receive is of
 * the form "complete all TX events up to this index".
 */
static int
efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
{
	unsigned int tx_ev_desc_ptr;
	unsigned int tx_ev_q_label;
	struct efx_tx_queue *tx_queue;
	struct efx_nic *efx = channel->efx;
	int tx_packets = 0;

	if (unlikely(ACCESS_ONCE(efx->reset_pending)))
		return 0;

	if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
		/* Transmit completion */
		tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = efx_channel_get_tx_queue(
			channel, tx_ev_q_label % EFX_TXQ_TYPES);
		tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
			      tx_queue->ptr_mask);
		efx_xmit_done(tx_queue, tx_ev_desc_ptr);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
		/* Rewrite the FIFO write pointer */
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = efx_channel_get_tx_queue(
			channel, tx_ev_q_label % EFX_TXQ_TYPES);

		netif_tx_lock(efx->net_dev);
		efx_notify_tx_desc(tx_queue);
		netif_tx_unlock(efx->net_dev);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) &&
		   EFX_WORKAROUND_10727(efx)) {
		efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
	} else {
		netif_err(efx, tx_err, efx->net_dev,
			  "channel %d unexpected TX event "
			  EFX_QWORD_FMT"\n", channel->channel,
			  EFX_QWORD_VAL(*event));
	}

	return tx_packets;
}

/* Detect errors included in the rx_evt_pkt_ok bit. */
static u16 efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
				const efx_qword_t *event)
{
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_nic *efx = rx_queue->efx;
	bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
	bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
	bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
	bool rx_ev_other_err, rx_ev_pause_frm;
	bool rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned rx_ev_pkt_type;

	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
	rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
	rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
						 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
	rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
						  FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
	rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
						   FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
	rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
	rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
	rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ?
			  0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
	rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);

	/* Every error apart from tobe_disc and pause_frm */
	rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
			   rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
			   rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);

	/* Count errors that are not in MAC stats.  Ignore expected
	 * checksum errors during self-test. */
	if (rx_ev_frm_trunc)
		++channel->n_rx_frm_trunc;
	else if (rx_ev_tobe_disc)
		++channel->n_rx_tobe_disc;
	else if (!efx->loopback_selftest) {
		if (rx_ev_ip_hdr_chksum_err)
			++channel->n_rx_ip_hdr_chksum_err;
		else if (rx_ev_tcp_udp_chksum_err)
			++channel->n_rx_tcp_udp_chksum_err;
	}

	/* TOBE_DISC is expected on unicast mismatches; don't print out an
	 * error message.  FRM_TRUNC indicates RXDP dropped the packet due
	 * to a FIFO overflow.
	 */
#ifdef DEBUG
	if (rx_ev_other_err && net_ratelimit()) {
		netif_dbg(efx, rx_err, efx->net_dev,
			  " RX queue %d unexpected RX event "
			  EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
			  efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event),
			  rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
			  rx_ev_ip_hdr_chksum_err ?
			  " [IP_HDR_CHKSUM_ERR]" : "",
			  rx_ev_tcp_udp_chksum_err ?
			  " [TCP_UDP_CHKSUM_ERR]" : "",
			  rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
			  rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
			  rx_ev_drib_nib ? " [DRIB_NIB]" : "",
			  rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
			  rx_ev_pause_frm ? " [PAUSE]" : "");
	}
#endif

	/* The frame must be discarded if any of these are true. */
	return (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
		rx_ev_tobe_disc | rx_ev_pause_frm) ?
		EFX_RX_PKT_DISCARD : 0;
}

/* Handle receive events that are not in-order. Return true if this
 * can be handled as a partial packet discard, false if it's more
 * serious.
 */
static bool
efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
{
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_nic *efx = rx_queue->efx;
	unsigned expected, dropped;

	if (rx_queue->scatter_n &&
	    index == ((rx_queue->removed_count + rx_queue->scatter_n - 1) &
		      rx_queue->ptr_mask)) {
		++channel->n_rx_nodesc_trunc;
		return true;
	}

	expected = rx_queue->removed_count & rx_queue->ptr_mask;
	dropped = (index - expected) & rx_queue->ptr_mask;
	netif_info(efx, rx_err, efx->net_dev,
		   "dropped %d events (index=%d expected=%d)\n",
		   dropped, index, expected);

	efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
			   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
	return false;
}

/* Handle a packet received event
 *
 * The NIC gives a "discard" flag if it's a unicast packet with the
 * wrong destination address
 * Also "is multicast" and "matches multicast filter" flags can be used to
 * discard non-matching multicast packets.
 */
static void
efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
{
	unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
	unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned expected_ptr;
	bool rx_ev_pkt_ok, rx_ev_sop, rx_ev_cont;
	u16 flags;
	struct efx_rx_queue *rx_queue;
	struct efx_nic *efx = channel->efx;

	if (unlikely(ACCESS_ONCE(efx->reset_pending)))
		return;

	rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT);
	rx_ev_sop = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP);
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
		channel->channel);

	rx_queue = efx_channel_get_rx_queue(channel);

	rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
	expected_ptr = ((rx_queue->removed_count + rx_queue->scatter_n) &
			rx_queue->ptr_mask);

	/* Check for partial drops and other errors */
	if (unlikely(rx_ev_desc_ptr != expected_ptr) ||
	    unlikely(rx_ev_sop != (rx_queue->scatter_n == 0))) {
		if (rx_ev_desc_ptr != expected_ptr &&
		    !efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr))
			return;

		/* Discard all pending fragments */
		if (rx_queue->scatter_n) {
			efx_rx_packet(
				rx_queue,
				rx_queue->removed_count & rx_queue->ptr_mask,
				rx_queue->scatter_n, 0, EFX_RX_PKT_DISCARD);
			rx_queue->removed_count += rx_queue->scatter_n;
			rx_queue->scatter_n = 0;
		}

		/* Return if there is no new fragment */
		if (rx_ev_desc_ptr != expected_ptr)
			return;

		/* Discard new fragment if not SOP */
		if (!rx_ev_sop) {
			efx_rx_packet(
				rx_queue,
				rx_queue->removed_count & rx_queue->ptr_mask,
				1, 0, EFX_RX_PKT_DISCARD);
			++rx_queue->removed_count;
			return;
		}
	}

	++rx_queue->scatter_n;

	if (rx_ev_cont)
		return;

	rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
	rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);

	if (likely(rx_ev_pkt_ok)) {
		/* If packet is marked as OK and packet type is TCP/IP or
		 * UDP/IP, then we can rely on the hardware checksum.
		 */
		flags = (rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP ||
			 rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP) ?
			EFX_RX_PKT_CSUMMED : 0;
	} else {
		flags = efx_handle_rx_not_ok(rx_queue, event);
	}

	/* Detect multicast packets that didn't match the filter */
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	if (rx_ev_mcast_pkt) {
		unsigned int rx_ev_mcast_hash_match =
			EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);

		if (unlikely(!rx_ev_mcast_hash_match)) {
			++channel->n_rx_mcast_mismatch;
			flags |= EFX_RX_PKT_DISCARD;
		}
	}

	channel->irq_mod_score += 2;

	/* Handle received packet */
	efx_rx_packet(rx_queue,
		      rx_queue->removed_count & rx_queue->ptr_mask,
		      rx_queue->scatter_n, rx_ev_byte_cnt, flags);
	rx_queue->removed_count += rx_queue->scatter_n;
	rx_queue->scatter_n = 0;
}

/* If this flush done event corresponds to a &struct efx_tx_queue, then
 * send an %EFX_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue
 * of all transmit completions.
 */
static void
efx_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
	struct efx_tx_queue *tx_queue;
	int qid;

	qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
	if (qid < EFX_TXQ_TYPES * efx->n_tx_channels) {
		tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES,
					    qid % EFX_TXQ_TYPES);
		if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) {
			efx_magic_event(tx_queue->channel,
					EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
		}
	}
}

/* If this flush done event corresponds to a &struct efx_rx_queue: If the flush
 * was successful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add
 * the RX queue back to the mask of RX queues in need of flushing.
 */
static void
efx_handle_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	int qid;
	bool failed;

	qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
	failed = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
	if (qid >= efx->n_channels)
		return;
	channel = efx_get_channel(efx, qid);
	if (!efx_channel_has_rx_queue(channel))
		return;
	rx_queue = efx_channel_get_rx_queue(channel);

	if (failed) {
		netif_info(efx, hw, efx->net_dev,
			   "RXQ %d flush retry\n", qid);
		rx_queue->flush_pending = true;
		atomic_inc(&efx->rxq_flush_pending);
	} else {
		efx_magic_event(efx_rx_queue_channel(rx_queue),
				EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue));
	}
	atomic_dec(&efx->rxq_flush_outstanding);
	if (efx_flush_wake(efx))
		wake_up(&efx->flush_wq);
}

static void
efx_handle_drain_event(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	WARN_ON(atomic_read(&efx->drain_pending) == 0);
	atomic_dec(&efx->drain_pending);
	if (efx_flush_wake(efx))
		wake_up(&efx->flush_wq);
}

static void
efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	struct efx_rx_queue *rx_queue =
		efx_channel_has_rx_queue(channel) ?
		efx_channel_get_rx_queue(channel) : NULL;
	unsigned magic, code;

	magic = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
	code = _EFX_CHANNEL_MAGIC_CODE(magic);

	if (magic == EFX_CHANNEL_MAGIC_TEST(channel)) {
		channel->event_test_cpu = raw_smp_processor_id();
	} else if (rx_queue && magic == EFX_CHANNEL_MAGIC_FILL(rx_queue)) {
		/* The queue must be empty, so we won't receive any rx
		 * events, so efx_process_channel() won't refill the
		 * queue. Refill it here */
		efx_fast_push_rx_descriptors(rx_queue);
	} else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) {
		rx_queue->enabled = false;
		efx_handle_drain_event(channel);
	} else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) {
		efx_handle_drain_event(channel);
	} else {
		netif_dbg(efx, hw, efx->net_dev, "channel %d received "
			  "generated event "EFX_QWORD_FMT"\n",
			  channel->channel, EFX_QWORD_VAL(*event));
	}
}

static void
efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	unsigned int ev_sub_code;
	unsigned int ev_sub_data;

	ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
	ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);

	switch (ev_sub_code) {
	case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		efx_handle_tx_flush_done(efx, event);
		efx_sriov_tx_flush_done(efx, event);
		break;
	case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		efx_handle_rx_flush_done(efx, event);
		efx_sriov_rx_flush_done(efx, event);
		break;
	case FSE_AZ_EVQ_INIT_DONE_EV:
		netif_dbg(efx, hw, efx->net_dev,
			  "channel %d EVQ %d initialised\n",
			  channel->channel, ev_sub_data);
		break;
	case FSE_AZ_SRM_UPD_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d SRAM update done\n", channel->channel);
		break;
	case FSE_AZ_WAKE_UP_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d RXQ %d wakeup event\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AZ_TIMER_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d RX queue %d timer expired\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AA_RX_RECOVER_EV:
		netif_err(efx, rx_err, efx->net_dev,
			  "channel %d seen DRIVER RX_RESET event. "
			  "Resetting.\n", channel->channel);
		atomic_inc(&efx->rx_reset);
		efx_schedule_reset(efx,
				   EFX_WORKAROUND_6555(efx) ?
				   RESET_TYPE_RX_RECOVERY :
				   RESET_TYPE_DISABLE);
		break;
	case FSE_BZ_RX_DSC_ERROR_EV:
		if (ev_sub_data < EFX_VI_BASE) {
			netif_err(efx, rx_err, efx->net_dev,
				  "RX DMA Q %d reports descriptor fetch error."
				  " RX Q %d is disabled.\n", ev_sub_data,
				  ev_sub_data);
			efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
		} else
			efx_sriov_desc_fetch_err(efx, ev_sub_data);
		break;
	case FSE_BZ_TX_DSC_ERROR_EV:
		if (ev_sub_data < EFX_VI_BASE) {
			netif_err(efx, tx_err, efx->net_dev,
				  "TX DMA Q %d reports descriptor fetch error."
				  " TX Q %d is disabled.\n", ev_sub_data,
				  ev_sub_data);
			efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
		} else
			efx_sriov_desc_fetch_err(efx, ev_sub_data);
		break;
	default:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d unknown driver event code %d "
			   "data %04x\n", channel->channel, ev_sub_code,
			   ev_sub_data);
		break;
	}
}

int efx_nic_process_eventq(struct efx_channel *channel, int budget)
{
	struct efx_nic *efx = channel->efx;
	unsigned int read_ptr;
	efx_qword_t event, *p_event;
	int ev_code;
	int tx_packets = 0;
	int spent = 0;

	read_ptr = channel->eventq_read_ptr;

	for (;;) {
		p_event = efx_event(channel, read_ptr);
		event = *p_event;

		if (!efx_event_present(&event))
			/* End of events */
			break;

		netif_vdbg(channel->efx, intr, channel->efx->net_dev,
			   "channel %d event is "EFX_QWORD_FMT"\n",
			   channel->channel, EFX_QWORD_VAL(event));

		/* Clear this event by marking it all ones */
		EFX_SET_QWORD(*p_event);

		++read_ptr;

		ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);

		switch (ev_code) {
		case FSE_AZ_EV_CODE_RX_EV:
			efx_handle_rx_event(channel, &event);
			if (++spent == budget)
				goto out;
			break;
		case FSE_AZ_EV_CODE_TX_EV:
			tx_packets += efx_handle_tx_event(channel, &event);
			if (tx_packets > efx->txq_entries) {
				spent = budget;
				goto out;
			}
			break;
		case FSE_AZ_EV_CODE_DRV_GEN_EV:
			efx_handle_generated_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_DRIVER_EV:
			efx_handle_driver_event(channel, &event);
			break;
		case FSE_CZ_EV_CODE_USER_EV:
			efx_sriov_event(channel, &event);
			break;
		case FSE_CZ_EV_CODE_MCDI_EV:
			efx_mcdi_process_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_GLOBAL_EV:
			if (efx->type->handle_global_event &&
			    efx->type->handle_global_event(channel, &event))
				break;
			/* else fall through */
		default:
			netif_err(channel->efx, hw, channel->efx->net_dev,
				  "channel %d unknown event type %d (data "
				  EFX_QWORD_FMT ")\n", channel->channel,
				  ev_code, EFX_QWORD_VAL(event));
		}
	}

out:
	channel->eventq_read_ptr = read_ptr;
	return spent;
}
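
/* The return value is NAPI-style accounting: the caller treats
 * spent == budget as "more work pending" and polls again, which is
 * why the RX case above stops exactly at the budget and the TX
 * overrun case forces spent = budget before bailing out.
 */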

/* Check whether an event is present in the eventq at the current
 * read pointer.  Only useful for self-test.
 */
bool efx_nic_event_present(struct efx_channel *channel)
{
	return efx_event_present(efx_event(channel, channel->eventq_read_ptr));
}

/* Allocate buffer table entries for event queue */
int efx_nic_probe_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	unsigned entries;

	entries = channel->eventq_mask + 1;
	return efx_alloc_special_buffer(efx, &channel->eventq,
					entries * sizeof(efx_qword_t));
}

void efx_nic_init_eventq(struct efx_channel *channel)
{
	efx_oword_t reg;
	struct efx_nic *efx = channel->efx;

	netif_dbg(efx, hw, efx->net_dev,
		  "channel %d event queue in special buffers %d-%d\n",
		  channel->channel, channel->eventq.index,
		  channel->eventq.index + channel->eventq.entries - 1);

	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
		EFX_POPULATE_OWORD_3(reg,
				     FRF_CZ_TIMER_Q_EN, 1,
				     FRF_CZ_HOST_NOTIFY_MODE, 0,
				     FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
		efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
	}

	/* Pin event queue buffer */
	efx_init_special_buffer(efx, &channel->eventq);

	/* Fill event queue with all ones (i.e. empty events) */
	memset(channel->eventq.addr, 0xff, channel->eventq.len);

	/* Push event queue to card */
	EFX_POPULATE_OWORD_3(reg,
			     FRF_AZ_EVQ_EN, 1,
			     FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
			     FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);

	efx->type->push_irq_moderation(channel);
}

void efx_nic_fini_eventq(struct efx_channel *channel)
{
	efx_oword_t reg;
	struct efx_nic *efx = channel->efx;

	/* Remove event queue from card */
	EFX_ZERO_OWORD(reg);
	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);
	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
		efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);

	/* Unpin event queue */
	efx_fini_special_buffer(efx, &channel->eventq);
}

/* Free buffers backing event queue */
void efx_nic_remove_eventq(struct efx_channel *channel)
{
	efx_free_special_buffer(channel->efx, &channel->eventq);
}

void efx_nic_event_test_start(struct efx_channel *channel)
{
	channel->event_test_cpu = -1;
	smp_wmb();
	efx_magic_event(channel, EFX_CHANNEL_MAGIC_TEST(channel));
}

void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue)
{
	efx_magic_event(efx_rx_queue_channel(rx_queue),
			EFX_CHANNEL_MAGIC_FILL(rx_queue));
}

/**************************************************************************
 *
 * Hardware interrupts
 * The hardware interrupt handler does very little work; all the event
 * queue processing is carried out by per-channel tasklets.
 *
 **************************************************************************/

/* Enable/disable/generate interrupts */
static inline void efx_nic_interrupts(struct efx_nic *efx,
				      bool enabled, bool force)
{
	efx_oword_t int_en_reg_ker;

	EFX_POPULATE_OWORD_3(int_en_reg_ker,
			     FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level,
			     FRF_AZ_KER_INT_KER, force,
			     FRF_AZ_DRV_INT_EN_KER, enabled);
	efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
}

void efx_nic_enable_interrupts(struct efx_nic *efx)
{
	EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
	wmb(); /* Ensure interrupt vector is clear before interrupts enabled */

	efx_nic_interrupts(efx, true, false);
}

void efx_nic_disable_interrupts(struct efx_nic *efx)
{
	/* Disable interrupts */
	efx_nic_interrupts(efx, false, false);
}

/* Generate a test interrupt
 * Interrupt must already have been enabled, otherwise nasty things
 * may happen.
 */
void efx_nic_irq_test_start(struct efx_nic *efx)
{
	efx->last_irq_cpu = -1;
	smp_wmb();
	efx_nic_interrupts(efx, true, true);
}

/* Process a fatal interrupt
 * Disable bus mastering ASAP and schedule a reset
 */
irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t *int_ker = efx->irq_status.addr;
	efx_oword_t fatal_intr;
	int error, mem_perr;

	efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
	error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);

	netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status "
		  EFX_OWORD_FMT": %s\n", EFX_OWORD_VAL(*int_ker),
		  EFX_OWORD_VAL(fatal_intr),
		  error ? "disabling bus mastering" : "no recognised error");

	/* If this is a memory parity error dump which blocks are offending */
	mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
		    EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
	if (mem_perr) {
		efx_oword_t reg;
		efx_reado(efx, &reg, FR_AZ_MEM_STAT);
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n",
			  EFX_OWORD_VAL(reg));
	}

	/* Disable both devices */
	pci_clear_master(efx->pci_dev);
	if (efx_nic_is_dual_func(efx))
		pci_clear_master(nic_data->pci_dev2);
	efx_nic_disable_interrupts(efx);

	/* Count errors and reset or disable the NIC accordingly */
	if (efx->int_error_count == 0 ||
	    time_after(jiffies, efx->int_error_expire)) {
		efx->int_error_count = 0;
		efx->int_error_expire =
			jiffies + EFX_INT_ERROR_EXPIRE * HZ;
	}
	if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR - reset scheduled\n");
		efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
	} else {
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR - max number of errors seen."
			  "NIC will be disabled\n");
		efx_schedule_reset(efx, RESET_TYPE_DISABLE);
	}

	return IRQ_HANDLED;
}

/* Handle a legacy interrupt
 * Acknowledges the interrupt and schedule event queue processing.
 */
static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
{
	struct efx_nic *efx = dev_id;
	efx_oword_t *int_ker = efx->irq_status.addr;
	irqreturn_t result = IRQ_NONE;
	struct efx_channel *channel;
	efx_dword_t reg;
	u32 queues;
	int syserr;

	/* Could this be ours?  If interrupts are disabled then the
	 * channel state may not be valid.
	 */
	if (!efx->legacy_irq_enabled)
		return result;

	/* Read the ISR which also ACKs the interrupts */
	efx_readd(efx, &reg, FR_BZ_INT_ISR0);
	queues = EFX_EXTRACT_DWORD(reg, 0, 31);

	/* Handle non-event-queue sources */
	if (queues & (1U << efx->irq_level)) {
		syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
		if (unlikely(syserr))
			return efx_nic_fatal_interrupt(efx);
		efx->last_irq_cpu = raw_smp_processor_id();
	}

	if (queues != 0) {
		if (EFX_WORKAROUND_15783(efx))
			efx->irq_zero_count = 0;

		/* Schedule processing of any interrupting queues */
		efx_for_each_channel(channel, efx) {
			if (queues & 1)
				efx_schedule_channel_irq(channel);
			queues >>= 1;
		}
		result = IRQ_HANDLED;

	} else if (EFX_WORKAROUND_15783(efx)) {
		efx_qword_t *event;

		/* We can't return IRQ_HANDLED more than once on seeing ISR=0
		 * because this might be a shared interrupt. */
		if (efx->irq_zero_count++ == 0)
			result = IRQ_HANDLED;

		/* Ensure we schedule or rearm all event queues */
		efx_for_each_channel(channel, efx) {
			event = efx_event(channel, channel->eventq_read_ptr);
			if (efx_event_present(event))
				efx_schedule_channel_irq(channel);
			else
				efx_nic_eventq_read_ack(channel);
		}
	}

	if (result == IRQ_HANDLED)
		netif_vdbg(efx, intr, efx->net_dev,
			   "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
			   irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));

	return result;
}

/* Handle an MSI interrupt
 *
 * Handle an MSI hardware interrupt.  This routine schedules event
 * queue processing.  No interrupt acknowledgement cycle is necessary.
 * Also, we never need to check that the interrupt is for us, since
 * MSI interrupts cannot be shared.
 */
static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)
{
	struct efx_channel *channel = *(struct efx_channel **)dev_id;
	struct efx_nic *efx = channel->efx;
	efx_oword_t *int_ker = efx->irq_status.addr;
	int syserr;

	netif_vdbg(efx, intr, efx->net_dev,
		   "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
		   irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));

	/* Handle non-event-queue sources */
	if (channel->channel == efx->irq_level) {
		syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
		if (unlikely(syserr))
			return efx_nic_fatal_interrupt(efx);
		efx->last_irq_cpu = raw_smp_processor_id();
	}

	/* Schedule processing of the channel */
	efx_schedule_channel_irq(channel);

	return IRQ_HANDLED;
}

/* Setup RSS indirection table.
 * This maps from the hash value of the packet to RXQ
 */
void efx_nic_push_rx_indir_table(struct efx_nic *efx)
{
	size_t i = 0;
	efx_dword_t dword;

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
		return;

	BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
		     FR_BZ_RX_INDIRECTION_TBL_ROWS);

	for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
		EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
				     efx->rx_indir_table[i]);
		efx_writed(efx, &dword,
			   FR_BZ_RX_INDIRECTION_TBL +
			   FR_BZ_RX_INDIRECTION_TBL_STEP * i);
	}
}
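
/* For illustration: the indirection table has
 * FR_BZ_RX_INDIRECTION_TBL_ROWS (128) single-queue rows, so with four
 * RX queues rx_indir_table typically holds the repeating pattern
 * 0,1,2,3 and the hardware indexes it with the low-order bits of the
 * packet's RSS hash.
 */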

/* Hook interrupt handler(s)
 * Try MSI and then legacy interrupts.
 */
int efx_nic_init_interrupt(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	if (!EFX_INT_MODE_USE_MSI(efx)) {
		irq_handler_t handler;
		if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
			handler = efx_legacy_interrupt;
		else
			handler = falcon_legacy_interrupt_a1;

		rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED,
				 efx->name, efx);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to hook legacy IRQ %d\n",
				  efx->pci_dev->irq);
			goto fail1;
		}
		return 0;
	}

	/* Hook MSI or MSI-X interrupt */
	efx_for_each_channel(channel, efx) {
		rc = request_irq(channel->irq, efx_msi_interrupt,
				 IRQF_PROBE_SHARED, /* Not shared */
				 efx->channel_name[channel->channel],
				 &efx->channel[channel->channel]);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to hook IRQ %d\n", channel->irq);
			goto fail2;
		}
	}

	return 0;

 fail2:
	efx_for_each_channel(channel, efx)
		free_irq(channel->irq, &efx->channel[channel->channel]);
 fail1:
	return rc;
}

void efx_nic_fini_interrupt(struct efx_nic *efx)
{
	struct efx_channel *channel;
	efx_oword_t reg;

	/* Disable MSI/MSI-X interrupts */
	efx_for_each_channel(channel, efx) {
		if (channel->irq)
			free_irq(channel->irq, &efx->channel[channel->channel]);
	}

	/* ACK legacy interrupt */
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
		efx_reado(efx, &reg, FR_BZ_INT_ISR0);
	else
		falcon_irq_ack_a1(efx);

	/* Disable legacy interrupt */
	if (efx->legacy_irq)
		free_irq(efx->legacy_irq, efx);
}

/* Looks at available SRAM resources and works out how many queues we
 * can support, and where things like descriptor caches should live.
 *
 * SRAM is split up as follows:
 * 0                          buftbl entries for channels
 * efx->vf_buftbl_base        buftbl entries for SR-IOV
 * efx->rx_dc_base            RX descriptor caches
 * efx->tx_dc_base            TX descriptor caches
 */
void efx_nic_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
{
	unsigned vi_count, buftbl_min;

	/* Account for the buffer table entries backing the datapath channels
	 * and the descriptor caches for those channels.
	 */
	buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE +
		       efx->n_tx_channels * EFX_TXQ_TYPES * EFX_MAX_DMAQ_SIZE +
		       efx->n_channels * EFX_MAX_EVQ_SIZE)
		      * sizeof(efx_qword_t) / EFX_BUF_SIZE);
	vi_count = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);

#ifdef CONFIG_SFC_SRIOV
	if (efx_sriov_wanted(efx)) {
		unsigned vi_dc_entries, buftbl_free, entries_per_vf, vf_limit;

		efx->vf_buftbl_base = buftbl_min;

		vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES;
		vi_count = max(vi_count, EFX_VI_BASE);
		buftbl_free = (sram_lim_qw - buftbl_min -
			       vi_count * vi_dc_entries);

		entries_per_vf = ((vi_dc_entries + EFX_VF_BUFTBL_PER_VI) *
				  efx_vf_size(efx));
		vf_limit = min(buftbl_free / entries_per_vf,
			       (1024U - EFX_VI_BASE) >> efx->vi_scale);

		if (efx->vf_count > vf_limit) {
			netif_err(efx, probe, efx->net_dev,
				  "Reducing VF count from %d to %d\n",
				  efx->vf_count, vf_limit);
			efx->vf_count = vf_limit;
		}
		vi_count += efx->vf_count * efx_vf_size(efx);
	}
#endif

	efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
	efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES;
}
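
/* Worked example (illustrative numbers): with vi_count = 32 and
 * sram_lim_qw = 65536, the TX descriptor caches occupy the top
 * 32 * TX_DC_ENTRIES = 512 qwords (tx_dc_base = 65024), the RX caches
 * the 32 * RX_DC_ENTRIES = 2048 qwords below that (rx_dc_base =
 * 62976), and everything beneath rx_dc_base remains available for
 * buffer table entries.
 */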

u32 efx_nic_fpga_ver(struct efx_nic *efx)
{
	efx_oword_t altera_build;
	efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
	return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
}

void efx_nic_init_common(struct efx_nic *efx)
{
	efx_oword_t temp;

	/* Set positions of descriptor caches in SRAM. */
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base);
	efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base);
	efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);

	/* Set TX descriptor cache size. */
	BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
	efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);

	/* Set RX descriptor cache size.  Set low watermark to size-8, as
	 * this allows most efficient prefetching.
	 */
	BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
	efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
	efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);

	/* Program INT_KER address */
	EFX_POPULATE_OWORD_2(temp,
			     FRF_AZ_NORM_INT_VEC_DIS_KER,
			     EFX_INT_MODE_USE_MSI(efx),
			     FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
	efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);

	if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
		/* Use an interrupt level unused by event queues */
		efx->irq_level = 0x1f;
	else
		/* Use a valid MSI-X vector */
		efx->irq_level = 0;

	/* Enable all the genuinely fatal interrupts.  (They are still
	 * masked by the overall interrupt mask, controlled by
	 * falcon_interrupts()).
	 *
	 * Note: All other fatal interrupts are enabled
	 */
	EFX_POPULATE_OWORD_3(temp,
			     FRF_AZ_ILL_ADR_INT_KER_EN, 1,
			     FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
			     FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
		EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
	EFX_INVERT_OWORD(temp);
	efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);

	efx_nic_push_rx_indir_table(efx);

	/* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
	 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
	 */
	efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
	/* Enable SW_EV to inherit in char driver - assume harmless here */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
	/* Prefetch threshold 2 => fetch when descriptor cache half empty */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
	/* Disable hardware watchdog which can misfire */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
	/* Squash TX of packets of 16 bytes or less */
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
		EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
	efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		EFX_POPULATE_OWORD_4(temp,
				     /* Default values */
				     FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
				     FRF_BZ_TX_PACE_SB_AF, 0xb,
				     FRF_BZ_TX_PACE_FB_BASE, 0,
				     /* Allow large pace values in the
				      * fast bin. */
				     FRF_BZ_TX_PACE_BIN_TH,
				     FFE_BZ_TX_PACE_RESERVED);
		efx_writeo(efx, &temp, FR_BZ_TX_PACE);
	}
}
#define REGISTER_REVISION_A	1
#define REGISTER_REVISION_B	2
#define REGISTER_REVISION_C	3
#define REGISTER_REVISION_Z	3	/* latest revision */

struct efx_nic_reg {
	u32 offset:24;
	u32 min_revision:2, max_revision:2;
};
#define REGISTER(name, min_rev, max_rev) {				\
	FR_ ## min_rev ## max_rev ## _ ## name,				\
	REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev	\
}
#define REGISTER_AA(name) REGISTER(name, A, A)
#define REGISTER_AB(name) REGISTER(name, A, B)
#define REGISTER_AZ(name) REGISTER(name, A, Z)
#define REGISTER_BB(name) REGISTER(name, B, B)
#define REGISTER_BZ(name) REGISTER(name, B, Z)
#define REGISTER_CZ(name) REGISTER(name, C, Z)
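/* For example, REGISTER_AB(NIC_STAT) expands to
 * { FR_AB_NIC_STAT, REGISTER_REVISION_A, REGISTER_REVISION_B },
 * i.e. an entry that is dumped only on revisions A and B.
 */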
static const struct efx_nic_reg efx_nic_regs[] = {
	REGISTER_AZ(ADR_REGION),
	REGISTER_AZ(INT_EN_KER),
	REGISTER_BZ(INT_EN_CHAR),
	REGISTER_AZ(INT_ADR_KER),
	REGISTER_BZ(INT_ADR_CHAR),
	/* INT_ACK_KER is WO */
	/* INT_ISR0 is RC */
	REGISTER_AZ(HW_INIT),
	REGISTER_CZ(USR_EV_CFG),
	REGISTER_AB(EE_SPI_HCMD),
	REGISTER_AB(EE_SPI_HADR),
	REGISTER_AB(EE_SPI_HDATA),
	REGISTER_AB(EE_BASE_PAGE),
	REGISTER_AB(EE_VPD_CFG0),
	/* EE_VPD_SW_CNTL and EE_VPD_SW_DATA are not used */
	/* PMBX_DBG_IADDR and PBMX_DBG_IDATA are indirect */
	/* PCIE_CORE_INDIRECT is indirect */
	REGISTER_AB(NIC_STAT),
	REGISTER_AB(GPIO_CTL),
	REGISTER_AB(GLB_CTL),
	/* FATAL_INTR_KER and FATAL_INTR_CHAR are partly RC */
	REGISTER_BZ(DP_CTRL),
	REGISTER_AZ(MEM_STAT),
	REGISTER_AZ(CS_DEBUG),
	REGISTER_AZ(ALTERA_BUILD),
	REGISTER_AZ(CSR_SPARE),
	REGISTER_AB(PCIE_SD_CTL0123),
	REGISTER_AB(PCIE_SD_CTL45),
	REGISTER_AB(PCIE_PCS_CTL_STAT),
	/* DEBUG_DATA_OUT is not used */
	REGISTER_AZ(EVQ_CTL),
	REGISTER_AZ(EVQ_CNT1),
	REGISTER_AZ(EVQ_CNT2),
	REGISTER_AZ(BUF_TBL_CFG),
	REGISTER_AZ(SRM_RX_DC_CFG),
	REGISTER_AZ(SRM_TX_DC_CFG),
	REGISTER_AZ(SRM_CFG),
	/* BUF_TBL_UPD is WO */
	REGISTER_AZ(SRM_UPD_EVQ),
	REGISTER_AZ(SRAM_PARITY),
	REGISTER_AZ(RX_CFG),
	REGISTER_BZ(RX_FILTER_CTL),
	/* RX_FLUSH_DESCQ is WO */
	REGISTER_AZ(RX_DC_CFG),
	REGISTER_AZ(RX_DC_PF_WM),
	REGISTER_BZ(RX_RSS_TKEY),
	/* RX_NODESC_DROP is RC */
	REGISTER_AA(RX_SELF_RST),
	/* RX_DEBUG, RX_PUSH_DROP are not used */
	REGISTER_CZ(RX_RSS_IPV6_REG1),
	REGISTER_CZ(RX_RSS_IPV6_REG2),
	REGISTER_CZ(RX_RSS_IPV6_REG3),
	/* TX_FLUSH_DESCQ is WO */
	REGISTER_AZ(TX_DC_CFG),
	REGISTER_AA(TX_CHKSM_CFG),
	REGISTER_AZ(TX_CFG),
	/* TX_PUSH_DROP is not used */
	REGISTER_AZ(TX_RESERVED),
	REGISTER_BZ(TX_PACE),
	/* TX_PACE_DROP_QID is RC */
	REGISTER_BB(TX_VLAN),
	REGISTER_BZ(TX_IPFIL_PORTEN),
	REGISTER_AB(MD_TXD),
	REGISTER_AB(MD_RXD),
	REGISTER_AB(MD_PHY_ADR),
	REGISTER_AB(MAC_STAT_DMA),
	REGISTER_AB(MAC_CTRL),
	REGISTER_BB(GEN_MODE),
	REGISTER_AB(MAC_MC_HASH_REG0),
	REGISTER_AB(MAC_MC_HASH_REG1),
	REGISTER_AB(GM_CFG1),
	REGISTER_AB(GM_CFG2),
	/* GM_IPG and GM_HD are not used */
	REGISTER_AB(GM_MAX_FLEN),
	/* GM_TEST is not used */
	REGISTER_AB(GM_ADR1),
	REGISTER_AB(GM_ADR2),
	REGISTER_AB(GMF_CFG0),
	REGISTER_AB(GMF_CFG1),
	REGISTER_AB(GMF_CFG2),
	REGISTER_AB(GMF_CFG3),
	REGISTER_AB(GMF_CFG4),
	REGISTER_AB(GMF_CFG5),
	REGISTER_BB(TX_SRC_MAC_CTL),
	REGISTER_AB(XM_ADR_LO),
	REGISTER_AB(XM_ADR_HI),
	REGISTER_AB(XM_GLB_CFG),
	REGISTER_AB(XM_TX_CFG),
	REGISTER_AB(XM_RX_CFG),
	REGISTER_AB(XM_MGT_INT_MASK),
	REGISTER_AB(XM_PAUSE_TIME),
	REGISTER_AB(XM_TX_PARAM),
	REGISTER_AB(XM_RX_PARAM),
	/* XM_MGT_INT_MSK (note no 'A') is RC */
	REGISTER_AB(XX_PWR_RST),
	REGISTER_AB(XX_SD_CTL),
	REGISTER_AB(XX_TXDRV_CTL),
	/* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */
	/* XX_CORE_STAT is partly RC */
};
struct efx_nic_reg_table {
	u32 offset:24;
	u32 min_revision:2, max_revision:2;
	u32 step:6, rows:21;
};
#define REGISTER_TABLE_DIMENSIONS(_, offset, min_rev, max_rev, step, rows) { \
	offset,								\
	REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev,	\
	step, rows							\
}
#define REGISTER_TABLE(name, min_rev, max_rev)				\
	REGISTER_TABLE_DIMENSIONS(					\
		name, FR_ ## min_rev ## max_rev ## _ ## name,		\
		min_rev, max_rev,					\
		FR_ ## min_rev ## max_rev ## _ ## name ## _STEP,	\
		FR_ ## min_rev ## max_rev ## _ ## name ## _ROWS)
#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, A, A)
#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, A, Z)
#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, B, B)
#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, B, Z)
#define REGISTER_TABLE_BB_CZ(name)					\
	REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, B, B,		\
				  FR_BZ_ ## name ## _STEP,		\
				  FR_BB_ ## name ## _ROWS),		\
	REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, C, Z,		\
				  FR_BZ_ ## name ## _STEP,		\
				  FR_CZ_ ## name ## _ROWS)
#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, C, Z)
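/* REGISTER_TABLE_BB_CZ() emits two entries for one table: revisions B
 * and C..Z share the FR_BZ_ offset and step, but the row count differs
 * (FR_BB_ ## name ## _ROWS vs FR_CZ_ ## name ## _ROWS).
 */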
static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
	/* DRIVER is not used */
	/* EVQ_RPTR, TIMER_COMMAND, USR_EV and {RX,TX}_DESC_UPD are WO */
	REGISTER_TABLE_BB(TX_IPFIL_TBL),
	REGISTER_TABLE_BB(TX_SRC_MAC_TBL),
	REGISTER_TABLE_AA(RX_DESC_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(RX_DESC_PTR_TBL),
	REGISTER_TABLE_AA(TX_DESC_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL),
	REGISTER_TABLE_AA(EVQ_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL),
	/* We can't reasonably read all of the buffer table (up to 8MB!).
	 * However this driver will only use a few entries.  Reading
	 * 1K entries allows for some expansion of queue count and
	 * size before we need to change the version. */
	REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER,
				  A, A, 8, 1024),
	REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL,
				  B, Z, 8, 1024),
	REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0),
	REGISTER_TABLE_BB_CZ(TIMER_TBL),
	REGISTER_TABLE_BB_CZ(TX_PACE_TBL),
	REGISTER_TABLE_BZ(RX_INDIRECTION_TBL),
	/* TX_FILTER_TBL0 is huge and not used by this driver */
	REGISTER_TABLE_CZ(TX_MAC_FILTER_TBL0),
	REGISTER_TABLE_CZ(MC_TREG_SMEM),
	/* MSIX_PBA_TABLE is not mapped */
	/* SRM_DBG is not mapped (and is redundant with BUF_FLL_TBL) */
	REGISTER_TABLE_BZ(RX_FILTER_TBL0),
};
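/* The buffer table is read through the 64-bit SRAM path (step 8) in
 * efx_nic_get_regs() below, one efx_qword_t per row.
 */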
size_t efx_nic_get_regs_len(struct efx_nic *efx)
{
	const struct efx_nic_reg *reg;
	const struct efx_nic_reg_table *table;
	size_t len = 0;

	for (reg = efx_nic_regs;
	     reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
	     reg++)
		if (efx->type->revision >= reg->min_revision &&
		    efx->type->revision <= reg->max_revision)
			len += sizeof(efx_oword_t);

	for (table = efx_nic_reg_tables;
	     table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
	     table++)
		if (efx->type->revision >= table->min_revision &&
		    efx->type->revision <= table->max_revision)
			len += table->rows * min_t(size_t, table->step, 16);

	return len;
}
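/* Each plain register contributes sizeof(efx_oword_t) == 16 bytes to
 * the dump.  Each table row contributes min(step, 16) bytes, matching
 * what efx_nic_get_regs() below actually reads per row.
 */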
void efx_nic_get_regs(struct efx_nic *efx, void *buf)
{
	const struct efx_nic_reg *reg;
	const struct efx_nic_reg_table *table;

	for (reg = efx_nic_regs;
	     reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
	     reg++) {
		if (efx->type->revision >= reg->min_revision &&
		    efx->type->revision <= reg->max_revision) {
			efx_reado(efx, (efx_oword_t *)buf, reg->offset);
			buf += sizeof(efx_oword_t);
		}
	}

	for (table = efx_nic_reg_tables;
	     table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
	     table++) {
		size_t size, i;

		if (!(efx->type->revision >= table->min_revision &&
		      efx->type->revision <= table->max_revision))
			continue;

		size = min_t(size_t, table->step, 16);

		for (i = 0; i < table->rows; i++) {
			switch (table->step) {
			case 4: /* 32-bit SRAM */
				efx_readd(efx, buf, table->offset + 4 * i);
				break;
			case 8: /* 64-bit SRAM */
				efx_sram_readq(efx,
					       efx->membase + table->offset,
					       buf, i);
				break;
			case 16: /* 128-bit-readable register */
				efx_reado_table(efx, buf, table->offset, i);
				break;
			case 32: /* 128-bit register, interleaved */
				efx_reado_table(efx, buf, table->offset,
						2 * i);