/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2011 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 ****************************************************************************/

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/cpu_rmap.h>
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
#include "nic.h"
#include "regs.h"
#include "io.h"
#include "workarounds.h"

/**************************************************************************
 *
 * Configurable values
 *
 **************************************************************************
 */

/* This is set to 16 for a good reason.  In summary, if larger than
 * 16, the descriptor cache holds more than a default socket
 * buffer's worth of packets (for UDP we can only have at most one
 * socket buffer's worth outstanding).  This combined with the fact
 * that we only get 1 TX event per descriptor cache means the NIC
 * goes idle.
 */
#define TX_DC_ENTRIES 16
#define TX_DC_ENTRIES_ORDER 1

#define RX_DC_ENTRIES 64
#define RX_DC_ENTRIES_ORDER 3
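
/* For illustration: the *_ORDER values encode the descriptor cache size as
 * 8 << ORDER, which efx_nic_init_common() later asserts with BUILD_BUG_ON():
 *
 *	TX_DC_ENTRIES == (8 << TX_DC_ENTRIES_ORDER)	->  16 == (8 << 1)
 *	RX_DC_ENTRIES == (8 << RX_DC_ENTRIES_ORDER)	->  64 == (8 << 3)
 */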

/* If EFX_MAX_INT_ERRORS internal errors occur within
 * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
 * disable it.
 */
#define EFX_INT_ERROR_EXPIRE 3600
#define EFX_MAX_INT_ERRORS 5

/* Depth of RX flush request fifo */
#define EFX_RX_FLUSH_COUNT 4

/* Driver generated events */
#define _EFX_CHANNEL_MAGIC_TEST		0x000101
#define _EFX_CHANNEL_MAGIC_FILL		0x000102
#define _EFX_CHANNEL_MAGIC_RX_DRAIN	0x000103
#define _EFX_CHANNEL_MAGIC_TX_DRAIN	0x000104

#define _EFX_CHANNEL_MAGIC(_code, _data)	((_code) << 8 | (_data))
#define _EFX_CHANNEL_MAGIC_CODE(_magic)		((_magic) >> 8)

#define EFX_CHANNEL_MAGIC_TEST(_channel)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, (_channel)->channel)
#define EFX_CHANNEL_MAGIC_FILL(_rx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_FILL,			\
			   efx_rx_queue_index(_rx_queue))
#define EFX_CHANNEL_MAGIC_RX_DRAIN(_rx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_RX_DRAIN,			\
			   efx_rx_queue_index(_rx_queue))
#define EFX_CHANNEL_MAGIC_TX_DRAIN(_tx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN,			\
			   (_tx_queue)->queue)

static void efx_magic_event(struct efx_channel *channel, u32 magic);
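
/* Worked example of the magic event encoding above: for channel 3,
 * EFX_CHANNEL_MAGIC_TEST() evaluates to (0x000101 << 8) | 3 == 0x00010103,
 * and efx_handle_generated_event() recovers the event type with
 * _EFX_CHANNEL_MAGIC_CODE(0x00010103) == 0x000101.  The low 8 bits carry the
 * channel or queue index, the upper bits the driver-generated event type.
 */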

/**************************************************************************
 *
 * Solarstorm hardware access
 *
 **************************************************************************/

static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
				     unsigned int index)
{
	efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
			value, index);
}

/* Read the current event from the event queue */
static inline efx_qword_t *efx_event(struct efx_channel *channel,
				     unsigned int index)
{
	return ((efx_qword_t *) (channel->eventq.addr)) +
		(index & channel->eventq_mask);
}

/* See if an event is present
 *
 * We check both the high and low dword of the event for all ones.  We
 * wrote all ones when we cleared the event, and no valid event can
 * have all ones in either its high or low dwords.  This approach is
 * robust against reordering.
 *
 * Note that using a single 64-bit comparison is incorrect; even
 * though the CPU read will be atomic, the DMA write may not be.
 */
static inline int efx_event_present(efx_qword_t *event)
{
	return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
		 EFX_DWORD_IS_ALL_ONES(event->dword[1]));
}

static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
				     const efx_oword_t *mask)
{
	return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
		((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
}

int efx_nic_test_registers(struct efx_nic *efx,
			   const struct efx_nic_register_test *regs,
			   size_t n_regs)
{
	unsigned address = 0, i, j;
	efx_oword_t mask, imask, original, reg, buf;

	for (i = 0; i < n_regs; ++i) {
		address = regs[i].address;
		mask = imask = regs[i].mask;
		EFX_INVERT_OWORD(imask);

		efx_reado(efx, &original, address);

		/* bit sweep on and off */
		for (j = 0; j < 128; j++) {
			if (!EFX_EXTRACT_OWORD32(mask, j, j))
				continue;

			/* Test this testable bit can be set in isolation */
			EFX_AND_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 1);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;

			/* Test this testable bit can be cleared in isolation */
			EFX_OR_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 0);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;
		}

		efx_writeo(efx, &original, address);
	}

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev,
		  "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
		  " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
		  EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
	return -EIO;
}

/**************************************************************************
 *
 * Special buffer handling
 * Special buffers are used for event queues and the TX and RX
 * descriptor queues for each channel.  They are *not* used for the
 * actual transmit and receive buffers.
 *
 *************************************************************************/

/*
 * Initialise a special buffer
 *
 * This will define a buffer (previously allocated via
 * efx_alloc_special_buffer()) in the buffer table, allowing
 * it to be used for event queues, descriptor rings etc.
 */
static void
efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	efx_qword_t buf_desc;
	int index;
	dma_addr_t dma_addr;
	unsigned int i;

	EFX_BUG_ON_PARANOID(!buffer->addr);

	/* Write buffer descriptors to NIC */
	for (i = 0; i < buffer->entries; i++) {
		index = buffer->index + i;
		dma_addr = buffer->dma_addr + (i * EFX_BUF_SIZE);
		netif_dbg(efx, probe, efx->net_dev,
			  "mapping special buffer %d at %llx\n",
			  index, (unsigned long long)dma_addr);
		EFX_POPULATE_QWORD_3(buf_desc,
				     FRF_AZ_BUF_ADR_REGION, 0,
				     FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
				     FRF_AZ_BUF_OWNER_ID_FBUF, 0);
		efx_write_buf_tbl(efx, &buf_desc, index);
	}
}

/* Unmaps a buffer and clears the buffer table entries */
static void
efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	efx_oword_t buf_tbl_upd;
	unsigned int start = buffer->index;
	unsigned int end = (buffer->index + buffer->entries - 1);

	if (!buffer->entries)
		return;

	netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
		  buffer->index, buffer->index + buffer->entries - 1);

	EFX_POPULATE_OWORD_4(buf_tbl_upd,
			     FRF_AZ_BUF_UPD_CMD, 0,
			     FRF_AZ_BUF_CLR_CMD, 1,
			     FRF_AZ_BUF_CLR_END_ID, end,
			     FRF_AZ_BUF_CLR_START_ID, start);
	efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
}

/*
 * Allocate a new special buffer
 *
 * This allocates memory for a new buffer, clears it and allocates a
 * new buffer ID range.  It does not write into the buffer table.
 *
 * This call will allocate 4KB buffers, since 8KB buffers can't be
 * used for event queues and descriptor rings.
 */
static int efx_alloc_special_buffer(struct efx_nic *efx,
				    struct efx_special_buffer *buffer,
				    unsigned int len)
{
	len = ALIGN(len, EFX_BUF_SIZE);

	buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
					  &buffer->dma_addr, GFP_KERNEL);
	if (!buffer->addr)
		return -ENOMEM;
	buffer->len = len;
	buffer->entries = len / EFX_BUF_SIZE;
	BUG_ON(buffer->dma_addr & (EFX_BUF_SIZE - 1));

	/* Select new buffer ID */
	buffer->index = efx->next_buffer_table;
	efx->next_buffer_table += buffer->entries;
#ifdef CONFIG_SFC_SRIOV
	BUG_ON(efx_sriov_enabled(efx) &&
	       efx->vf_buftbl_base < efx->next_buffer_table);
#endif

	netif_dbg(efx, probe, efx->net_dev,
		  "allocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->dma_addr, len,
		  buffer->addr, (u64)virt_to_phys(buffer->addr));

	return 0;
}

static void
efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	if (!buffer->addr)
		return;

	netif_dbg(efx, hw, efx->net_dev,
		  "deallocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->dma_addr, buffer->len,
		  buffer->addr, (u64)virt_to_phys(buffer->addr));

	dma_free_coherent(&efx->pci_dev->dev, buffer->len, buffer->addr,
			  buffer->dma_addr);
	buffer->addr = NULL;
	buffer->entries = 0;
}

/**************************************************************************
 *
 * Generic buffer handling
 * These buffers are used for interrupt status, MAC stats, etc.
 *
 **************************************************************************/

int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
			 unsigned int len)
{
	buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
					  &buffer->dma_addr,
					  GFP_ATOMIC | __GFP_ZERO);
	if (!buffer->addr)
		return -ENOMEM;
	buffer->len = len;
	return 0;
}

void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
{
	if (buffer->addr) {
		dma_free_coherent(&efx->pci_dev->dev, buffer->len,
				  buffer->addr, buffer->dma_addr);
		buffer->addr = NULL;
	}
}

/**************************************************************************
 *
 * TX path
 *
 **************************************************************************/

/* Returns a pointer to the specified transmit descriptor in the TX
 * descriptor queue belonging to the specified channel.
 */
static inline efx_qword_t *
efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
{
	return ((efx_qword_t *) (tx_queue->txd.addr)) + index;
}

/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
static inline void efx_notify_tx_desc(struct efx_tx_queue *tx_queue)
{
	unsigned write_ptr;
	efx_dword_t reg;

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(tx_queue->efx, &reg,
			FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
}
353 /* Write pointer and first descriptor for TX descriptor ring */
354 static inline void efx_push_tx_desc(struct efx_tx_queue
*tx_queue
,
355 const efx_qword_t
*txd
)
360 BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN
!= 0);
361 BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER
!= FR_BZ_TX_DESC_UPD_P0
);
363 write_ptr
= tx_queue
->write_count
& tx_queue
->ptr_mask
;
364 EFX_POPULATE_OWORD_2(reg
, FRF_AZ_TX_DESC_PUSH_CMD
, true,
365 FRF_AZ_TX_DESC_WPTR
, write_ptr
);
367 efx_writeo_page(tx_queue
->efx
, ®
,
368 FR_BZ_TX_DESC_UPD_P0
, tx_queue
->queue
);
372 efx_may_push_tx_desc(struct efx_tx_queue
*tx_queue
, unsigned int write_count
)
374 unsigned empty_read_count
= ACCESS_ONCE(tx_queue
->empty_read_count
);
376 if (empty_read_count
== 0)
379 tx_queue
->empty_read_count
= 0;
380 return ((empty_read_count
^ write_count
) & ~EFX_EMPTY_COUNT_VALID
) == 0
381 && tx_queue
->write_count
- write_count
== 1;
384 /* For each entry inserted into the software descriptor ring, create a
385 * descriptor in the hardware TX descriptor ring (in host memory), and
388 void efx_nic_push_buffers(struct efx_tx_queue
*tx_queue
)
391 struct efx_tx_buffer
*buffer
;
394 unsigned old_write_count
= tx_queue
->write_count
;
396 BUG_ON(tx_queue
->write_count
== tx_queue
->insert_count
);
399 write_ptr
= tx_queue
->write_count
& tx_queue
->ptr_mask
;
400 buffer
= &tx_queue
->buffer
[write_ptr
];
401 txd
= efx_tx_desc(tx_queue
, write_ptr
);
402 ++tx_queue
->write_count
;
404 /* Create TX descriptor ring entry */
405 BUILD_BUG_ON(EFX_TX_BUF_CONT
!= 1);
406 EFX_POPULATE_QWORD_4(*txd
,
408 buffer
->flags
& EFX_TX_BUF_CONT
,
409 FSF_AZ_TX_KER_BYTE_COUNT
, buffer
->len
,
410 FSF_AZ_TX_KER_BUF_REGION
, 0,
411 FSF_AZ_TX_KER_BUF_ADDR
, buffer
->dma_addr
);
412 } while (tx_queue
->write_count
!= tx_queue
->insert_count
);
414 wmb(); /* Ensure descriptors are written before they are fetched */
416 if (efx_may_push_tx_desc(tx_queue
, old_write_count
)) {
417 txd
= efx_tx_desc(tx_queue
,
418 old_write_count
& tx_queue
->ptr_mask
);
419 efx_push_tx_desc(tx_queue
, txd
);
422 efx_notify_tx_desc(tx_queue
);
426 /* Allocate hardware resources for a TX queue */
427 int efx_nic_probe_tx(struct efx_tx_queue
*tx_queue
)
429 struct efx_nic
*efx
= tx_queue
->efx
;
432 entries
= tx_queue
->ptr_mask
+ 1;
433 return efx_alloc_special_buffer(efx
, &tx_queue
->txd
,
434 entries
* sizeof(efx_qword_t
));
437 void efx_nic_init_tx(struct efx_tx_queue
*tx_queue
)
439 struct efx_nic
*efx
= tx_queue
->efx
;
442 /* Pin TX descriptor ring */
443 efx_init_special_buffer(efx
, &tx_queue
->txd
);
445 /* Push TX descriptor ring to card */
446 EFX_POPULATE_OWORD_10(reg
,
447 FRF_AZ_TX_DESCQ_EN
, 1,
448 FRF_AZ_TX_ISCSI_DDIG_EN
, 0,
449 FRF_AZ_TX_ISCSI_HDIG_EN
, 0,
450 FRF_AZ_TX_DESCQ_BUF_BASE_ID
, tx_queue
->txd
.index
,
451 FRF_AZ_TX_DESCQ_EVQ_ID
,
452 tx_queue
->channel
->channel
,
453 FRF_AZ_TX_DESCQ_OWNER_ID
, 0,
454 FRF_AZ_TX_DESCQ_LABEL
, tx_queue
->queue
,
455 FRF_AZ_TX_DESCQ_SIZE
,
456 __ffs(tx_queue
->txd
.entries
),
457 FRF_AZ_TX_DESCQ_TYPE
, 0,
458 FRF_BZ_TX_NON_IP_DROP_DIS
, 1);
460 if (efx_nic_rev(efx
) >= EFX_REV_FALCON_B0
) {
461 int csum
= tx_queue
->queue
& EFX_TXQ_TYPE_OFFLOAD
;
462 EFX_SET_OWORD_FIELD(reg
, FRF_BZ_TX_IP_CHKSM_DIS
, !csum
);
463 EFX_SET_OWORD_FIELD(reg
, FRF_BZ_TX_TCP_CHKSM_DIS
,
467 efx_writeo_table(efx
, ®
, efx
->type
->txd_ptr_tbl_base
,
470 if (efx_nic_rev(efx
) < EFX_REV_FALCON_B0
) {
471 /* Only 128 bits in this register */
472 BUILD_BUG_ON(EFX_MAX_TX_QUEUES
> 128);
474 efx_reado(efx
, ®
, FR_AA_TX_CHKSM_CFG
);
475 if (tx_queue
->queue
& EFX_TXQ_TYPE_OFFLOAD
)
476 __clear_bit_le(tx_queue
->queue
, ®
);
478 __set_bit_le(tx_queue
->queue
, ®
);
479 efx_writeo(efx
, ®
, FR_AA_TX_CHKSM_CFG
);
482 if (efx_nic_rev(efx
) >= EFX_REV_FALCON_B0
) {
483 EFX_POPULATE_OWORD_1(reg
,
485 (tx_queue
->queue
& EFX_TXQ_TYPE_HIGHPRI
) ?
487 FFE_BZ_TX_PACE_RESERVED
);
488 efx_writeo_table(efx
, ®
, FR_BZ_TX_PACE_TBL
,
493 static void efx_flush_tx_queue(struct efx_tx_queue
*tx_queue
)
495 struct efx_nic
*efx
= tx_queue
->efx
;
496 efx_oword_t tx_flush_descq
;
498 WARN_ON(atomic_read(&tx_queue
->flush_outstanding
));
499 atomic_set(&tx_queue
->flush_outstanding
, 1);
501 EFX_POPULATE_OWORD_2(tx_flush_descq
,
502 FRF_AZ_TX_FLUSH_DESCQ_CMD
, 1,
503 FRF_AZ_TX_FLUSH_DESCQ
, tx_queue
->queue
);
504 efx_writeo(efx
, &tx_flush_descq
, FR_AZ_TX_FLUSH_DESCQ
);
507 void efx_nic_fini_tx(struct efx_tx_queue
*tx_queue
)
509 struct efx_nic
*efx
= tx_queue
->efx
;
510 efx_oword_t tx_desc_ptr
;
512 /* Remove TX descriptor ring from card */
513 EFX_ZERO_OWORD(tx_desc_ptr
);
514 efx_writeo_table(efx
, &tx_desc_ptr
, efx
->type
->txd_ptr_tbl_base
,
517 /* Unpin TX descriptor ring */
518 efx_fini_special_buffer(efx
, &tx_queue
->txd
);
521 /* Free buffers backing TX queue */
522 void efx_nic_remove_tx(struct efx_tx_queue
*tx_queue
)
524 efx_free_special_buffer(tx_queue
->efx
, &tx_queue
->txd
);
527 /**************************************************************************
531 **************************************************************************/
533 /* Returns a pointer to the specified descriptor in the RX descriptor queue */
534 static inline efx_qword_t
*
535 efx_rx_desc(struct efx_rx_queue
*rx_queue
, unsigned int index
)
537 return ((efx_qword_t
*) (rx_queue
->rxd
.addr
)) + index
;
540 /* This creates an entry in the RX descriptor queue */
542 efx_build_rx_desc(struct efx_rx_queue
*rx_queue
, unsigned index
)
544 struct efx_rx_buffer
*rx_buf
;
547 rxd
= efx_rx_desc(rx_queue
, index
);
548 rx_buf
= efx_rx_buffer(rx_queue
, index
);
549 EFX_POPULATE_QWORD_3(*rxd
,
550 FSF_AZ_RX_KER_BUF_SIZE
,
552 rx_queue
->efx
->type
->rx_buffer_padding
,
553 FSF_AZ_RX_KER_BUF_REGION
, 0,
554 FSF_AZ_RX_KER_BUF_ADDR
, rx_buf
->dma_addr
);
557 /* This writes to the RX_DESC_WPTR register for the specified receive
560 void efx_nic_notify_rx_desc(struct efx_rx_queue
*rx_queue
)
562 struct efx_nic
*efx
= rx_queue
->efx
;
566 while (rx_queue
->notified_count
!= rx_queue
->added_count
) {
569 rx_queue
->notified_count
& rx_queue
->ptr_mask
);
570 ++rx_queue
->notified_count
;
574 write_ptr
= rx_queue
->added_count
& rx_queue
->ptr_mask
;
575 EFX_POPULATE_DWORD_1(reg
, FRF_AZ_RX_DESC_WPTR_DWORD
, write_ptr
);
576 efx_writed_page(efx
, ®
, FR_AZ_RX_DESC_UPD_DWORD_P0
,
577 efx_rx_queue_index(rx_queue
));
580 int efx_nic_probe_rx(struct efx_rx_queue
*rx_queue
)
582 struct efx_nic
*efx
= rx_queue
->efx
;
585 entries
= rx_queue
->ptr_mask
+ 1;
586 return efx_alloc_special_buffer(efx
, &rx_queue
->rxd
,
587 entries
* sizeof(efx_qword_t
));
590 void efx_nic_init_rx(struct efx_rx_queue
*rx_queue
)
592 efx_oword_t rx_desc_ptr
;
593 struct efx_nic
*efx
= rx_queue
->efx
;
594 bool is_b0
= efx_nic_rev(efx
) >= EFX_REV_FALCON_B0
;
595 bool iscsi_digest_en
= is_b0
;
	bool jumbo_en;

	/* For kernel-mode queues in Falcon A1, the JUMBO flag enables
	 * DMA to continue after a PCIe page boundary (and scattering
	 * is not possible).  In Falcon B0 and Siena, it enables
	 * scatter.
	 */
	jumbo_en = !is_b0 || efx->rx_scatter;
605 netif_dbg(efx
, hw
, efx
->net_dev
,
606 "RX queue %d ring in special buffers %d-%d\n",
607 efx_rx_queue_index(rx_queue
), rx_queue
->rxd
.index
,
608 rx_queue
->rxd
.index
+ rx_queue
->rxd
.entries
- 1);
610 rx_queue
->scatter_n
= 0;
612 /* Pin RX descriptor ring */
613 efx_init_special_buffer(efx
, &rx_queue
->rxd
);
615 /* Push RX descriptor ring to card */
616 EFX_POPULATE_OWORD_10(rx_desc_ptr
,
617 FRF_AZ_RX_ISCSI_DDIG_EN
, iscsi_digest_en
,
618 FRF_AZ_RX_ISCSI_HDIG_EN
, iscsi_digest_en
,
619 FRF_AZ_RX_DESCQ_BUF_BASE_ID
, rx_queue
->rxd
.index
,
620 FRF_AZ_RX_DESCQ_EVQ_ID
,
621 efx_rx_queue_channel(rx_queue
)->channel
,
622 FRF_AZ_RX_DESCQ_OWNER_ID
, 0,
623 FRF_AZ_RX_DESCQ_LABEL
,
624 efx_rx_queue_index(rx_queue
),
625 FRF_AZ_RX_DESCQ_SIZE
,
626 __ffs(rx_queue
->rxd
.entries
),
627 FRF_AZ_RX_DESCQ_TYPE
, 0 /* kernel queue */ ,
628 FRF_AZ_RX_DESCQ_JUMBO
, jumbo_en
,
629 FRF_AZ_RX_DESCQ_EN
, 1);
630 efx_writeo_table(efx
, &rx_desc_ptr
, efx
->type
->rxd_ptr_tbl_base
,
631 efx_rx_queue_index(rx_queue
));
634 static void efx_flush_rx_queue(struct efx_rx_queue
*rx_queue
)
636 struct efx_nic
*efx
= rx_queue
->efx
;
637 efx_oword_t rx_flush_descq
;
639 EFX_POPULATE_OWORD_2(rx_flush_descq
,
640 FRF_AZ_RX_FLUSH_DESCQ_CMD
, 1,
641 FRF_AZ_RX_FLUSH_DESCQ
,
642 efx_rx_queue_index(rx_queue
));
643 efx_writeo(efx
, &rx_flush_descq
, FR_AZ_RX_FLUSH_DESCQ
);
646 void efx_nic_fini_rx(struct efx_rx_queue
*rx_queue
)
648 efx_oword_t rx_desc_ptr
;
649 struct efx_nic
*efx
= rx_queue
->efx
;
651 /* Remove RX descriptor ring from card */
652 EFX_ZERO_OWORD(rx_desc_ptr
);
653 efx_writeo_table(efx
, &rx_desc_ptr
, efx
->type
->rxd_ptr_tbl_base
,
654 efx_rx_queue_index(rx_queue
));
656 /* Unpin RX descriptor ring */
657 efx_fini_special_buffer(efx
, &rx_queue
->rxd
);
660 /* Free buffers backing RX queue */
661 void efx_nic_remove_rx(struct efx_rx_queue
*rx_queue
)
663 efx_free_special_buffer(rx_queue
->efx
, &rx_queue
->rxd
);
666 /**************************************************************************
670 **************************************************************************/
672 /* efx_nic_flush_queues() must be woken up when all flushes are completed,
673 * or more RX flushes can be kicked off.
675 static bool efx_flush_wake(struct efx_nic
*efx
)
677 /* Ensure that all updates are visible to efx_nic_flush_queues() */
680 return (atomic_read(&efx
->drain_pending
) == 0 ||
681 (atomic_read(&efx
->rxq_flush_outstanding
) < EFX_RX_FLUSH_COUNT
682 && atomic_read(&efx
->rxq_flush_pending
) > 0));
685 static bool efx_check_tx_flush_complete(struct efx_nic
*efx
)
688 efx_oword_t txd_ptr_tbl
;
689 struct efx_channel
*channel
;
690 struct efx_tx_queue
*tx_queue
;
692 efx_for_each_channel(channel
, efx
) {
693 efx_for_each_channel_tx_queue(tx_queue
, channel
) {
694 efx_reado_table(efx
, &txd_ptr_tbl
,
695 FR_BZ_TX_DESC_PTR_TBL
, tx_queue
->queue
);
696 if (EFX_OWORD_FIELD(txd_ptr_tbl
,
697 FRF_AZ_TX_DESCQ_FLUSH
) ||
698 EFX_OWORD_FIELD(txd_ptr_tbl
,
699 FRF_AZ_TX_DESCQ_EN
)) {
700 netif_dbg(efx
, hw
, efx
->net_dev
,
701 "flush did not complete on TXQ %d\n",
704 } else if (atomic_cmpxchg(&tx_queue
->flush_outstanding
,
706 /* The flush is complete, but we didn't
707 * receive a flush completion event
709 netif_dbg(efx
, hw
, efx
->net_dev
,
710 "flush complete on TXQ %d, so drain "
711 "the queue\n", tx_queue
->queue
);
712 /* Don't need to increment drain_pending as it
713 * has already been incremented for the queues
714 * which did not drain
716 efx_magic_event(channel
,
717 EFX_CHANNEL_MAGIC_TX_DRAIN(
/* Flush all the transmit queues, and continue flushing receive queues until
 * they're all flushed. Wait for the DRAIN events to be received so that there
 * are no more RX and TX events left on any channel. */
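/* Summary of the handshake implemented below: each TX/RX flush request
 * written to the NIC is answered with a *_DESCQ_FLS_DONE driver event; the
 * flush-done handlers then post EFX_CHANNEL_MAGIC_*_DRAIN events, and
 * efx_handle_drain_event() decrements drain_pending and wakes flush_wq so
 * that efx_nic_flush_queues() can wait for the count to reach zero.
 */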
729 int efx_nic_flush_queues(struct efx_nic
*efx
)
731 unsigned timeout
= msecs_to_jiffies(5000); /* 5s for all flushes and drains */
732 struct efx_channel
*channel
;
733 struct efx_rx_queue
*rx_queue
;
734 struct efx_tx_queue
*tx_queue
;
737 efx
->type
->prepare_flush(efx
);
739 efx_for_each_channel(channel
, efx
) {
740 efx_for_each_channel_tx_queue(tx_queue
, channel
) {
741 atomic_inc(&efx
->drain_pending
);
742 efx_flush_tx_queue(tx_queue
);
744 efx_for_each_channel_rx_queue(rx_queue
, channel
) {
745 atomic_inc(&efx
->drain_pending
);
746 rx_queue
->flush_pending
= true;
747 atomic_inc(&efx
->rxq_flush_pending
);
751 while (timeout
&& atomic_read(&efx
->drain_pending
) > 0) {
752 /* If SRIOV is enabled, then offload receive queue flushing to
753 * the firmware (though we will still have to poll for
754 * completion). If that fails, fall back to the old scheme.
756 if (efx_sriov_enabled(efx
)) {
757 rc
= efx_mcdi_flush_rxqs(efx
);
762 /* The hardware supports four concurrent rx flushes, each of
763 * which may need to be retried if there is an outstanding
766 efx_for_each_channel(channel
, efx
) {
767 efx_for_each_channel_rx_queue(rx_queue
, channel
) {
768 if (atomic_read(&efx
->rxq_flush_outstanding
) >=
772 if (rx_queue
->flush_pending
) {
773 rx_queue
->flush_pending
= false;
774 atomic_dec(&efx
->rxq_flush_pending
);
775 atomic_inc(&efx
->rxq_flush_outstanding
);
776 efx_flush_rx_queue(rx_queue
);
782 timeout
= wait_event_timeout(efx
->flush_wq
, efx_flush_wake(efx
),
786 if (atomic_read(&efx
->drain_pending
) &&
787 !efx_check_tx_flush_complete(efx
)) {
788 netif_err(efx
, hw
, efx
->net_dev
, "failed to flush %d queues "
789 "(rx %d+%d)\n", atomic_read(&efx
->drain_pending
),
790 atomic_read(&efx
->rxq_flush_outstanding
),
791 atomic_read(&efx
->rxq_flush_pending
));
794 atomic_set(&efx
->drain_pending
, 0);
795 atomic_set(&efx
->rxq_flush_pending
, 0);
796 atomic_set(&efx
->rxq_flush_outstanding
, 0);
799 efx
->type
->finish_flush(efx
);
804 /**************************************************************************
806 * Event queue processing
807 * Event queues are processed by per-channel tasklets.
809 **************************************************************************/
811 /* Update a channel's event queue's read pointer (RPTR) register
813 * This writes the EVQ_RPTR_REG register for the specified channel's
816 void efx_nic_eventq_read_ack(struct efx_channel
*channel
)
819 struct efx_nic
*efx
= channel
->efx
;
821 EFX_POPULATE_DWORD_1(reg
, FRF_AZ_EVQ_RPTR
,
822 channel
->eventq_read_ptr
& channel
->eventq_mask
);
824 /* For Falcon A1, EVQ_RPTR_KER is documented as having a step size
825 * of 4 bytes, but it is really 16 bytes just like later revisions.
827 efx_writed(efx
, ®
,
828 efx
->type
->evq_rptr_tbl_base
+
829 FR_BZ_EVQ_RPTR_STEP
* channel
->channel
);
832 /* Use HW to insert a SW defined event */
833 void efx_generate_event(struct efx_nic
*efx
, unsigned int evq
,
836 efx_oword_t drv_ev_reg
;
838 BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN
!= 0 ||
839 FRF_AZ_DRV_EV_DATA_WIDTH
!= 64);
840 drv_ev_reg
.u32
[0] = event
->u32
[0];
841 drv_ev_reg
.u32
[1] = event
->u32
[1];
842 drv_ev_reg
.u32
[2] = 0;
843 drv_ev_reg
.u32
[3] = 0;
844 EFX_SET_OWORD_FIELD(drv_ev_reg
, FRF_AZ_DRV_EV_QID
, evq
);
845 efx_writeo(efx
, &drv_ev_reg
, FR_AZ_DRV_EV
);
848 static void efx_magic_event(struct efx_channel
*channel
, u32 magic
)
852 EFX_POPULATE_QWORD_2(event
, FSF_AZ_EV_CODE
,
853 FSE_AZ_EV_CODE_DRV_GEN_EV
,
854 FSF_AZ_DRV_GEN_EV_MAGIC
, magic
);
855 efx_generate_event(channel
->efx
, channel
->channel
, &event
);
858 /* Handle a transmit completion event
860 * The NIC batches TX completion events; the message we receive is of
861 * the form "complete all TX events up to this index".
864 efx_handle_tx_event(struct efx_channel
*channel
, efx_qword_t
*event
)
866 unsigned int tx_ev_desc_ptr
;
867 unsigned int tx_ev_q_label
;
868 struct efx_tx_queue
*tx_queue
;
869 struct efx_nic
*efx
= channel
->efx
;
872 if (unlikely(ACCESS_ONCE(efx
->reset_pending
)))
875 if (likely(EFX_QWORD_FIELD(*event
, FSF_AZ_TX_EV_COMP
))) {
876 /* Transmit completion */
877 tx_ev_desc_ptr
= EFX_QWORD_FIELD(*event
, FSF_AZ_TX_EV_DESC_PTR
);
878 tx_ev_q_label
= EFX_QWORD_FIELD(*event
, FSF_AZ_TX_EV_Q_LABEL
);
879 tx_queue
= efx_channel_get_tx_queue(
880 channel
, tx_ev_q_label
% EFX_TXQ_TYPES
);
881 tx_packets
= ((tx_ev_desc_ptr
- tx_queue
->read_count
) &
883 efx_xmit_done(tx_queue
, tx_ev_desc_ptr
);
884 } else if (EFX_QWORD_FIELD(*event
, FSF_AZ_TX_EV_WQ_FF_FULL
)) {
885 /* Rewrite the FIFO write pointer */
886 tx_ev_q_label
= EFX_QWORD_FIELD(*event
, FSF_AZ_TX_EV_Q_LABEL
);
887 tx_queue
= efx_channel_get_tx_queue(
888 channel
, tx_ev_q_label
% EFX_TXQ_TYPES
);
890 netif_tx_lock(efx
->net_dev
);
891 efx_notify_tx_desc(tx_queue
);
892 netif_tx_unlock(efx
->net_dev
);
893 } else if (EFX_QWORD_FIELD(*event
, FSF_AZ_TX_EV_PKT_ERR
) &&
894 EFX_WORKAROUND_10727(efx
)) {
895 efx_schedule_reset(efx
, RESET_TYPE_TX_DESC_FETCH
);
897 netif_err(efx
, tx_err
, efx
->net_dev
,
898 "channel %d unexpected TX event "
899 EFX_QWORD_FMT
"\n", channel
->channel
,
900 EFX_QWORD_VAL(*event
));
906 /* Detect errors included in the rx_evt_pkt_ok bit. */
907 static u16
efx_handle_rx_not_ok(struct efx_rx_queue
*rx_queue
,
908 const efx_qword_t
*event
)
910 struct efx_channel
*channel
= efx_rx_queue_channel(rx_queue
);
911 struct efx_nic
*efx
= rx_queue
->efx
;
912 bool rx_ev_buf_owner_id_err
, rx_ev_ip_hdr_chksum_err
;
913 bool rx_ev_tcp_udp_chksum_err
, rx_ev_eth_crc_err
;
914 bool rx_ev_frm_trunc
, rx_ev_drib_nib
, rx_ev_tobe_disc
;
915 bool rx_ev_other_err
, rx_ev_pause_frm
;
916 bool rx_ev_hdr_type
, rx_ev_mcast_pkt
;
917 unsigned rx_ev_pkt_type
;
919 rx_ev_hdr_type
= EFX_QWORD_FIELD(*event
, FSF_AZ_RX_EV_HDR_TYPE
);
920 rx_ev_mcast_pkt
= EFX_QWORD_FIELD(*event
, FSF_AZ_RX_EV_MCAST_PKT
);
921 rx_ev_tobe_disc
= EFX_QWORD_FIELD(*event
, FSF_AZ_RX_EV_TOBE_DISC
);
922 rx_ev_pkt_type
= EFX_QWORD_FIELD(*event
, FSF_AZ_RX_EV_PKT_TYPE
);
923 rx_ev_buf_owner_id_err
= EFX_QWORD_FIELD(*event
,
924 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR
);
925 rx_ev_ip_hdr_chksum_err
= EFX_QWORD_FIELD(*event
,
926 FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR
);
927 rx_ev_tcp_udp_chksum_err
= EFX_QWORD_FIELD(*event
,
928 FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR
);
929 rx_ev_eth_crc_err
= EFX_QWORD_FIELD(*event
, FSF_AZ_RX_EV_ETH_CRC_ERR
);
930 rx_ev_frm_trunc
= EFX_QWORD_FIELD(*event
, FSF_AZ_RX_EV_FRM_TRUNC
);
931 rx_ev_drib_nib
= ((efx_nic_rev(efx
) >= EFX_REV_FALCON_B0
) ?
932 0 : EFX_QWORD_FIELD(*event
, FSF_AA_RX_EV_DRIB_NIB
));
933 rx_ev_pause_frm
= EFX_QWORD_FIELD(*event
, FSF_AZ_RX_EV_PAUSE_FRM_ERR
);
935 /* Every error apart from tobe_disc and pause_frm */
936 rx_ev_other_err
= (rx_ev_drib_nib
| rx_ev_tcp_udp_chksum_err
|
937 rx_ev_buf_owner_id_err
| rx_ev_eth_crc_err
|
938 rx_ev_frm_trunc
| rx_ev_ip_hdr_chksum_err
);
940 /* Count errors that are not in MAC stats. Ignore expected
941 * checksum errors during self-test. */
943 ++channel
->n_rx_frm_trunc
;
944 else if (rx_ev_tobe_disc
)
945 ++channel
->n_rx_tobe_disc
;
946 else if (!efx
->loopback_selftest
) {
947 if (rx_ev_ip_hdr_chksum_err
)
948 ++channel
->n_rx_ip_hdr_chksum_err
;
949 else if (rx_ev_tcp_udp_chksum_err
)
950 ++channel
->n_rx_tcp_udp_chksum_err
;
953 /* TOBE_DISC is expected on unicast mismatches; don't print out an
954 * error message. FRM_TRUNC indicates RXDP dropped the packet due
955 * to a FIFO overflow.
958 if (rx_ev_other_err
&& net_ratelimit()) {
959 netif_dbg(efx
, rx_err
, efx
->net_dev
,
960 " RX queue %d unexpected RX event "
961 EFX_QWORD_FMT
"%s%s%s%s%s%s%s%s\n",
962 efx_rx_queue_index(rx_queue
), EFX_QWORD_VAL(*event
),
963 rx_ev_buf_owner_id_err
? " [OWNER_ID_ERR]" : "",
964 rx_ev_ip_hdr_chksum_err
?
965 " [IP_HDR_CHKSUM_ERR]" : "",
966 rx_ev_tcp_udp_chksum_err
?
967 " [TCP_UDP_CHKSUM_ERR]" : "",
968 rx_ev_eth_crc_err
? " [ETH_CRC_ERR]" : "",
969 rx_ev_frm_trunc
? " [FRM_TRUNC]" : "",
970 rx_ev_drib_nib
? " [DRIB_NIB]" : "",
971 rx_ev_tobe_disc
? " [TOBE_DISC]" : "",
972 rx_ev_pause_frm
? " [PAUSE]" : "");
976 /* The frame must be discarded if any of these are true. */
977 return (rx_ev_eth_crc_err
| rx_ev_frm_trunc
| rx_ev_drib_nib
|
978 rx_ev_tobe_disc
| rx_ev_pause_frm
) ?
979 EFX_RX_PKT_DISCARD
: 0;
982 /* Handle receive events that are not in-order. Return true if this
983 * can be handled as a partial packet discard, false if it's more
987 efx_handle_rx_bad_index(struct efx_rx_queue
*rx_queue
, unsigned index
)
989 struct efx_channel
*channel
= efx_rx_queue_channel(rx_queue
);
990 struct efx_nic
*efx
= rx_queue
->efx
;
991 unsigned expected
, dropped
;
993 if (rx_queue
->scatter_n
&&
994 index
== ((rx_queue
->removed_count
+ rx_queue
->scatter_n
- 1) &
995 rx_queue
->ptr_mask
)) {
996 ++channel
->n_rx_nodesc_trunc
;
1000 expected
= rx_queue
->removed_count
& rx_queue
->ptr_mask
;
1001 dropped
= (index
- expected
) & rx_queue
->ptr_mask
;
1002 netif_info(efx
, rx_err
, efx
->net_dev
,
1003 "dropped %d events (index=%d expected=%d)\n",
1004 dropped
, index
, expected
);
1006 efx_schedule_reset(efx
, EFX_WORKAROUND_5676(efx
) ?
1007 RESET_TYPE_RX_RECOVERY
: RESET_TYPE_DISABLE
);
1011 /* Handle a packet received event
1013 * The NIC gives a "discard" flag if it's a unicast packet with the
1014 * wrong destination address
1015 * Also "is multicast" and "matches multicast filter" flags can be used to
1016 * discard non-matching multicast packets.
1019 efx_handle_rx_event(struct efx_channel
*channel
, const efx_qword_t
*event
)
1021 unsigned int rx_ev_desc_ptr
, rx_ev_byte_cnt
;
1022 unsigned int rx_ev_hdr_type
, rx_ev_mcast_pkt
;
1023 unsigned expected_ptr
;
1024 bool rx_ev_pkt_ok
, rx_ev_sop
, rx_ev_cont
;
1026 struct efx_rx_queue
*rx_queue
;
1027 struct efx_nic
*efx
= channel
->efx
;
1029 if (unlikely(ACCESS_ONCE(efx
->reset_pending
)))
1032 rx_ev_cont
= EFX_QWORD_FIELD(*event
, FSF_AZ_RX_EV_JUMBO_CONT
);
1033 rx_ev_sop
= EFX_QWORD_FIELD(*event
, FSF_AZ_RX_EV_SOP
);
1034 WARN_ON(EFX_QWORD_FIELD(*event
, FSF_AZ_RX_EV_Q_LABEL
) !=
1037 rx_queue
= efx_channel_get_rx_queue(channel
);
1039 rx_ev_desc_ptr
= EFX_QWORD_FIELD(*event
, FSF_AZ_RX_EV_DESC_PTR
);
1040 expected_ptr
= ((rx_queue
->removed_count
+ rx_queue
->scatter_n
) &
1041 rx_queue
->ptr_mask
);
1043 /* Check for partial drops and other errors */
1044 if (unlikely(rx_ev_desc_ptr
!= expected_ptr
) ||
1045 unlikely(rx_ev_sop
!= (rx_queue
->scatter_n
== 0))) {
1046 if (rx_ev_desc_ptr
!= expected_ptr
&&
1047 !efx_handle_rx_bad_index(rx_queue
, rx_ev_desc_ptr
))
1050 /* Discard all pending fragments */
1051 if (rx_queue
->scatter_n
) {
1054 rx_queue
->removed_count
& rx_queue
->ptr_mask
,
1055 rx_queue
->scatter_n
, 0, EFX_RX_PKT_DISCARD
);
1056 rx_queue
->removed_count
+= rx_queue
->scatter_n
;
1057 rx_queue
->scatter_n
= 0;
1060 /* Return if there is no new fragment */
1061 if (rx_ev_desc_ptr
!= expected_ptr
)
1064 /* Discard new fragment if not SOP */
1068 rx_queue
->removed_count
& rx_queue
->ptr_mask
,
1069 1, 0, EFX_RX_PKT_DISCARD
);
1070 ++rx_queue
->removed_count
;
1075 ++rx_queue
->scatter_n
;
1079 rx_ev_byte_cnt
= EFX_QWORD_FIELD(*event
, FSF_AZ_RX_EV_BYTE_CNT
);
1080 rx_ev_pkt_ok
= EFX_QWORD_FIELD(*event
, FSF_AZ_RX_EV_PKT_OK
);
1081 rx_ev_hdr_type
= EFX_QWORD_FIELD(*event
, FSF_AZ_RX_EV_HDR_TYPE
);
1083 if (likely(rx_ev_pkt_ok
)) {
1084 /* If packet is marked as OK then we can rely on the
1085 * hardware checksum and classification.
1088 switch (rx_ev_hdr_type
) {
1089 case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP
:
1090 flags
|= EFX_RX_PKT_TCP
;
1092 case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP
:
1093 flags
|= EFX_RX_PKT_CSUMMED
;
1095 case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER
:
1096 case FSE_AZ_RX_EV_HDR_TYPE_OTHER
:
1100 flags
= efx_handle_rx_not_ok(rx_queue
, event
);
1103 /* Detect multicast packets that didn't match the filter */
1104 rx_ev_mcast_pkt
= EFX_QWORD_FIELD(*event
, FSF_AZ_RX_EV_MCAST_PKT
);
1105 if (rx_ev_mcast_pkt
) {
1106 unsigned int rx_ev_mcast_hash_match
=
1107 EFX_QWORD_FIELD(*event
, FSF_AZ_RX_EV_MCAST_HASH_MATCH
);
1109 if (unlikely(!rx_ev_mcast_hash_match
)) {
1110 ++channel
->n_rx_mcast_mismatch
;
1111 flags
|= EFX_RX_PKT_DISCARD
;
1115 channel
->irq_mod_score
+= 2;
1117 /* Handle received packet */
1118 efx_rx_packet(rx_queue
,
1119 rx_queue
->removed_count
& rx_queue
->ptr_mask
,
1120 rx_queue
->scatter_n
, rx_ev_byte_cnt
, flags
);
1121 rx_queue
->removed_count
+= rx_queue
->scatter_n
;
1122 rx_queue
->scatter_n
= 0;
1125 /* If this flush done event corresponds to a &struct efx_tx_queue, then
1126 * send an %EFX_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue
1127 * of all transmit completions.
1130 efx_handle_tx_flush_done(struct efx_nic
*efx
, efx_qword_t
*event
)
1132 struct efx_tx_queue
*tx_queue
;
1135 qid
= EFX_QWORD_FIELD(*event
, FSF_AZ_DRIVER_EV_SUBDATA
);
1136 if (qid
< EFX_TXQ_TYPES
* efx
->n_tx_channels
) {
1137 tx_queue
= efx_get_tx_queue(efx
, qid
/ EFX_TXQ_TYPES
,
1138 qid
% EFX_TXQ_TYPES
);
1139 if (atomic_cmpxchg(&tx_queue
->flush_outstanding
, 1, 0)) {
1140 efx_magic_event(tx_queue
->channel
,
1141 EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue
));
/* If this flush done event corresponds to a &struct efx_rx_queue: If the flush
 * was successful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add
 * the RX queue back to the mask of RX queues in need of flushing.
 */
1151 efx_handle_rx_flush_done(struct efx_nic
*efx
, efx_qword_t
*event
)
1153 struct efx_channel
*channel
;
1154 struct efx_rx_queue
*rx_queue
;
1158 qid
= EFX_QWORD_FIELD(*event
, FSF_AZ_DRIVER_EV_RX_DESCQ_ID
);
1159 failed
= EFX_QWORD_FIELD(*event
, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL
);
1160 if (qid
>= efx
->n_channels
)
1162 channel
= efx_get_channel(efx
, qid
);
1163 if (!efx_channel_has_rx_queue(channel
))
1165 rx_queue
= efx_channel_get_rx_queue(channel
);
1168 netif_info(efx
, hw
, efx
->net_dev
,
1169 "RXQ %d flush retry\n", qid
);
1170 rx_queue
->flush_pending
= true;
1171 atomic_inc(&efx
->rxq_flush_pending
);
1173 efx_magic_event(efx_rx_queue_channel(rx_queue
),
1174 EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue
));
1176 atomic_dec(&efx
->rxq_flush_outstanding
);
1177 if (efx_flush_wake(efx
))
1178 wake_up(&efx
->flush_wq
);
1182 efx_handle_drain_event(struct efx_channel
*channel
)
1184 struct efx_nic
*efx
= channel
->efx
;
1186 WARN_ON(atomic_read(&efx
->drain_pending
) == 0);
1187 atomic_dec(&efx
->drain_pending
);
1188 if (efx_flush_wake(efx
))
1189 wake_up(&efx
->flush_wq
);
1193 efx_handle_generated_event(struct efx_channel
*channel
, efx_qword_t
*event
)
1195 struct efx_nic
*efx
= channel
->efx
;
1196 struct efx_rx_queue
*rx_queue
=
1197 efx_channel_has_rx_queue(channel
) ?
1198 efx_channel_get_rx_queue(channel
) : NULL
;
1199 unsigned magic
, code
;
1201 magic
= EFX_QWORD_FIELD(*event
, FSF_AZ_DRV_GEN_EV_MAGIC
);
1202 code
= _EFX_CHANNEL_MAGIC_CODE(magic
);
1204 if (magic
== EFX_CHANNEL_MAGIC_TEST(channel
)) {
1205 channel
->event_test_cpu
= raw_smp_processor_id();
1206 } else if (rx_queue
&& magic
== EFX_CHANNEL_MAGIC_FILL(rx_queue
)) {
1207 /* The queue must be empty, so we won't receive any rx
1208 * events, so efx_process_channel() won't refill the
1209 * queue. Refill it here */
1210 efx_fast_push_rx_descriptors(rx_queue
);
1211 } else if (rx_queue
&& magic
== EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue
)) {
1212 rx_queue
->enabled
= false;
1213 efx_handle_drain_event(channel
);
1214 } else if (code
== _EFX_CHANNEL_MAGIC_TX_DRAIN
) {
1215 efx_handle_drain_event(channel
);
1217 netif_dbg(efx
, hw
, efx
->net_dev
, "channel %d received "
1218 "generated event "EFX_QWORD_FMT
"\n",
1219 channel
->channel
, EFX_QWORD_VAL(*event
));
1224 efx_handle_driver_event(struct efx_channel
*channel
, efx_qword_t
*event
)
1226 struct efx_nic
*efx
= channel
->efx
;
1227 unsigned int ev_sub_code
;
1228 unsigned int ev_sub_data
;
1230 ev_sub_code
= EFX_QWORD_FIELD(*event
, FSF_AZ_DRIVER_EV_SUBCODE
);
1231 ev_sub_data
= EFX_QWORD_FIELD(*event
, FSF_AZ_DRIVER_EV_SUBDATA
);
1233 switch (ev_sub_code
) {
1234 case FSE_AZ_TX_DESCQ_FLS_DONE_EV
:
1235 netif_vdbg(efx
, hw
, efx
->net_dev
, "channel %d TXQ %d flushed\n",
1236 channel
->channel
, ev_sub_data
);
1237 efx_handle_tx_flush_done(efx
, event
);
1238 efx_sriov_tx_flush_done(efx
, event
);
1240 case FSE_AZ_RX_DESCQ_FLS_DONE_EV
:
1241 netif_vdbg(efx
, hw
, efx
->net_dev
, "channel %d RXQ %d flushed\n",
1242 channel
->channel
, ev_sub_data
);
1243 efx_handle_rx_flush_done(efx
, event
);
1244 efx_sriov_rx_flush_done(efx
, event
);
1246 case FSE_AZ_EVQ_INIT_DONE_EV
:
1247 netif_dbg(efx
, hw
, efx
->net_dev
,
1248 "channel %d EVQ %d initialised\n",
1249 channel
->channel
, ev_sub_data
);
1251 case FSE_AZ_SRM_UPD_DONE_EV
:
1252 netif_vdbg(efx
, hw
, efx
->net_dev
,
1253 "channel %d SRAM update done\n", channel
->channel
);
1255 case FSE_AZ_WAKE_UP_EV
:
1256 netif_vdbg(efx
, hw
, efx
->net_dev
,
1257 "channel %d RXQ %d wakeup event\n",
1258 channel
->channel
, ev_sub_data
);
1260 case FSE_AZ_TIMER_EV
:
1261 netif_vdbg(efx
, hw
, efx
->net_dev
,
1262 "channel %d RX queue %d timer expired\n",
1263 channel
->channel
, ev_sub_data
);
1265 case FSE_AA_RX_RECOVER_EV
:
1266 netif_err(efx
, rx_err
, efx
->net_dev
,
1267 "channel %d seen DRIVER RX_RESET event. "
1268 "Resetting.\n", channel
->channel
);
1269 atomic_inc(&efx
->rx_reset
);
1270 efx_schedule_reset(efx
,
1271 EFX_WORKAROUND_6555(efx
) ?
1272 RESET_TYPE_RX_RECOVERY
:
1273 RESET_TYPE_DISABLE
);
1275 case FSE_BZ_RX_DSC_ERROR_EV
:
1276 if (ev_sub_data
< EFX_VI_BASE
) {
1277 netif_err(efx
, rx_err
, efx
->net_dev
,
1278 "RX DMA Q %d reports descriptor fetch error."
1279 " RX Q %d is disabled.\n", ev_sub_data
,
1281 efx_schedule_reset(efx
, RESET_TYPE_RX_DESC_FETCH
);
1283 efx_sriov_desc_fetch_err(efx
, ev_sub_data
);
1285 case FSE_BZ_TX_DSC_ERROR_EV
:
1286 if (ev_sub_data
< EFX_VI_BASE
) {
1287 netif_err(efx
, tx_err
, efx
->net_dev
,
1288 "TX DMA Q %d reports descriptor fetch error."
1289 " TX Q %d is disabled.\n", ev_sub_data
,
1291 efx_schedule_reset(efx
, RESET_TYPE_TX_DESC_FETCH
);
1293 efx_sriov_desc_fetch_err(efx
, ev_sub_data
);
1296 netif_vdbg(efx
, hw
, efx
->net_dev
,
1297 "channel %d unknown driver event code %d "
1298 "data %04x\n", channel
->channel
, ev_sub_code
,
1304 int efx_nic_process_eventq(struct efx_channel
*channel
, int budget
)
1306 struct efx_nic
*efx
= channel
->efx
;
1307 unsigned int read_ptr
;
1308 efx_qword_t event
, *p_event
;
1313 read_ptr
= channel
->eventq_read_ptr
;
1316 p_event
= efx_event(channel
, read_ptr
);
1319 if (!efx_event_present(&event
))
1323 netif_vdbg(channel
->efx
, intr
, channel
->efx
->net_dev
,
1324 "channel %d event is "EFX_QWORD_FMT
"\n",
1325 channel
->channel
, EFX_QWORD_VAL(event
));
1327 /* Clear this event by marking it all ones */
1328 EFX_SET_QWORD(*p_event
);
1332 ev_code
= EFX_QWORD_FIELD(event
, FSF_AZ_EV_CODE
);
1335 case FSE_AZ_EV_CODE_RX_EV
:
1336 efx_handle_rx_event(channel
, &event
);
1337 if (++spent
== budget
)
1340 case FSE_AZ_EV_CODE_TX_EV
:
1341 tx_packets
+= efx_handle_tx_event(channel
, &event
);
1342 if (tx_packets
> efx
->txq_entries
) {
1347 case FSE_AZ_EV_CODE_DRV_GEN_EV
:
1348 efx_handle_generated_event(channel
, &event
);
1350 case FSE_AZ_EV_CODE_DRIVER_EV
:
1351 efx_handle_driver_event(channel
, &event
);
1353 case FSE_CZ_EV_CODE_USER_EV
:
1354 efx_sriov_event(channel
, &event
);
1356 case FSE_CZ_EV_CODE_MCDI_EV
:
1357 efx_mcdi_process_event(channel
, &event
);
1359 case FSE_AZ_EV_CODE_GLOBAL_EV
:
1360 if (efx
->type
->handle_global_event
&&
1361 efx
->type
->handle_global_event(channel
, &event
))
1363 /* else fall through */
1365 netif_err(channel
->efx
, hw
, channel
->efx
->net_dev
,
1366 "channel %d unknown event type %d (data "
1367 EFX_QWORD_FMT
")\n", channel
->channel
,
1368 ev_code
, EFX_QWORD_VAL(event
));
1373 channel
->eventq_read_ptr
= read_ptr
;
1377 /* Check whether an event is present in the eventq at the current
1378 * read pointer. Only useful for self-test.
1380 bool efx_nic_event_present(struct efx_channel
*channel
)
1382 return efx_event_present(efx_event(channel
, channel
->eventq_read_ptr
));
1385 /* Allocate buffer table entries for event queue */
1386 int efx_nic_probe_eventq(struct efx_channel
*channel
)
1388 struct efx_nic
*efx
= channel
->efx
;
1391 entries
= channel
->eventq_mask
+ 1;
1392 return efx_alloc_special_buffer(efx
, &channel
->eventq
,
1393 entries
* sizeof(efx_qword_t
));
1396 void efx_nic_init_eventq(struct efx_channel
*channel
)
1399 struct efx_nic
*efx
= channel
->efx
;
1401 netif_dbg(efx
, hw
, efx
->net_dev
,
1402 "channel %d event queue in special buffers %d-%d\n",
1403 channel
->channel
, channel
->eventq
.index
,
1404 channel
->eventq
.index
+ channel
->eventq
.entries
- 1);
1406 if (efx_nic_rev(efx
) >= EFX_REV_SIENA_A0
) {
1407 EFX_POPULATE_OWORD_3(reg
,
1408 FRF_CZ_TIMER_Q_EN
, 1,
1409 FRF_CZ_HOST_NOTIFY_MODE
, 0,
1410 FRF_CZ_TIMER_MODE
, FFE_CZ_TIMER_MODE_DIS
);
1411 efx_writeo_table(efx
, ®
, FR_BZ_TIMER_TBL
, channel
->channel
);
1414 /* Pin event queue buffer */
1415 efx_init_special_buffer(efx
, &channel
->eventq
);
1417 /* Fill event queue with all ones (i.e. empty events) */
1418 memset(channel
->eventq
.addr
, 0xff, channel
->eventq
.len
);
1420 /* Push event queue to card */
1421 EFX_POPULATE_OWORD_3(reg
,
1423 FRF_AZ_EVQ_SIZE
, __ffs(channel
->eventq
.entries
),
1424 FRF_AZ_EVQ_BUF_BASE_ID
, channel
->eventq
.index
);
1425 efx_writeo_table(efx
, ®
, efx
->type
->evq_ptr_tbl_base
,
1428 efx
->type
->push_irq_moderation(channel
);
1431 void efx_nic_fini_eventq(struct efx_channel
*channel
)
1434 struct efx_nic
*efx
= channel
->efx
;
1436 /* Remove event queue from card */
1437 EFX_ZERO_OWORD(reg
);
1438 efx_writeo_table(efx
, ®
, efx
->type
->evq_ptr_tbl_base
,
1440 if (efx_nic_rev(efx
) >= EFX_REV_SIENA_A0
)
1441 efx_writeo_table(efx
, ®
, FR_BZ_TIMER_TBL
, channel
->channel
);
1443 /* Unpin event queue */
1444 efx_fini_special_buffer(efx
, &channel
->eventq
);
1447 /* Free buffers backing event queue */
1448 void efx_nic_remove_eventq(struct efx_channel
*channel
)
1450 efx_free_special_buffer(channel
->efx
, &channel
->eventq
);
1454 void efx_nic_event_test_start(struct efx_channel
*channel
)
1456 channel
->event_test_cpu
= -1;
1458 efx_magic_event(channel
, EFX_CHANNEL_MAGIC_TEST(channel
));
1461 void efx_nic_generate_fill_event(struct efx_rx_queue
*rx_queue
)
1463 efx_magic_event(efx_rx_queue_channel(rx_queue
),
1464 EFX_CHANNEL_MAGIC_FILL(rx_queue
));
1467 /**************************************************************************
1469 * Hardware interrupts
1470 * The hardware interrupt handler does very little work; all the event
1471 * queue processing is carried out by per-channel tasklets.
1473 **************************************************************************/
1475 /* Enable/disable/generate interrupts */
1476 static inline void efx_nic_interrupts(struct efx_nic
*efx
,
1477 bool enabled
, bool force
)
1479 efx_oword_t int_en_reg_ker
;
1481 EFX_POPULATE_OWORD_3(int_en_reg_ker
,
1482 FRF_AZ_KER_INT_LEVE_SEL
, efx
->irq_level
,
1483 FRF_AZ_KER_INT_KER
, force
,
1484 FRF_AZ_DRV_INT_EN_KER
, enabled
);
1485 efx_writeo(efx
, &int_en_reg_ker
, FR_AZ_INT_EN_KER
);
1488 void efx_nic_enable_interrupts(struct efx_nic
*efx
)
1490 EFX_ZERO_OWORD(*((efx_oword_t
*) efx
->irq_status
.addr
));
1491 wmb(); /* Ensure interrupt vector is clear before interrupts enabled */
1493 efx_nic_interrupts(efx
, true, false);
1496 void efx_nic_disable_interrupts(struct efx_nic
*efx
)
1498 /* Disable interrupts */
1499 efx_nic_interrupts(efx
, false, false);
1502 /* Generate a test interrupt
1503 * Interrupt must already have been enabled, otherwise nasty things
1506 void efx_nic_irq_test_start(struct efx_nic
*efx
)
1508 efx
->last_irq_cpu
= -1;
1510 efx_nic_interrupts(efx
, true, true);
1513 /* Process a fatal interrupt
1514 * Disable bus mastering ASAP and schedule a reset
1516 irqreturn_t
efx_nic_fatal_interrupt(struct efx_nic
*efx
)
1518 struct falcon_nic_data
*nic_data
= efx
->nic_data
;
1519 efx_oword_t
*int_ker
= efx
->irq_status
.addr
;
1520 efx_oword_t fatal_intr
;
1521 int error
, mem_perr
;
1523 efx_reado(efx
, &fatal_intr
, FR_AZ_FATAL_INTR_KER
);
1524 error
= EFX_OWORD_FIELD(fatal_intr
, FRF_AZ_FATAL_INTR
);
1526 netif_err(efx
, hw
, efx
->net_dev
, "SYSTEM ERROR "EFX_OWORD_FMT
" status "
1527 EFX_OWORD_FMT
": %s\n", EFX_OWORD_VAL(*int_ker
),
1528 EFX_OWORD_VAL(fatal_intr
),
1529 error
? "disabling bus mastering" : "no recognised error");
1531 /* If this is a memory parity error dump which blocks are offending */
1532 mem_perr
= (EFX_OWORD_FIELD(fatal_intr
, FRF_AZ_MEM_PERR_INT_KER
) ||
1533 EFX_OWORD_FIELD(fatal_intr
, FRF_AZ_SRM_PERR_INT_KER
));
1536 efx_reado(efx
, ®
, FR_AZ_MEM_STAT
);
1537 netif_err(efx
, hw
, efx
->net_dev
,
1538 "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT
"\n",
1539 EFX_OWORD_VAL(reg
));
1542 /* Disable both devices */
1543 pci_clear_master(efx
->pci_dev
);
1544 if (efx_nic_is_dual_func(efx
))
1545 pci_clear_master(nic_data
->pci_dev2
);
1546 efx_nic_disable_interrupts(efx
);
1548 /* Count errors and reset or disable the NIC accordingly */
1549 if (efx
->int_error_count
== 0 ||
1550 time_after(jiffies
, efx
->int_error_expire
)) {
1551 efx
->int_error_count
= 0;
1552 efx
->int_error_expire
=
1553 jiffies
+ EFX_INT_ERROR_EXPIRE
* HZ
;
1555 if (++efx
->int_error_count
< EFX_MAX_INT_ERRORS
) {
1556 netif_err(efx
, hw
, efx
->net_dev
,
1557 "SYSTEM ERROR - reset scheduled\n");
1558 efx_schedule_reset(efx
, RESET_TYPE_INT_ERROR
);
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR - max number of errors seen."
			  " NIC will be disabled\n");
1563 efx_schedule_reset(efx
, RESET_TYPE_DISABLE
);
1569 /* Handle a legacy interrupt
1570 * Acknowledges the interrupt and schedule event queue processing.
1572 static irqreturn_t
efx_legacy_interrupt(int irq
, void *dev_id
)
1574 struct efx_nic
*efx
= dev_id
;
1575 efx_oword_t
*int_ker
= efx
->irq_status
.addr
;
1576 irqreturn_t result
= IRQ_NONE
;
1577 struct efx_channel
*channel
;
1582 /* Could this be ours? If interrupts are disabled then the
1583 * channel state may not be valid.
1585 if (!efx
->legacy_irq_enabled
)
1588 /* Read the ISR which also ACKs the interrupts */
1589 efx_readd(efx
, ®
, FR_BZ_INT_ISR0
);
1590 queues
= EFX_EXTRACT_DWORD(reg
, 0, 31);
1592 /* Legacy interrupts are disabled too late by the EEH kernel
1593 * code. Disable them earlier.
1594 * If an EEH error occurred, the read will have returned all ones.
1596 if (EFX_DWORD_IS_ALL_ONES(reg
) && efx_try_recovery(efx
) &&
1597 !efx
->eeh_disabled_legacy_irq
) {
1598 disable_irq_nosync(efx
->legacy_irq
);
1599 efx
->eeh_disabled_legacy_irq
= true;
1602 /* Handle non-event-queue sources */
1603 if (queues
& (1U << efx
->irq_level
)) {
1604 syserr
= EFX_OWORD_FIELD(*int_ker
, FSF_AZ_NET_IVEC_FATAL_INT
);
1605 if (unlikely(syserr
))
1606 return efx_nic_fatal_interrupt(efx
);
1607 efx
->last_irq_cpu
= raw_smp_processor_id();
1611 if (EFX_WORKAROUND_15783(efx
))
1612 efx
->irq_zero_count
= 0;
1614 /* Schedule processing of any interrupting queues */
1615 efx_for_each_channel(channel
, efx
) {
1617 efx_schedule_channel_irq(channel
);
1620 result
= IRQ_HANDLED
;
1622 } else if (EFX_WORKAROUND_15783(efx
)) {
1625 /* We can't return IRQ_HANDLED more than once on seeing ISR=0
1626 * because this might be a shared interrupt. */
1627 if (efx
->irq_zero_count
++ == 0)
1628 result
= IRQ_HANDLED
;
1630 /* Ensure we schedule or rearm all event queues */
1631 efx_for_each_channel(channel
, efx
) {
1632 event
= efx_event(channel
, channel
->eventq_read_ptr
);
1633 if (efx_event_present(event
))
1634 efx_schedule_channel_irq(channel
);
1636 efx_nic_eventq_read_ack(channel
);
1640 if (result
== IRQ_HANDLED
)
1641 netif_vdbg(efx
, intr
, efx
->net_dev
,
1642 "IRQ %d on CPU %d status " EFX_DWORD_FMT
"\n",
1643 irq
, raw_smp_processor_id(), EFX_DWORD_VAL(reg
));
1648 /* Handle an MSI interrupt
1650 * Handle an MSI hardware interrupt. This routine schedules event
1651 * queue processing. No interrupt acknowledgement cycle is necessary.
1652 * Also, we never need to check that the interrupt is for us, since
1653 * MSI interrupts cannot be shared.
1655 static irqreturn_t
efx_msi_interrupt(int irq
, void *dev_id
)
1657 struct efx_channel
*channel
= *(struct efx_channel
**)dev_id
;
1658 struct efx_nic
*efx
= channel
->efx
;
1659 efx_oword_t
*int_ker
= efx
->irq_status
.addr
;
1662 netif_vdbg(efx
, intr
, efx
->net_dev
,
1663 "IRQ %d on CPU %d status " EFX_OWORD_FMT
"\n",
1664 irq
, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker
));
1666 /* Handle non-event-queue sources */
1667 if (channel
->channel
== efx
->irq_level
) {
1668 syserr
= EFX_OWORD_FIELD(*int_ker
, FSF_AZ_NET_IVEC_FATAL_INT
);
1669 if (unlikely(syserr
))
1670 return efx_nic_fatal_interrupt(efx
);
1671 efx
->last_irq_cpu
= raw_smp_processor_id();
1674 /* Schedule processing of the channel */
1675 efx_schedule_channel_irq(channel
);
1681 /* Setup RSS indirection table.
1682 * This maps from the hash value of the packet to RXQ
1684 void efx_nic_push_rx_indir_table(struct efx_nic
*efx
)
1689 if (efx_nic_rev(efx
) < EFX_REV_FALCON_B0
)
1692 BUILD_BUG_ON(ARRAY_SIZE(efx
->rx_indir_table
) !=
1693 FR_BZ_RX_INDIRECTION_TBL_ROWS
);
1695 for (i
= 0; i
< FR_BZ_RX_INDIRECTION_TBL_ROWS
; i
++) {
1696 EFX_POPULATE_DWORD_1(dword
, FRF_BZ_IT_QUEUE
,
1697 efx
->rx_indir_table
[i
]);
1698 efx_writed(efx
, &dword
,
1699 FR_BZ_RX_INDIRECTION_TBL
+
1700 FR_BZ_RX_INDIRECTION_TBL_STEP
* i
);
1704 /* Hook interrupt handler(s)
1705 * Try MSI and then legacy interrupts.
1707 int efx_nic_init_interrupt(struct efx_nic
*efx
)
1709 struct efx_channel
*channel
;
1710 unsigned int n_irqs
;
1713 if (!EFX_INT_MODE_USE_MSI(efx
)) {
1714 irq_handler_t handler
;
1715 if (efx_nic_rev(efx
) >= EFX_REV_FALCON_B0
)
1716 handler
= efx_legacy_interrupt
;
1718 handler
= falcon_legacy_interrupt_a1
;
1720 rc
= request_irq(efx
->legacy_irq
, handler
, IRQF_SHARED
,
1723 netif_err(efx
, drv
, efx
->net_dev
,
1724 "failed to hook legacy IRQ %d\n",
1731 #ifdef CONFIG_RFS_ACCEL
1732 if (efx
->interrupt_mode
== EFX_INT_MODE_MSIX
) {
1733 efx
->net_dev
->rx_cpu_rmap
=
1734 alloc_irq_cpu_rmap(efx
->n_rx_channels
);
1735 if (!efx
->net_dev
->rx_cpu_rmap
) {
1742 /* Hook MSI or MSI-X interrupt */
1744 efx_for_each_channel(channel
, efx
) {
1745 rc
= request_irq(channel
->irq
, efx_msi_interrupt
,
1746 IRQF_PROBE_SHARED
, /* Not shared */
1747 efx
->channel_name
[channel
->channel
],
1748 &efx
->channel
[channel
->channel
]);
1750 netif_err(efx
, drv
, efx
->net_dev
,
1751 "failed to hook IRQ %d\n", channel
->irq
);
1756 #ifdef CONFIG_RFS_ACCEL
1757 if (efx
->interrupt_mode
== EFX_INT_MODE_MSIX
&&
1758 channel
->channel
< efx
->n_rx_channels
) {
1759 rc
= irq_cpu_rmap_add(efx
->net_dev
->rx_cpu_rmap
,
1770 #ifdef CONFIG_RFS_ACCEL
1771 free_irq_cpu_rmap(efx
->net_dev
->rx_cpu_rmap
);
1772 efx
->net_dev
->rx_cpu_rmap
= NULL
;
1774 efx_for_each_channel(channel
, efx
) {
1777 free_irq(channel
->irq
, &efx
->channel
[channel
->channel
]);
1783 void efx_nic_fini_interrupt(struct efx_nic
*efx
)
1785 struct efx_channel
*channel
;
1788 #ifdef CONFIG_RFS_ACCEL
1789 free_irq_cpu_rmap(efx
->net_dev
->rx_cpu_rmap
);
1790 efx
->net_dev
->rx_cpu_rmap
= NULL
;
1793 /* Disable MSI/MSI-X interrupts */
1794 efx_for_each_channel(channel
, efx
)
1795 free_irq(channel
->irq
, &efx
->channel
[channel
->channel
]);
1797 /* ACK legacy interrupt */
1798 if (efx_nic_rev(efx
) >= EFX_REV_FALCON_B0
)
1799 efx_reado(efx
, ®
, FR_BZ_INT_ISR0
);
1801 falcon_irq_ack_a1(efx
);
1803 /* Disable legacy interrupt */
1804 if (efx
->legacy_irq
)
1805 free_irq(efx
->legacy_irq
, efx
);
1808 /* Looks at available SRAM resources and works out how many queues we
1809 * can support, and where things like descriptor caches should live.
1811 * SRAM is split up as follows:
1812 * 0 buftbl entries for channels
1813 * efx->vf_buftbl_base buftbl entries for SR-IOV
1814 * efx->rx_dc_base RX descriptor caches
1815 * efx->tx_dc_base TX descriptor caches
1817 void efx_nic_dimension_resources(struct efx_nic
*efx
, unsigned sram_lim_qw
)
1819 unsigned vi_count
, buftbl_min
;
1821 /* Account for the buffer table entries backing the datapath channels
1822 * and the descriptor caches for those channels.
1824 buftbl_min
= ((efx
->n_rx_channels
* EFX_MAX_DMAQ_SIZE
+
1825 efx
->n_tx_channels
* EFX_TXQ_TYPES
* EFX_MAX_DMAQ_SIZE
+
1826 efx
->n_channels
* EFX_MAX_EVQ_SIZE
)
1827 * sizeof(efx_qword_t
) / EFX_BUF_SIZE
);
1828 vi_count
= max(efx
->n_channels
, efx
->n_tx_channels
* EFX_TXQ_TYPES
);
1830 #ifdef CONFIG_SFC_SRIOV
1831 if (efx_sriov_wanted(efx
)) {
1832 unsigned vi_dc_entries
, buftbl_free
, entries_per_vf
, vf_limit
;
1834 efx
->vf_buftbl_base
= buftbl_min
;
1836 vi_dc_entries
= RX_DC_ENTRIES
+ TX_DC_ENTRIES
;
1837 vi_count
= max(vi_count
, EFX_VI_BASE
);
1838 buftbl_free
= (sram_lim_qw
- buftbl_min
-
1839 vi_count
* vi_dc_entries
);
1841 entries_per_vf
= ((vi_dc_entries
+ EFX_VF_BUFTBL_PER_VI
) *
1843 vf_limit
= min(buftbl_free
/ entries_per_vf
,
1844 (1024U - EFX_VI_BASE
) >> efx
->vi_scale
);
1846 if (efx
->vf_count
> vf_limit
) {
			netif_err(efx, probe, efx->net_dev,
				  "Reducing VF count from %d to %d\n",
				  efx->vf_count, vf_limit);
1850 efx
->vf_count
= vf_limit
;
1852 vi_count
+= efx
->vf_count
* efx_vf_size(efx
);
1856 efx
->tx_dc_base
= sram_lim_qw
- vi_count
* TX_DC_ENTRIES
;
1857 efx
->rx_dc_base
= efx
->tx_dc_base
- vi_count
* RX_DC_ENTRIES
;
1860 u32
efx_nic_fpga_ver(struct efx_nic
*efx
)
1862 efx_oword_t altera_build
;
1863 efx_reado(efx
, &altera_build
, FR_AZ_ALTERA_BUILD
);
1864 return EFX_OWORD_FIELD(altera_build
, FRF_AZ_ALTERA_BUILD_VER
);
1867 void efx_nic_init_common(struct efx_nic
*efx
)
1871 /* Set positions of descriptor caches in SRAM. */
1872 EFX_POPULATE_OWORD_1(temp
, FRF_AZ_SRM_TX_DC_BASE_ADR
, efx
->tx_dc_base
);
1873 efx_writeo(efx
, &temp
, FR_AZ_SRM_TX_DC_CFG
);
1874 EFX_POPULATE_OWORD_1(temp
, FRF_AZ_SRM_RX_DC_BASE_ADR
, efx
->rx_dc_base
);
1875 efx_writeo(efx
, &temp
, FR_AZ_SRM_RX_DC_CFG
);
1877 /* Set TX descriptor cache size. */
1878 BUILD_BUG_ON(TX_DC_ENTRIES
!= (8 << TX_DC_ENTRIES_ORDER
));
1879 EFX_POPULATE_OWORD_1(temp
, FRF_AZ_TX_DC_SIZE
, TX_DC_ENTRIES_ORDER
);
1880 efx_writeo(efx
, &temp
, FR_AZ_TX_DC_CFG
);
1882 /* Set RX descriptor cache size. Set low watermark to size-8, as
1883 * this allows most efficient prefetching.
1885 BUILD_BUG_ON(RX_DC_ENTRIES
!= (8 << RX_DC_ENTRIES_ORDER
));
1886 EFX_POPULATE_OWORD_1(temp
, FRF_AZ_RX_DC_SIZE
, RX_DC_ENTRIES_ORDER
);
1887 efx_writeo(efx
, &temp
, FR_AZ_RX_DC_CFG
);
1888 EFX_POPULATE_OWORD_1(temp
, FRF_AZ_RX_DC_PF_LWM
, RX_DC_ENTRIES
- 8);
1889 efx_writeo(efx
, &temp
, FR_AZ_RX_DC_PF_WM
);
1891 /* Program INT_KER address */
1892 EFX_POPULATE_OWORD_2(temp
,
1893 FRF_AZ_NORM_INT_VEC_DIS_KER
,
1894 EFX_INT_MODE_USE_MSI(efx
),
1895 FRF_AZ_INT_ADR_KER
, efx
->irq_status
.dma_addr
);
1896 efx_writeo(efx
, &temp
, FR_AZ_INT_ADR_KER
);
1898 if (EFX_WORKAROUND_17213(efx
) && !EFX_INT_MODE_USE_MSI(efx
))
1899 /* Use an interrupt level unused by event queues */
1900 efx
->irq_level
= 0x1f;
1902 /* Use a valid MSI-X vector */
1905 /* Enable all the genuinely fatal interrupts. (They are still
1906 * masked by the overall interrupt mask, controlled by
1907 * falcon_interrupts()).
1909 * Note: All other fatal interrupts are enabled
1911 EFX_POPULATE_OWORD_3(temp
,
1912 FRF_AZ_ILL_ADR_INT_KER_EN
, 1,
1913 FRF_AZ_RBUF_OWN_INT_KER_EN
, 1,
1914 FRF_AZ_TBUF_OWN_INT_KER_EN
, 1);
1915 if (efx_nic_rev(efx
) >= EFX_REV_SIENA_A0
)
1916 EFX_SET_OWORD_FIELD(temp
, FRF_CZ_SRAM_PERR_INT_P_KER_EN
, 1);
1917 EFX_INVERT_OWORD(temp
);
1918 efx_writeo(efx
, &temp
, FR_AZ_FATAL_INTR_KER
);

	efx_nic_push_rx_indir_table(efx);

	/* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
	 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
	 */
	efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
	/* Enable SW_EV to inherit in char driver - assume harmless here */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
	/* Prefetch threshold 2 => fetch when descriptor cache half empty */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
	/* Disable hardware watchdog which can misfire */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
	/* Squash TX of packets of 16 bytes or less */
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
		EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
	efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		EFX_POPULATE_OWORD_4(temp,
				     /* Default values */
				     FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
				     FRF_BZ_TX_PACE_SB_AF, 0xb,
				     FRF_BZ_TX_PACE_FB_BASE, 0,
				     /* Allow large pace values in the
				      * fast bin. */
				     FRF_BZ_TX_PACE_BIN_TH,
				     FFE_BZ_TX_PACE_RESERVED);
		efx_writeo(efx, &temp, FR_BZ_TX_PACE);
	}
}

#define REGISTER_REVISION_A	1
#define REGISTER_REVISION_B	2
#define REGISTER_REVISION_C	3
#define REGISTER_REVISION_Z	3	/* latest revision */

struct efx_nic_reg {
	u32 offset:24;
	u32 min_revision:2, max_revision:2;
};

#define REGISTER(name, min_rev, max_rev) {				\
	FR_ ## min_rev ## max_rev ## _ ## name,				\
	REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev	\
}
#define REGISTER_AA(name) REGISTER(name, A, A)
#define REGISTER_AB(name) REGISTER(name, A, B)
#define REGISTER_AZ(name) REGISTER(name, A, Z)
#define REGISTER_BB(name) REGISTER(name, B, B)
#define REGISTER_BZ(name) REGISTER(name, B, Z)
#define REGISTER_CZ(name) REGISTER(name, C, Z)
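
/* Illustrative expansion only, showing the token pasting above and nothing
 * new: REGISTER_BZ(DP_CTRL) expands to
 *	{ FR_BZ_DP_CTRL, REGISTER_REVISION_B, REGISTER_REVISION_Z }
 * i.e. the register offset plus the first and last hardware revisions
 * (B through Z) on which the register is included in a register dump.
 */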

static const struct efx_nic_reg efx_nic_regs[] = {
	REGISTER_AZ(ADR_REGION),
	REGISTER_AZ(INT_EN_KER),
	REGISTER_BZ(INT_EN_CHAR),
	REGISTER_AZ(INT_ADR_KER),
	REGISTER_BZ(INT_ADR_CHAR),
	/* INT_ACK_KER is WO */
	/* INT_ISR0 is RC */
	REGISTER_AZ(HW_INIT),
	REGISTER_CZ(USR_EV_CFG),
	REGISTER_AB(EE_SPI_HCMD),
	REGISTER_AB(EE_SPI_HADR),
	REGISTER_AB(EE_SPI_HDATA),
	REGISTER_AB(EE_BASE_PAGE),
	REGISTER_AB(EE_VPD_CFG0),
	/* EE_VPD_SW_CNTL and EE_VPD_SW_DATA are not used */
	/* PMBX_DBG_IADDR and PBMX_DBG_IDATA are indirect */
	/* PCIE_CORE_INDIRECT is indirect */
	REGISTER_AB(NIC_STAT),
	REGISTER_AB(GPIO_CTL),
	REGISTER_AB(GLB_CTL),
	/* FATAL_INTR_KER and FATAL_INTR_CHAR are partly RC */
	REGISTER_BZ(DP_CTRL),
	REGISTER_AZ(MEM_STAT),
	REGISTER_AZ(CS_DEBUG),
	REGISTER_AZ(ALTERA_BUILD),
	REGISTER_AZ(CSR_SPARE),
	REGISTER_AB(PCIE_SD_CTL0123),
	REGISTER_AB(PCIE_SD_CTL45),
	REGISTER_AB(PCIE_PCS_CTL_STAT),
	/* DEBUG_DATA_OUT is not used */
	REGISTER_AZ(EVQ_CTL),
	REGISTER_AZ(EVQ_CNT1),
	REGISTER_AZ(EVQ_CNT2),
	REGISTER_AZ(BUF_TBL_CFG),
	REGISTER_AZ(SRM_RX_DC_CFG),
	REGISTER_AZ(SRM_TX_DC_CFG),
	REGISTER_AZ(SRM_CFG),
	/* BUF_TBL_UPD is WO */
	REGISTER_AZ(SRM_UPD_EVQ),
	REGISTER_AZ(SRAM_PARITY),
	REGISTER_AZ(RX_CFG),
	REGISTER_BZ(RX_FILTER_CTL),
	/* RX_FLUSH_DESCQ is WO */
	REGISTER_AZ(RX_DC_CFG),
	REGISTER_AZ(RX_DC_PF_WM),
	REGISTER_BZ(RX_RSS_TKEY),
	/* RX_NODESC_DROP is RC */
	REGISTER_AA(RX_SELF_RST),
	/* RX_DEBUG, RX_PUSH_DROP are not used */
	REGISTER_CZ(RX_RSS_IPV6_REG1),
	REGISTER_CZ(RX_RSS_IPV6_REG2),
	REGISTER_CZ(RX_RSS_IPV6_REG3),
	/* TX_FLUSH_DESCQ is WO */
	REGISTER_AZ(TX_DC_CFG),
	REGISTER_AA(TX_CHKSM_CFG),
	REGISTER_AZ(TX_CFG),
	/* TX_PUSH_DROP is not used */
	REGISTER_AZ(TX_RESERVED),
	REGISTER_BZ(TX_PACE),
	/* TX_PACE_DROP_QID is RC */
	REGISTER_BB(TX_VLAN),
	REGISTER_BZ(TX_IPFIL_PORTEN),
	REGISTER_AB(MD_TXD),
	REGISTER_AB(MD_RXD),
	REGISTER_AB(MD_PHY_ADR),
	REGISTER_AB(MAC_STAT_DMA),
	REGISTER_AB(MAC_CTRL),
	REGISTER_BB(GEN_MODE),
	REGISTER_AB(MAC_MC_HASH_REG0),
	REGISTER_AB(MAC_MC_HASH_REG1),
	REGISTER_AB(GM_CFG1),
	REGISTER_AB(GM_CFG2),
	/* GM_IPG and GM_HD are not used */
	REGISTER_AB(GM_MAX_FLEN),
	/* GM_TEST is not used */
	REGISTER_AB(GM_ADR1),
	REGISTER_AB(GM_ADR2),
	REGISTER_AB(GMF_CFG0),
	REGISTER_AB(GMF_CFG1),
	REGISTER_AB(GMF_CFG2),
	REGISTER_AB(GMF_CFG3),
	REGISTER_AB(GMF_CFG4),
	REGISTER_AB(GMF_CFG5),
	REGISTER_BB(TX_SRC_MAC_CTL),
	REGISTER_AB(XM_ADR_LO),
	REGISTER_AB(XM_ADR_HI),
	REGISTER_AB(XM_GLB_CFG),
	REGISTER_AB(XM_TX_CFG),
	REGISTER_AB(XM_RX_CFG),
	REGISTER_AB(XM_MGT_INT_MASK),
	REGISTER_AB(XM_PAUSE_TIME),
	REGISTER_AB(XM_TX_PARAM),
	REGISTER_AB(XM_RX_PARAM),
	/* XM_MGT_INT_MSK (note no 'A') is RC */
	REGISTER_AB(XX_PWR_RST),
	REGISTER_AB(XX_SD_CTL),
	REGISTER_AB(XX_TXDRV_CTL),
	/* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */
	/* XX_CORE_STAT is partly RC */
};

struct efx_nic_reg_table {
	u32 offset:24;
	u32 min_revision:2, max_revision:2;
	u32 step:6, rows:21;
};

#define REGISTER_TABLE_DIMENSIONS(_, offset, min_rev, max_rev, step, rows) { \
	offset,								\
	REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev,	\
	step, rows							\
}
#define REGISTER_TABLE(name, min_rev, max_rev)				\
	REGISTER_TABLE_DIMENSIONS(					\
		name, FR_ ## min_rev ## max_rev ## _ ## name,		\
		min_rev, max_rev,					\
		FR_ ## min_rev ## max_rev ## _ ## name ## _STEP,	\
		FR_ ## min_rev ## max_rev ## _ ## name ## _ROWS)
#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, A, A)
#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, A, Z)
#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, B, B)
#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, B, Z)
#define REGISTER_TABLE_BB_CZ(name)					\
	REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, B, B,		\
				  FR_BZ_ ## name ## _STEP,		\
				  FR_BB_ ## name ## _ROWS),		\
	REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, C, Z,		\
				  FR_BZ_ ## name ## _STEP,		\
				  FR_CZ_ ## name ## _ROWS)
#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, C, Z)
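
/* Illustrative expansion only: REGISTER_TABLE_BB_CZ(TIMER_TBL) emits two
 * entries that share the FR_BZ_TIMER_TBL offset and FR_BZ_TIMER_TBL_STEP,
 * but use FR_BB_TIMER_TBL_ROWS for revision B and FR_CZ_TIMER_TBL_ROWS for
 * revision C onwards, because the row count differs between those register
 * maps.
 */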

static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
	/* DRIVER is not used */
	/* EVQ_RPTR, TIMER_COMMAND, USR_EV and {RX,TX}_DESC_UPD are WO */
	REGISTER_TABLE_BB(TX_IPFIL_TBL),
	REGISTER_TABLE_BB(TX_SRC_MAC_TBL),
	REGISTER_TABLE_AA(RX_DESC_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(RX_DESC_PTR_TBL),
	REGISTER_TABLE_AA(TX_DESC_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL),
	REGISTER_TABLE_AA(EVQ_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL),
	/* We can't reasonably read all of the buffer table (up to 8MB!).
	 * However this driver will only use a few entries.  Reading
	 * 1K entries allows for some expansion of queue count and
	 * size before we need to change the version.  (See the dump-size
	 * note after this table.) */
	REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER,
				  A, A, 8, 1024),
	REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL,
				  B, Z, 8, 1024),
	REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0),
	REGISTER_TABLE_BB_CZ(TIMER_TBL),
	REGISTER_TABLE_BB_CZ(TX_PACE_TBL),
	REGISTER_TABLE_BZ(RX_INDIRECTION_TBL),
	/* TX_FILTER_TBL0 is huge and not used by this driver */
	REGISTER_TABLE_CZ(TX_MAC_FILTER_TBL0),
	REGISTER_TABLE_CZ(MC_TREG_SMEM),
	/* MSIX_PBA_TABLE is not mapped */
	/* SRM_DBG is not mapped (and is redundant with BUF_FLL_TBL) */
	REGISTER_TABLE_BZ(RX_FILTER_TBL0),
};
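
/* Dump-size note for the buffer table entries above (arithmetic only, no new
 * behaviour): efx_nic_get_regs_len() below charges each row
 * min_t(size_t, step, 16) bytes, so capping the buffer table at 1024 rows
 * costs at most 1024 * 16 = 16KB in the register dump, against the
 * up-to-8MB full table mentioned in the comment.
 */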

size_t efx_nic_get_regs_len(struct efx_nic *efx)
{
	const struct efx_nic_reg *reg;
	const struct efx_nic_reg_table *table;
	size_t len = 0;

	for (reg = efx_nic_regs;
	     reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
	     reg++)
		if (efx->type->revision >= reg->min_revision &&
		    efx->type->revision <= reg->max_revision)
			len += sizeof(efx_oword_t);

	for (table = efx_nic_reg_tables;
	     table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
	     table++)
		if (efx->type->revision >= table->min_revision &&
		    efx->type->revision <= table->max_revision)
			len += table->rows * min_t(size_t, table->step, 16);

	return len;
}
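
/* Usage sketch only, not part of this driver's call path: a register-dump
 * caller (an ethtool get_regs hook, for example) could pair the two
 * functions roughly as follows, with buffer management and error handling
 * to taste:
 *
 *	size_t len = efx_nic_get_regs_len(efx);
 *	void *buf = kzalloc(len, GFP_KERNEL);
 *
 *	if (buf) {
 *		efx_nic_get_regs(efx, buf);
 *		... copy or print the dump ...
 *		kfree(buf);
 *	}
 */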

void efx_nic_get_regs(struct efx_nic *efx, void *buf)
{
	const struct efx_nic_reg *reg;
	const struct efx_nic_reg_table *table;

	for (reg = efx_nic_regs;
	     reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
	     reg++) {
		if (efx->type->revision >= reg->min_revision &&
		    efx->type->revision <= reg->max_revision) {
			efx_reado(efx, (efx_oword_t *)buf, reg->offset);
			buf += sizeof(efx_oword_t);
		}
	}

	for (table = efx_nic_reg_tables;
	     table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
	     table++) {
		size_t size, i;

		if (!(efx->type->revision >= table->min_revision &&
		      efx->type->revision <= table->max_revision))
			continue;

		size = min_t(size_t, table->step, 16);

		for (i = 0; i < table->rows; i++) {
			switch (table->step) {
			case 4: /* 32-bit SRAM */
				efx_readd(efx, buf, table->offset + 4 * i);
				break;
			case 8: /* 64-bit SRAM */
				efx_sram_readq(efx,
					       efx->membase + table->offset,
					       buf, i);
				break;
			case 16: /* 128-bit-readable register */
				efx_reado_table(efx, buf, table->offset, i);
				break;
			case 32: /* 128-bit register, interleaved */
				efx_reado_table(efx, buf, table->offset, 2 * i