sfc: Work-around flush timeout when flushes have completed
[deliverable/linux.git] / drivers/net/ethernet/sfc/nic.c
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
0a6f40c6 4 * Copyright 2006-2011 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#include <linux/bitops.h>
12#include <linux/delay.h>
a6b7a407 13#include <linux/interrupt.h>
14#include <linux/pci.h>
15#include <linux/module.h>
16#include <linux/seq_file.h>
17#include "net_driver.h"
18#include "bitfield.h"
19#include "efx.h"
20#include "nic.h"
21#include "regs.h"
22#include "io.h"
23#include "workarounds.h"
24
25/**************************************************************************
26 *
27 * Configurable values
28 *
29 **************************************************************************
30 */
31
32/* This is set to 16 for a good reason. In summary, if larger than
33 * 16, the descriptor cache holds more than a default socket
34 * buffer's worth of packets (for UDP we can only have at most one
35 * socket buffer's worth outstanding). This combined with the fact
36 * that we only get 1 TX event per descriptor cache means the NIC
37 * goes idle.
38 */
39#define TX_DC_ENTRIES 16
40#define TX_DC_ENTRIES_ORDER 1
41
42#define RX_DC_ENTRIES 64
43#define RX_DC_ENTRIES_ORDER 3
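/* Illustrative sketch (not part of the driver): for the values above the
 * _ORDER constants encode the entry counts as 8 << order (16 == 8 << 1,
 * 64 == 8 << 3).  A compile-time check along these lines, dropped into any
 * function in this file, would catch the pairs drifting apart if one of
 * them were ever changed on its own:
 *
 *	BUILD_BUG_ON(TX_DC_ENTRIES != 8 << TX_DC_ENTRIES_ORDER);
 *	BUILD_BUG_ON(RX_DC_ENTRIES != 8 << RX_DC_ENTRIES_ORDER);
 */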
44
45/* If EFX_MAX_INT_ERRORS internal errors occur within
46 * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
47 * disable it.
48 */
49#define EFX_INT_ERROR_EXPIRE 3600
50#define EFX_MAX_INT_ERRORS 5
51
52/* Depth of RX flush request fifo */
53#define EFX_RX_FLUSH_COUNT 4
54
55/* Driver generated events */
56#define _EFX_CHANNEL_MAGIC_TEST 0x000101
57#define _EFX_CHANNEL_MAGIC_FILL 0x000102
58#define _EFX_CHANNEL_MAGIC_RX_DRAIN 0x000103
59#define _EFX_CHANNEL_MAGIC_TX_DRAIN 0x000104
d730dc52 60
61#define _EFX_CHANNEL_MAGIC(_code, _data) ((_code) << 8 | (_data))
62#define _EFX_CHANNEL_MAGIC_CODE(_magic) ((_magic) >> 8)
63
64#define EFX_CHANNEL_MAGIC_TEST(_channel) \
65 _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, (_channel)->channel)
66#define EFX_CHANNEL_MAGIC_FILL(_rx_queue) \
67 _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_FILL, \
68 efx_rx_queue_index(_rx_queue))
69#define EFX_CHANNEL_MAGIC_RX_DRAIN(_rx_queue) \
70 _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_RX_DRAIN, \
71 efx_rx_queue_index(_rx_queue))
72#define EFX_CHANNEL_MAGIC_TX_DRAIN(_tx_queue) \
73 _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN, \
74 (_tx_queue)->queue)
90d683af 75
76static void efx_magic_event(struct efx_channel *channel, u32 magic);
77
78/**************************************************************************
79 *
80 * Solarstorm hardware access
81 *
82 **************************************************************************/
83
84static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
85 unsigned int index)
86{
87 efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
88 value, index);
89}
90
91/* Read the current event from the event queue */
92static inline efx_qword_t *efx_event(struct efx_channel *channel,
93 unsigned int index)
94{
95 return ((efx_qword_t *) (channel->eventq.addr)) +
96 (index & channel->eventq_mask);
97}
98
99/* See if an event is present
100 *
101 * We check both the high and low dword of the event for all ones. We
102 * wrote all ones when we cleared the event, and no valid event can
103 * have all ones in either its high or low dwords. This approach is
104 * robust against reordering.
105 *
106 * Note that using a single 64-bit comparison is incorrect; even
107 * though the CPU read will be atomic, the DMA write may not be.
108 */
109static inline int efx_event_present(efx_qword_t *event)
110{
111 return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
112 EFX_DWORD_IS_ALL_ONES(event->dword[1]));
113}
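/* Illustrative sketch (not part of the driver): the consume pattern that
 * efx_event_present() supports, as used by efx_nic_process_eventq() below.
 * A slot is "emptied" by writing all ones, so it reads as not-present until
 * the NIC DMAs a fresh event into it.  The helper name is hypothetical.
 */
static inline bool efx_event_consume(struct efx_channel *channel,
				     unsigned int index, efx_qword_t *out)
{
	efx_qword_t *p_event = efx_event(channel, index);

	if (!efx_event_present(p_event))
		return false;
	*out = *p_event;
	EFX_SET_QWORD(*p_event);	/* mark the slot empty again */
	return true;
}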
114
115static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
116 const efx_oword_t *mask)
117{
118 return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
119 ((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
120}
121
122int efx_nic_test_registers(struct efx_nic *efx,
123 const struct efx_nic_register_test *regs,
124 size_t n_regs)
125{
126 unsigned address = 0, i, j;
127 efx_oword_t mask, imask, original, reg, buf;
128
129 for (i = 0; i < n_regs; ++i) {
130 address = regs[i].address;
131 mask = imask = regs[i].mask;
132 EFX_INVERT_OWORD(imask);
133
134 efx_reado(efx, &original, address);
135
136 /* bit sweep on and off */
137 for (j = 0; j < 128; j++) {
138 if (!EFX_EXTRACT_OWORD32(mask, j, j))
139 continue;
140
141 /* Test this testable bit can be set in isolation */
142 EFX_AND_OWORD(reg, original, mask);
143 EFX_SET_OWORD32(reg, j, j, 1);
144
145 efx_writeo(efx, &reg, address);
146 efx_reado(efx, &buf, address);
147
148 if (efx_masked_compare_oword(&reg, &buf, &mask))
149 goto fail;
150
151 /* Test this testable bit can be cleared in isolation */
152 EFX_OR_OWORD(reg, original, mask);
153 EFX_SET_OWORD32(reg, j, j, 0);
154
155 efx_writeo(efx, &reg, address);
156 efx_reado(efx, &buf, address);
157
158 if (efx_masked_compare_oword(&reg, &buf, &mask))
159 goto fail;
160 }
161
162 efx_writeo(efx, &original, address);
163 }
164
165 return 0;
166
167fail:
168 netif_err(efx, hw, efx->net_dev,
169 "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
170 " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
171 EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
172 return -EIO;
173}
174
175/**************************************************************************
176 *
177 * Special buffer handling
178 * Special buffers are used for event queues and the TX and RX
179 * descriptor rings.
180 *
181 *************************************************************************/
182
183/*
184 * Initialise a special buffer
185 *
186 * This will define a buffer (previously allocated via
187 * efx_alloc_special_buffer()) in the buffer table, allowing
188 * it to be used for event queues, descriptor rings etc.
189 */
190static void
191efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
192{
193 efx_qword_t buf_desc;
5bbe2f4f 194 unsigned int index;
195 dma_addr_t dma_addr;
196 int i;
197
198 EFX_BUG_ON_PARANOID(!buffer->addr);
199
200 /* Write buffer descriptors to NIC */
201 for (i = 0; i < buffer->entries; i++) {
202 index = buffer->index + i;
5b6262d0 203 dma_addr = buffer->dma_addr + (i * EFX_BUF_SIZE);
204 netif_dbg(efx, probe, efx->net_dev,
205 "mapping special buffer %d at %llx\n",
206 index, (unsigned long long)dma_addr);
207 EFX_POPULATE_QWORD_3(buf_desc,
208 FRF_AZ_BUF_ADR_REGION, 0,
209 FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
210 FRF_AZ_BUF_OWNER_ID_FBUF, 0);
211 efx_write_buf_tbl(efx, &buf_desc, index);
212 }
213}
214
215/* Unmaps a buffer and clears the buffer table entries */
216static void
217efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
218{
219 efx_oword_t buf_tbl_upd;
220 unsigned int start = buffer->index;
221 unsigned int end = (buffer->index + buffer->entries - 1);
222
223 if (!buffer->entries)
224 return;
225
226 netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
227 buffer->index, buffer->index + buffer->entries - 1);
228
229 EFX_POPULATE_OWORD_4(buf_tbl_upd,
230 FRF_AZ_BUF_UPD_CMD, 0,
231 FRF_AZ_BUF_CLR_CMD, 1,
232 FRF_AZ_BUF_CLR_END_ID, end,
233 FRF_AZ_BUF_CLR_START_ID, start);
234 efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
235}
236
237/*
238 * Allocate a new special buffer
239 *
240 * This allocates memory for a new buffer, clears it and allocates a
241 * new buffer ID range. It does not write into the buffer table.
242 *
243 * This call will allocate 4KB buffers, since 8KB buffers can't be
244 * used for event queues and descriptor rings.
245 */
246static int efx_alloc_special_buffer(struct efx_nic *efx,
247 struct efx_special_buffer *buffer,
248 unsigned int len)
249{
250 len = ALIGN(len, EFX_BUF_SIZE);
251
252 buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
253 &buffer->dma_addr, GFP_KERNEL);
254 if (!buffer->addr)
255 return -ENOMEM;
256 buffer->len = len;
257 buffer->entries = len / EFX_BUF_SIZE;
258 BUG_ON(buffer->dma_addr & (EFX_BUF_SIZE - 1));
259
260 /* Select new buffer ID */
261 buffer->index = efx->next_buffer_table;
262 efx->next_buffer_table += buffer->entries;
263#ifdef CONFIG_SFC_SRIOV
264 BUG_ON(efx_sriov_enabled(efx) &&
265 efx->vf_buftbl_base < efx->next_buffer_table);
266#endif
8e730c15 267
268 netif_dbg(efx, probe, efx->net_dev,
269 "allocating special buffers %d-%d at %llx+%x "
270 "(virt %p phys %llx)\n", buffer->index,
271 buffer->index + buffer->entries - 1,
272 (u64)buffer->dma_addr, len,
273 buffer->addr, (u64)virt_to_phys(buffer->addr));
274
275 return 0;
276}
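/* Illustrative example (not in the driver): sizing a special buffer for a
 * 512-descriptor TX ring.  Each descriptor is an 8-byte efx_qword_t, so the
 * ring needs 512 * 8 = 4096 bytes, which ALIGN()s to exactly one
 * EFX_BUF_SIZE buffer and therefore consumes a single buffer table entry;
 * a 4096-entry event queue would take eight consecutive entries.  The
 * function name is hypothetical.
 */
static int efx_probe_small_txd_example(struct efx_nic *efx,
				       struct efx_special_buffer *txd)
{
	return efx_alloc_special_buffer(efx, txd, 512 * sizeof(efx_qword_t));
}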
277
278static void
279efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
280{
281 if (!buffer->addr)
282 return;
283
284 netif_dbg(efx, hw, efx->net_dev,
285 "deallocating special buffers %d-%d at %llx+%x "
286 "(virt %p phys %llx)\n", buffer->index,
287 buffer->index + buffer->entries - 1,
288 (u64)buffer->dma_addr, buffer->len,
289 buffer->addr, (u64)virt_to_phys(buffer->addr));
8e730c15 290
291 dma_free_coherent(&efx->pci_dev->dev, buffer->len, buffer->addr,
292 buffer->dma_addr);
293 buffer->addr = NULL;
294 buffer->entries = 0;
295}
296
297/**************************************************************************
298 *
299 * Generic buffer handling
f7251a9c 300 * These buffers are used for interrupt status, MAC stats, etc.
301 *
302 **************************************************************************/
303
304int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
305 unsigned int len)
306{
307 buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
308 &buffer->dma_addr, GFP_ATOMIC);
309 if (!buffer->addr)
310 return -ENOMEM;
311 buffer->len = len;
312 memset(buffer->addr, 0, len);
313 return 0;
314}
315
316void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
317{
318 if (buffer->addr) {
319 dma_free_coherent(&efx->pci_dev->dev, buffer->len,
320 buffer->addr, buffer->dma_addr);
321 buffer->addr = NULL;
322 }
323}
324
325/**************************************************************************
326 *
327 * TX path
328 *
329 **************************************************************************/
330
331/* Returns a pointer to the specified transmit descriptor in the TX
332 * descriptor queue belonging to the specified channel.
333 */
334static inline efx_qword_t *
335efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
336{
807540ba 337 return ((efx_qword_t *) (tx_queue->txd.addr)) + index;
338}
339
340/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
341static inline void efx_notify_tx_desc(struct efx_tx_queue *tx_queue)
342{
343 unsigned write_ptr;
344 efx_dword_t reg;
345
ecc910f5 346 write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
347 EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
348 efx_writed_page(tx_queue->efx, &reg,
349 FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
350}
351
352/* Write pointer and first descriptor for TX descriptor ring */
353static inline void efx_push_tx_desc(struct efx_tx_queue *tx_queue,
354 const efx_qword_t *txd)
355{
356 unsigned write_ptr;
357 efx_oword_t reg;
358
359 BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
360 BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);
361
362 write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
363 EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
364 FRF_AZ_TX_DESC_WPTR, write_ptr);
365 reg.qword[0] = *txd;
366 efx_writeo_page(tx_queue->efx, &reg,
367 FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
368}
369
370static inline bool
371efx_may_push_tx_desc(struct efx_tx_queue *tx_queue, unsigned int write_count)
372{
373 unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);
374
375 if (empty_read_count == 0)
376 return false;
377
378 tx_queue->empty_read_count = 0;
379 return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
380}
381
382/* For each entry inserted into the software descriptor ring, create a
383 * descriptor in the hardware TX descriptor ring (in host memory), and
384 * write a doorbell.
385 */
386void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
387{
388
389 struct efx_tx_buffer *buffer;
390 efx_qword_t *txd;
391 unsigned write_ptr;
cd38557d 392 unsigned old_write_count = tx_queue->write_count;
393
394 BUG_ON(tx_queue->write_count == tx_queue->insert_count);
395
396 do {
ecc910f5 397 write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
398 buffer = &tx_queue->buffer[write_ptr];
399 txd = efx_tx_desc(tx_queue, write_ptr);
400 ++tx_queue->write_count;
401
402 /* Create TX descriptor ring entry */
7668ff9c 403 BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
8e730c15 404 EFX_POPULATE_QWORD_4(*txd,
405 FSF_AZ_TX_KER_CONT,
406 buffer->flags & EFX_TX_BUF_CONT,
407 FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
408 FSF_AZ_TX_KER_BUF_REGION, 0,
409 FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
410 } while (tx_queue->write_count != tx_queue->insert_count);
411
412 wmb(); /* Ensure descriptors are written before they are fetched */
413
414 if (efx_may_push_tx_desc(tx_queue, old_write_count)) {
415 txd = efx_tx_desc(tx_queue,
416 old_write_count & tx_queue->ptr_mask);
417 efx_push_tx_desc(tx_queue, txd);
418 ++tx_queue->pushes;
419 } else {
420 efx_notify_tx_desc(tx_queue);
421 }
422}
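/* Illustrative sketch (not part of the driver): the free-running counters
 * used above (insert_count, write_count, read_count) are never wrapped
 * explicitly; masking with ptr_mask (ring size minus one, a power of two)
 * turns a counter into a ring slot.  With a 1024-entry ring, a write_count
 * of 1027 maps to slot 3, and the masked difference of two counters gives
 * the number of outstanding descriptors.  Hypothetical helper:
 */
static inline unsigned int efx_tx_ring_slot(const struct efx_tx_queue *tx_queue,
					    unsigned int count)
{
	return count & tx_queue->ptr_mask;
}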
423
424/* Allocate hardware resources for a TX queue */
425int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
426{
427 struct efx_nic *efx = tx_queue->efx;
428 unsigned entries;
429
430 entries = tx_queue->ptr_mask + 1;
8e730c15 431 return efx_alloc_special_buffer(efx, &tx_queue->txd,
ecc910f5 432 entries * sizeof(efx_qword_t));
433}
434
435void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
436{
8e730c15 437 struct efx_nic *efx = tx_queue->efx;
94b274bf 438 efx_oword_t reg;
8e730c15 439
440 /* Pin TX descriptor ring */
441 efx_init_special_buffer(efx, &tx_queue->txd);
442
443 /* Push TX descriptor ring to card */
94b274bf 444 EFX_POPULATE_OWORD_10(reg,
445 FRF_AZ_TX_DESCQ_EN, 1,
446 FRF_AZ_TX_ISCSI_DDIG_EN, 0,
447 FRF_AZ_TX_ISCSI_HDIG_EN, 0,
448 FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
449 FRF_AZ_TX_DESCQ_EVQ_ID,
450 tx_queue->channel->channel,
451 FRF_AZ_TX_DESCQ_OWNER_ID, 0,
452 FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
453 FRF_AZ_TX_DESCQ_SIZE,
454 __ffs(tx_queue->txd.entries),
455 FRF_AZ_TX_DESCQ_TYPE, 0,
456 FRF_BZ_TX_NON_IP_DROP_DIS, 1);
457
458 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
a4900ac9 459 int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
460 EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
461 EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
462 !csum);
463 }
464
94b274bf 465 efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
466 tx_queue->queue);
467
468 if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
8e730c15 469 /* Only 128 bits in this register */
a4900ac9 470 BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);
471
472 efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
a4900ac9 473 if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
32766ec8 474 __clear_bit_le(tx_queue->queue, &reg);
8e730c15 475 else
32766ec8 476 __set_bit_le(tx_queue->queue, &reg);
477 efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
478 }
479
480 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
481 EFX_POPULATE_OWORD_1(reg,
482 FRF_BZ_TX_PACE,
483 (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
484 FFE_BZ_TX_PACE_OFF :
485 FFE_BZ_TX_PACE_RESERVED);
486 efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
487 tx_queue->queue);
488 }
489}
490
491static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)
492{
493 struct efx_nic *efx = tx_queue->efx;
494 efx_oword_t tx_flush_descq;
495
496 WARN_ON(atomic_read(&tx_queue->flush_outstanding));
497 atomic_set(&tx_queue->flush_outstanding, 1);
498
499 EFX_POPULATE_OWORD_2(tx_flush_descq,
500 FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
501 FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
502 efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
503}
504
505void efx_nic_fini_tx(struct efx_tx_queue *tx_queue)
506{
507 struct efx_nic *efx = tx_queue->efx;
508 efx_oword_t tx_desc_ptr;
509
510 /* Remove TX descriptor ring from card */
511 EFX_ZERO_OWORD(tx_desc_ptr);
512 efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
513 tx_queue->queue);
514
515 /* Unpin TX descriptor ring */
516 efx_fini_special_buffer(efx, &tx_queue->txd);
517}
518
519/* Free buffers backing TX queue */
520void efx_nic_remove_tx(struct efx_tx_queue *tx_queue)
521{
522 efx_free_special_buffer(tx_queue->efx, &tx_queue->txd);
523}
524
525/**************************************************************************
526 *
527 * RX path
528 *
529 **************************************************************************/
530
531/* Returns a pointer to the specified descriptor in the RX descriptor queue */
532static inline efx_qword_t *
533efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
534{
807540ba 535 return ((efx_qword_t *) (rx_queue->rxd.addr)) + index;
536}
537
538/* This creates an entry in the RX descriptor queue */
539static inline void
540efx_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
541{
542 struct efx_rx_buffer *rx_buf;
543 efx_qword_t *rxd;
544
545 rxd = efx_rx_desc(rx_queue, index);
546 rx_buf = efx_rx_buffer(rx_queue, index);
547 EFX_POPULATE_QWORD_3(*rxd,
548 FSF_AZ_RX_KER_BUF_SIZE,
549 rx_buf->len -
550 rx_queue->efx->type->rx_buffer_padding,
551 FSF_AZ_RX_KER_BUF_REGION, 0,
552 FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
553}
554
555/* This writes to the RX_DESC_WPTR register for the specified receive
556 * descriptor ring.
557 */
558void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
559{
ecc910f5 560 struct efx_nic *efx = rx_queue->efx;
561 efx_dword_t reg;
562 unsigned write_ptr;
563
564 while (rx_queue->notified_count != rx_queue->added_count) {
565 efx_build_rx_desc(
566 rx_queue,
567 rx_queue->notified_count & rx_queue->ptr_mask);
568 ++rx_queue->notified_count;
569 }
570
571 wmb();
ecc910f5 572 write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
8e730c15 573 EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
ecc910f5 574 efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
ba1e8a35 575 efx_rx_queue_index(rx_queue));
576}
577
578int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
579{
580 struct efx_nic *efx = rx_queue->efx;
581 unsigned entries;
582
583 entries = rx_queue->ptr_mask + 1;
8e730c15 584 return efx_alloc_special_buffer(efx, &rx_queue->rxd,
ecc910f5 585 entries * sizeof(efx_qword_t));
586}
587
588void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
589{
590 efx_oword_t rx_desc_ptr;
591 struct efx_nic *efx = rx_queue->efx;
592 bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
593 bool iscsi_digest_en = is_b0;
594
595 netif_dbg(efx, hw, efx->net_dev,
596 "RX queue %d ring in special buffers %d-%d\n",
ba1e8a35 597 efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
62776d03 598 rx_queue->rxd.index + rx_queue->rxd.entries - 1);
8e730c15 599
600 /* Pin RX descriptor ring */
601 efx_init_special_buffer(efx, &rx_queue->rxd);
602
603 /* Push RX descriptor ring to card */
604 EFX_POPULATE_OWORD_10(rx_desc_ptr,
605 FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
606 FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
607 FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
608 FRF_AZ_RX_DESCQ_EVQ_ID,
ba1e8a35 609 efx_rx_queue_channel(rx_queue)->channel,
8e730c15 610 FRF_AZ_RX_DESCQ_OWNER_ID, 0,
611 FRF_AZ_RX_DESCQ_LABEL,
612 efx_rx_queue_index(rx_queue),
613 FRF_AZ_RX_DESCQ_SIZE,
614 __ffs(rx_queue->rxd.entries),
615 FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
616 /* For >=B0 this is scatter so disable */
617 FRF_AZ_RX_DESCQ_JUMBO, !is_b0,
618 FRF_AZ_RX_DESCQ_EN, 1);
619 efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
ba1e8a35 620 efx_rx_queue_index(rx_queue));
621}
622
623static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue)
624{
625 struct efx_nic *efx = rx_queue->efx;
626 efx_oword_t rx_flush_descq;
627
628 EFX_POPULATE_OWORD_2(rx_flush_descq,
629 FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
630 FRF_AZ_RX_FLUSH_DESCQ,
631 efx_rx_queue_index(rx_queue));
632 efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
633}
634
635void efx_nic_fini_rx(struct efx_rx_queue *rx_queue)
636{
637 efx_oword_t rx_desc_ptr;
638 struct efx_nic *efx = rx_queue->efx;
639
640 /* Remove RX descriptor ring from card */
641 EFX_ZERO_OWORD(rx_desc_ptr);
642 efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
ba1e8a35 643 efx_rx_queue_index(rx_queue));
644
645 /* Unpin RX descriptor ring */
646 efx_fini_special_buffer(efx, &rx_queue->rxd);
647}
648
649/* Free buffers backing RX queue */
650void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
651{
652 efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
653}
654
655/**************************************************************************
656 *
657 * Flush handling
658 *
659 **************************************************************************/
660
661/* efx_nic_flush_queues() must be woken up when all flushes are completed,
662 * or more RX flushes can be kicked off.
663 */
664static bool efx_flush_wake(struct efx_nic *efx)
665{
666 /* Ensure that all updates are visible to efx_nic_flush_queues() */
667 smp_mb();
668
669 return (atomic_read(&efx->drain_pending) == 0 ||
670 (atomic_read(&efx->rxq_flush_outstanding) < EFX_RX_FLUSH_COUNT
671 && atomic_read(&efx->rxq_flush_pending) > 0));
672}
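/* Illustrative sketch (not part of the driver): efx_flush_wake() is the
 * condition side of an ordinary waitqueue pairing.  Stripped of the flush
 * kicking done by efx_nic_flush_queues() below, the waiting side reduces to
 * something like this (hypothetical helper); the completion handlers call
 * wake_up(&efx->flush_wq) after updating the counters.
 */
static void efx_wait_for_flushes_example(struct efx_nic *efx)
{
	unsigned timeout = msecs_to_jiffies(5000);

	while (timeout && atomic_read(&efx->drain_pending) > 0)
		timeout = wait_event_timeout(efx->flush_wq,
					     efx_flush_wake(efx), timeout);
}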
673
674static bool efx_check_tx_flush_complete(struct efx_nic *efx)
675{
676 bool i = true;
677 efx_oword_t txd_ptr_tbl;
678 struct efx_channel *channel;
679 struct efx_tx_queue *tx_queue;
680
681 efx_for_each_channel(channel, efx) {
682 efx_for_each_channel_tx_queue(tx_queue, channel) {
683 efx_reado_table(efx, &txd_ptr_tbl,
684 FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue);
685 if (EFX_OWORD_FIELD(txd_ptr_tbl,
686 FRF_AZ_TX_DESCQ_FLUSH) ||
687 EFX_OWORD_FIELD(txd_ptr_tbl,
688 FRF_AZ_TX_DESCQ_EN)) {
689 netif_dbg(efx, hw, efx->net_dev,
690 "flush did not complete on TXQ %d\n",
691 tx_queue->queue);
692 i = false;
693 } else if (atomic_cmpxchg(&tx_queue->flush_outstanding,
694 1, 0)) {
695 /* The flush is complete, but we didn't
696 * receive a flush completion event
697 */
698 netif_dbg(efx, hw, efx->net_dev,
699 "flush complete on TXQ %d, so drain "
700 "the queue\n", tx_queue->queue);
701 /* Don't need to increment drain_pending as it
702 * has already been incremented for the queues
703 * which did not drain
704 */
705 efx_magic_event(channel,
706 EFX_CHANNEL_MAGIC_TX_DRAIN(
707 tx_queue));
708 }
709 }
710 }
711
712 return i;
713}
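/* Illustrative sketch (not part of the driver): the atomic_cmpxchg(..., 1, 0)
 * above is a "claim once" idiom.  Whichever path sees flush_outstanding at 1
 * first -- the normal flush-done event handler or this timeout work-around --
 * atomically takes it to 0 and becomes responsible for injecting the drain
 * event; the other path sees 0 and does nothing, so the drain is never
 * generated twice for the same queue.  Hypothetical helper:
 */
static inline bool efx_claim_tx_flush(struct efx_tx_queue *tx_queue)
{
	return atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0) == 1;
}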
714
715/* Flush all the transmit queues, and continue flushing receive queues until
716 * they're all flushed. Wait for the DRAIN events to be received so that there
717 * are no more RX and TX events left on any channel. */
718int efx_nic_flush_queues(struct efx_nic *efx)
719{
720 unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */
721 struct efx_channel *channel;
722 struct efx_rx_queue *rx_queue;
723 struct efx_tx_queue *tx_queue;
724 int rc = 0;
725
726 efx->type->prepare_flush(efx);
727
728 efx_for_each_channel(channel, efx) {
729 efx_for_each_channel_tx_queue(tx_queue, channel) {
730 atomic_inc(&efx->drain_pending);
731 efx_flush_tx_queue(tx_queue);
732 }
733 efx_for_each_channel_rx_queue(rx_queue, channel) {
734 atomic_inc(&efx->drain_pending);
735 rx_queue->flush_pending = true;
736 atomic_inc(&efx->rxq_flush_pending);
737 }
738 }
739
740 while (timeout && atomic_read(&efx->drain_pending) > 0) {
741 /* If SRIOV is enabled, then offload receive queue flushing to
742 * the firmware (though we will still have to poll for
743 * completion). If that fails, fall back to the old scheme.
744 */
745 if (efx_sriov_enabled(efx)) {
746 rc = efx_mcdi_flush_rxqs(efx);
747 if (!rc)
748 goto wait;
749 }
750
751 /* The hardware supports four concurrent rx flushes, each of
752 * which may need to be retried if there is an outstanding
753 * descriptor fetch
754 */
755 efx_for_each_channel(channel, efx) {
756 efx_for_each_channel_rx_queue(rx_queue, channel) {
757 if (atomic_read(&efx->rxq_flush_outstanding) >=
758 EFX_RX_FLUSH_COUNT)
759 break;
760
761 if (rx_queue->flush_pending) {
762 rx_queue->flush_pending = false;
763 atomic_dec(&efx->rxq_flush_pending);
764 atomic_inc(&efx->rxq_flush_outstanding);
765 efx_flush_rx_queue(rx_queue);
766 }
767 }
768 }
769
cd2d5b52 770 wait:
771 timeout = wait_event_timeout(efx->flush_wq, efx_flush_wake(efx),
772 timeout);
773 }
774
775 if (atomic_read(&efx->drain_pending) &&
776 !efx_check_tx_flush_complete(efx)) {
777 netif_err(efx, hw, efx->net_dev, "failed to flush %d queues "
778 "(rx %d+%d)\n", atomic_read(&efx->drain_pending),
779 atomic_read(&efx->rxq_flush_outstanding),
780 atomic_read(&efx->rxq_flush_pending));
781 rc = -ETIMEDOUT;
782
783 atomic_set(&efx->drain_pending, 0);
784 atomic_set(&efx->rxq_flush_pending, 0);
785 atomic_set(&efx->rxq_flush_outstanding, 0);
786 }
787
d5e8cc6c 788 efx->type->finish_flush(efx);
a606f432 789
790 return rc;
791}
792
793/**************************************************************************
794 *
795 * Event queue processing
796 * Event queues are processed by per-channel tasklets.
797 *
798 **************************************************************************/
799
800/* Update a channel's event queue's read pointer (RPTR) register
801 *
802 * This writes the EVQ_RPTR_REG register for the specified channel's
803 * event queue.
804 */
805void efx_nic_eventq_read_ack(struct efx_channel *channel)
806{
807 efx_dword_t reg;
808 struct efx_nic *efx = channel->efx;
809
810 EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
811 channel->eventq_read_ptr & channel->eventq_mask);
812
813 /* For Falcon A1, EVQ_RPTR_KER is documented as having a step size
814 * of 4 bytes, but it is really 16 bytes just like later revisions.
815 */
816 efx_writed(efx, &reg,
817 efx->type->evq_rptr_tbl_base +
818 FR_BZ_EVQ_RPTR_STEP * channel->channel);
819}
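/* Illustrative note (not part of the driver): a poll loop would roughly pair
 * efx_nic_process_eventq() with this ack once it has drained the queue:
 *
 *	spent = efx_nic_process_eventq(channel, budget);
 *	if (spent < budget)
 *		efx_nic_eventq_read_ack(channel);
 *
 * Writing the masked read pointer back is what tells the NIC that those
 * event queue entries have been consumed and may be reused.
 */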
820
821/* Use HW to insert a SW defined event */
822void efx_generate_event(struct efx_nic *efx, unsigned int evq,
823 efx_qword_t *event)
824{
825 efx_oword_t drv_ev_reg;
826
827 BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
828 FRF_AZ_DRV_EV_DATA_WIDTH != 64);
829 drv_ev_reg.u32[0] = event->u32[0];
830 drv_ev_reg.u32[1] = event->u32[1];
831 drv_ev_reg.u32[2] = 0;
832 drv_ev_reg.u32[3] = 0;
833 EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, evq);
834 efx_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV);
835}
836
837static void efx_magic_event(struct efx_channel *channel, u32 magic)
838{
839 efx_qword_t event;
840
841 EFX_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE,
842 FSE_AZ_EV_CODE_DRV_GEN_EV,
843 FSF_AZ_DRV_GEN_EV_MAGIC, magic);
90893000 844 efx_generate_event(channel->efx, channel->channel, &event);
845}
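/* Illustrative usage (mirrors efx_nic_event_test_start() further down):
 * injecting a self-test event into a channel.  The magic value round-trips
 * through the hardware in FSF_AZ_DRV_GEN_EV_MAGIC and is decoded again in
 * efx_handle_generated_event().  The function name is hypothetical.
 */
static void efx_inject_test_event_example(struct efx_channel *channel)
{
	efx_magic_event(channel, EFX_CHANNEL_MAGIC_TEST(channel));
}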
846
847/* Handle a transmit completion event
848 *
849 * The NIC batches TX completion events; the message we receive is of
850 * the form "complete all TX events up to this index".
851 */
fa236e18 852static int
853efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
854{
855 unsigned int tx_ev_desc_ptr;
856 unsigned int tx_ev_q_label;
857 struct efx_tx_queue *tx_queue;
858 struct efx_nic *efx = channel->efx;
fa236e18 859 int tx_packets = 0;
8e730c15 860
861 if (unlikely(ACCESS_ONCE(efx->reset_pending)))
862 return 0;
863
864 if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
865 /* Transmit completion */
866 tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
867 tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
868 tx_queue = efx_channel_get_tx_queue(
869 channel, tx_ev_q_label % EFX_TXQ_TYPES);
fa236e18 870 tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
ecc910f5 871 tx_queue->ptr_mask);
872 efx_xmit_done(tx_queue, tx_ev_desc_ptr);
873 } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
874 /* Rewrite the FIFO write pointer */
875 tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
876 tx_queue = efx_channel_get_tx_queue(
877 channel, tx_ev_q_label % EFX_TXQ_TYPES);
8e730c15 878
73ba7b68 879 netif_tx_lock(efx->net_dev);
8e730c15 880 efx_notify_tx_desc(tx_queue);
73ba7b68 881 netif_tx_unlock(efx->net_dev);
882 } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) &&
883 EFX_WORKAROUND_10727(efx)) {
884 efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
885 } else {
886 netif_err(efx, tx_err, efx->net_dev,
887 "channel %d unexpected TX event "
888 EFX_QWORD_FMT"\n", channel->channel,
889 EFX_QWORD_VAL(*event));
8e730c15 890 }
891
892 return tx_packets;
893}
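/* Illustrative example (not in the driver): the completion count above is
 * plain modular ring arithmetic.  With a 1024-entry ring (ptr_mask == 1023),
 * a free-running read_count of 1020 and a hardware descriptor pointer of 2,
 * (2 - 1020) & 1023 == 6, i.e. six descriptors completed, so the expression
 * stays correct across the wrap of the counter.
 */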
894
895/* Detect errors included in the rx_evt_pkt_ok bit. */
896static u16 efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
897 const efx_qword_t *event)
8e730c15 898{
ba1e8a35 899 struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
900 struct efx_nic *efx = rx_queue->efx;
901 bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
902 bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
903 bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
904 bool rx_ev_other_err, rx_ev_pause_frm;
905 bool rx_ev_hdr_type, rx_ev_mcast_pkt;
906 unsigned rx_ev_pkt_type;
907
908 rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
909 rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
910 rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
911 rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
912 rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
913 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
914 rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
915 FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
916 rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
917 FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
918 rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
919 rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
920 rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ?
921 0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
922 rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);
923
924 /* Every error apart from tobe_disc and pause_frm */
925 rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
926 rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
927 rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);
928
929 /* Count errors that are not in MAC stats. Ignore expected
930 * checksum errors during self-test. */
931 if (rx_ev_frm_trunc)
ba1e8a35 932 ++channel->n_rx_frm_trunc;
8e730c15 933 else if (rx_ev_tobe_disc)
ba1e8a35 934 ++channel->n_rx_tobe_disc;
935 else if (!efx->loopback_selftest) {
936 if (rx_ev_ip_hdr_chksum_err)
ba1e8a35 937 ++channel->n_rx_ip_hdr_chksum_err;
8e730c15 938 else if (rx_ev_tcp_udp_chksum_err)
ba1e8a35 939 ++channel->n_rx_tcp_udp_chksum_err;
940 }
941
942 /* TOBE_DISC is expected on unicast mismatches; don't print out an
943 * error message. FRM_TRUNC indicates RXDP dropped the packet due
944 * to a FIFO overflow.
945 */
5f3f9d6c 946#ifdef DEBUG
947 if (rx_ev_other_err && net_ratelimit()) {
948 netif_dbg(efx, rx_err, efx->net_dev,
949 " RX queue %d unexpected RX event "
950 EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
ba1e8a35 951 efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event),
952 rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
953 rx_ev_ip_hdr_chksum_err ?
954 " [IP_HDR_CHKSUM_ERR]" : "",
955 rx_ev_tcp_udp_chksum_err ?
956 " [TCP_UDP_CHKSUM_ERR]" : "",
957 rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
958 rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
959 rx_ev_drib_nib ? " [DRIB_NIB]" : "",
960 rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
961 rx_ev_pause_frm ? " [PAUSE]" : "");
962 }
963#endif
964
965 /* The frame must be discarded if any of these are true. */
966 return (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
967 rx_ev_tobe_disc | rx_ev_pause_frm) ?
968 EFX_RX_PKT_DISCARD : 0;
969}
970
971/* Handle receive events that are not in-order. */
972static void
973efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
974{
975 struct efx_nic *efx = rx_queue->efx;
976 unsigned expected, dropped;
977
978 expected = rx_queue->removed_count & rx_queue->ptr_mask;
979 dropped = (index - expected) & rx_queue->ptr_mask;
980 netif_info(efx, rx_err, efx->net_dev,
981 "dropped %d events (index=%d expected=%d)\n",
982 dropped, index, expected);
983
984 efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
985 RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
986}
987
988/* Handle a packet received event
989 *
990 * The NIC gives a "discard" flag if it's a unicast packet with the
991 * wrong destination address
992 * Also "is multicast" and "matches multicast filter" flags can be used to
993 * discard non-matching multicast packets.
994 */
995static void
996efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
997{
998 unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
999 unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
1000 unsigned expected_ptr;
1001 bool rx_ev_pkt_ok;
1002 u16 flags;
8e730c15 1003 struct efx_rx_queue *rx_queue;
1004 struct efx_nic *efx = channel->efx;
1005
1006 if (unlikely(ACCESS_ONCE(efx->reset_pending)))
1007 return;
1008
1009 /* Basic packet information */
1010 rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
1011 rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
1012 rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
1013 WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT));
1014 WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP) != 1);
1015 WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
1016 channel->channel);
1017
f7d12cdc 1018 rx_queue = efx_channel_get_rx_queue(channel);
1019
1020 rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
ecc910f5 1021 expected_ptr = rx_queue->removed_count & rx_queue->ptr_mask;
1022 if (unlikely(rx_ev_desc_ptr != expected_ptr))
1023 efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);
1024
1025 if (likely(rx_ev_pkt_ok)) {
1026 /* If packet is marked as OK and packet type is TCP/IP or
1027 * UDP/IP, then we can rely on the hardware checksum.
1028 */
1029 flags = (rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP ||
1030 rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP) ?
1031 EFX_RX_PKT_CSUMMED : 0;
8e730c15 1032 } else {
db339569 1033 flags = efx_handle_rx_not_ok(rx_queue, event);
1034 }
1035
1036 /* Detect multicast packets that didn't match the filter */
1037 rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
1038 if (rx_ev_mcast_pkt) {
1039 unsigned int rx_ev_mcast_hash_match =
1040 EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);
1041
1042 if (unlikely(!rx_ev_mcast_hash_match)) {
1043 ++channel->n_rx_mcast_mismatch;
db339569 1044 flags |= EFX_RX_PKT_DISCARD;
1045 }
1046 }
1047
1048 channel->irq_mod_score += 2;
1049
1050 /* Handle received packet */
db339569 1051 efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt, flags);
1052}
1053
1054/* If this flush done event corresponds to a &struct efx_tx_queue, then
1055 * send an %EFX_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue
1056 * of all transmit completions.
1057 */
1058static void
1059efx_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
1060{
1061 struct efx_tx_queue *tx_queue;
1062 int qid;
1063
1064 qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
1065 if (qid < EFX_TXQ_TYPES * efx->n_tx_channels) {
1066 tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES,
1067 qid % EFX_TXQ_TYPES);
1068 if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) {
1069 efx_magic_event(tx_queue->channel,
1070 EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
1071 }
1072 }
1073}
1074
1075/* If this flush done event corresponds to a &struct efx_rx_queue: If the flush
1076 * was successful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add
1077 * the RX queue back to the mask of RX queues in need of flushing.
1078 */
1079static void
1080efx_handle_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
1081{
1082 struct efx_channel *channel;
1083 struct efx_rx_queue *rx_queue;
1084 int qid;
1085 bool failed;
1086
1087 qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
1088 failed = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
1089 if (qid >= efx->n_channels)
1090 return;
1091 channel = efx_get_channel(efx, qid);
1092 if (!efx_channel_has_rx_queue(channel))
1093 return;
1094 rx_queue = efx_channel_get_rx_queue(channel);
1095
1096 if (failed) {
1097 netif_info(efx, hw, efx->net_dev,
1098 "RXQ %d flush retry\n", qid);
1099 rx_queue->flush_pending = true;
1100 atomic_inc(&efx->rxq_flush_pending);
1101 } else {
1102 efx_magic_event(efx_rx_queue_channel(rx_queue),
1103 EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue));
1104 }
1105 atomic_dec(&efx->rxq_flush_outstanding);
1106 if (efx_flush_wake(efx))
1107 wake_up(&efx->flush_wq);
1108}
1109
1110static void
1111efx_handle_drain_event(struct efx_channel *channel)
1112{
1113 struct efx_nic *efx = channel->efx;
1114
1115 WARN_ON(atomic_read(&efx->drain_pending) == 0);
1116 atomic_dec(&efx->drain_pending);
1117 if (efx_flush_wake(efx))
1118 wake_up(&efx->flush_wq);
1119}
1120
1121static void
1122efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
1123{
1124 struct efx_nic *efx = channel->efx;
1125 struct efx_rx_queue *rx_queue =
1126 efx_channel_has_rx_queue(channel) ?
1127 efx_channel_get_rx_queue(channel) : NULL;
9f2cb71c 1128 unsigned magic, code;
90d683af 1129
4ef594eb 1130 magic = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
9f2cb71c 1131 code = _EFX_CHANNEL_MAGIC_CODE(magic);
4ef594eb 1132
9f2cb71c 1133 if (magic == EFX_CHANNEL_MAGIC_TEST(channel)) {
dd40781e 1134 channel->event_test_cpu = raw_smp_processor_id();
9f2cb71c 1135 } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_FILL(rx_queue)) {
1136 /* The queue must be empty, so we won't receive any rx
1137 * events, so efx_process_channel() won't refill the
1138 * queue. Refill it here */
2ae75dac 1139 efx_fast_push_rx_descriptors(rx_queue);
1140 } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) {
1141 rx_queue->enabled = false;
1142 efx_handle_drain_event(channel);
1143 } else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) {
1144 efx_handle_drain_event(channel);
1145 } else {
1146 netif_dbg(efx, hw, efx->net_dev, "channel %d received "
1147 "generated event "EFX_QWORD_FMT"\n",
1148 channel->channel, EFX_QWORD_VAL(*event));
9f2cb71c 1149 }
1150}
1151
1152static void
1153efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
1154{
1155 struct efx_nic *efx = channel->efx;
1156 unsigned int ev_sub_code;
1157 unsigned int ev_sub_data;
1158
1159 ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
1160 ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
1161
1162 switch (ev_sub_code) {
1163 case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
1164 netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
1165 channel->channel, ev_sub_data);
9f2cb71c 1166 efx_handle_tx_flush_done(efx, event);
cd2d5b52 1167 efx_sriov_tx_flush_done(efx, event);
1168 break;
1169 case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
1170 netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
1171 channel->channel, ev_sub_data);
9f2cb71c 1172 efx_handle_rx_flush_done(efx, event);
cd2d5b52 1173 efx_sriov_rx_flush_done(efx, event);
1174 break;
1175 case FSE_AZ_EVQ_INIT_DONE_EV:
1176 netif_dbg(efx, hw, efx->net_dev,
1177 "channel %d EVQ %d initialised\n",
1178 channel->channel, ev_sub_data);
1179 break;
1180 case FSE_AZ_SRM_UPD_DONE_EV:
1181 netif_vdbg(efx, hw, efx->net_dev,
1182 "channel %d SRAM update done\n", channel->channel);
8e730c15
BH
1183 break;
1184 case FSE_AZ_WAKE_UP_EV:
1185 netif_vdbg(efx, hw, efx->net_dev,
1186 "channel %d RXQ %d wakeup event\n",
1187 channel->channel, ev_sub_data);
1188 break;
1189 case FSE_AZ_TIMER_EV:
1190 netif_vdbg(efx, hw, efx->net_dev,
1191 "channel %d RX queue %d timer expired\n",
1192 channel->channel, ev_sub_data);
1193 break;
1194 case FSE_AA_RX_RECOVER_EV:
1195 netif_err(efx, rx_err, efx->net_dev,
1196 "channel %d seen DRIVER RX_RESET event. "
1197 "Resetting.\n", channel->channel);
1198 atomic_inc(&efx->rx_reset);
1199 efx_schedule_reset(efx,
1200 EFX_WORKAROUND_6555(efx) ?
1201 RESET_TYPE_RX_RECOVERY :
1202 RESET_TYPE_DISABLE);
1203 break;
1204 case FSE_BZ_RX_DSC_ERROR_EV:
1205 if (ev_sub_data < EFX_VI_BASE) {
1206 netif_err(efx, rx_err, efx->net_dev,
1207 "RX DMA Q %d reports descriptor fetch error."
1208 " RX Q %d is disabled.\n", ev_sub_data,
1209 ev_sub_data);
1210 efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
1211 } else
1212 efx_sriov_desc_fetch_err(efx, ev_sub_data);
1213 break;
1214 case FSE_BZ_TX_DSC_ERROR_EV:
1215 if (ev_sub_data < EFX_VI_BASE) {
1216 netif_err(efx, tx_err, efx->net_dev,
1217 "TX DMA Q %d reports descriptor fetch error."
1218 " TX Q %d is disabled.\n", ev_sub_data,
1219 ev_sub_data);
1220 efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
1221 } else
1222 efx_sriov_desc_fetch_err(efx, ev_sub_data);
1223 break;
1224 default:
1225 netif_vdbg(efx, hw, efx->net_dev,
1226 "channel %d unknown driver event code %d "
1227 "data %04x\n", channel->channel, ev_sub_code,
1228 ev_sub_data);
1229 break;
1230 }
1231}
1232
fa236e18 1233int efx_nic_process_eventq(struct efx_channel *channel, int budget)
8e730c15 1234{
ecc910f5 1235 struct efx_nic *efx = channel->efx;
1236 unsigned int read_ptr;
1237 efx_qword_t event, *p_event;
1238 int ev_code;
1239 int tx_packets = 0;
1240 int spent = 0;
1241
1242 read_ptr = channel->eventq_read_ptr;
1243
fa236e18 1244 for (;;) {
1245 p_event = efx_event(channel, read_ptr);
1246 event = *p_event;
1247
1248 if (!efx_event_present(&event))
1249 /* End of events */
1250 break;
1251
1252 netif_vdbg(channel->efx, intr, channel->efx->net_dev,
1253 "channel %d event is "EFX_QWORD_FMT"\n",
1254 channel->channel, EFX_QWORD_VAL(event));
1255
1256 /* Clear this event by marking it all ones */
1257 EFX_SET_QWORD(*p_event);
1258
d4fabcc8 1259 ++read_ptr;
fa236e18 1260
1261 ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);
1262
1263 switch (ev_code) {
1264 case FSE_AZ_EV_CODE_RX_EV:
1265 efx_handle_rx_event(channel, &event);
1266 if (++spent == budget)
1267 goto out;
1268 break;
1269 case FSE_AZ_EV_CODE_TX_EV:
fa236e18 1270 tx_packets += efx_handle_tx_event(channel, &event);
ecc910f5 1271 if (tx_packets > efx->txq_entries) {
1272 spent = budget;
1273 goto out;
1274 }
1275 break;
1276 case FSE_AZ_EV_CODE_DRV_GEN_EV:
90d683af 1277 efx_handle_generated_event(channel, &event);
8e730c15 1278 break;
1279 case FSE_AZ_EV_CODE_DRIVER_EV:
1280 efx_handle_driver_event(channel, &event);
1281 break;
1282 case FSE_CZ_EV_CODE_USER_EV:
1283 efx_sriov_event(channel, &event);
1284 break;
1285 case FSE_CZ_EV_CODE_MCDI_EV:
1286 efx_mcdi_process_event(channel, &event);
1287 break;
1288 case FSE_AZ_EV_CODE_GLOBAL_EV:
1289 if (efx->type->handle_global_event &&
1290 efx->type->handle_global_event(channel, &event))
1291 break;
1292 /* else fall through */
8e730c15 1293 default:
1294 netif_err(channel->efx, hw, channel->efx->net_dev,
1295 "channel %d unknown event type %d (data "
1296 EFX_QWORD_FMT ")\n", channel->channel,
1297 ev_code, EFX_QWORD_VAL(event));
8e730c15 1298 }
fa236e18 1299 }
8e730c15 1300
fa236e18 1301out:
8e730c15 1302 channel->eventq_read_ptr = read_ptr;
fa236e18 1303 return spent;
1304}
1305
1306/* Check whether an event is present in the eventq at the current
1307 * read pointer. Only useful for self-test.
1308 */
1309bool efx_nic_event_present(struct efx_channel *channel)
1310{
1311 return efx_event_present(efx_event(channel, channel->eventq_read_ptr));
1312}
1313
1314/* Allocate buffer table entries for event queue */
1315int efx_nic_probe_eventq(struct efx_channel *channel)
1316{
1317 struct efx_nic *efx = channel->efx;
1318 unsigned entries;
1319
1320 entries = channel->eventq_mask + 1;
8e730c15 1321 return efx_alloc_special_buffer(efx, &channel->eventq,
ecc910f5 1322 entries * sizeof(efx_qword_t));
1323}
1324
1325void efx_nic_init_eventq(struct efx_channel *channel)
1326{
8880f4ec 1327 efx_oword_t reg;
1328 struct efx_nic *efx = channel->efx;
1329
1330 netif_dbg(efx, hw, efx->net_dev,
1331 "channel %d event queue in special buffers %d-%d\n",
1332 channel->channel, channel->eventq.index,
1333 channel->eventq.index + channel->eventq.entries - 1);
8e730c15 1334
1335 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
1336 EFX_POPULATE_OWORD_3(reg,
1337 FRF_CZ_TIMER_Q_EN, 1,
1338 FRF_CZ_HOST_NOTIFY_MODE, 0,
1339 FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
1340 efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
1341 }
1342
1343 /* Pin event queue buffer */
1344 efx_init_special_buffer(efx, &channel->eventq);
1345
1346 /* Fill event queue with all ones (i.e. empty events) */
1347 memset(channel->eventq.addr, 0xff, channel->eventq.len);
1348
1349 /* Push event queue to card */
8880f4ec 1350 EFX_POPULATE_OWORD_3(reg,
1351 FRF_AZ_EVQ_EN, 1,
1352 FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
1353 FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
8880f4ec 1354 efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
1355 channel->channel);
1356
1357 efx->type->push_irq_moderation(channel);
1358}
1359
1360void efx_nic_fini_eventq(struct efx_channel *channel)
1361{
8880f4ec 1362 efx_oword_t reg;
1363 struct efx_nic *efx = channel->efx;
1364
1365 /* Remove event queue from card */
1366 EFX_ZERO_OWORD(reg);
1367 efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
8e730c15 1368 channel->channel);
1369 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
1370 efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
1371
1372 /* Unpin event queue */
1373 efx_fini_special_buffer(efx, &channel->eventq);
1374}
1375
1376/* Free buffers backing event queue */
1377void efx_nic_remove_eventq(struct efx_channel *channel)
1378{
1379 efx_free_special_buffer(channel->efx, &channel->eventq);
1380}
1381
1382
eee6f6a9 1383void efx_nic_event_test_start(struct efx_channel *channel)
8e730c15 1384{
dd40781e 1385 channel->event_test_cpu = -1;
eee6f6a9 1386 smp_wmb();
4ef594eb 1387 efx_magic_event(channel, EFX_CHANNEL_MAGIC_TEST(channel));
1388}
1389
2ae75dac 1390void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue)
90d683af 1391{
1392 efx_magic_event(efx_rx_queue_channel(rx_queue),
1393 EFX_CHANNEL_MAGIC_FILL(rx_queue));
1394}
1395
1396/**************************************************************************
1397 *
1398 * Hardware interrupts
1399 * The hardware interrupt handler does very little work; all the event
1400 * queue processing is carried out by per-channel tasklets.
1401 *
1402 **************************************************************************/
1403
1404/* Enable/disable/generate interrupts */
1405static inline void efx_nic_interrupts(struct efx_nic *efx,
1406 bool enabled, bool force)
1407{
1408 efx_oword_t int_en_reg_ker;
1409
1410 EFX_POPULATE_OWORD_3(int_en_reg_ker,
1646a6f3 1411 FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level,
1412 FRF_AZ_KER_INT_KER, force,
1413 FRF_AZ_DRV_INT_EN_KER, enabled);
1414 efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
1415}
1416
1417void efx_nic_enable_interrupts(struct efx_nic *efx)
1418{
1419 EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
1420 wmb(); /* Ensure interrupt vector is clear before interrupts enabled */
1421
8e730c15 1422 efx_nic_interrupts(efx, true, false);
1423}
1424
1425void efx_nic_disable_interrupts(struct efx_nic *efx)
1426{
1427 /* Disable interrupts */
1428 efx_nic_interrupts(efx, false, false);
1429}
1430
1431/* Generate a test interrupt
1432 * Interrupt must already have been enabled, otherwise nasty things
1433 * may happen.
1434 */
eee6f6a9 1435void efx_nic_irq_test_start(struct efx_nic *efx)
8e730c15 1436{
1437 efx->last_irq_cpu = -1;
1438 smp_wmb();
1439 efx_nic_interrupts(efx, true, true);
1440}
1441
1442/* Process a fatal interrupt
1443 * Disable bus mastering ASAP and schedule a reset
1444 */
1445irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx)
1446{
1447 struct falcon_nic_data *nic_data = efx->nic_data;
1448 efx_oword_t *int_ker = efx->irq_status.addr;
1449 efx_oword_t fatal_intr;
1450 int error, mem_perr;
1451
1452 efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
1453 error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);
1454
1455 netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status "
1456 EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
1457 EFX_OWORD_VAL(fatal_intr),
1458 error ? "disabling bus mastering" : "no recognised error");
1459
1460 /* If this is a memory parity error dump which blocks are offending */
1461 mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
1462 EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
1463 if (mem_perr) {
1464 efx_oword_t reg;
1465 efx_reado(efx, &reg, FR_AZ_MEM_STAT);
1466 netif_err(efx, hw, efx->net_dev,
1467 "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n",
1468 EFX_OWORD_VAL(reg));
1469 }
1470
1471 /* Disable both devices */
1472 pci_clear_master(efx->pci_dev);
1473 if (efx_nic_is_dual_func(efx))
1474 pci_clear_master(nic_data->pci_dev2);
1475 efx_nic_disable_interrupts(efx);
1476
1477 /* Count errors and reset or disable the NIC accordingly */
1478 if (efx->int_error_count == 0 ||
1479 time_after(jiffies, efx->int_error_expire)) {
1480 efx->int_error_count = 0;
1481 efx->int_error_expire =
1482 jiffies + EFX_INT_ERROR_EXPIRE * HZ;
1483 }
1484 if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
1485 netif_err(efx, hw, efx->net_dev,
1486 "SYSTEM ERROR - reset scheduled\n");
1487 efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
1488 } else {
1489 netif_err(efx, hw, efx->net_dev,
1490 "SYSTEM ERROR - max number of errors seen. "
1491 "NIC will be disabled\n");
1492 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
1493 }
63695459 1494
1495 return IRQ_HANDLED;
1496}
1497
1498/* Handle a legacy interrupt
1499 * Acknowledges the interrupt and schedules event queue processing.
1500 */
1501static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
1502{
1503 struct efx_nic *efx = dev_id;
1504 efx_oword_t *int_ker = efx->irq_status.addr;
1505 irqreturn_t result = IRQ_NONE;
1506 struct efx_channel *channel;
1507 efx_dword_t reg;
1508 u32 queues;
1509 int syserr;
1510
1511 /* Could this be ours? If interrupts are disabled then the
1512 * channel state may not be valid.
1513 */
1514 if (!efx->legacy_irq_enabled)
1515 return result;
1516
1517 /* Read the ISR which also ACKs the interrupts */
1518 efx_readd(efx, &reg, FR_BZ_INT_ISR0);
1519 queues = EFX_EXTRACT_DWORD(reg, 0, 31);
1520
1521 /* Handle non-event-queue sources */
1522 if (queues & (1U << efx->irq_level)) {
1523 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
1524 if (unlikely(syserr))
1525 return efx_nic_fatal_interrupt(efx);
1646a6f3 1526 efx->last_irq_cpu = raw_smp_processor_id();
63695459 1527 }
8e730c15 1528
1529 if (queues != 0) {
1530 if (EFX_WORKAROUND_15783(efx))
1531 efx->irq_zero_count = 0;
1532
1533 /* Schedule processing of any interrupting queues */
1534 efx_for_each_channel(channel, efx) {
1535 if (queues & 1)
1646a6f3 1536 efx_schedule_channel_irq(channel);
8880f4ec 1537 queues >>= 1;
8e730c15 1538 }
1539 result = IRQ_HANDLED;
1540
41b7e4c3 1541 } else if (EFX_WORKAROUND_15783(efx)) {
1542 efx_qword_t *event;
1543
1544 /* We can't return IRQ_HANDLED more than once on seeing ISR=0
1545 * because this might be a shared interrupt. */
1546 if (efx->irq_zero_count++ == 0)
1547 result = IRQ_HANDLED;
1548
1549 /* Ensure we schedule or rearm all event queues */
8880f4ec
BH
1550 efx_for_each_channel(channel, efx) {
1551 event = efx_event(channel, channel->eventq_read_ptr);
1552 if (efx_event_present(event))
1646a6f3 1553 efx_schedule_channel_irq(channel);
1554 else
1555 efx_nic_eventq_read_ack(channel);
8880f4ec 1556 }
1557 }
1558
1646a6f3 1559 if (result == IRQ_HANDLED)
1560 netif_vdbg(efx, intr, efx->net_dev,
1561 "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
1562 irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
1563
1564 return result;
1565}
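/* Illustrative sketch (not part of the driver): the queues bitmask walk in
 * efx_legacy_interrupt() above could equally be written with
 * for_each_set_bit(); shown here only to make the one-bit-per-channel layout
 * of FR_BZ_INT_ISR0 explicit.  The function name is hypothetical.
 */
static void efx_schedule_isr_channels_example(struct efx_nic *efx, u32 queues)
{
	unsigned long mask = queues;
	unsigned int bit;

	for_each_set_bit(bit, &mask, efx->n_channels)
		efx_schedule_channel_irq(efx_get_channel(efx, bit));
}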
1566
1567/* Handle an MSI interrupt
1568 *
1569 * Handle an MSI hardware interrupt. This routine schedules event
1570 * queue processing. No interrupt acknowledgement cycle is necessary.
1571 * Also, we never need to check that the interrupt is for us, since
1572 * MSI interrupts cannot be shared.
1573 */
1574static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)
1575{
4642610c 1576 struct efx_channel *channel = *(struct efx_channel **)dev_id;
1577 struct efx_nic *efx = channel->efx;
1578 efx_oword_t *int_ker = efx->irq_status.addr;
1579 int syserr;
1580
1581 netif_vdbg(efx, intr, efx->net_dev,
1582 "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
1583 irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
8e730c15 1584
1585 /* Handle non-event-queue sources */
1586 if (channel->channel == efx->irq_level) {
1587 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
1588 if (unlikely(syserr))
1589 return efx_nic_fatal_interrupt(efx);
1646a6f3 1590 efx->last_irq_cpu = raw_smp_processor_id();
63695459 1591 }
1592
1593 /* Schedule processing of the channel */
1646a6f3 1594 efx_schedule_channel_irq(channel);
1595
1596 return IRQ_HANDLED;
1597}
1598
1599
1600/* Setup RSS indirection table.
1601 * This maps from the hash value of the packet to RXQ
1602 */
765c9f46 1603void efx_nic_push_rx_indir_table(struct efx_nic *efx)
8e730c15 1604{
765c9f46 1605 size_t i = 0;
1606 efx_dword_t dword;
1607
1608 if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
1609 return;
1610
1611 BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
1612 FR_BZ_RX_INDIRECTION_TBL_ROWS);
1613
1614 for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
8e730c15 1615 EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
765c9f46 1616 efx->rx_indir_table[i]);
1617 efx_writed(efx, &dword,
1618 FR_BZ_RX_INDIRECTION_TBL +
1619 FR_BZ_RX_INDIRECTION_TBL_STEP * i);
1620 }
1621}
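/* Illustrative note (not part of the driver; the exact hardware behaviour is
 * hedged): conceptually the NIC uses low-order bits of the RX hash to index
 * this table, and the table entry selects the destination queue, roughly
 *
 *	rxq = efx->rx_indir_table[hash % FR_BZ_RX_INDIRECTION_TBL_ROWS];
 *
 * so spreading queue numbers evenly across rx_indir_table[] is what spreads
 * flows across channels.
 */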
1622
1623/* Hook interrupt handler(s): either one MSI/MSI-X handler per channel
1624 * or a single legacy handler, depending on the interrupt mode in use.
1625 */
1626int efx_nic_init_interrupt(struct efx_nic *efx)
1627{
1628 struct efx_channel *channel;
1629 int rc;
1630
1631 if (!EFX_INT_MODE_USE_MSI(efx)) {
1632 irq_handler_t handler;
1633 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
1634 handler = efx_legacy_interrupt;
1635 else
1636 handler = falcon_legacy_interrupt_a1;
1637
1638 rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED,
1639 efx->name, efx);
1640 if (rc) {
62776d03
BH
1641 netif_err(efx, drv, efx->net_dev,
1642 "failed to hook legacy IRQ %d\n",
1643 efx->pci_dev->irq);
8e730c15
BH
1644 goto fail1;
1645 }
1646 return 0;
1647 }
1648
1649 /* Hook MSI or MSI-X interrupt */
1650 efx_for_each_channel(channel, efx) {
1651 rc = request_irq(channel->irq, efx_msi_interrupt,
1652 IRQF_PROBE_SHARED, /* Not shared */
4642610c
BH
1653 efx->channel_name[channel->channel],
1654 &efx->channel[channel->channel]);
8e730c15 1655 if (rc) {
62776d03
BH
1656 netif_err(efx, drv, efx->net_dev,
1657 "failed to hook IRQ %d\n", channel->irq);
8e730c15
BH
1658 goto fail2;
1659 }
1660 }
1661
1662 return 0;
1663
1664 fail2:
1665 efx_for_each_channel(channel, efx)
4642610c 1666 free_irq(channel->irq, &efx->channel[channel->channel]);
8e730c15
BH
1667 fail1:
1668 return rc;
1669}
1670
1671void efx_nic_fini_interrupt(struct efx_nic *efx)
1672{
1673 struct efx_channel *channel;
1674 efx_oword_t reg;
1675
1676 /* Disable MSI/MSI-X interrupts */
1677 efx_for_each_channel(channel, efx) {
1678 if (channel->irq)
4642610c 1679 free_irq(channel->irq, &efx->channel[channel->channel]);
8e730c15
BH
1680 }
1681
1682 /* ACK legacy interrupt */
1683 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
1684 efx_reado(efx, &reg, FR_BZ_INT_ISR0);
1685 else
1686 falcon_irq_ack_a1(efx);
1687
1688 /* Disable legacy interrupt */
1689 if (efx->legacy_irq)
1690 free_irq(efx->legacy_irq, efx);
1691}
1692
cd2d5b52
BH
1693/* Looks at available SRAM resources and works out how many queues we
1694 * can support, and where things like descriptor caches should live.
1695 *
1696 * SRAM is split up as follows:
1697 * 0 buftbl entries for channels
1698 * efx->vf_buftbl_base buftbl entries for SR-IOV
1699 * efx->rx_dc_base RX descriptor caches
1700 * efx->tx_dc_base TX descriptor caches
1701 */
28e47c49
BH
1702void efx_nic_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
1703{
1704 unsigned vi_count, buftbl_min;
1705
1706 /* Account for the buffer table entries backing the datapath channels
1707 * and the descriptor caches for those channels.
1708 */
1709 buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE +
1710 efx->n_tx_channels * EFX_TXQ_TYPES * EFX_MAX_DMAQ_SIZE +
1711 efx->n_channels * EFX_MAX_EVQ_SIZE)
1712 * sizeof(efx_qword_t) / EFX_BUF_SIZE);
1713 vi_count = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
1714
cd2d5b52
BH
1715#ifdef CONFIG_SFC_SRIOV
1716 if (efx_sriov_wanted(efx)) {
1717 unsigned vi_dc_entries, buftbl_free, entries_per_vf, vf_limit;
1718
1719 efx->vf_buftbl_base = buftbl_min;
1720
1721 vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES;
1722 vi_count = max(vi_count, EFX_VI_BASE);
1723 buftbl_free = (sram_lim_qw - buftbl_min -
1724 vi_count * vi_dc_entries);
1725
1726 entries_per_vf = ((vi_dc_entries + EFX_VF_BUFTBL_PER_VI) *
1727 efx_vf_size(efx));
1728 vf_limit = min(buftbl_free / entries_per_vf,
1729 (1024U - EFX_VI_BASE) >> efx->vi_scale);
1730
1731 if (efx->vf_count > vf_limit) {
1732 netif_err(efx, probe, efx->net_dev,
1733 "Reducing VF count from from %d to %d\n",
1734 efx->vf_count, vf_limit);
1735 efx->vf_count = vf_limit;
1736 }
1737 vi_count += efx->vf_count * efx_vf_size(efx);
1738 }
1739#endif
1740
28e47c49
BH
1741 efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
1742 efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES;
1743}
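/* Worked example (illustrative values, not taken from the code above): with
 * 4 channels that each have one RX queue and EFX_TXQ_TYPES == 4 TX queues,
 * 4096-entry descriptor rings, 32768-entry event queues, 8-byte queue
 * entries and 4 KB buffer-table pages, the datapath reservation is
 *
 *	RX descriptors:	4 * 4096	 =  16384 entries
 *	TX descriptors:	4 * 4 * 4096	 =  65536 entries
 *	EV entries:	4 * 32768	 = 131072 entries
 *	buftbl_min:	(16384 + 65536 + 131072) * 8 / 4096 = 416
 *
 * The descriptor caches are then carved from the top of SRAM: tx_dc_base is
 * vi_count * TX_DC_ENTRIES below sram_lim_qw, and rx_dc_base a further
 * vi_count * RX_DC_ENTRIES below that, matching the layout comment above.
 */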
1744
8e730c15
BH
1745u32 efx_nic_fpga_ver(struct efx_nic *efx)
1746{
1747 efx_oword_t altera_build;
1748 efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
1749 return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
1750}
1751
1752void efx_nic_init_common(struct efx_nic *efx)
1753{
1754 efx_oword_t temp;
1755
1756 /* Set positions of descriptor caches in SRAM. */
28e47c49 1757 EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base);
8e730c15 1758 efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
28e47c49 1759 EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base);
8e730c15
BH
1760 efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);
1761
1762 /* Set TX descriptor cache size. */
1763 BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
1764 EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
1765 efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);
1766
1767 /* Set RX descriptor cache size. Set low watermark to size-8, as
1768 * this allows the most efficient prefetching.
1769 */
1770 BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
1771 EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
1772 efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
1773 EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
1774 efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);
1775
1776 /* Program INT_KER address */
1777 EFX_POPULATE_OWORD_2(temp,
1778 FRF_AZ_NORM_INT_VEC_DIS_KER,
1779 EFX_INT_MODE_USE_MSI(efx),
1780 FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
1781 efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);
1782
63695459
SH
1783 if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
1784 /* Use an interrupt level unused by event queues */
1646a6f3 1785 efx->irq_level = 0x1f;
63695459
SH
1786 else
1787 /* Use a valid MSI-X vector */
1646a6f3 1788 efx->irq_level = 0;
63695459 1789
8e730c15
BH
1790 /* Enable all the genuinely fatal interrupts. (They are still
1791 * masked by the overall interrupt mask, controlled by
1792 * falcon_interrupts()).
1793 *
1794 * Note: All other fatal interrupts are enabled
1795 */
1796 EFX_POPULATE_OWORD_3(temp,
1797 FRF_AZ_ILL_ADR_INT_KER_EN, 1,
1798 FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
1799 FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
b17424b0
SH
1800 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
1801 EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
8e730c15
BH
1802 EFX_INVERT_OWORD(temp);
1803 efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
1804
765c9f46 1805 efx_nic_push_rx_indir_table(efx);
8e730c15
BH
1806
1807 /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
1808 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
1809 */
1810 efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
1811 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
1812 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
1813 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
cd38557d 1814 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
8e730c15
BH
1815 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
1816 /* Enable SW_EV to inherit in char driver - assume harmless here */
1817 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
1818 /* Prefetch threshold 2 => fetch when descriptor cache half empty */
1819 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
286d47ba
BH
1820 /* Disable hardware watchdog which can misfire */
1821 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
8e730c15
BH
1822 /* Squash TX of packets of 16 bytes or less */
1823 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
1824 EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
1825 efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
94b274bf
BH
1826
1827 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
1828 EFX_POPULATE_OWORD_4(temp,
1829 /* Default values */
1830 FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
1831 FRF_BZ_TX_PACE_SB_AF, 0xb,
1832 FRF_BZ_TX_PACE_FB_BASE, 0,
1833 /* Allow large pace values in the
1834 * fast bin. */
1835 FRF_BZ_TX_PACE_BIN_TH,
1836 FFE_BZ_TX_PACE_RESERVED);
1837 efx_writeo(efx, &temp, FR_BZ_TX_PACE);
1838 }
8e730c15 1839}
5b98c1bf
BH
1840
1841/* Register dump */
1842
1843#define REGISTER_REVISION_A 1
1844#define REGISTER_REVISION_B 2
1845#define REGISTER_REVISION_C 3
1846#define REGISTER_REVISION_Z 3 /* latest revision */
1847
1848struct efx_nic_reg {
1849 u32 offset:24;
1850 u32 min_revision:2, max_revision:2;
1851};
1852
1853#define REGISTER(name, min_rev, max_rev) { \
1854 FR_ ## min_rev ## max_rev ## _ ## name, \
1855 REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev \
1856}
1857#define REGISTER_AA(name) REGISTER(name, A, A)
1858#define REGISTER_AB(name) REGISTER(name, A, B)
1859#define REGISTER_AZ(name) REGISTER(name, A, Z)
1860#define REGISTER_BB(name) REGISTER(name, B, B)
1861#define REGISTER_BZ(name) REGISTER(name, B, Z)
1862#define REGISTER_CZ(name) REGISTER(name, C, Z)
1863
1864static const struct efx_nic_reg efx_nic_regs[] = {
1865 REGISTER_AZ(ADR_REGION),
1866 REGISTER_AZ(INT_EN_KER),
1867 REGISTER_BZ(INT_EN_CHAR),
1868 REGISTER_AZ(INT_ADR_KER),
1869 REGISTER_BZ(INT_ADR_CHAR),
1870 /* INT_ACK_KER is WO */
1871 /* INT_ISR0 is RC */
1872 REGISTER_AZ(HW_INIT),
1873 REGISTER_CZ(USR_EV_CFG),
1874 REGISTER_AB(EE_SPI_HCMD),
1875 REGISTER_AB(EE_SPI_HADR),
1876 REGISTER_AB(EE_SPI_HDATA),
1877 REGISTER_AB(EE_BASE_PAGE),
1878 REGISTER_AB(EE_VPD_CFG0),
1879 /* EE_VPD_SW_CNTL and EE_VPD_SW_DATA are not used */
1880 /* PMBX_DBG_IADDR and PBMX_DBG_IDATA are indirect */
1881 /* PCIE_CORE_INDIRECT is indirect */
1882 REGISTER_AB(NIC_STAT),
1883 REGISTER_AB(GPIO_CTL),
1884 REGISTER_AB(GLB_CTL),
1885 /* FATAL_INTR_KER and FATAL_INTR_CHAR are partly RC */
1886 REGISTER_BZ(DP_CTRL),
1887 REGISTER_AZ(MEM_STAT),
1888 REGISTER_AZ(CS_DEBUG),
1889 REGISTER_AZ(ALTERA_BUILD),
1890 REGISTER_AZ(CSR_SPARE),
1891 REGISTER_AB(PCIE_SD_CTL0123),
1892 REGISTER_AB(PCIE_SD_CTL45),
1893 REGISTER_AB(PCIE_PCS_CTL_STAT),
1894 /* DEBUG_DATA_OUT is not used */
1895 /* DRV_EV is WO */
1896 REGISTER_AZ(EVQ_CTL),
1897 REGISTER_AZ(EVQ_CNT1),
1898 REGISTER_AZ(EVQ_CNT2),
1899 REGISTER_AZ(BUF_TBL_CFG),
1900 REGISTER_AZ(SRM_RX_DC_CFG),
1901 REGISTER_AZ(SRM_TX_DC_CFG),
1902 REGISTER_AZ(SRM_CFG),
1903 /* BUF_TBL_UPD is WO */
1904 REGISTER_AZ(SRM_UPD_EVQ),
1905 REGISTER_AZ(SRAM_PARITY),
1906 REGISTER_AZ(RX_CFG),
1907 REGISTER_BZ(RX_FILTER_CTL),
1908 /* RX_FLUSH_DESCQ is WO */
1909 REGISTER_AZ(RX_DC_CFG),
1910 REGISTER_AZ(RX_DC_PF_WM),
1911 REGISTER_BZ(RX_RSS_TKEY),
1912 /* RX_NODESC_DROP is RC */
1913 REGISTER_AA(RX_SELF_RST),
1914 /* RX_DEBUG, RX_PUSH_DROP are not used */
1915 REGISTER_CZ(RX_RSS_IPV6_REG1),
1916 REGISTER_CZ(RX_RSS_IPV6_REG2),
1917 REGISTER_CZ(RX_RSS_IPV6_REG3),
1918 /* TX_FLUSH_DESCQ is WO */
1919 REGISTER_AZ(TX_DC_CFG),
1920 REGISTER_AA(TX_CHKSM_CFG),
1921 REGISTER_AZ(TX_CFG),
1922 /* TX_PUSH_DROP is not used */
1923 REGISTER_AZ(TX_RESERVED),
1924 REGISTER_BZ(TX_PACE),
1925 /* TX_PACE_DROP_QID is RC */
1926 REGISTER_BB(TX_VLAN),
1927 REGISTER_BZ(TX_IPFIL_PORTEN),
1928 REGISTER_AB(MD_TXD),
1929 REGISTER_AB(MD_RXD),
1930 REGISTER_AB(MD_CS),
1931 REGISTER_AB(MD_PHY_ADR),
1932 REGISTER_AB(MD_ID),
1933 /* MD_STAT is RC */
1934 REGISTER_AB(MAC_STAT_DMA),
1935 REGISTER_AB(MAC_CTRL),
1936 REGISTER_BB(GEN_MODE),
1937 REGISTER_AB(MAC_MC_HASH_REG0),
1938 REGISTER_AB(MAC_MC_HASH_REG1),
1939 REGISTER_AB(GM_CFG1),
1940 REGISTER_AB(GM_CFG2),
1941 /* GM_IPG and GM_HD are not used */
1942 REGISTER_AB(GM_MAX_FLEN),
1943 /* GM_TEST is not used */
1944 REGISTER_AB(GM_ADR1),
1945 REGISTER_AB(GM_ADR2),
1946 REGISTER_AB(GMF_CFG0),
1947 REGISTER_AB(GMF_CFG1),
1948 REGISTER_AB(GMF_CFG2),
1949 REGISTER_AB(GMF_CFG3),
1950 REGISTER_AB(GMF_CFG4),
1951 REGISTER_AB(GMF_CFG5),
1952 REGISTER_BB(TX_SRC_MAC_CTL),
1953 REGISTER_AB(XM_ADR_LO),
1954 REGISTER_AB(XM_ADR_HI),
1955 REGISTER_AB(XM_GLB_CFG),
1956 REGISTER_AB(XM_TX_CFG),
1957 REGISTER_AB(XM_RX_CFG),
1958 REGISTER_AB(XM_MGT_INT_MASK),
1959 REGISTER_AB(XM_FC),
1960 REGISTER_AB(XM_PAUSE_TIME),
1961 REGISTER_AB(XM_TX_PARAM),
1962 REGISTER_AB(XM_RX_PARAM),
1963 /* XM_MGT_INT_MSK (note no 'A') is RC */
1964 REGISTER_AB(XX_PWR_RST),
1965 REGISTER_AB(XX_SD_CTL),
1966 REGISTER_AB(XX_TXDRV_CTL),
1967 /* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */
1968 /* XX_CORE_STAT is partly RC */
1969};
1970
1971struct efx_nic_reg_table {
1972 u32 offset:24;
1973 u32 min_revision:2, max_revision:2;
1974 u32 step:6, rows:21;
1975};
1976
1977#define REGISTER_TABLE_DIMENSIONS(_, offset, min_rev, max_rev, step, rows) { \
1978 offset, \
1979 REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev, \
1980 step, rows \
1981}
9c636baf 1982#define REGISTER_TABLE(name, min_rev, max_rev) \
5b98c1bf
BH
1983 REGISTER_TABLE_DIMENSIONS( \
1984 name, FR_ ## min_rev ## max_rev ## _ ## name, \
1985 min_rev, max_rev, \
1986 FR_ ## min_rev ## max_rev ## _ ## name ## _STEP, \
1987 FR_ ## min_rev ## max_rev ## _ ## name ## _ROWS)
1988#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, A, A)
1989#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, A, Z)
1990#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, B, B)
1991#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, B, Z)
1992#define REGISTER_TABLE_BB_CZ(name) \
1993 REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, B, B, \
1994 FR_BZ_ ## name ## _STEP, \
1995 FR_BB_ ## name ## _ROWS), \
1996 REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, C, Z, \
1997 FR_BZ_ ## name ## _STEP, \
1998 FR_CZ_ ## name ## _ROWS)
1999#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, C, Z)
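/* REGISTER_TABLE_BB_CZ() deliberately expands to two entries: revisions B
 * and C onwards share a table's offset and step but not its row count, so
 * e.g. REGISTER_TABLE_BB_CZ(RX_DESC_PTR_TBL) emits one entry sized by
 * FR_BB_RX_DESC_PTR_TBL_ROWS and a second sized by
 * FR_CZ_RX_DESC_PTR_TBL_ROWS, both based at FR_BZ_RX_DESC_PTR_TBL with step
 * FR_BZ_RX_DESC_PTR_TBL_STEP.
 */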
2000
2001static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
2002 /* DRIVER is not used */
2003 /* EVQ_RPTR, TIMER_COMMAND, USR_EV and {RX,TX}_DESC_UPD are WO */
2004 REGISTER_TABLE_BB(TX_IPFIL_TBL),
2005 REGISTER_TABLE_BB(TX_SRC_MAC_TBL),
2006 REGISTER_TABLE_AA(RX_DESC_PTR_TBL_KER),
2007 REGISTER_TABLE_BB_CZ(RX_DESC_PTR_TBL),
2008 REGISTER_TABLE_AA(TX_DESC_PTR_TBL_KER),
2009 REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL),
2010 REGISTER_TABLE_AA(EVQ_PTR_TBL_KER),
2011 REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL),
75abc51c 2012 /* We can't reasonably read all of the buffer table (up to 8MB!).
5b98c1bf
BH
2013 * However, this driver will only use a few entries. Reading
2014 * 1K entries allows for some expansion of queue count and
2015 * size before we need to change the version. */
2016 REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER,
2017 A, A, 8, 1024),
2018 REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL,
2019 B, Z, 8, 1024),
5b98c1bf
BH
2020 REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0),
2021 REGISTER_TABLE_BB_CZ(TIMER_TBL),
2022 REGISTER_TABLE_BB_CZ(TX_PACE_TBL),
2023 REGISTER_TABLE_BZ(RX_INDIRECTION_TBL),
2024 /* TX_FILTER_TBL0 is huge and not used by this driver */
2025 REGISTER_TABLE_CZ(TX_MAC_FILTER_TBL0),
2026 REGISTER_TABLE_CZ(MC_TREG_SMEM),
2027 /* MSIX_PBA_TABLE is not mapped */
2028 /* SRM_DBG is not mapped (and is redundant with BUF_FLL_TBL) */
75abc51c 2029 REGISTER_TABLE_BZ(RX_FILTER_TBL0),
5b98c1bf
BH
2030};
2031
2032size_t efx_nic_get_regs_len(struct efx_nic *efx)
2033{
2034 const struct efx_nic_reg *reg;
2035 const struct efx_nic_reg_table *table;
2036 size_t len = 0;
2037
2038 for (reg = efx_nic_regs;
2039 reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
2040 reg++)
2041 if (efx->type->revision >= reg->min_revision &&
2042 efx->type->revision <= reg->max_revision)
2043 len += sizeof(efx_oword_t);
2044
2045 for (table = efx_nic_reg_tables;
2046 table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
2047 table++)
2048 if (efx->type->revision >= table->min_revision &&
2049 efx->type->revision <= table->max_revision)
2050 len += table->rows * min_t(size_t, table->step, 16);
2051
2052 return len;
2053}
2054
2055void efx_nic_get_regs(struct efx_nic *efx, void *buf)
2056{
2057 const struct efx_nic_reg *reg;
2058 const struct efx_nic_reg_table *table;
2059
2060 for (reg = efx_nic_regs;
2061 reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
2062 reg++) {
2063 if (efx->type->revision >= reg->min_revision &&
2064 efx->type->revision <= reg->max_revision) {
2065 efx_reado(efx, (efx_oword_t *)buf, reg->offset);
2066 buf += sizeof(efx_oword_t);
2067 }
2068 }
2069
2070 for (table = efx_nic_reg_tables;
2071 table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
2072 table++) {
2073 size_t size, i;
2074
2075 if (!(efx->type->revision >= table->min_revision &&
2076 efx->type->revision <= table->max_revision))
2077 continue;
2078
2079 size = min_t(size_t, table->step, 16);
2080
2081 for (i = 0; i < table->rows; i++) {
2082 switch (table->step) {
778cdaf6
BH
2083 case 4: /* 32-bit SRAM */
2084 efx_readd(efx, buf, table->offset + 4 * i);
5b98c1bf
BH
2085 break;
2086 case 8: /* 64-bit SRAM */
2087 efx_sram_readq(efx,
2088 efx->membase + table->offset,
2089 buf, i);
2090 break;
778cdaf6 2091 case 16: /* 128-bit-readable register */
5b98c1bf
BH
2092 efx_reado_table(efx, buf, table->offset, i);
2093 break;
2094 case 32: /* 128-bit register, interleaved */
2095 efx_reado_table(efx, buf, table->offset, 2 * i);
2096 break;
2097 default:
2098 WARN_ON(1);
2099 return;
2100 }
2101 buf += size;
2102 }
2103 }
2104}
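/* Aside (not the driver's actual ethtool wiring): the two functions above
 * pair up in the obvious way - size a buffer with efx_nic_get_regs_len()
 * and then fill it with efx_nic_get_regs().  A hypothetical caller:
 */
#if 0	/* illustrative sketch only */
static void *example_dump_regs(struct efx_nic *efx, size_t *len_out)
{
	size_t len = efx_nic_get_regs_len(efx);
	void *buf = kzalloc(len, GFP_KERNEL);	/* needs <linux/slab.h> */

	if (buf) {
		efx_nic_get_regs(efx, buf);
		*len_out = len;
	}
	return buf;	/* caller frees with kfree() */
}
#endif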