/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2012-2013 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "net_driver.h"
#include "ef10_regs.h"
#include "mcdi_pcol.h"
#include "workarounds.h"
#include <linux/jhash.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
/* Hardware control for EF10 architecture including 'Huntington'. */

#define EFX_EF10_DRVGEN_EV	7
enum {
	EFX_EF10_TEST = 1,
	EFX_EF10_REFILL,
};

/* The reserved RSS context value */
#define EFX_EF10_RSS_CONTEXT_INVALID	0xffffffff
/* The filter table(s) are managed by firmware and we have write-only
 * access.  When removing filters we must identify them to the
 * firmware by a 64-bit handle, but this is too wide for Linux kernel
 * interfaces (32-bit for RX NFC, 16-bit for RFS).  Also, we need to
 * be able to tell in advance whether a requested insertion will
 * replace an existing filter.  Therefore we maintain a software hash
 * table, which should be at least as large as the hardware hash
 * table.
 *
 * Huntington has a single 8K filter table shared between all filter
 * types and both ports.
 */
#define HUNT_FILTER_TBL_ROWS	8192
struct efx_ef10_filter_table {
/* The RX match field masks supported by this fw & hw, in order of priority */
	enum efx_filter_match_flags rx_match_flags[
		MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM];
	unsigned int rx_match_count;

	struct {
		unsigned long spec;	/* pointer to spec plus flag bits */
/* BUSY flag indicates that an update is in progress.  STACK_OLD is
 * used to mark and sweep stack-owned MAC filters.
 */
#define EFX_EF10_FILTER_FLAG_BUSY	1UL
#define EFX_EF10_FILTER_FLAG_STACK_OLD	2UL
#define EFX_EF10_FILTER_FLAGS		3UL
		u64 handle;		/* firmware handle */
	} *entry;
	wait_queue_head_t waitq;
/* Shadow of net_device address lists, guarded by mac_lock */
#define EFX_EF10_FILTER_STACK_UC_MAX	32
#define EFX_EF10_FILTER_STACK_MC_MAX	256
	struct {
		u8 addr[ETH_ALEN];
		u16 id;
	} stack_uc_list[EFX_EF10_FILTER_STACK_UC_MAX],
	  stack_mc_list[EFX_EF10_FILTER_STACK_MC_MAX];
	int stack_uc_count;		/* negative for PROMISC */
	int stack_mc_count;		/* negative for PROMISC/ALLMULTI */
};
/* An arbitrary search limit for the software hash table */
#define EFX_EF10_FILTER_SEARCH_LIMIT 200
static void efx_ef10_rx_push_indir_table(struct efx_nic *efx);
static void efx_ef10_rx_free_indir_table(struct efx_nic *efx);
static void efx_ef10_filter_table_remove(struct efx_nic *efx);
static int efx_ef10_get_warm_boot_count(struct efx_nic *efx)
{
	efx_dword_t reg;

	efx_readd(efx, &reg, ER_DZ_BIU_MC_SFT_STATUS);
	return EFX_DWORD_FIELD(reg, EFX_WORD_1) == 0xb007 ?
		EFX_DWORD_FIELD(reg, EFX_WORD_0) : -EIO;
}
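/* 0xb007 is presumably a "boot" magic: firmware keeps it in the upper
 * word of ER_DZ_BIU_MC_SFT_STATUS to mark the lower word (the warm
 * boot count) as valid, so anything else means the register cannot be
 * trusted, hence the -EIO above.
 */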
static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx)
{
	return resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]);
}
static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_OUT_LEN);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	size_t outlen;
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < sizeof(outbuf)) {
		netif_err(efx, drv, efx->net_dev,
			  "unable to read datapath firmware capabilities\n");
		return -EIO;
	}

	nic_data->datapath_caps =
		MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1);

	if (!(nic_data->datapath_caps &
	      (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN))) {
		netif_err(efx, drv, efx->net_dev,
			  "current firmware does not support TSO\n");
		return -ENODEV;
	}

	if (!(nic_data->datapath_caps &
	      (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN))) {
		netif_err(efx, probe, efx->net_dev,
			  "current firmware does not support an RX prefix\n");
		return -ENODEV;
	}

	return 0;
}
static int efx_ef10_get_sysclk_freq(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CLOCK_OUT_LEN);
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_CLOCK, NULL, 0,
			  outbuf, sizeof(outbuf), NULL);
	if (rc)
		return rc;

	rc = MCDI_DWORD(outbuf, GET_CLOCK_OUT_SYS_FREQ);
	return rc > 0 ? rc : -ERANGE;
}
static int efx_ef10_get_mac_address(struct efx_nic *efx, u8 *mac_address)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN);
	size_t outlen;
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_MAC_ADDRESSES_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_MAC_ADDRESSES, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN)
		return -EIO;

	memcpy(mac_address,
	       MCDI_PTR(outbuf, GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE), ETH_ALEN);
	return 0;
}
static int efx_ef10_probe(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data;
	int i, rc;

	/* We can have one VI for each 8K region.  However we need
	 * multiple TX queues per channel.
	 */
	efx->max_channels =
		min_t(unsigned int,
		      EFX_MAX_CHANNELS,
		      resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]) /
		      (EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES));
	BUG_ON(efx->max_channels == 0);

	nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
	if (!nic_data)
		return -ENOMEM;
	efx->nic_data = nic_data;

	rc = efx_nic_alloc_buffer(efx, &nic_data->mcdi_buf,
				  8 + MCDI_CTL_SDU_LEN_MAX_V2, GFP_KERNEL);
	if (rc)
		goto fail1;

	/* Get the MC's warm boot count.  In case it's rebooting right
	 * now, be prepared to retry.
	 */
	i = 0;
	for (;;) {
		rc = efx_ef10_get_warm_boot_count(efx);
		if (rc >= 0)
			break;
		if (++i == 5)
			goto fail2;
		ssleep(1);
	}
	nic_data->warm_boot_count = rc;

	nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;

	/* In case we're recovering from a crash (kexec), we want to
	 * cancel any outstanding request by the previous user of this
	 * function.  We send a special message using the least
	 * significant bits of the 'high' (doorbell) register.
	 */
	_efx_writed(efx, cpu_to_le32(1), ER_DZ_MC_DB_HWRD);

	rc = efx_mcdi_init(efx);
	if (rc)
		goto fail2;

	/* Reset (most) configuration for this function */
	rc = efx_mcdi_reset(efx, RESET_TYPE_ALL);
	if (rc)
		goto fail3;

	/* Enable event logging */
	rc = efx_mcdi_log_ctrl(efx, true, false, 0);
	if (rc)
		goto fail3;

	rc = efx_ef10_init_datapath_caps(efx);
	if (rc < 0)
		goto fail3;

	efx->rx_packet_len_offset =
		ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE;

	rc = efx_mcdi_port_get_number(efx);
	if (rc < 0)
		goto fail3;
	efx->port_num = rc;

	rc = efx_ef10_get_mac_address(efx, efx->net_dev->perm_addr);
	if (rc)
		goto fail3;

	rc = efx_ef10_get_sysclk_freq(efx);
	if (rc < 0)
		goto fail3;
	efx->timer_quantum_ns = 1536000 / rc; /* 1536 cycles */

	/* Check whether firmware supports bug 35388 workaround */
	rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG35388, true);
	if (rc == 0)
		nic_data->workaround_35388 = true;
	else if (rc != -ENOSYS && rc != -ENOENT)
		goto fail3;
	netif_dbg(efx, probe, efx->net_dev,
		  "workaround for bug 35388 is %sabled\n",
		  nic_data->workaround_35388 ? "en" : "dis");

	rc = efx_mcdi_mon_probe(efx);
	if (rc)
		goto fail3;

	return 0;

fail3:
	efx_mcdi_fini(efx);
fail2:
	efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
fail1:
	kfree(nic_data);
	efx->nic_data = NULL;
	return rc;
}
static int efx_ef10_free_vis(struct efx_nic *efx)
{
	int rc = efx_mcdi_rpc(efx, MC_CMD_FREE_VIS, NULL, 0, NULL, 0, NULL);

	/* -EALREADY means nothing to free, so ignore */
	if (rc == -EALREADY)
		rc = 0;
	return rc;
}
#ifdef EFX_USE_PIO

static void efx_ef10_free_piobufs(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	MCDI_DECLARE_BUF(inbuf, MC_CMD_FREE_PIOBUF_IN_LEN);
	unsigned int i;
	int rc;

	BUILD_BUG_ON(MC_CMD_FREE_PIOBUF_OUT_LEN != 0);

	for (i = 0; i < nic_data->n_piobufs; i++) {
		MCDI_SET_DWORD(inbuf, FREE_PIOBUF_IN_PIOBUF_HANDLE,
			       nic_data->piobuf_handle[i]);
		rc = efx_mcdi_rpc(efx, MC_CMD_FREE_PIOBUF, inbuf, sizeof(inbuf),
				  NULL, 0, NULL);
		WARN_ON(rc);
	}

	nic_data->n_piobufs = 0;
}
static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_PIOBUF_OUT_LEN);
	unsigned int i;
	size_t outlen;
	int rc = 0;

	BUILD_BUG_ON(MC_CMD_ALLOC_PIOBUF_IN_LEN != 0);

	for (i = 0; i < n; i++) {
		rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_PIOBUF, NULL, 0,
				  outbuf, sizeof(outbuf), &outlen);
		if (rc)
			break;
		if (outlen < MC_CMD_ALLOC_PIOBUF_OUT_LEN) {
			rc = -EIO;
			break;
		}
		nic_data->piobuf_handle[i] =
			MCDI_DWORD(outbuf, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE);
		netif_dbg(efx, probe, efx->net_dev,
			  "allocated PIO buffer %u handle %x\n", i,
			  nic_data->piobuf_handle[i]);
	}

	nic_data->n_piobufs = i;
	if (rc)
		efx_ef10_free_piobufs(efx);
	return rc;
}
static int efx_ef10_link_piobufs(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	MCDI_DECLARE_BUF(inbuf,
			 max(MC_CMD_LINK_PIOBUF_IN_LEN,
			     MC_CMD_UNLINK_PIOBUF_IN_LEN));
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	unsigned int offset, index;
	int rc;

	BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_OUT_LEN != 0);
	BUILD_BUG_ON(MC_CMD_UNLINK_PIOBUF_OUT_LEN != 0);

	/* Link a buffer to each VI in the write-combining mapping */
	for (index = 0; index < nic_data->n_piobufs; ++index) {
		MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_PIOBUF_HANDLE,
			       nic_data->piobuf_handle[index]);
		MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_TXQ_INSTANCE,
			       nic_data->pio_write_vi_base + index);
		rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF,
				  inbuf, MC_CMD_LINK_PIOBUF_IN_LEN,
				  NULL, 0, NULL);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to link VI %u to PIO buffer %u (%d)\n",
				  nic_data->pio_write_vi_base + index, index,
				  rc);
			goto fail;
		}
		netif_dbg(efx, probe, efx->net_dev,
			  "linked VI %u to PIO buffer %u\n",
			  nic_data->pio_write_vi_base + index, index);
	}

	/* Link a buffer to each TX queue */
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			/* We assign the PIO buffers to queues in
			 * reverse order to allow for the following
			 * special case.
			 */
			offset = ((efx->tx_channel_offset + efx->n_tx_channels -
				   tx_queue->channel->channel - 1) *
				  efx_piobuf_size);
			index = offset / ER_DZ_TX_PIOBUF_SIZE;
			offset = offset % ER_DZ_TX_PIOBUF_SIZE;

			/* When the host page size is 4K, the first
			 * host page in the WC mapping may be within
			 * the same VI page as the last TX queue.  We
			 * can only link one buffer to each VI.
			 */
			if (tx_queue->queue == nic_data->pio_write_vi_base) {
				BUG_ON(index != 0);
				rc = 0;
			} else {
				MCDI_SET_DWORD(inbuf,
					       LINK_PIOBUF_IN_PIOBUF_HANDLE,
					       nic_data->piobuf_handle[index]);
				MCDI_SET_DWORD(inbuf,
					       LINK_PIOBUF_IN_TXQ_INSTANCE,
					       tx_queue->queue);
				rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF,
						  inbuf,
						  MC_CMD_LINK_PIOBUF_IN_LEN,
						  NULL, 0, NULL);
			}

			if (rc) {
				/* This is non-fatal; the TX path just
				 * won't use PIO for this queue
				 */
				netif_err(efx, drv, efx->net_dev,
					  "failed to link VI %u to PIO buffer %u (%d)\n",
					  tx_queue->queue, index, rc);
				tx_queue->piobuf = NULL;
			} else {
				tx_queue->piobuf =
					nic_data->pio_write_base +
					index * EFX_VI_PAGE_SIZE + offset;
				tx_queue->piobuf_offset = offset;
				netif_dbg(efx, probe, efx->net_dev,
					  "linked VI %u to PIO buffer %u offset %x addr %p\n",
					  tx_queue->queue, index,
					  tx_queue->piobuf_offset,
					  tx_queue->piobuf);
			}
		}
	}

	return 0;

fail:
	while (index--) {
		MCDI_SET_DWORD(inbuf, UNLINK_PIOBUF_IN_TXQ_INSTANCE,
			       nic_data->pio_write_vi_base + index);
		efx_mcdi_rpc(efx, MC_CMD_UNLINK_PIOBUF,
			     inbuf, MC_CMD_UNLINK_PIOBUF_IN_LEN,
			     NULL, 0, NULL);
	}
	return rc;
}
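/* A small worked example of the offset arithmetic above (the sizes are
 * illustrative assumptions, not taken from the headers): if
 * efx_piobuf_size is 256 and ER_DZ_TX_PIOBUF_SIZE is 2048, a queue
 * whose reversed channel index (tx_channel_offset + n_tx_channels -
 * channel - 1) is 9 computes offset = 9 * 256 = 2304, which splits
 * into index = 2304 / 2048 = 1 (the second PIO buffer) and
 * offset = 2304 % 2048 = 256 within that buffer.
 */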
#else /* !EFX_USE_PIO */

static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
{
	return n == 0 ? 0 : -ENOBUFS;
}

static int efx_ef10_link_piobufs(struct efx_nic *efx)
{
	return 0;
}

static void efx_ef10_free_piobufs(struct efx_nic *efx)
{
}

#endif /* EFX_USE_PIO */
static void efx_ef10_remove(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	int rc;

	efx_mcdi_mon_remove(efx);

	/* This needs to be after efx_ptp_remove_channel() with no filters */
	efx_ef10_rx_free_indir_table(efx);

	if (nic_data->wc_membase)
		iounmap(nic_data->wc_membase);

	rc = efx_ef10_free_vis(efx);
	WARN_ON(rc != 0);

	if (!nic_data->must_restore_piobufs)
		efx_ef10_free_piobufs(efx);

	efx_mcdi_fini(efx);
	efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
	kfree(nic_data);
}
static int efx_ef10_alloc_vis(struct efx_nic *efx,
			      unsigned int min_vis, unsigned int max_vis)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_ALLOC_VIS_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_VIS_OUT_LEN);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MIN_VI_COUNT, min_vis);
	MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MAX_VI_COUNT, max_vis);
	rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_VIS, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc != 0)
		return rc;

	if (outlen < MC_CMD_ALLOC_VIS_OUT_LEN)
		return -EIO;

	netif_dbg(efx, drv, efx->net_dev, "base VI is 0x%03x\n",
		  MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE));

	nic_data->vi_base = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE);
	nic_data->n_allocated_vis = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_COUNT);
	return 0;
}
/* Note that the failure path of this function does not free
 * resources, as this will be done by efx_ef10_remove().
 */
static int efx_ef10_dimension_resources(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	unsigned int uc_mem_map_size, wc_mem_map_size;
	unsigned int min_vis, pio_write_vi_base, max_vis;
	void __iomem *membase;
	int rc;

	min_vis = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);

#ifdef EFX_USE_PIO
	/* Try to allocate PIO buffers if wanted and if the full
	 * number of PIO buffers would be sufficient to allocate one
	 * copy-buffer per TX channel.  Failure is non-fatal, as there
	 * are only a small number of PIO buffers shared between all
	 * functions of the controller.
	 */
	if (efx_piobuf_size != 0 &&
	    ER_DZ_TX_PIOBUF_SIZE / efx_piobuf_size * EF10_TX_PIOBUF_COUNT >=
	    efx->n_tx_channels) {
		unsigned int n_piobufs =
			DIV_ROUND_UP(efx->n_tx_channels,
				     ER_DZ_TX_PIOBUF_SIZE / efx_piobuf_size);

		rc = efx_ef10_alloc_piobufs(efx, n_piobufs);
		if (rc)
			netif_err(efx, probe, efx->net_dev,
				  "failed to allocate PIO buffers (%d)\n", rc);
		else
			netif_dbg(efx, probe, efx->net_dev,
				  "allocated %u PIO buffers\n", n_piobufs);
	}
#else
	nic_data->n_piobufs = 0;
#endif

	/* PIO buffers should be mapped with write-combining enabled,
	 * and we want to make single UC and WC mappings rather than
	 * several of each (in fact that's the only option if host
	 * page size is >4K).  So we may allocate some extra VIs just
	 * for writing PIO buffers through.
	 */
	uc_mem_map_size = PAGE_ALIGN((min_vis - 1) * EFX_VI_PAGE_SIZE +
				     ER_DZ_TX_PIOBUF);
	if (nic_data->n_piobufs) {
		pio_write_vi_base = uc_mem_map_size / EFX_VI_PAGE_SIZE;
		wc_mem_map_size = (PAGE_ALIGN((pio_write_vi_base +
					       nic_data->n_piobufs) *
					      EFX_VI_PAGE_SIZE) -
				   uc_mem_map_size);
		max_vis = pio_write_vi_base + nic_data->n_piobufs;
	} else {
		pio_write_vi_base = 0;
		wc_mem_map_size = 0;
		max_vis = min_vis;
	}

	/* In case the last attached driver failed to free VIs, do it now */
	rc = efx_ef10_free_vis(efx);
	if (rc != 0)
		return rc;

	rc = efx_ef10_alloc_vis(efx, min_vis, max_vis);
	if (rc != 0)
		return rc;

	/* If we didn't get enough VIs to map all the PIO buffers, free the
	 * PIO buffers
	 */
	if (nic_data->n_piobufs &&
	    nic_data->n_allocated_vis <
	    pio_write_vi_base + nic_data->n_piobufs) {
		netif_dbg(efx, probe, efx->net_dev,
			  "%u VIs are not sufficient to map %u PIO buffers\n",
			  nic_data->n_allocated_vis, nic_data->n_piobufs);
		efx_ef10_free_piobufs(efx);
	}

	/* Shrink the original UC mapping of the memory BAR */
	membase = ioremap_nocache(efx->membase_phys, uc_mem_map_size);
	if (!membase) {
		netif_err(efx, probe, efx->net_dev,
			  "could not shrink memory BAR to %x\n",
			  uc_mem_map_size);
		return -ENOMEM;
	}
	iounmap(efx->membase);
	efx->membase = membase;

	/* Set up the WC mapping if needed */
	if (wc_mem_map_size) {
		nic_data->wc_membase = ioremap_wc(efx->membase_phys +
						  uc_mem_map_size,
						  wc_mem_map_size);
		if (!nic_data->wc_membase) {
			netif_err(efx, probe, efx->net_dev,
				  "could not allocate WC mapping of size %x\n",
				  wc_mem_map_size);
			return -ENOMEM;
		}
		nic_data->pio_write_vi_base = pio_write_vi_base;
		nic_data->pio_write_base =
			nic_data->wc_membase +
			(pio_write_vi_base * EFX_VI_PAGE_SIZE +
			 ER_DZ_TX_PIOBUF - uc_mem_map_size);

		rc = efx_ef10_link_piobufs(efx);
		if (rc)
			efx_ef10_free_piobufs(efx);
	}

	netif_dbg(efx, probe, efx->net_dev,
		  "memory BAR at %pa (virtual %p+%x UC, %p+%x WC)\n",
		  &efx->membase_phys, efx->membase, uc_mem_map_size,
		  nic_data->wc_membase, wc_mem_map_size);

	return 0;
}
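/* The resulting BAR layout, as implied by the code above: the UC
 * mapping covers VIs [0, min_vis) up to the PIO write aperture of the
 * last ordinary VI, and the WC mapping starts at the next page
 * boundary and covers the dedicated PIO-write VIs.  pio_write_base
 * compensates for the WC mapping not starting at the BAR base, which
 * is why its computation subtracts uc_mem_map_size.
 */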
static int efx_ef10_init_nic(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	int rc;

	if (nic_data->must_check_datapath_caps) {
		rc = efx_ef10_init_datapath_caps(efx);
		if (rc)
			return rc;
		nic_data->must_check_datapath_caps = false;
	}

	if (nic_data->must_realloc_vis) {
		/* We cannot let the number of VIs change now */
		rc = efx_ef10_alloc_vis(efx, nic_data->n_allocated_vis,
					nic_data->n_allocated_vis);
		if (rc)
			return rc;
		nic_data->must_realloc_vis = false;
	}

	if (nic_data->must_restore_piobufs && nic_data->n_piobufs) {
		rc = efx_ef10_alloc_piobufs(efx, nic_data->n_piobufs);
		if (rc == 0) {
			rc = efx_ef10_link_piobufs(efx);
			if (rc)
				efx_ef10_free_piobufs(efx);
		}

		/* Log an error on failure, but this is non-fatal */
		if (rc)
			netif_err(efx, drv, efx->net_dev,
				  "failed to restore PIO buffers (%d)\n", rc);
		nic_data->must_restore_piobufs = false;
	}

	efx_ef10_rx_push_indir_table(efx);
	return 0;
}
static int efx_ef10_map_reset_flags(u32 *flags)
{
	enum {
		EF10_RESET_PORT = ((ETH_RESET_MAC | ETH_RESET_PHY) <<
				   ETH_RESET_SHARED_SHIFT),
		EF10_RESET_MC = ((ETH_RESET_DMA | ETH_RESET_FILTER |
				  ETH_RESET_OFFLOAD | ETH_RESET_MAC |
				  ETH_RESET_PHY | ETH_RESET_MGMT) <<
				 ETH_RESET_SHARED_SHIFT)
	};

	/* We assume for now that our PCI function is permitted to
	 * reset everything.
	 */

	if ((*flags & EF10_RESET_MC) == EF10_RESET_MC) {
		*flags &= ~EF10_RESET_MC;
		return RESET_TYPE_WORLD;
	}

	if ((*flags & EF10_RESET_PORT) == EF10_RESET_PORT) {
		*flags &= ~EF10_RESET_PORT;
		return RESET_TYPE_ALL;
	}

	/* no invisible reset implemented */

	return -EINVAL;
}
#define EF10_DMA_STAT(ext_name, mcdi_name)			\
	[EF10_STAT_ ## ext_name] =				\
	{ #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
#define EF10_DMA_INVIS_STAT(int_name, mcdi_name)		\
	[EF10_STAT_ ## int_name] =				\
	{ NULL, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
#define EF10_OTHER_STAT(ext_name)				\
	[EF10_STAT_ ## ext_name] = { #ext_name, 0, 0 }
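/* For illustration, EF10_DMA_STAT(tx_bytes, TX_BYTES) expands to
 *
 *	[EF10_STAT_tx_bytes] = { "tx_bytes", 64, 8 * MC_CMD_MAC_TX_BYTES }
 *
 * i.e. a 64-bit DMA statistic named "tx_bytes" located at byte offset
 * 8 * MC_CMD_MAC_TX_BYTES within the DMA'd statistics buffer; the
 * INVIS variant has no name and so is never exposed via ethtool.
 */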
static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
	EF10_DMA_STAT(tx_bytes, TX_BYTES),
	EF10_DMA_STAT(tx_packets, TX_PKTS),
	EF10_DMA_STAT(tx_pause, TX_PAUSE_PKTS),
	EF10_DMA_STAT(tx_control, TX_CONTROL_PKTS),
	EF10_DMA_STAT(tx_unicast, TX_UNICAST_PKTS),
	EF10_DMA_STAT(tx_multicast, TX_MULTICAST_PKTS),
	EF10_DMA_STAT(tx_broadcast, TX_BROADCAST_PKTS),
	EF10_DMA_STAT(tx_lt64, TX_LT64_PKTS),
	EF10_DMA_STAT(tx_64, TX_64_PKTS),
	EF10_DMA_STAT(tx_65_to_127, TX_65_TO_127_PKTS),
	EF10_DMA_STAT(tx_128_to_255, TX_128_TO_255_PKTS),
	EF10_DMA_STAT(tx_256_to_511, TX_256_TO_511_PKTS),
	EF10_DMA_STAT(tx_512_to_1023, TX_512_TO_1023_PKTS),
	EF10_DMA_STAT(tx_1024_to_15xx, TX_1024_TO_15XX_PKTS),
	EF10_DMA_STAT(tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS),
	EF10_DMA_STAT(rx_bytes, RX_BYTES),
	EF10_DMA_INVIS_STAT(rx_bytes_minus_good_bytes, RX_BAD_BYTES),
	EF10_OTHER_STAT(rx_good_bytes),
	EF10_OTHER_STAT(rx_bad_bytes),
	EF10_DMA_STAT(rx_packets, RX_PKTS),
	EF10_DMA_STAT(rx_good, RX_GOOD_PKTS),
	EF10_DMA_STAT(rx_bad, RX_BAD_FCS_PKTS),
	EF10_DMA_STAT(rx_pause, RX_PAUSE_PKTS),
	EF10_DMA_STAT(rx_control, RX_CONTROL_PKTS),
	EF10_DMA_STAT(rx_unicast, RX_UNICAST_PKTS),
	EF10_DMA_STAT(rx_multicast, RX_MULTICAST_PKTS),
	EF10_DMA_STAT(rx_broadcast, RX_BROADCAST_PKTS),
	EF10_DMA_STAT(rx_lt64, RX_UNDERSIZE_PKTS),
	EF10_DMA_STAT(rx_64, RX_64_PKTS),
	EF10_DMA_STAT(rx_65_to_127, RX_65_TO_127_PKTS),
	EF10_DMA_STAT(rx_128_to_255, RX_128_TO_255_PKTS),
	EF10_DMA_STAT(rx_256_to_511, RX_256_TO_511_PKTS),
	EF10_DMA_STAT(rx_512_to_1023, RX_512_TO_1023_PKTS),
	EF10_DMA_STAT(rx_1024_to_15xx, RX_1024_TO_15XX_PKTS),
	EF10_DMA_STAT(rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS),
	EF10_DMA_STAT(rx_gtjumbo, RX_GTJUMBO_PKTS),
	EF10_DMA_STAT(rx_bad_gtjumbo, RX_JABBER_PKTS),
	EF10_DMA_STAT(rx_overflow, RX_OVERFLOW_PKTS),
	EF10_DMA_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS),
	EF10_DMA_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS),
	EF10_DMA_STAT(rx_nodesc_drops, RX_NODESC_DROPS),
	EF10_DMA_STAT(rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW),
	EF10_DMA_STAT(rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW),
	EF10_DMA_STAT(rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL),
	EF10_DMA_STAT(rx_pm_discard_vfifo_full, PM_DISCARD_VFIFO_FULL),
	EF10_DMA_STAT(rx_pm_trunc_qbb, PM_TRUNC_QBB),
	EF10_DMA_STAT(rx_pm_discard_qbb, PM_DISCARD_QBB),
	EF10_DMA_STAT(rx_pm_discard_mapping, PM_DISCARD_MAPPING),
	EF10_DMA_STAT(rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS),
	EF10_DMA_STAT(rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS),
	EF10_DMA_STAT(rx_dp_streaming_packets, RXDP_STREAMING_PKTS),
	EF10_DMA_STAT(rx_dp_emerg_fetch, RXDP_EMERGENCY_FETCH_CONDITIONS),
	EF10_DMA_STAT(rx_dp_emerg_wait, RXDP_EMERGENCY_WAIT_CONDITIONS),
};
#define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_tx_bytes) |		\
			       (1ULL << EF10_STAT_tx_packets) |		\
			       (1ULL << EF10_STAT_tx_pause) |		\
			       (1ULL << EF10_STAT_tx_unicast) |		\
			       (1ULL << EF10_STAT_tx_multicast) |	\
			       (1ULL << EF10_STAT_tx_broadcast) |	\
			       (1ULL << EF10_STAT_rx_bytes) |		\
			       (1ULL << EF10_STAT_rx_bytes_minus_good_bytes) | \
			       (1ULL << EF10_STAT_rx_good_bytes) |	\
			       (1ULL << EF10_STAT_rx_bad_bytes) |	\
			       (1ULL << EF10_STAT_rx_packets) |		\
			       (1ULL << EF10_STAT_rx_good) |		\
			       (1ULL << EF10_STAT_rx_bad) |		\
			       (1ULL << EF10_STAT_rx_pause) |		\
			       (1ULL << EF10_STAT_rx_control) |		\
			       (1ULL << EF10_STAT_rx_unicast) |		\
			       (1ULL << EF10_STAT_rx_multicast) |	\
			       (1ULL << EF10_STAT_rx_broadcast) |	\
			       (1ULL << EF10_STAT_rx_lt64) |		\
			       (1ULL << EF10_STAT_rx_64) |		\
			       (1ULL << EF10_STAT_rx_65_to_127) |	\
			       (1ULL << EF10_STAT_rx_128_to_255) |	\
			       (1ULL << EF10_STAT_rx_256_to_511) |	\
			       (1ULL << EF10_STAT_rx_512_to_1023) |	\
			       (1ULL << EF10_STAT_rx_1024_to_15xx) |	\
			       (1ULL << EF10_STAT_rx_15xx_to_jumbo) |	\
			       (1ULL << EF10_STAT_rx_gtjumbo) |		\
			       (1ULL << EF10_STAT_rx_bad_gtjumbo) |	\
			       (1ULL << EF10_STAT_rx_overflow) |	\
			       (1ULL << EF10_STAT_rx_nodesc_drops))
/* These statistics are only provided by the 10G MAC.  For a 10G/40G
 * switchable port we do not expose these because they might not
 * include all the packets they should.
 */
#define HUNT_10G_ONLY_STAT_MASK ((1ULL << EF10_STAT_tx_control) |	\
				 (1ULL << EF10_STAT_tx_lt64) |		\
				 (1ULL << EF10_STAT_tx_64) |		\
				 (1ULL << EF10_STAT_tx_65_to_127) |	\
				 (1ULL << EF10_STAT_tx_128_to_255) |	\
				 (1ULL << EF10_STAT_tx_256_to_511) |	\
				 (1ULL << EF10_STAT_tx_512_to_1023) |	\
				 (1ULL << EF10_STAT_tx_1024_to_15xx) |	\
				 (1ULL << EF10_STAT_tx_15xx_to_jumbo))
/* These statistics are only provided by the 40G MAC.  For a 10G/40G
 * switchable port we do expose these because the errors will otherwise
 * not be reported.
 */
#define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_rx_align_error) |	\
				  (1ULL << EF10_STAT_rx_length_error))
/* These statistics are only provided if the firmware supports the
 * capability PM_AND_RXDP_COUNTERS.
 */
#define HUNT_PM_AND_RXDP_STAT_MASK (					\
	(1ULL << EF10_STAT_rx_pm_trunc_bb_overflow) |			\
	(1ULL << EF10_STAT_rx_pm_discard_bb_overflow) |			\
	(1ULL << EF10_STAT_rx_pm_trunc_vfifo_full) |			\
	(1ULL << EF10_STAT_rx_pm_discard_vfifo_full) |			\
	(1ULL << EF10_STAT_rx_pm_trunc_qbb) |				\
	(1ULL << EF10_STAT_rx_pm_discard_qbb) |				\
	(1ULL << EF10_STAT_rx_pm_discard_mapping) |			\
	(1ULL << EF10_STAT_rx_dp_q_disabled_packets) |			\
	(1ULL << EF10_STAT_rx_dp_di_dropped_packets) |			\
	(1ULL << EF10_STAT_rx_dp_streaming_packets) |			\
	(1ULL << EF10_STAT_rx_dp_emerg_fetch) |				\
	(1ULL << EF10_STAT_rx_dp_emerg_wait))
static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx)
{
	u64 raw_mask = HUNT_COMMON_STAT_MASK;
	u32 port_caps = efx_mcdi_phy_get_caps(efx);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;

	if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
		raw_mask |= HUNT_40G_EXTRA_STAT_MASK;
	else
		raw_mask |= HUNT_10G_ONLY_STAT_MASK;

	if (nic_data->datapath_caps &
	    (1 << MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN))
		raw_mask |= HUNT_PM_AND_RXDP_STAT_MASK;

	return raw_mask;
}
static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask)
{
	u64 raw_mask = efx_ef10_raw_stat_mask(efx);

#if BITS_PER_LONG == 64
	mask[0] = raw_mask;
#else
	mask[0] = raw_mask & 0xffffffff;
	mask[1] = raw_mask >> 32;
#endif
}
static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names)
{
	DECLARE_BITMAP(mask, EF10_STAT_COUNT);

	efx_ef10_get_stat_mask(efx, mask);
	return efx_nic_describe_stats(efx_ef10_stat_desc, EF10_STAT_COUNT,
				      mask, names);
}
static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	DECLARE_BITMAP(mask, EF10_STAT_COUNT);
	__le64 generation_start, generation_end;
	u64 *stats = nic_data->stats;
	__le64 *dma_stats;

	efx_ef10_get_stat_mask(efx, mask);

	dma_stats = efx->stats_buffer.addr;
	nic_data = efx->nic_data;

	generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
	if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
		return 0;
	rmb();
	efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask,
			     stats, efx->stats_buffer.addr, false);
	rmb();
	generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
	if (generation_end != generation_start)
		return -EAGAIN;

	/* Update derived statistics */
	stats[EF10_STAT_rx_good_bytes] =
		stats[EF10_STAT_rx_bytes] -
		stats[EF10_STAT_rx_bytes_minus_good_bytes];
	efx_update_diff_stat(&stats[EF10_STAT_rx_bad_bytes],
			     stats[EF10_STAT_rx_bytes_minus_good_bytes]);

	return 0;
}
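/* The generation words above give a seqlock-style consistency check:
 * the driver samples GENERATION_END before copying the statistics and
 * GENERATION_START afterwards, with rmb() ordering the reads.  If the
 * two differ, firmware DMA'd a fresh snapshot mid-copy and the caller
 * must retry (-EAGAIN); EFX_MC_STATS_GENERATION_INVALID means no
 * snapshot has been DMA'd yet.
 */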
static size_t efx_ef10_update_stats(struct efx_nic *efx, u64 *full_stats,
				    struct rtnl_link_stats64 *core_stats)
{
	DECLARE_BITMAP(mask, EF10_STAT_COUNT);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	u64 *stats = nic_data->stats;
	size_t stats_count = 0, index;
	int retry;

	efx_ef10_get_stat_mask(efx, mask);

	/* If we're unlucky enough to read statistics during the DMA, wait
	 * up to 10ms for it to finish (typically takes <500us)
	 */
	for (retry = 0; retry < 100; ++retry) {
		if (efx_ef10_try_update_nic_stats(efx) == 0)
			break;
		udelay(100);
	}

	if (full_stats) {
		for_each_set_bit(index, mask, EF10_STAT_COUNT) {
			if (efx_ef10_stat_desc[index].name) {
				*full_stats++ = stats[index];
				++stats_count;
			}
		}
	}

	if (core_stats) {
		core_stats->rx_packets = stats[EF10_STAT_rx_packets];
		core_stats->tx_packets = stats[EF10_STAT_tx_packets];
		core_stats->rx_bytes = stats[EF10_STAT_rx_bytes];
		core_stats->tx_bytes = stats[EF10_STAT_tx_bytes];
		core_stats->rx_dropped = stats[EF10_STAT_rx_nodesc_drops];
		core_stats->multicast = stats[EF10_STAT_rx_multicast];
		core_stats->rx_length_errors =
			stats[EF10_STAT_rx_gtjumbo] +
			stats[EF10_STAT_rx_length_error];
		core_stats->rx_crc_errors = stats[EF10_STAT_rx_bad];
		core_stats->rx_frame_errors = stats[EF10_STAT_rx_align_error];
		core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow];
		core_stats->rx_errors = (core_stats->rx_length_errors +
					 core_stats->rx_crc_errors +
					 core_stats->rx_frame_errors);
	}

	return stats_count;
}
static void efx_ef10_push_irq_moderation(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	unsigned int mode, value;
	efx_dword_t timer_cmd;

	if (channel->irq_moderation) {
		mode = 3;
		value = channel->irq_moderation - 1;
	} else {
		mode = 0;
		value = 0;
	}

	if (EFX_EF10_WORKAROUND_35388(efx)) {
		EFX_POPULATE_DWORD_3(timer_cmd, ERF_DD_EVQ_IND_TIMER_FLAGS,
				     EFE_DD_EVQ_IND_TIMER_FLAGS,
				     ERF_DD_EVQ_IND_TIMER_MODE, mode,
				     ERF_DD_EVQ_IND_TIMER_VAL, value);
		efx_writed_page(efx, &timer_cmd, ER_DD_EVQ_INDIRECT,
				channel->channel);
	} else {
		EFX_POPULATE_DWORD_2(timer_cmd, ERF_DZ_TC_TIMER_MODE, mode,
				     ERF_DZ_TC_TIMER_VAL, value);
		efx_writed_page(efx, &timer_cmd, ER_DZ_EVQ_TMR,
				channel->channel);
	}
}
static void efx_ef10_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol)
{
	wol->supported = 0;
	wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int efx_ef10_set_wol(struct efx_nic *efx, u32 type)
{
	if (type != 0)
		return -EINVAL;
	return 0;
}
static void efx_ef10_mcdi_request(struct efx_nic *efx,
				  const efx_dword_t *hdr, size_t hdr_len,
				  const efx_dword_t *sdu, size_t sdu_len)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	u8 *pdu = nic_data->mcdi_buf.addr;

	memcpy(pdu, hdr, hdr_len);
	memcpy(pdu + hdr_len, sdu, sdu_len);
	wmb();

	/* The hardware provides 'low' and 'high' (doorbell) registers
	 * for passing the 64-bit address of an MCDI request to
	 * firmware.  However the dwords are swapped by firmware.  The
	 * least significant bits of the doorbell are then 0 for all
	 * MCDI requests due to alignment.
	 */
	_efx_writed(efx, cpu_to_le32((u64)nic_data->mcdi_buf.dma_addr >> 32),
		    ER_DZ_MC_DB_LWRD);
	_efx_writed(efx, cpu_to_le32((u32)nic_data->mcdi_buf.dma_addr),
		    ER_DZ_MC_DB_HWRD);
}
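/* Concretely (illustrative address, not taken from the source): for a
 * DMA address 0x0000001234567800 the first write puts 0x00000012 into
 * the 'low' register and the second puts 0x34567800 into the 'high'
 * doorbell register.  Because the buffer is 8-byte aligned, the
 * doorbell's least significant bits are always zero here, which is
 * what lets the kexec recovery path in efx_ef10_probe() reuse those
 * bits for its special cancel message.
 */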
static bool efx_ef10_mcdi_poll_response(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	const efx_dword_t hdr = *(const efx_dword_t *)nic_data->mcdi_buf.addr;

	rmb();
	return EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE);
}
static void
efx_ef10_mcdi_read_response(struct efx_nic *efx, efx_dword_t *outbuf,
			    size_t offset, size_t outlen)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	const u8 *pdu = nic_data->mcdi_buf.addr;

	memcpy(outbuf, pdu + offset, outlen);
}
static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	int rc;

	rc = efx_ef10_get_warm_boot_count(efx);
	if (rc < 0) {
		/* The firmware is presumably in the process of
		 * rebooting.  However, we are supposed to report each
		 * reboot just once, so we must only do that once we
		 * can read and store the updated warm boot count.
		 */
		return 0;
	}

	if (rc == nic_data->warm_boot_count)
		return 0;

	nic_data->warm_boot_count = rc;

	/* All our allocations have been reset */
	nic_data->must_realloc_vis = true;
	nic_data->must_restore_filters = true;
	nic_data->must_restore_piobufs = true;
	nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;

	/* The datapath firmware might have been changed */
	nic_data->must_check_datapath_caps = true;

	/* MAC statistics have been cleared on the NIC; clear the local
	 * statistic that we update with efx_update_diff_stat().
	 */
	nic_data->stats[EF10_STAT_rx_bad_bytes] = 0;

	return -EIO;
}
/* Handle an MSI interrupt
 *
 * Handle an MSI hardware interrupt.  This routine schedules event
 * queue processing.  No interrupt acknowledgement cycle is necessary.
 * Also, we never need to check that the interrupt is for us, since
 * MSI interrupts cannot be shared.
 */
static irqreturn_t efx_ef10_msi_interrupt(int irq, void *dev_id)
{
	struct efx_msi_context *context = dev_id;
	struct efx_nic *efx = context->efx;

	netif_vdbg(efx, intr, efx->net_dev,
		   "IRQ %d on CPU %d\n", irq, raw_smp_processor_id());

	if (likely(ACCESS_ONCE(efx->irq_soft_enabled))) {
		/* Note test interrupts */
		if (context->index == efx->irq_level)
			efx->last_irq_cpu = raw_smp_processor_id();

		/* Schedule processing of the channel */
		efx_schedule_channel_irq(efx->channel[context->index]);
	}

	return IRQ_HANDLED;
}
static irqreturn_t efx_ef10_legacy_interrupt(int irq, void *dev_id)
{
	struct efx_nic *efx = dev_id;
	bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled);
	struct efx_channel *channel;
	efx_dword_t reg;
	u32 queues;

	/* Read the ISR which also ACKs the interrupts */
	efx_readd(efx, &reg, ER_DZ_BIU_INT_ISR);
	queues = EFX_DWORD_FIELD(reg, ERF_DZ_ISR_REG);

	if (queues == 0)
		return IRQ_NONE;

	if (likely(soft_enabled)) {
		/* Note test interrupts */
		if (queues & (1U << efx->irq_level))
			efx->last_irq_cpu = raw_smp_processor_id();

		efx_for_each_channel(channel, efx) {
			if (queues & 1)
				efx_schedule_channel_irq(channel);
			queues >>= 1;
		}
	}

	netif_vdbg(efx, intr, efx->net_dev,
		   "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
		   irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));

	return IRQ_HANDLED;
}
static void efx_ef10_irq_test_generate(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_TRIGGER_INTERRUPT_IN_LEN);

	BUILD_BUG_ON(MC_CMD_TRIGGER_INTERRUPT_OUT_LEN != 0);

	MCDI_SET_DWORD(inbuf, TRIGGER_INTERRUPT_IN_INTR_LEVEL, efx->irq_level);
	(void) efx_mcdi_rpc(efx, MC_CMD_TRIGGER_INTERRUPT,
			    inbuf, sizeof(inbuf), NULL, 0, NULL);
}
static int efx_ef10_tx_probe(struct efx_tx_queue *tx_queue)
{
	return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd.buf,
				    (tx_queue->ptr_mask + 1) *
				    sizeof(efx_qword_t),
				    GFP_KERNEL);
}
/* This writes to the TX_DESC_WPTR and also pushes data */
static inline void efx_ef10_push_tx_desc(struct efx_tx_queue *tx_queue,
					 const efx_qword_t *txd)
{
	unsigned int write_ptr;
	efx_oword_t reg;

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_OWORD_1(reg, ERF_DZ_TX_DESC_WPTR, write_ptr);
	reg.qword[0] = *txd;
	efx_writeo_page(tx_queue->efx, &reg,
			ER_DZ_TX_DESC_UPD, tx_queue->queue);
}
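/* Pushing carries the first descriptor inside the doorbell write
 * itself (reg.qword[0] above), so the hardware can start on the packet
 * without first fetching the descriptor ring from host memory,
 * presumably saving a PCIe round trip on the latency-sensitive path.
 * The plain efx_ef10_notify_tx_desc() variant further down updates
 * only the write pointer.
 */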
static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
						       EFX_BUF_SIZE));
	MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_TXQ_OUT_LEN);
	bool csum_offload = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
	size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE;
	struct efx_channel *channel = tx_queue->channel;
	struct efx_nic *efx = tx_queue->efx;
	size_t inlen, outlen;
	dma_addr_t dma_addr;
	efx_qword_t *txd;
	int rc;
	int i;

	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1);
	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel);
	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_LABEL, tx_queue->queue);
	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_INSTANCE, tx_queue->queue);
	MCDI_POPULATE_DWORD_2(inbuf, INIT_TXQ_IN_FLAGS,
			      INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !csum_offload,
			      INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload);
	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0);
	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);

	dma_addr = tx_queue->txd.buf.dma_addr;

	netif_dbg(efx, hw, efx->net_dev, "pushing TXQ %d. %zu entries (%llx)\n",
		  tx_queue->queue, entries, (u64)dma_addr);

	for (i = 0; i < entries; ++i) {
		MCDI_SET_ARRAY_QWORD(inbuf, INIT_TXQ_IN_DMA_ADDR, i, dma_addr);
		dma_addr += EFX_BUF_SIZE;
	}

	inlen = MC_CMD_INIT_TXQ_IN_LEN(entries);

	rc = efx_mcdi_rpc(efx, MC_CMD_INIT_TXQ, inbuf, inlen,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;

	/* A previous user of this TX queue might have set us up the
	 * bomb by writing a descriptor to the TX push collector but
	 * not the doorbell.  (Each collector belongs to a port, not a
	 * queue or function, so cannot easily be reset.)  We must
	 * attempt to push a no-op descriptor in its place.
	 */
	tx_queue->buffer[0].flags = EFX_TX_BUF_OPTION;
	tx_queue->insert_count = 1;
	txd = efx_tx_desc(tx_queue, 0);
	EFX_POPULATE_QWORD_4(*txd,
			     ESF_DZ_TX_DESC_IS_OPT, true,
			     ESF_DZ_TX_OPTION_TYPE,
			     ESE_DZ_TX_OPTION_DESC_CRC_CSUM,
			     ESF_DZ_TX_OPTION_UDP_TCP_CSUM, csum_offload,
			     ESF_DZ_TX_OPTION_IP_CSUM, csum_offload);
	tx_queue->write_count = 1;
	wmb();
	efx_ef10_push_tx_desc(tx_queue, txd);

	return;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
}
static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_TXQ_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_TXQ_OUT_LEN);
	struct efx_nic *efx = tx_queue->efx;
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, FINI_TXQ_IN_INSTANCE,
		       tx_queue->queue);

	rc = efx_mcdi_rpc(efx, MC_CMD_FINI_TXQ, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);

	if (rc && rc != -EALREADY)
		goto fail;

	return;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
}
static void efx_ef10_tx_remove(struct efx_tx_queue *tx_queue)
{
	efx_nic_free_buffer(tx_queue->efx, &tx_queue->txd.buf);
}
/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
static inline void efx_ef10_notify_tx_desc(struct efx_tx_queue *tx_queue)
{
	unsigned int write_ptr;
	efx_dword_t reg;

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, ERF_DZ_TX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(tx_queue->efx, &reg,
			ER_DZ_TX_DESC_UPD_DWORD, tx_queue->queue);
}
static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue)
{
	unsigned int old_write_count = tx_queue->write_count;
	struct efx_tx_buffer *buffer;
	unsigned int write_ptr;
	efx_qword_t *txd;

	BUG_ON(tx_queue->write_count == tx_queue->insert_count);

	do {
		write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[write_ptr];
		txd = efx_tx_desc(tx_queue, write_ptr);
		++tx_queue->write_count;

		/* Create TX descriptor ring entry */
		if (buffer->flags & EFX_TX_BUF_OPTION) {
			*txd = buffer->option;
		} else {
			BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
			EFX_POPULATE_QWORD_3(
				*txd,
				ESF_DZ_TX_KER_CONT,
				buffer->flags & EFX_TX_BUF_CONT,
				ESF_DZ_TX_KER_BYTE_CNT, buffer->len,
				ESF_DZ_TX_KER_BUF_ADDR, buffer->dma_addr);
		}
	} while (tx_queue->write_count != tx_queue->insert_count);

	wmb(); /* Ensure descriptors are written before they are fetched */

	if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) {
		txd = efx_tx_desc(tx_queue,
				  old_write_count & tx_queue->ptr_mask);
		efx_ef10_push_tx_desc(tx_queue, txd);
	} else {
		efx_ef10_notify_tx_desc(tx_queue);
	}
}
static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID,
		       EVB_PORT_ID_ASSIGNED);
	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE,
		       MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE);
	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES,
		       EFX_MAX_CHANNELS);

	rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_ALLOC, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc != 0)
		return rc;

	if (outlen < MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN)
		return -EIO;

	*context = MCDI_DWORD(outbuf, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID);

	return 0;
}
static void efx_ef10_free_rss_context(struct efx_nic *efx, u32 context)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_FREE_IN_LEN);
	int rc;

	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID,
		       context);

	rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_FREE, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	WARN_ON(rc != 0);
}
static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context)
{
	MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN);
	MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN);
	int i, rc;

	MCDI_SET_DWORD(tablebuf, RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID,
		       context);
	BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
		     MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN);

	for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); ++i)
		MCDI_PTR(tablebuf,
			 RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE)[i] =
			(u8) efx->rx_indir_table[i];

	rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_TABLE, tablebuf,
			  sizeof(tablebuf), NULL, 0, NULL);
	if (rc != 0)
		return rc;

	MCDI_SET_DWORD(keybuf, RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID,
		       context);
	BUILD_BUG_ON(ARRAY_SIZE(efx->rx_hash_key) !=
		     MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
	for (i = 0; i < ARRAY_SIZE(efx->rx_hash_key); ++i)
		MCDI_PTR(keybuf, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY)[i] =
			efx->rx_hash_key[i];

	return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_KEY, keybuf,
			    sizeof(keybuf), NULL, 0, NULL);
}
static void efx_ef10_rx_free_indir_table(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;

	if (nic_data->rx_rss_context != EFX_EF10_RSS_CONTEXT_INVALID)
		efx_ef10_free_rss_context(efx, nic_data->rx_rss_context);
	nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
}
static void efx_ef10_rx_push_indir_table(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	int rc;

	netif_dbg(efx, drv, efx->net_dev, "pushing RX indirection table\n");

	if (nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID) {
		rc = efx_ef10_alloc_rss_context(efx,
						&nic_data->rx_rss_context);
		if (rc != 0)
			goto fail;
	}

	rc = efx_ef10_populate_rss_table(efx, nic_data->rx_rss_context);
	if (rc != 0)
		goto fail;

	return;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
}
static int efx_ef10_rx_probe(struct efx_rx_queue *rx_queue)
{
	return efx_nic_alloc_buffer(rx_queue->efx, &rx_queue->rxd.buf,
				    (rx_queue->ptr_mask + 1) *
				    sizeof(efx_qword_t),
				    GFP_KERNEL);
}
static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue)
{
	MCDI_DECLARE_BUF(inbuf,
			 MC_CMD_INIT_RXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
						EFX_BUF_SIZE));
	MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_RXQ_OUT_LEN);
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE;
	struct efx_nic *efx = rx_queue->efx;
	size_t inlen, outlen;
	dma_addr_t dma_addr;
	int rc;
	int i;

	rx_queue->scatter_n = 0;
	rx_queue->scatter_len = 0;

	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_SIZE, rx_queue->ptr_mask + 1);
	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_TARGET_EVQ, channel->channel);
	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_LABEL, efx_rx_queue_index(rx_queue));
	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_INSTANCE,
		       efx_rx_queue_index(rx_queue));
	MCDI_POPULATE_DWORD_1(inbuf, INIT_RXQ_IN_FLAGS,
			      INIT_RXQ_IN_FLAG_PREFIX, 1);
	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0);
	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);

	dma_addr = rx_queue->rxd.buf.dma_addr;

	netif_dbg(efx, hw, efx->net_dev, "pushing RXQ %d. %zu entries (%llx)\n",
		  efx_rx_queue_index(rx_queue), entries, (u64)dma_addr);

	for (i = 0; i < entries; ++i) {
		MCDI_SET_ARRAY_QWORD(inbuf, INIT_RXQ_IN_DMA_ADDR, i, dma_addr);
		dma_addr += EFX_BUF_SIZE;
	}

	inlen = MC_CMD_INIT_RXQ_IN_LEN(entries);

	rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, inlen,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;

	return;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
}
static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_RXQ_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_RXQ_OUT_LEN);
	struct efx_nic *efx = rx_queue->efx;
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, FINI_RXQ_IN_INSTANCE,
		       efx_rx_queue_index(rx_queue));

	rc = efx_mcdi_rpc(efx, MC_CMD_FINI_RXQ, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);

	if (rc && rc != -EALREADY)
		goto fail;

	return;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
}
static void efx_ef10_rx_remove(struct efx_rx_queue *rx_queue)
{
	efx_nic_free_buffer(rx_queue->efx, &rx_queue->rxd.buf);
}
/* This creates an entry in the RX descriptor queue */
static inline void
efx_ef10_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
{
	struct efx_rx_buffer *rx_buf;
	efx_qword_t *rxd;

	rxd = efx_rx_desc(rx_queue, index);
	rx_buf = efx_rx_buffer(rx_queue, index);
	EFX_POPULATE_QWORD_2(*rxd,
			     ESF_DZ_RX_KER_BYTE_CNT, rx_buf->len,
			     ESF_DZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
}
static void efx_ef10_rx_write(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int write_count;
	efx_dword_t reg;

	/* Firmware requires that RX_DESC_WPTR be a multiple of 8 */
	write_count = rx_queue->added_count & ~7;
	if (rx_queue->notified_count == write_count)
		return;

	do
		efx_ef10_build_rx_desc(
			rx_queue,
			rx_queue->notified_count & rx_queue->ptr_mask);
	while (++rx_queue->notified_count != write_count);

	wmb();
	EFX_POPULATE_DWORD_1(reg, ERF_DZ_RX_DESC_WPTR,
			     write_count & rx_queue->ptr_mask);
	efx_writed_page(efx, &reg, ER_DZ_RX_DESC_UPD,
			efx_rx_queue_index(rx_queue));
}
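/* Because the hardware constrains RX_DESC_WPTR to multiples of 8, up
 * to 7 freshly added buffers may remain unannounced after this
 * function runs; they are picked up the next time added_count crosses
 * another multiple of 8.  The wmb() ensures the descriptors are
 * visible in memory before the write pointer tells the NIC to fetch
 * them.
 */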
static efx_mcdi_async_completer efx_ef10_rx_defer_refill_complete;

static void efx_ef10_rx_defer_refill(struct efx_rx_queue *rx_queue)
{
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN);
	efx_qword_t event;

	EFX_POPULATE_QWORD_2(event,
			     ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV,
			     ESF_DZ_EV_DATA, EFX_EF10_REFILL);

	MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel);

	/* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has
	 * already swapped the data to little-endian order.
	 */
	memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0],
	       sizeof(efx_qword_t));

	efx_mcdi_rpc_async(channel->efx, MC_CMD_DRIVER_EVENT,
			   inbuf, sizeof(inbuf), 0,
			   efx_ef10_rx_defer_refill_complete, 0);
}
static void
efx_ef10_rx_defer_refill_complete(struct efx_nic *efx, unsigned long cookie,
				  int rc, efx_dword_t *outbuf,
				  size_t outlen_actual)
{
	/* nothing to do */
}
static int efx_ef10_ev_probe(struct efx_channel *channel)
{
	return efx_nic_alloc_buffer(channel->efx, &channel->eventq.buf,
				    (channel->eventq_mask + 1) *
				    sizeof(efx_qword_t),
				    GFP_KERNEL);
}
static int efx_ef10_ev_init(struct efx_channel *channel)
{
	MCDI_DECLARE_BUF(inbuf,
			 MC_CMD_INIT_EVQ_IN_LEN(EFX_MAX_EVQ_SIZE * 8 /
						EFX_BUF_SIZE));
	MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_OUT_LEN);
	size_t entries = channel->eventq.buf.len / EFX_BUF_SIZE;
	struct efx_nic *efx = channel->efx;
	struct efx_ef10_nic_data *nic_data;
	bool supports_rx_merge;
	size_t inlen, outlen;
	dma_addr_t dma_addr;
	int rc;
	int i;

	nic_data = efx->nic_data;
	supports_rx_merge =
		!!(nic_data->datapath_caps &
		   1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN);

	/* Fill event queue with all ones (i.e. empty events) */
	memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);
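	/* (All-ones works as "empty" because a valid event is never
	 * all-ones: efx_event_present() keys off exactly this, and
	 * efx_ef10_ev_process() below re-poisons each consumed event
	 * with EFX_SET_QWORD.)
	 */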
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_SIZE, channel->eventq_mask + 1);
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_INSTANCE, channel->channel);
	/* INIT_EVQ expects index in vector table, not absolute */
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_IRQ_NUM, channel->channel);
	MCDI_POPULATE_DWORD_4(inbuf, INIT_EVQ_IN_FLAGS,
			      INIT_EVQ_IN_FLAG_INTERRUPTING, 1,
			      INIT_EVQ_IN_FLAG_RX_MERGE, 1,
			      INIT_EVQ_IN_FLAG_TX_MERGE, 1,
			      INIT_EVQ_IN_FLAG_CUT_THRU, !supports_rx_merge);
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_MODE,
		       MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_LOAD, 0);
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_RELOAD, 0);
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_MODE,
		       MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_THRSHLD, 0);

	dma_addr = channel->eventq.buf.dma_addr;
	for (i = 0; i < entries; ++i) {
		MCDI_SET_ARRAY_QWORD(inbuf, INIT_EVQ_IN_DMA_ADDR, i, dma_addr);
		dma_addr += EFX_BUF_SIZE;
	}

	inlen = MC_CMD_INIT_EVQ_IN_LEN(entries);

	rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;

	/* IRQ return is ignored */

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}
static void efx_ef10_ev_fini(struct efx_channel *channel)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_EVQ_OUT_LEN);
	struct efx_nic *efx = channel->efx;
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel);

	rc = efx_mcdi_rpc(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);

	if (rc && rc != -EALREADY)
		goto fail;

	return;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
}
static void efx_ef10_ev_remove(struct efx_channel *channel)
{
	efx_nic_free_buffer(channel->efx, &channel->eventq.buf);
}
static void efx_ef10_handle_rx_wrong_queue(struct efx_rx_queue *rx_queue,
					   unsigned int rx_queue_label)
{
	struct efx_nic *efx = rx_queue->efx;

	netif_info(efx, hw, efx->net_dev,
		   "rx event arrived on queue %d labeled as queue %u\n",
		   efx_rx_queue_index(rx_queue), rx_queue_label);

	efx_schedule_reset(efx, RESET_TYPE_DISABLE);
}
static void
efx_ef10_handle_rx_bad_lbits(struct efx_rx_queue *rx_queue,
			     unsigned int actual, unsigned int expected)
{
	unsigned int dropped = (actual - expected) & rx_queue->ptr_mask;
	struct efx_nic *efx = rx_queue->efx;

	netif_info(efx, hw, efx->net_dev,
		   "dropped %d events (index=%d expected=%d)\n",
		   dropped, actual, expected);

	efx_schedule_reset(efx, RESET_TYPE_DISABLE);
}
/* partially received RX was aborted. clean up. */
static void efx_ef10_handle_rx_abort(struct efx_rx_queue *rx_queue)
{
	unsigned int rx_desc_ptr;

	WARN_ON(rx_queue->scatter_n == 0);

	netif_dbg(rx_queue->efx, hw, rx_queue->efx->net_dev,
		  "scattered RX aborted (dropping %u buffers)\n",
		  rx_queue->scatter_n);

	rx_desc_ptr = rx_queue->removed_count & rx_queue->ptr_mask;

	efx_rx_packet(rx_queue, rx_desc_ptr, rx_queue->scatter_n,
		      0, EFX_RX_PKT_DISCARD);

	rx_queue->removed_count += rx_queue->scatter_n;
	rx_queue->scatter_n = 0;
	rx_queue->scatter_len = 0;
	++efx_rx_queue_channel(rx_queue)->n_rx_nodesc_trunc;
}
static int efx_ef10_handle_rx_event(struct efx_channel *channel,
				    const efx_qword_t *event)
{
	unsigned int rx_bytes, next_ptr_lbits, rx_queue_label, rx_l4_class;
	unsigned int n_descs, n_packets, i;
	struct efx_nic *efx = channel->efx;
	struct efx_rx_queue *rx_queue;
	bool rx_cont;
	u16 flags = 0;

	if (unlikely(ACCESS_ONCE(efx->reset_pending)))
		return 0;

	/* Basic packet information */
	rx_bytes = EFX_QWORD_FIELD(*event, ESF_DZ_RX_BYTES);
	next_ptr_lbits = EFX_QWORD_FIELD(*event, ESF_DZ_RX_DSC_PTR_LBITS);
	rx_queue_label = EFX_QWORD_FIELD(*event, ESF_DZ_RX_QLABEL);
	rx_l4_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L4_CLASS);
	rx_cont = EFX_QWORD_FIELD(*event, ESF_DZ_RX_CONT);

	WARN_ON(EFX_QWORD_FIELD(*event, ESF_DZ_RX_DROP_EVENT));

	rx_queue = efx_channel_get_rx_queue(channel);

	if (unlikely(rx_queue_label != efx_rx_queue_index(rx_queue)))
		efx_ef10_handle_rx_wrong_queue(rx_queue, rx_queue_label);

	n_descs = ((next_ptr_lbits - rx_queue->removed_count) &
		   ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1));

	if (n_descs != rx_queue->scatter_n + 1) {
		struct efx_ef10_nic_data *nic_data = efx->nic_data;

		/* detect rx abort */
		if (unlikely(n_descs == rx_queue->scatter_n)) {
			WARN_ON(rx_bytes != 0);
			efx_ef10_handle_rx_abort(rx_queue);
			return 0;
		}

		/* Check that RX completion merging is valid, i.e.
		 * the current firmware supports it and this is a
		 * non-scattered packet.
		 */
		if (!(nic_data->datapath_caps &
		      (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN)) ||
		    rx_queue->scatter_n != 0 || rx_cont) {
			efx_ef10_handle_rx_bad_lbits(
				rx_queue, next_ptr_lbits,
				(rx_queue->removed_count +
				 rx_queue->scatter_n + 1) &
				((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1));
			return 0;
		}

		/* Merged completion for multiple non-scattered packets */
		rx_queue->scatter_n = 1;
		rx_queue->scatter_len = 0;
		n_packets = n_descs;
		++channel->n_rx_merge_events;
		channel->n_rx_merge_packets += n_packets;
		flags |= EFX_RX_PKT_PREFIX_LEN;
	} else {
		++rx_queue->scatter_n;
		rx_queue->scatter_len += rx_bytes;
		if (rx_cont)
			return 0;
		n_packets = 1;
	}

	if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_RX_ECRC_ERR)))
		flags |= EFX_RX_PKT_DISCARD;

	if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_RX_IPCKSUM_ERR))) {
		channel->n_rx_ip_hdr_chksum_err += n_packets;
	} else if (unlikely(EFX_QWORD_FIELD(*event,
					    ESF_DZ_RX_TCPUDP_CKSUM_ERR))) {
		channel->n_rx_tcp_udp_chksum_err += n_packets;
	} else if (rx_l4_class == ESE_DZ_L4_CLASS_TCP ||
		   rx_l4_class == ESE_DZ_L4_CLASS_UDP) {
		flags |= EFX_RX_PKT_CSUMMED;
	}

	if (rx_l4_class == ESE_DZ_L4_CLASS_TCP)
		flags |= EFX_RX_PKT_TCP;

	channel->irq_mod_score += 2 * n_packets;

	/* Handle received packet(s) */
	for (i = 0; i < n_packets; i++) {
		efx_rx_packet(rx_queue,
			      rx_queue->removed_count & rx_queue->ptr_mask,
			      rx_queue->scatter_n, rx_queue->scatter_len,
			      flags);
		rx_queue->removed_count += rx_queue->scatter_n;
	}

	rx_queue->scatter_n = 0;
	rx_queue->scatter_len = 0;

	return n_packets;
}
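/* Note on the two branches above: EF10 firmware with RX batching may
 * raise one event for several completed single-descriptor packets, so
 * a descriptor count larger than scatter_n + 1 is interpreted as a
 * merged completion and each packet's length is taken from its RX
 * prefix (EFX_RX_PKT_PREFIX_LEN).  Without the capability bit, or for
 * a scattered packet, the same condition can only mean lost events,
 * hence the bad-lbits reset path.
 */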
static int
efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	struct efx_tx_queue *tx_queue;
	unsigned int tx_ev_desc_ptr;
	unsigned int tx_ev_q_label;
	int tx_descs = 0;

	if (unlikely(ACCESS_ONCE(efx->reset_pending)))
		return 0;

	if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_TX_DROP_EVENT)))
		return 0;

	/* Transmit completion */
	tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, ESF_DZ_TX_DESCR_INDX);
	tx_ev_q_label = EFX_QWORD_FIELD(*event, ESF_DZ_TX_QLABEL);
	tx_queue = efx_channel_get_tx_queue(channel,
					    tx_ev_q_label % EFX_TXQ_TYPES);
	tx_descs = ((tx_ev_desc_ptr + 1 - tx_queue->read_count) &
		    tx_queue->ptr_mask);
	efx_xmit_done(tx_queue, tx_ev_desc_ptr & tx_queue->ptr_mask);

	return tx_descs;
}
static void
efx_ef10_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	int subcode;

	subcode = EFX_QWORD_FIELD(*event, ESF_DZ_DRV_SUB_CODE);

	switch (subcode) {
	case ESE_DZ_DRV_TIMER_EV:
	case ESE_DZ_DRV_WAKE_UP_EV:
		break;
	case ESE_DZ_DRV_START_UP_EV:
		/* event queue init complete. ok. */
		break;
	default:
		netif_err(efx, hw, efx->net_dev,
			  "channel %d unknown driver event type %d"
			  " (data " EFX_QWORD_FMT ")\n",
			  channel->channel, subcode,
			  EFX_QWORD_VAL(*event));
	}
}
static void efx_ef10_handle_driver_generated_event(struct efx_channel *channel,
						   efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	u32 subcode;

	subcode = EFX_QWORD_FIELD(*event, EFX_DWORD_0);

	switch (subcode) {
	case EFX_EF10_TEST:
		channel->event_test_cpu = raw_smp_processor_id();
		break;
	case EFX_EF10_REFILL:
		/* The queue must be empty, so we won't receive any rx
		 * events, so efx_process_channel() won't refill the
		 * queue. Refill it here
		 */
		efx_fast_push_rx_descriptors(&channel->rx_queue, true);
		break;
	default:
		netif_err(efx, hw, efx->net_dev,
			  "channel %d unknown driver event type %u"
			  " (data " EFX_QWORD_FMT ")\n",
			  channel->channel, (unsigned) subcode,
			  EFX_QWORD_VAL(*event));
	}
}
static int efx_ef10_ev_process(struct efx_channel *channel, int quota)
{
	struct efx_nic *efx = channel->efx;
	efx_qword_t event, *p_event;
	unsigned int read_ptr;
	int ev_code;
	int tx_descs = 0;
	int spent = 0;

	read_ptr = channel->eventq_read_ptr;

	for (;;) {
		p_event = efx_event(channel, read_ptr);
		event = *p_event;

		if (!efx_event_present(&event))
			break;

		EFX_SET_QWORD(*p_event);

		++read_ptr;

		ev_code = EFX_QWORD_FIELD(event, ESF_DZ_EV_CODE);

		netif_vdbg(efx, drv, efx->net_dev,
			   "processing event on %d " EFX_QWORD_FMT "\n",
			   channel->channel, EFX_QWORD_VAL(event));

		switch (ev_code) {
		case ESE_DZ_EV_CODE_MCDI_EV:
			efx_mcdi_process_event(channel, &event);
			break;
		case ESE_DZ_EV_CODE_RX_EV:
			spent += efx_ef10_handle_rx_event(channel, &event);
			if (spent >= quota) {
				/* XXX can we split a merged event to
				 * avoid going over-quota?
				 */
				spent = quota;
				goto out;
			}
			break;
		case ESE_DZ_EV_CODE_TX_EV:
			tx_descs += efx_ef10_handle_tx_event(channel, &event);
			if (tx_descs > efx->txq_entries) {
				spent = quota;
				goto out;
			} else if (++spent == quota) {
				goto out;
			}
			break;
		case ESE_DZ_EV_CODE_DRIVER_EV:
			efx_ef10_handle_driver_event(channel, &event);
			if (++spent == quota)
				goto out;
			break;
		case EFX_EF10_DRVGEN_EV:
			efx_ef10_handle_driver_generated_event(channel,
							       &event);
			break;
		default:
			netif_err(efx, hw, efx->net_dev,
				  "channel %d unknown event type %d"
				  " (data " EFX_QWORD_FMT ")\n",
				  channel->channel, ev_code,
				  EFX_QWORD_VAL(event));
		}
	}

out:
	channel->eventq_read_ptr = read_ptr;
	return spent;
}
static void efx_ef10_ev_read_ack(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	efx_dword_t rptr;

	if (EFX_EF10_WORKAROUND_35388(efx)) {
		BUILD_BUG_ON(EFX_MIN_EVQ_SIZE <
			     (1 << ERF_DD_EVQ_IND_RPTR_WIDTH));
		BUILD_BUG_ON(EFX_MAX_EVQ_SIZE >
			     (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH));

		EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS,
				     EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH,
				     ERF_DD_EVQ_IND_RPTR,
				     (channel->eventq_read_ptr &
				      channel->eventq_mask) >>
				     ERF_DD_EVQ_IND_RPTR_WIDTH);
		efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT,
				channel->channel);
		EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS,
				     EFE_DD_EVQ_IND_RPTR_FLAGS_LOW,
				     ERF_DD_EVQ_IND_RPTR,
				     channel->eventq_read_ptr &
				     ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));
		efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT,
				channel->channel);
	} else {
		EFX_POPULATE_DWORD_1(rptr, ERF_DZ_EVQ_RPTR,
				     channel->eventq_read_ptr &
				     channel->eventq_mask);
		efx_writed_page(efx, &rptr, ER_DZ_EVQ_RPTR, channel->channel);
	}
}
static void efx_ef10_ev_test_generate(struct efx_channel *channel)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN);
	struct efx_nic *efx = channel->efx;
	efx_qword_t event;
	int rc;

	EFX_POPULATE_QWORD_2(event,
			     ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV,
			     ESF_DZ_EV_DATA, EFX_EF10_TEST);

	MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel);

	/* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has
	 * already swapped the data to little-endian order.
	 */
	memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0],
	       sizeof(efx_qword_t));

	rc = efx_mcdi_rpc(efx, MC_CMD_DRIVER_EVENT, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	if (rc != 0)
		goto fail;

	return;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
}
void efx_ef10_handle_drain_event(struct efx_nic *efx)
{
	if (atomic_dec_and_test(&efx->active_queues))
		wake_up(&efx->flush_wq);

	WARN_ON(atomic_read(&efx->active_queues) < 0);
}
static int efx_ef10_fini_dmaq(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int pending;

	/* If the MC has just rebooted, the TX/RX queues will have already been
	 * torn down, but efx->active_queues needs to be set to zero.
	 */
	if (nic_data->must_realloc_vis) {
		atomic_set(&efx->active_queues, 0);
		return 0;
	}

	/* Do not attempt to write to the NIC during EEH recovery */
	if (efx->state != STATE_RECOVERY) {
		efx_for_each_channel(channel, efx) {
			efx_for_each_channel_rx_queue(rx_queue, channel)
				efx_ef10_rx_fini(rx_queue);
			efx_for_each_channel_tx_queue(tx_queue, channel)
				efx_ef10_tx_fini(tx_queue);
		}

		wait_event_timeout(efx->flush_wq,
				   atomic_read(&efx->active_queues) == 0,
				   msecs_to_jiffies(EFX_MAX_FLUSH_TIME));
		pending = atomic_read(&efx->active_queues);
		if (pending) {
			netif_err(efx, hw, efx->net_dev, "failed to flush %d queues\n",
				  pending);
			return -ETIMEDOUT;
		}
	}

	return 0;
}
static bool efx_ef10_filter_equal(const struct efx_filter_spec *left,
				  const struct efx_filter_spec *right)
{
	if ((left->match_flags ^ right->match_flags) |
	    ((left->flags ^ right->flags) &
	     (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
		return false;

	return memcmp(&left->outer_vid, &right->outer_vid,
		      sizeof(struct efx_filter_spec) -
		      offsetof(struct efx_filter_spec, outer_vid)) == 0;
}
static unsigned int efx_ef10_filter_hash(const struct efx_filter_spec *spec)
{
	BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
	return jhash2((const u32 *)&spec->outer_vid,
		      (sizeof(struct efx_filter_spec) -
		       offsetof(struct efx_filter_spec, outer_vid)) / 4,
		      0);
	/* XXX should we randomise the initval? */
}
/* Decide whether a filter should be exclusive or else should allow
 * delivery to additional recipients.  Currently we decide that
 * filters for specific local unicast MAC and IP addresses are
 * exclusive.
 */
static bool efx_ef10_filter_is_exclusive(const struct efx_filter_spec *spec)
{
	if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC &&
	    !is_multicast_ether_addr(spec->loc_mac))
		return true;

	if ((spec->match_flags &
	     (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
	    (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
		if (spec->ether_type == htons(ETH_P_IP) &&
		    !ipv4_is_multicast(spec->loc_host[0]))
			return true;
		if (spec->ether_type == htons(ETH_P_IPV6) &&
		    ((const u8 *)spec->loc_host)[0] != 0xff)
			return true;
	}

	return false;
}
static struct efx_filter_spec *
efx_ef10_filter_entry_spec(const struct efx_ef10_filter_table *table,
			   unsigned int filter_idx)
{
	return (struct efx_filter_spec *)(table->entry[filter_idx].spec &
					  ~EFX_EF10_FILTER_FLAGS);
}

static unsigned int
efx_ef10_filter_entry_flags(const struct efx_ef10_filter_table *table,
			    unsigned int filter_idx)
{
	return table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAGS;
}

static void
efx_ef10_filter_set_entry(struct efx_ef10_filter_table *table,
			  unsigned int filter_idx,
			  const struct efx_filter_spec *spec,
			  unsigned int flags)
{
	table->entry[filter_idx].spec = (unsigned long)spec | flags;
}
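/* Build the MCDI FILTER_OP request for pushing @spec to the firmware.
 * A replace reuses the existing firmware handle; a fresh insertion is
 * either exclusive (INSERT) or shared (SUBSCRIBE), as decided by
 * efx_ef10_filter_is_exclusive().
 */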
static void efx_ef10_filter_push_prep(struct efx_nic *efx,
				      const struct efx_filter_spec *spec,
				      efx_dword_t *inbuf, u64 handle,
				      bool replacing)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;

	memset(inbuf, 0, MC_CMD_FILTER_OP_IN_LEN);

	if (replacing) {
		MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
			       MC_CMD_FILTER_OP_IN_OP_REPLACE);
		MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, handle);
	} else {
		u32 match_fields = 0;

		MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
			       efx_ef10_filter_is_exclusive(spec) ?
			       MC_CMD_FILTER_OP_IN_OP_INSERT :
			       MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE);

		/* Convert match flags and values.  Unlike almost
		 * everything else in MCDI, these fields are in
		 * network byte order.
		 */
		if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC_IG)
			match_fields |=
				is_multicast_ether_addr(spec->loc_mac) ?
				1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN :
				1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN;
#define COPY_FIELD(gen_flag, gen_field, mcdi_field)			     \
		if (spec->match_flags & EFX_FILTER_MATCH_ ## gen_flag) {     \
			match_fields |=					     \
				1 << MC_CMD_FILTER_OP_IN_MATCH_ ##	     \
				mcdi_field ## _LBN;			     \
			BUILD_BUG_ON(					     \
				MC_CMD_FILTER_OP_IN_ ## mcdi_field ## _LEN < \
				sizeof(spec->gen_field));		     \
			memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_ ## mcdi_field), \
			       &spec->gen_field, sizeof(spec->gen_field));   \
		}
		COPY_FIELD(REM_HOST, rem_host, SRC_IP);
		COPY_FIELD(LOC_HOST, loc_host, DST_IP);
		COPY_FIELD(REM_MAC, rem_mac, SRC_MAC);
		COPY_FIELD(REM_PORT, rem_port, SRC_PORT);
		COPY_FIELD(LOC_MAC, loc_mac, DST_MAC);
		COPY_FIELD(LOC_PORT, loc_port, DST_PORT);
		COPY_FIELD(ETHER_TYPE, ether_type, ETHER_TYPE);
		COPY_FIELD(INNER_VID, inner_vid, INNER_VLAN);
		COPY_FIELD(OUTER_VID, outer_vid, OUTER_VLAN);
		COPY_FIELD(IP_PROTO, ip_proto, IP_PROTO);
#undef COPY_FIELD
		MCDI_SET_DWORD(inbuf, FILTER_OP_IN_MATCH_FIELDS,
			       match_fields);
	}

	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST,
		       spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
		       MC_CMD_FILTER_OP_IN_RX_DEST_DROP :
		       MC_CMD_FILTER_OP_IN_RX_DEST_HOST);
	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DEST,
		       MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT);
	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE, spec->dmaq_id);
	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE,
		       (spec->flags & EFX_FILTER_FLAG_RX_RSS) ?
		       MC_CMD_FILTER_OP_IN_RX_MODE_RSS :
		       MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE);
	if (spec->flags & EFX_FILTER_FLAG_RX_RSS)
		MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_CONTEXT,
			       spec->rss_context !=
			       EFX_FILTER_RSS_CONTEXT_DEFAULT ?
			       spec->rss_context : nic_data->rx_rss_context);
}
static int efx_ef10_filter_push(struct efx_nic *efx,
				const struct efx_filter_spec *spec,
				u64 *handle, bool replacing)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_OUT_LEN);
	int rc;

	efx_ef10_filter_push_prep(efx, spec, inbuf, *handle, replacing);
	rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), NULL);
	if (rc == 0)
		*handle = MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE);
	return rc;
}
static int efx_ef10_filter_rx_match_pri(struct efx_ef10_filter_table *table,
					enum efx_filter_match_flags match_flags)
{
	unsigned int match_pri;

	for (match_pri = 0;
	     match_pri < table->rx_match_count;
	     match_pri++)
		if (table->rx_match_flags[match_pri] == match_flags)
			return match_pri;

	return -EPROTONOSUPPORT;
}
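/* Insertion probes up to EFX_EF10_FILTER_SEARCH_LIMIT slots of the
 * software table, open-addressed from the jhash of the spec.  Busy
 * entries force a sleep-and-retry; for multicast recipients, any
 * lower-priority matches found along the way are unsubscribed once
 * the new filter has been pushed.
 */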
static s32 efx_ef10_filter_insert(struct efx_nic *efx,
				  struct efx_filter_spec *spec,
				  bool replace_equal)
{
	struct efx_ef10_filter_table *table = efx->filter_state;
	DECLARE_BITMAP(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
	struct efx_filter_spec *saved_spec;
	unsigned int match_pri, hash;
	unsigned int priv_flags;
	bool replacing = false;
	DEFINE_WAIT(wait);
	bool is_mc_recip;
	s32 rc, ins_index = -1;

	/* For now, only support RX filters */
	if ((spec->flags & (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)) !=
	    EFX_FILTER_FLAG_RX)
		return -EINVAL;

	rc = efx_ef10_filter_rx_match_pri(table, spec->match_flags);
	if (rc < 0)
		return rc;
	match_pri = rc;

	hash = efx_ef10_filter_hash(spec);
	is_mc_recip = efx_filter_is_mc_recipient(spec);
	if (is_mc_recip)
		bitmap_zero(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);

	/* Find any existing filters with the same match tuple or
	 * else a free slot to insert at.  If any of them are busy,
	 * we have to wait and retry.
	 */
	for (;;) {
		unsigned int depth = 1;
		unsigned int i;

		spin_lock_bh(&efx->filter_lock);

		for (;;) {
			i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
			saved_spec = efx_ef10_filter_entry_spec(table, i);

			if (!saved_spec) {
				if (ins_index < 0)
					ins_index = i;
			} else if (efx_ef10_filter_equal(spec, saved_spec)) {
				if (table->entry[i].spec &
				    EFX_EF10_FILTER_FLAG_BUSY)
					break;
				if (spec->priority < saved_spec->priority &&
				    !(saved_spec->priority ==
				      EFX_FILTER_PRI_REQUIRED &&
				      saved_spec->flags &
				      EFX_FILTER_FLAG_RX_STACK)) {
					rc = -EPERM;
					goto out_unlock;
				}
				if (!is_mc_recip) {
					/* This is the only one */
					if (spec->priority ==
					    saved_spec->priority &&
					    !replace_equal) {
						rc = -EEXIST;
						goto out_unlock;
					}
					ins_index = i;
					goto found;
				} else if (spec->priority >
					   saved_spec->priority ||
					   (spec->priority ==
					    saved_spec->priority &&
					    replace_equal)) {
					if (ins_index < 0)
						ins_index = i;
					else
						__set_bit(depth, mc_rem_map);
				}
			}

			/* Once we reach the maximum search depth, use
			 * the first suitable slot or return -EBUSY if
			 * there was none
			 */
			if (depth == EFX_EF10_FILTER_SEARCH_LIMIT) {
				if (ins_index < 0) {
					rc = -EBUSY;
					goto out_unlock;
				}
				goto found;
			}

			++depth;
		}

		prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_bh(&efx->filter_lock);
		schedule();
	}

found:
	/* Create a software table entry if necessary, and mark it
	 * busy.  We might yet fail to insert, but any attempt to
	 * insert a conflicting filter while we're waiting for the
	 * firmware must find the busy entry.
	 */
	saved_spec = efx_ef10_filter_entry_spec(table, ins_index);
	if (saved_spec) {
		if (spec->flags & EFX_FILTER_FLAG_RX_STACK) {
			/* Just make sure it won't be removed */
			saved_spec->flags |= EFX_FILTER_FLAG_RX_STACK;
			table->entry[ins_index].spec &=
				~EFX_EF10_FILTER_FLAG_STACK_OLD;
			rc = ins_index;
			goto out_unlock;
		}
		replacing = true;
		priv_flags = efx_ef10_filter_entry_flags(table, ins_index);
	} else {
		saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC);
		if (!saved_spec) {
			rc = -ENOMEM;
			goto out_unlock;
		}
		*saved_spec = *spec;
		priv_flags = 0;
	}
	efx_ef10_filter_set_entry(table, ins_index, saved_spec,
				  priv_flags | EFX_EF10_FILTER_FLAG_BUSY);

	/* Mark lower-priority multicast recipients busy prior to removal */
	if (is_mc_recip) {
		unsigned int depth, i;

		for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
			i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
			if (test_bit(depth, mc_rem_map))
				table->entry[i].spec |=
					EFX_EF10_FILTER_FLAG_BUSY;
		}
	}

	spin_unlock_bh(&efx->filter_lock);

	rc = efx_ef10_filter_push(efx, spec, &table->entry[ins_index].handle,
				  replacing);

	/* Finalise the software table entry */
	spin_lock_bh(&efx->filter_lock);
	if (rc == 0) {
		if (replacing) {
			/* Update the fields that may differ */
			saved_spec->priority = spec->priority;
			saved_spec->flags &= EFX_FILTER_FLAG_RX_STACK;
			saved_spec->flags |= spec->flags;
			saved_spec->rss_context = spec->rss_context;
			saved_spec->dmaq_id = spec->dmaq_id;
		}
	} else if (!replacing) {
		kfree(saved_spec);
		saved_spec = NULL;
	}
	efx_ef10_filter_set_entry(table, ins_index, saved_spec, priv_flags);

	/* Remove and finalise entries for lower-priority multicast
	 * recipients
	 */
	if (is_mc_recip) {
		MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
		unsigned int depth, i;

		memset(inbuf, 0, sizeof(inbuf));

		for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
			if (!test_bit(depth, mc_rem_map))
				continue;

			i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
			saved_spec = efx_ef10_filter_entry_spec(table, i);
			priv_flags = efx_ef10_filter_entry_flags(table, i);

			if (rc == 0) {
				spin_unlock_bh(&efx->filter_lock);
				MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
					       MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
				MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
					       table->entry[i].handle);
				rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP,
						  inbuf, sizeof(inbuf),
						  NULL, 0, NULL);
				spin_lock_bh(&efx->filter_lock);
			}

			if (rc == 0) {
				kfree(saved_spec);
				saved_spec = NULL;
				priv_flags = 0;
			} else {
				priv_flags &= ~EFX_EF10_FILTER_FLAG_BUSY;
			}
			efx_ef10_filter_set_entry(table, i, saved_spec,
						  priv_flags);
		}
	}

	/* If successful, return the inserted filter ID */
	if (rc == 0)
		rc = match_pri * HUNT_FILTER_TBL_ROWS + ins_index;

	wake_up_all(&table->waitq);
out_unlock:
	spin_unlock_bh(&efx->filter_lock);
	finish_wait(&table->waitq, &wait);
	return rc;
}
static void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx)
{
	/* no need to do anything here on EF10 */
}
/* Remove a filter.
 * If !stack_requested, remove by ID
 * If stack_requested, remove by index
 * Filter ID may come from userland and must be range-checked.
 */
static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
					   enum efx_filter_priority priority,
					   u32 filter_id, bool stack_requested)
{
	unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS;
	struct efx_ef10_filter_table *table = efx->filter_state;
	MCDI_DECLARE_BUF(inbuf,
			 MC_CMD_FILTER_OP_IN_HANDLE_OFST +
			 MC_CMD_FILTER_OP_IN_HANDLE_LEN);
	struct efx_filter_spec *spec;
	DEFINE_WAIT(wait);
	int rc;

	/* Find the software table entry and mark it busy.  Don't
	 * remove it yet; any attempt to update while we're waiting
	 * for the firmware must find the busy entry.
	 */
	for (;;) {
		spin_lock_bh(&efx->filter_lock);
		if (!(table->entry[filter_idx].spec &
		      EFX_EF10_FILTER_FLAG_BUSY))
			break;
		prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_bh(&efx->filter_lock);
		schedule();
	}
	spec = efx_ef10_filter_entry_spec(table, filter_idx);
	if (!spec || spec->priority > priority ||
	    (!stack_requested &&
	     efx_ef10_filter_rx_match_pri(table, spec->match_flags) !=
	     filter_id / HUNT_FILTER_TBL_ROWS)) {
		rc = -ENOENT;
		goto out_unlock;
	}
	table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
	spin_unlock_bh(&efx->filter_lock);

	if (spec->flags & EFX_FILTER_FLAG_RX_STACK && !stack_requested) {
		/* Reset steering of a stack-owned filter */

		struct efx_filter_spec new_spec = *spec;

		new_spec.priority = EFX_FILTER_PRI_REQUIRED;
		new_spec.flags = (EFX_FILTER_FLAG_RX |
				  EFX_FILTER_FLAG_RX_RSS |
				  EFX_FILTER_FLAG_RX_STACK);
		new_spec.dmaq_id = 0;
		new_spec.rss_context = EFX_FILTER_RSS_CONTEXT_DEFAULT;
		rc = efx_ef10_filter_push(efx, &new_spec,
					  &table->entry[filter_idx].handle,
					  true);

		spin_lock_bh(&efx->filter_lock);
		if (rc == 0)
			*spec = new_spec;
	} else {
		/* Really remove the filter */

		MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
			       efx_ef10_filter_is_exclusive(spec) ?
			       MC_CMD_FILTER_OP_IN_OP_REMOVE :
			       MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
		MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
			       table->entry[filter_idx].handle);
		rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP,
				  inbuf, sizeof(inbuf), NULL, 0, NULL);

		spin_lock_bh(&efx->filter_lock);
		if (rc == 0) {
			kfree(spec);
			efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
		}
	}
	table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY;
	wake_up_all(&table->waitq);
out_unlock:
	spin_unlock_bh(&efx->filter_lock);
	finish_wait(&table->waitq, &wait);
	return rc;
}
static int efx_ef10_filter_remove_safe(struct efx_nic *efx,
				       enum efx_filter_priority priority,
				       u32 filter_id)
{
	return efx_ef10_filter_remove_internal(efx, priority, filter_id, false);
}
static int efx_ef10_filter_get_safe(struct efx_nic *efx,
				    enum efx_filter_priority priority,
				    u32 filter_id, struct efx_filter_spec *spec)
{
	unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS;
	struct efx_ef10_filter_table *table = efx->filter_state;
	const struct efx_filter_spec *saved_spec;
	int rc;

	spin_lock_bh(&efx->filter_lock);
	saved_spec = efx_ef10_filter_entry_spec(table, filter_idx);
	if (saved_spec && saved_spec->priority == priority &&
	    efx_ef10_filter_rx_match_pri(table, saved_spec->match_flags) ==
	    filter_id / HUNT_FILTER_TBL_ROWS) {
		*spec = *saved_spec;
		rc = 0;
	} else {
		rc = -ENOENT;
	}
	spin_unlock_bh(&efx->filter_lock);
	return rc;
}
static void efx_ef10_filter_clear_rx(struct efx_nic *efx,
				     enum efx_filter_priority priority)
{
	/* TODO */
}
static u32 efx_ef10_filter_count_rx_used(struct efx_nic *efx,
					 enum efx_filter_priority priority)
{
	struct efx_ef10_filter_table *table = efx->filter_state;
	unsigned int filter_idx;
	s32 count = 0;

	spin_lock_bh(&efx->filter_lock);
	for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
		if (table->entry[filter_idx].spec &&
		    efx_ef10_filter_entry_spec(table, filter_idx)->priority ==
		    priority)
			++count;
	}
	spin_unlock_bh(&efx->filter_lock);
	return count;
}
static u32 efx_ef10_filter_get_rx_id_limit(struct efx_nic *efx)
{
	struct efx_ef10_filter_table *table = efx->filter_state;

	return table->rx_match_count * HUNT_FILTER_TBL_ROWS;
}
static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx,
				      enum efx_filter_priority priority,
				      u32 *buf, u32 size)
{
	struct efx_ef10_filter_table *table = efx->filter_state;
	struct efx_filter_spec *spec;
	unsigned int filter_idx;
	s32 count = 0;

	spin_lock_bh(&efx->filter_lock);
	for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
		spec = efx_ef10_filter_entry_spec(table, filter_idx);
		if (spec && spec->priority == priority) {
			if (count == size) {
				count = -EMSGSIZE;
				break;
			}
			buf[count++] = (efx_ef10_filter_rx_match_pri(
						table, spec->match_flags) *
					HUNT_FILTER_TBL_ROWS +
					filter_idx);
		}
	}
	spin_unlock_bh(&efx->filter_lock);
	return count;
}
#ifdef CONFIG_RFS_ACCEL

static efx_mcdi_async_completer efx_ef10_filter_rfs_insert_complete;
static s32 efx_ef10_filter_rfs_insert(struct efx_nic *efx,
				      struct efx_filter_spec *spec)
{
	struct efx_ef10_filter_table *table = efx->filter_state;
	MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
	struct efx_filter_spec *saved_spec;
	unsigned int hash, i, depth = 1;
	bool replacing = false;
	int ins_index = -1;
	u64 cookie;
	s32 rc;

	/* Must be an RX filter without RSS and not for a multicast
	 * destination address (RFS only works for connected sockets).
	 * These restrictions allow us to pass only a tiny amount of
	 * data through to the completion function.
	 */
	EFX_WARN_ON_PARANOID(spec->flags !=
			     (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_SCATTER));
	EFX_WARN_ON_PARANOID(spec->priority != EFX_FILTER_PRI_HINT);
	EFX_WARN_ON_PARANOID(efx_filter_is_mc_recipient(spec));

	hash = efx_ef10_filter_hash(spec);

	spin_lock_bh(&efx->filter_lock);

	/* Find any existing filter with the same match tuple or else
	 * a free slot to insert at.  If an existing filter is busy,
	 * we have to give up.
	 */
	for (;;) {
		i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
		saved_spec = efx_ef10_filter_entry_spec(table, i);

		if (!saved_spec) {
			if (ins_index < 0)
				ins_index = i;
		} else if (efx_ef10_filter_equal(spec, saved_spec)) {
			if (table->entry[i].spec & EFX_EF10_FILTER_FLAG_BUSY) {
				rc = -EBUSY;
				goto fail_unlock;
			}
			EFX_WARN_ON_PARANOID(saved_spec->flags &
					     EFX_FILTER_FLAG_RX_STACK);
			if (spec->priority < saved_spec->priority) {
				rc = -EPERM;
				goto fail_unlock;
			}
			ins_index = i;
			break;
		}

		/* Once we reach the maximum search depth, use the
		 * first suitable slot or return -EBUSY if there was
		 * none
		 */
		if (depth == EFX_EF10_FILTER_SEARCH_LIMIT) {
			if (ins_index < 0) {
				rc = -EBUSY;
				goto fail_unlock;
			}
			break;
		}

		++depth;
	}

	/* Create a software table entry if necessary, and mark it
	 * busy.  We might yet fail to insert, but any attempt to
	 * insert a conflicting filter while we're waiting for the
	 * firmware must find the busy entry.
	 */
	saved_spec = efx_ef10_filter_entry_spec(table, ins_index);
	if (saved_spec) {
		replacing = true;
	} else {
		saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC);
		if (!saved_spec) {
			rc = -ENOMEM;
			goto fail_unlock;
		}
		*saved_spec = *spec;
	}
	efx_ef10_filter_set_entry(table, ins_index, saved_spec,
				  EFX_EF10_FILTER_FLAG_BUSY);

	spin_unlock_bh(&efx->filter_lock);

	/* Pack up the variables needed on completion */
	cookie = replacing << 31 | ins_index << 16 | spec->dmaq_id;

	efx_ef10_filter_push_prep(efx, spec, inbuf,
				  table->entry[ins_index].handle, replacing);
	efx_mcdi_rpc_async(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
			   MC_CMD_FILTER_OP_OUT_LEN,
			   efx_ef10_filter_rfs_insert_complete, cookie);

	return ins_index;

fail_unlock:
	spin_unlock_bh(&efx->filter_lock);
	return rc;
}
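/* The cookie built in efx_ef10_filter_rfs_insert() packs bit 31 =
 * replacing, bits 30:16 = software table index, bits 15:0 = RX queue;
 * it is unpacked again here in the completion.
 */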
static void
efx_ef10_filter_rfs_insert_complete(struct efx_nic *efx, unsigned long cookie,
				    int rc, efx_dword_t *outbuf,
				    size_t outlen_actual)
{
	struct efx_ef10_filter_table *table = efx->filter_state;
	unsigned int ins_index, dmaq_id;
	struct efx_filter_spec *spec;
	bool replacing;

	/* Unpack the cookie */
	replacing = cookie >> 31;
	ins_index = (cookie >> 16) & (HUNT_FILTER_TBL_ROWS - 1);
	dmaq_id = cookie & 0xffff;

	spin_lock_bh(&efx->filter_lock);
	spec = efx_ef10_filter_entry_spec(table, ins_index);
	if (rc == 0) {
		table->entry[ins_index].handle =
			MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE);
		if (replacing)
			spec->dmaq_id = dmaq_id;
	} else if (!replacing) {
		kfree(spec);
		spec = NULL;
	}
	efx_ef10_filter_set_entry(table, ins_index, spec, 0);
	spin_unlock_bh(&efx->filter_lock);

	wake_up_all(&table->waitq);
}
static void
efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx,
				    unsigned long filter_idx,
				    int rc, efx_dword_t *outbuf,
				    size_t outlen_actual);
static bool efx_ef10_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
					   unsigned int filter_idx)
{
	struct efx_ef10_filter_table *table = efx->filter_state;
	struct efx_filter_spec *spec =
		efx_ef10_filter_entry_spec(table, filter_idx);
	MCDI_DECLARE_BUF(inbuf,
			 MC_CMD_FILTER_OP_IN_HANDLE_OFST +
			 MC_CMD_FILTER_OP_IN_HANDLE_LEN);

	if (!spec ||
	    (table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAG_BUSY) ||
	    spec->priority != EFX_FILTER_PRI_HINT ||
	    !rps_may_expire_flow(efx->net_dev, spec->dmaq_id,
				 flow_id, filter_idx))
		return false;

	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
		       MC_CMD_FILTER_OP_IN_OP_REMOVE);
	MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
		       table->entry[filter_idx].handle);
	if (efx_mcdi_rpc_async(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), 0,
			       efx_ef10_filter_rfs_expire_complete, filter_idx))
		return false;

	table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
	return true;
}
static void
efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx,
				    unsigned long filter_idx,
				    int rc, efx_dword_t *outbuf,
				    size_t outlen_actual)
{
	struct efx_ef10_filter_table *table = efx->filter_state;
	struct efx_filter_spec *spec =
		efx_ef10_filter_entry_spec(table, filter_idx);

	spin_lock_bh(&efx->filter_lock);
	if (rc == 0) {
		kfree(spec);
		efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
	}
	table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY;
	wake_up_all(&table->waitq);
	spin_unlock_bh(&efx->filter_lock);
}

#endif /* CONFIG_RFS_ACCEL */
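/* Translate firmware match-field bits back into EFX_FILTER_MATCH_*
 * flags.  Each MAP_FLAG() clears the bit it recognises, so any bits
 * still set at the end denote a match type the driver cannot express.
 */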
static int efx_ef10_filter_match_flags_from_mcdi(u32 mcdi_flags)
{
	int match_flags = 0;

#define MAP_FLAG(gen_flag, mcdi_field) {				\
		u32 old_mcdi_flags = mcdi_flags;			\
		mcdi_flags &= ~(1 << MC_CMD_FILTER_OP_IN_MATCH_ ##	\
				mcdi_field ## _LBN);			\
		if (mcdi_flags != old_mcdi_flags)			\
			match_flags |= EFX_FILTER_MATCH_ ## gen_flag;	\
	}
	MAP_FLAG(LOC_MAC_IG, UNKNOWN_UCAST_DST);
	MAP_FLAG(LOC_MAC_IG, UNKNOWN_MCAST_DST);
	MAP_FLAG(REM_HOST, SRC_IP);
	MAP_FLAG(LOC_HOST, DST_IP);
	MAP_FLAG(REM_MAC, SRC_MAC);
	MAP_FLAG(REM_PORT, SRC_PORT);
	MAP_FLAG(LOC_MAC, DST_MAC);
	MAP_FLAG(LOC_PORT, DST_PORT);
	MAP_FLAG(ETHER_TYPE, ETHER_TYPE);
	MAP_FLAG(INNER_VID, INNER_VLAN);
	MAP_FLAG(OUTER_VID, OUTER_VLAN);
	MAP_FLAG(IP_PROTO, IP_PROTO);
#undef MAP_FLAG

	/* Did we map them all? */
	if (mcdi_flags)
		return -EINVAL;

	return match_flags;
}
static int efx_ef10_filter_table_probe(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PARSER_DISP_INFO_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX);
	unsigned int pd_match_pri, pd_match_count;
	struct efx_ef10_filter_table *table;
	size_t outlen;
	int rc;

	table = kzalloc(sizeof(*table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	/* Find out which RX filter types are supported, and their priorities */
	MCDI_SET_DWORD(inbuf, GET_PARSER_DISP_INFO_IN_OP,
		       MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES);
	rc = efx_mcdi_rpc(efx, MC_CMD_GET_PARSER_DISP_INFO,
			  inbuf, sizeof(inbuf), outbuf, sizeof(outbuf),
			  &outlen);
	if (rc)
		goto fail;
	pd_match_count = MCDI_VAR_ARRAY_LEN(
		outlen, GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES);
	table->rx_match_count = 0;

	for (pd_match_pri = 0; pd_match_pri < pd_match_count; pd_match_pri++) {
		u32 mcdi_flags =
			MCDI_ARRAY_DWORD(
				outbuf,
				GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES,
				pd_match_pri);
		rc = efx_ef10_filter_match_flags_from_mcdi(mcdi_flags);
		if (rc < 0) {
			netif_dbg(efx, probe, efx->net_dev,
				  "%s: fw flags %#x pri %u not supported in driver\n",
				  __func__, mcdi_flags, pd_match_pri);
		} else {
			netif_dbg(efx, probe, efx->net_dev,
				  "%s: fw flags %#x pri %u supported as driver flags %#x pri %u\n",
				  __func__, mcdi_flags, pd_match_pri,
				  rc, table->rx_match_count);
			table->rx_match_flags[table->rx_match_count++] = rc;
		}
	}

	table->entry = vzalloc(HUNT_FILTER_TBL_ROWS * sizeof(*table->entry));
	if (!table->entry) {
		rc = -ENOMEM;
		goto fail;
	}

	efx->filter_state = table;
	init_waitqueue_head(&table->waitq);
	return 0;

fail:
	kfree(table);
	return rc;
}
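/* An MC reboot discards all firmware filter state.  Re-push every
 * entry still held in the software table, freeing any that the
 * firmware no longer accepts.
 */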
static void efx_ef10_filter_table_restore(struct efx_nic *efx)
{
	struct efx_ef10_filter_table *table = efx->filter_state;
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	struct efx_filter_spec *spec;
	unsigned int filter_idx;
	bool failed = false;
	int rc;

	if (!nic_data->must_restore_filters)
		return;

	spin_lock_bh(&efx->filter_lock);

	for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
		spec = efx_ef10_filter_entry_spec(table, filter_idx);
		if (!spec)
			continue;

		table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
		spin_unlock_bh(&efx->filter_lock);

		rc = efx_ef10_filter_push(efx, spec,
					  &table->entry[filter_idx].handle,
					  false);
		if (rc)
			failed = true;

		spin_lock_bh(&efx->filter_lock);
		if (rc) {
			kfree(spec);
			efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
		} else {
			table->entry[filter_idx].spec &=
				~EFX_EF10_FILTER_FLAG_BUSY;
		}
	}

	spin_unlock_bh(&efx->filter_lock);

	if (failed)
		netif_err(efx, hw, efx->net_dev,
			  "unable to restore all filters\n");
	else
		nic_data->must_restore_filters = false;
}
static void efx_ef10_filter_table_remove(struct efx_nic *efx)
{
	struct efx_ef10_filter_table *table = efx->filter_state;
	MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
	struct efx_filter_spec *spec;
	unsigned int filter_idx;
	int rc;

	for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
		spec = efx_ef10_filter_entry_spec(table, filter_idx);
		if (!spec)
			continue;

		MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
			       efx_ef10_filter_is_exclusive(spec) ?
			       MC_CMD_FILTER_OP_IN_OP_REMOVE :
			       MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
		MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
			       table->entry[filter_idx].handle);
		rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
				  NULL, 0, NULL);
		WARN_ON(rc != 0);
		kfree(spec);
	}

	vfree(table->entry);
	kfree(table);
}
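/* Renew the hardware filters for the current net_device address lists:
 * filters for addresses still present are re-inserted (which clears
 * their STACK_OLD mark), and whatever is left marked afterwards is
 * swept away by efx_ef10_filter_remove_internal().
 */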
static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
{
	struct efx_ef10_filter_table *table = efx->filter_state;
	struct net_device *net_dev = efx->net_dev;
	struct efx_filter_spec spec;
	bool remove_failed = false;
	struct netdev_hw_addr *uc;
	struct netdev_hw_addr *mc;
	unsigned int filter_idx;
	int i, n, rc;

	if (!efx_dev_registered(efx))
		return;

	/* Mark old filters that may need to be removed */
	spin_lock_bh(&efx->filter_lock);
	n = table->stack_uc_count < 0 ? 1 : table->stack_uc_count;
	for (i = 0; i < n; i++) {
		filter_idx = table->stack_uc_list[i].id % HUNT_FILTER_TBL_ROWS;
		table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_STACK_OLD;
	}
	n = table->stack_mc_count < 0 ? 1 : table->stack_mc_count;
	for (i = 0; i < n; i++) {
		filter_idx = table->stack_mc_list[i].id % HUNT_FILTER_TBL_ROWS;
		table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_STACK_OLD;
	}
	spin_unlock_bh(&efx->filter_lock);

	/* Copy/convert the address lists; add the primary station
	 * address and broadcast address
	 */
	netif_addr_lock_bh(net_dev);
	if (net_dev->flags & IFF_PROMISC ||
	    netdev_uc_count(net_dev) >= EFX_EF10_FILTER_STACK_UC_MAX) {
		table->stack_uc_count = -1;
	} else {
		table->stack_uc_count = 1 + netdev_uc_count(net_dev);
		memcpy(table->stack_uc_list[0].addr, net_dev->dev_addr,
		       ETH_ALEN);
		i = 1;
		netdev_for_each_uc_addr(uc, net_dev) {
			memcpy(table->stack_uc_list[i].addr,
			       uc->addr, ETH_ALEN);
			i++;
		}
	}
	if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI) ||
	    netdev_mc_count(net_dev) >= EFX_EF10_FILTER_STACK_MC_MAX) {
		table->stack_mc_count = -1;
	} else {
		table->stack_mc_count = 1 + netdev_mc_count(net_dev);
		eth_broadcast_addr(table->stack_mc_list[0].addr);
		i = 1;
		netdev_for_each_mc_addr(mc, net_dev) {
			memcpy(table->stack_mc_list[i].addr,
			       mc->addr, ETH_ALEN);
			i++;
		}
	}
	netif_addr_unlock_bh(net_dev);

	/* Insert/renew unicast filters */
	if (table->stack_uc_count >= 0) {
		for (i = 0; i < table->stack_uc_count; i++) {
			efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
					   EFX_FILTER_FLAG_RX_RSS |
					   EFX_FILTER_FLAG_RX_STACK,
					   0);
			efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
						 table->stack_uc_list[i].addr);
			rc = efx_ef10_filter_insert(efx, &spec, true);
			if (rc < 0) {
				/* Fall back to unicast-promisc */
				while (i--)
					efx_ef10_filter_remove_safe(
						efx, EFX_FILTER_PRI_REQUIRED,
						table->stack_uc_list[i].id);
				table->stack_uc_count = -1;
				break;
			}
			table->stack_uc_list[i].id = rc;
		}
	}
	if (table->stack_uc_count < 0) {
		efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
				   EFX_FILTER_FLAG_RX_RSS |
				   EFX_FILTER_FLAG_RX_STACK,
				   0);
		efx_filter_set_uc_def(&spec);
		rc = efx_ef10_filter_insert(efx, &spec, true);
		if (rc < 0) {
			WARN_ON(1);
			table->stack_uc_count = 0;
		} else {
			table->stack_uc_list[0].id = rc;
		}
	}

	/* Insert/renew multicast filters */
	if (table->stack_mc_count >= 0) {
		for (i = 0; i < table->stack_mc_count; i++) {
			efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
					   EFX_FILTER_FLAG_RX_RSS |
					   EFX_FILTER_FLAG_RX_STACK,
					   0);
			efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
						 table->stack_mc_list[i].addr);
			rc = efx_ef10_filter_insert(efx, &spec, true);
			if (rc < 0) {
				/* Fall back to multicast-promisc */
				while (i--)
					efx_ef10_filter_remove_safe(
						efx, EFX_FILTER_PRI_REQUIRED,
						table->stack_mc_list[i].id);
				table->stack_mc_count = -1;
				break;
			}
			table->stack_mc_list[i].id = rc;
		}
	}
	if (table->stack_mc_count < 0) {
		efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
				   EFX_FILTER_FLAG_RX_RSS |
				   EFX_FILTER_FLAG_RX_STACK,
				   0);
		efx_filter_set_mc_def(&spec);
		rc = efx_ef10_filter_insert(efx, &spec, true);
		if (rc < 0) {
			WARN_ON(1);
			table->stack_mc_count = 0;
		} else {
			table->stack_mc_list[0].id = rc;
		}
	}

	/* Remove filters that weren't renewed.  Since nothing else
	 * changes the STACK_OLD flag or removes these filters, we
	 * don't need to hold the filter_lock while scanning for
	 * these filters.
	 */
	for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
		if (ACCESS_ONCE(table->entry[i].spec) &
		    EFX_EF10_FILTER_FLAG_STACK_OLD) {
			if (efx_ef10_filter_remove_internal(efx,
					EFX_FILTER_PRI_REQUIRED,
					i, true) < 0)
				remove_failed = true;
		}
	}
	WARN_ON(remove_failed);
}
static int efx_ef10_mac_reconfigure(struct efx_nic *efx)
{
	efx_ef10_filter_sync_rx_mode(efx);

	return efx_mcdi_set_mac(efx);
}
static int efx_ef10_start_bist(struct efx_nic *efx, u32 bist_type)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_START_BIST_IN_LEN);

	MCDI_SET_DWORD(inbuf, START_BIST_IN_TYPE, bist_type);
	return efx_mcdi_rpc(efx, MC_CMD_START_BIST, inbuf, sizeof(inbuf),
			    NULL, 0, NULL);
}
/* MC BISTs follow a different poll mechanism to phy BISTs.
 * The BIST is done in the poll handler on the MC, and the MCDI command
 * will block until the BIST is done.
 */
static int efx_ef10_poll_bist(struct efx_nic *efx)
{
	int rc;
	MCDI_DECLARE_BUF(outbuf, MC_CMD_POLL_BIST_OUT_LEN);
	size_t outlen;
	u32 result;

	rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc != 0)
		return rc;

	if (outlen < MC_CMD_POLL_BIST_OUT_LEN)
		return -EIO;

	result = MCDI_DWORD(outbuf, POLL_BIST_OUT_RESULT);
	switch (result) {
	case MC_CMD_POLL_BIST_PASSED:
		netif_dbg(efx, hw, efx->net_dev, "BIST passed.\n");
		return 0;
	case MC_CMD_POLL_BIST_TIMEOUT:
		netif_err(efx, hw, efx->net_dev, "BIST timed out\n");
		return -EIO;
	case MC_CMD_POLL_BIST_FAILED:
		netif_err(efx, hw, efx->net_dev, "BIST failed.\n");
		return -EIO;
	default:
		netif_err(efx, hw, efx->net_dev,
			  "BIST returned unknown result %u", result);
		return -EIO;
	}
}
static int efx_ef10_run_bist(struct efx_nic *efx, u32 bist_type)
{
	int rc;

	netif_dbg(efx, drv, efx->net_dev, "starting BIST type %u\n", bist_type);

	rc = efx_ef10_start_bist(efx, bist_type);
	if (rc != 0)
		return rc;

	return efx_ef10_poll_bist(efx);
}
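/* Offline BISTs disturb the datapath, so the port is taken down around
 * them with efx_reset_down() and brought back with a WORLD reset.
 */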
static int
efx_ef10_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
{
	int rc, rc2;

	efx_reset_down(efx, RESET_TYPE_WORLD);

	rc = efx_mcdi_rpc(efx, MC_CMD_ENABLE_OFFLINE_BIST,
			  NULL, 0, NULL, 0, NULL);
	if (rc != 0)
		goto out;

	tests->memory = efx_ef10_run_bist(efx, MC_CMD_MC_MEM_BIST) ? -1 : 1;
	tests->registers = efx_ef10_run_bist(efx, MC_CMD_REG_BIST) ? -1 : 1;

	rc = efx_mcdi_reset(efx, RESET_TYPE_WORLD);

out:
	rc2 = efx_reset_up(efx, RESET_TYPE_WORLD, rc == 0);
	return rc ? rc : rc2;
}
#ifdef CONFIG_SFC_MTD

struct efx_ef10_nvram_type_info {
	u16 type, type_mask;
	u8 port;
	const char *name;
};

static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = {
	{ NVRAM_PARTITION_TYPE_MC_FIRMWARE,	   0,    0, "sfc_mcfw" },
	{ NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 0,    0, "sfc_mcfw_backup" },
	{ NVRAM_PARTITION_TYPE_EXPANSION_ROM,	   0,    0, "sfc_exp_rom" },
	{ NVRAM_PARTITION_TYPE_STATIC_CONFIG,	   0,    0, "sfc_static_cfg" },
	{ NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG,	   0,    0, "sfc_dynamic_cfg" },
	{ NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 0,   0, "sfc_exp_rom_cfg" },
	{ NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1, 0,   1, "sfc_exp_rom_cfg" },
	{ NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2, 0,   2, "sfc_exp_rom_cfg" },
	{ NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3, 0,   3, "sfc_exp_rom_cfg" },
	{ NVRAM_PARTITION_TYPE_PHY_MIN,		   0xff, 0, "sfc_phy_fw" },
};
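/* Match a partition type against the table above; bits covered by
 * type_mask are ignored in the comparison, so e.g. the PHY entry with
 * mask 0xff spans the whole range of per-PHY partition types.
 */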
static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
					struct efx_mcdi_mtd_partition *part,
					unsigned int type)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_METADATA_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_METADATA_OUT_LENMAX);
	const struct efx_ef10_nvram_type_info *info;
	size_t size, erase_size, outlen;
	bool protected;
	int rc;

	for (info = efx_ef10_nvram_types; ; info++) {
		if (info ==
		    efx_ef10_nvram_types + ARRAY_SIZE(efx_ef10_nvram_types))
			return -ENODEV;
		if ((type & ~info->type_mask) == info->type)
			break;
	}
	if (info->port != efx_port_num(efx))
		return -ENODEV;

	rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected);
	if (rc)
		return rc;
	if (protected)
		return -ENODEV; /* hide it */

	part->nvram_type = type;

	MCDI_SET_DWORD(inbuf, NVRAM_METADATA_IN_TYPE, type);
	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_METADATA, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_NVRAM_METADATA_OUT_LENMIN)
		return -EIO;
	if (MCDI_DWORD(outbuf, NVRAM_METADATA_OUT_FLAGS) &
	    (1 << MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN))
		part->fw_subtype = MCDI_DWORD(outbuf,
					      NVRAM_METADATA_OUT_SUBTYPE);

	part->common.dev_type_name = "EF10 NVRAM manager";
	part->common.type_name = info->name;

	part->common.mtd.type = MTD_NORFLASH;
	part->common.mtd.flags = MTD_CAP_NORFLASH;
	part->common.mtd.size = size;
	part->common.mtd.erasesize = erase_size;

	return 0;
}
static int efx_ef10_mtd_probe(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX);
	struct efx_mcdi_mtd_partition *parts;
	size_t outlen, n_parts_total, i, n_parts;
	unsigned int type;
	int rc;

	ASSERT_RTNL();

	BUILD_BUG_ON(MC_CMD_NVRAM_PARTITIONS_IN_LEN != 0);
	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_PARTITIONS, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN)
		return -EIO;

	n_parts_total = MCDI_DWORD(outbuf, NVRAM_PARTITIONS_OUT_NUM_PARTITIONS);
	if (n_parts_total >
	    MCDI_VAR_ARRAY_LEN(outlen, NVRAM_PARTITIONS_OUT_TYPE_ID))
		return -EIO;

	parts = kcalloc(n_parts_total, sizeof(*parts), GFP_KERNEL);
	if (!parts)
		return -ENOMEM;

	n_parts = 0;
	for (i = 0; i < n_parts_total; i++) {
		type = MCDI_ARRAY_DWORD(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID,
					i);
		rc = efx_ef10_mtd_probe_partition(efx, &parts[n_parts], type);
		if (rc == 0)
			n_parts++;
		else if (rc != -ENODEV)
			goto fail;
	}

	rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
fail:
	if (rc)
		kfree(parts);
	return rc;
}

#endif /* CONFIG_SFC_MTD */
static void efx_ef10_ptp_write_host_time(struct efx_nic *efx, u32 host_time)
{
	_efx_writed(efx, cpu_to_le32(host_time), ER_DZ_MC_DB_LWRD);
}
const struct efx_nic_type efx_hunt_a0_nic_type = {
	.mem_map_size = efx_ef10_mem_map_size,
	.probe = efx_ef10_probe,
	.remove = efx_ef10_remove,
	.dimension_resources = efx_ef10_dimension_resources,
	.init = efx_ef10_init_nic,
	.fini = efx_port_dummy_op_void,
	.map_reset_reason = efx_mcdi_map_reset_reason,
	.map_reset_flags = efx_ef10_map_reset_flags,
	.reset = efx_mcdi_reset,
	.probe_port = efx_mcdi_port_probe,
	.remove_port = efx_mcdi_port_remove,
	.fini_dmaq = efx_ef10_fini_dmaq,
	.describe_stats = efx_ef10_describe_stats,
	.update_stats = efx_ef10_update_stats,
	.start_stats = efx_mcdi_mac_start_stats,
	.stop_stats = efx_mcdi_mac_stop_stats,
	.set_id_led = efx_mcdi_set_id_led,
	.push_irq_moderation = efx_ef10_push_irq_moderation,
	.reconfigure_mac = efx_ef10_mac_reconfigure,
	.check_mac_fault = efx_mcdi_mac_check_fault,
	.reconfigure_port = efx_mcdi_port_reconfigure,
	.get_wol = efx_ef10_get_wol,
	.set_wol = efx_ef10_set_wol,
	.resume_wol = efx_port_dummy_op_void,
	.test_chip = efx_ef10_test_chip,
	.test_nvram = efx_mcdi_nvram_test_all,
	.mcdi_request = efx_ef10_mcdi_request,
	.mcdi_poll_response = efx_ef10_mcdi_poll_response,
	.mcdi_read_response = efx_ef10_mcdi_read_response,
	.mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot,
	.irq_enable_master = efx_port_dummy_op_void,
	.irq_test_generate = efx_ef10_irq_test_generate,
	.irq_disable_non_ev = efx_port_dummy_op_void,
	.irq_handle_msi = efx_ef10_msi_interrupt,
	.irq_handle_legacy = efx_ef10_legacy_interrupt,
	.tx_probe = efx_ef10_tx_probe,
	.tx_init = efx_ef10_tx_init,
	.tx_remove = efx_ef10_tx_remove,
	.tx_write = efx_ef10_tx_write,
	.rx_push_indir_table = efx_ef10_rx_push_indir_table,
	.rx_probe = efx_ef10_rx_probe,
	.rx_init = efx_ef10_rx_init,
	.rx_remove = efx_ef10_rx_remove,
	.rx_write = efx_ef10_rx_write,
	.rx_defer_refill = efx_ef10_rx_defer_refill,
	.ev_probe = efx_ef10_ev_probe,
	.ev_init = efx_ef10_ev_init,
	.ev_fini = efx_ef10_ev_fini,
	.ev_remove = efx_ef10_ev_remove,
	.ev_process = efx_ef10_ev_process,
	.ev_read_ack = efx_ef10_ev_read_ack,
	.ev_test_generate = efx_ef10_ev_test_generate,
	.filter_table_probe = efx_ef10_filter_table_probe,
	.filter_table_restore = efx_ef10_filter_table_restore,
	.filter_table_remove = efx_ef10_filter_table_remove,
	.filter_update_rx_scatter = efx_ef10_filter_update_rx_scatter,
	.filter_insert = efx_ef10_filter_insert,
	.filter_remove_safe = efx_ef10_filter_remove_safe,
	.filter_get_safe = efx_ef10_filter_get_safe,
	.filter_clear_rx = efx_ef10_filter_clear_rx,
	.filter_count_rx_used = efx_ef10_filter_count_rx_used,
	.filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit,
	.filter_get_rx_ids = efx_ef10_filter_get_rx_ids,
#ifdef CONFIG_RFS_ACCEL
	.filter_rfs_insert = efx_ef10_filter_rfs_insert,
	.filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one,
#endif
#ifdef CONFIG_SFC_MTD
	.mtd_probe = efx_ef10_mtd_probe,
	.mtd_rename = efx_mcdi_mtd_rename,
	.mtd_read = efx_mcdi_mtd_read,
	.mtd_erase = efx_mcdi_mtd_erase,
	.mtd_write = efx_mcdi_mtd_write,
	.mtd_sync = efx_mcdi_mtd_sync,
#endif
	.ptp_write_host_time = efx_ef10_ptp_write_host_time,

	.revision = EFX_REV_HUNT_A0,
	.max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
	.rx_prefix_size = ES_DZ_RX_PREFIX_SIZE,
	.rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST,
	.can_rx_scatter = true,
	.always_rx_scatter = true,
	.max_interrupt_mode = EFX_INT_MODE_MSIX,
	.timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
	.offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			     NETIF_F_RXHASH | NETIF_F_NTUPLE),
	.mcdi_max_ver = 2,
	.max_rx_ip_filters = HUNT_FILTER_TBL_ROWS,
};