/* QLogic qede NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/version.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <asm/byteorder.h>
#include <asm/param.h>
#include <linux/io.h>
#include <linux/netdev_features.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#ifdef CONFIG_QEDE_VXLAN
#include <net/vxlan.h>
#endif
#ifdef CONFIG_QEDE_GENEVE
#include <net/geneve.h>
#endif
#include <linux/ip.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/pkt_sched.h>
#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/random.h>
#include <net/ip6_checksum.h>
#include <linux/bitops.h>

#include "qede.h"

static char version[] =
	"QLogic FastLinQ 4xxxx Ethernet Driver qede " DRV_MODULE_VERSION "\n";

MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static uint debug;
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static const struct qed_eth_ops *qed_ops;
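/* Usage note: 'debug' is a module-load-time msglevel, e.g. something like
 *	modprobe qede debug=0x...
 * where the value is assumed to follow the NETIF_MSG_* / QED_MSG_* bit
 * layout consumed by the DP_VERBOSE()/DP_NOTICE() macros in this driver.
 */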
#define CHIP_NUM_57980S_40		0x1634
#define CHIP_NUM_57980S_10		0x1666
#define CHIP_NUM_57980S_MF		0x1636
#define CHIP_NUM_57980S_100		0x1644
#define CHIP_NUM_57980S_50		0x1654
#define CHIP_NUM_57980S_25		0x1656
#define CHIP_NUM_57980S_IOV		0x1664

#ifndef PCI_DEVICE_ID_NX2_57980E
#define PCI_DEVICE_ID_57980S_40		CHIP_NUM_57980S_40
#define PCI_DEVICE_ID_57980S_10		CHIP_NUM_57980S_10
#define PCI_DEVICE_ID_57980S_MF		CHIP_NUM_57980S_MF
#define PCI_DEVICE_ID_57980S_100	CHIP_NUM_57980S_100
#define PCI_DEVICE_ID_57980S_50		CHIP_NUM_57980S_50
#define PCI_DEVICE_ID_57980S_25		CHIP_NUM_57980S_25
#define PCI_DEVICE_ID_57980S_IOV	CHIP_NUM_57980S_IOV
#endif

enum qede_pci_private {
	QEDE_PRIVATE_PF,
	QEDE_PRIVATE_VF
};
static const struct pci_device_id qede_pci_tbl[] = {
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF},
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, qede_pci_tbl);
static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id);

#define TX_TIMEOUT		(5 * HZ)

static void qede_remove(struct pci_dev *pdev);
static int qede_alloc_rx_buffer(struct qede_dev *edev,
				struct qede_rx_queue *rxq);
static void qede_link_update(void *dev, struct qed_link_output *link);
#ifdef CONFIG_QED_SRIOV
static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos)
{
	struct qede_dev *edev = netdev_priv(ndev);

	if (vlan > 4095) {
		DP_NOTICE(edev, "Illegal vlan value %d\n", vlan);
		return -EINVAL;
	}

	DP_VERBOSE(edev, QED_MSG_IOV, "Setting Vlan 0x%04x to VF [%d]\n",
		   vlan, vf);

	return edev->ops->iov->set_vlan(edev->cdev, vlan, vf);
}
static int qede_set_vf_mac(struct net_device *ndev, int vfidx, u8 *mac)
{
	struct qede_dev *edev = netdev_priv(ndev);

	DP_VERBOSE(edev, QED_MSG_IOV,
		   "Setting MAC %02x:%02x:%02x:%02x:%02x:%02x to VF [%d]\n",
		   mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], vfidx);

	if (!is_valid_ether_addr(mac)) {
		DP_VERBOSE(edev, QED_MSG_IOV, "MAC address isn't valid\n");
		return -EINVAL;
	}

	return edev->ops->iov->set_mac(edev->cdev, mac, vfidx);
}
static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param)
{
	struct qede_dev *edev = netdev_priv(pci_get_drvdata(pdev));
	struct qed_dev_info *qed_info = &edev->dev_info.common;
	int rc;

	DP_VERBOSE(edev, QED_MSG_IOV, "Requested %d VFs\n", num_vfs_param);

	rc = edev->ops->iov->configure(edev->cdev, num_vfs_param);

	/* Enable/Disable Tx switching for PF */
	if ((rc == num_vfs_param) && netif_running(edev->ndev) &&
	    qed_info->mf_mode != QED_MF_NPAR && qed_info->tx_switching) {
		struct qed_update_vport_params params;

		memset(&params, 0, sizeof(params));
		params.update_tx_switching_flg = 1;
		params.tx_switching_flg = num_vfs_param ? 1 : 0;
		edev->ops->vport_update(edev->cdev, &params);
	}

	return rc;
}
#endif
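/* Note: the PCI core invokes the sriov_configure() callback above when VFs
 * are requested through sysfs, e.g.
 *	echo 2 > /sys/bus/pci/devices/<BDF>/sriov_numvfs
 * (<BDF> stands for the adapter's bus/device/function address).
 */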
static struct pci_driver qede_pci_driver = {
	.name = "qede",
	.id_table = qede_pci_tbl,
	.probe = qede_probe,
	.remove = qede_remove,
#ifdef CONFIG_QED_SRIOV
	.sriov_configure = qede_sriov_configure,
#endif
};
static void qede_force_mac(void *dev, u8 *mac)
{
	struct qede_dev *edev = dev;

	ether_addr_copy(edev->ndev->dev_addr, mac);
	ether_addr_copy(edev->primary_mac, mac);
}
static struct qed_eth_cb_ops qede_ll_ops = {
	{
		.link_update = qede_link_update,
	},
	.force_mac = qede_force_mac,
};
static int qede_netdev_event(struct notifier_block *this, unsigned long event,
			     void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct ethtool_drvinfo drvinfo;
	struct qede_dev *edev;

	/* Currently only support name change */
	if (event != NETDEV_CHANGENAME)
		goto done;

	/* Check whether this is a qede device */
	if (!ndev || !ndev->ethtool_ops || !ndev->ethtool_ops->get_drvinfo)
		goto done;

	memset(&drvinfo, 0, sizeof(drvinfo));
	ndev->ethtool_ops->get_drvinfo(ndev, &drvinfo);
	if (strcmp(drvinfo.driver, "qede"))
		goto done;
	edev = netdev_priv(ndev);

	/* Notify qed of the name change */
	if (!edev->ops || !edev->ops->common)
		goto done;
	edev->ops->common->set_id(edev->cdev, edev->ndev->name,
				  "qede");

done:
	return NOTIFY_DONE;
}

static struct notifier_block qede_netdev_notifier = {
	.notifier_call = qede_netdev_event,
};
int __init qede_init(void)
{
	int ret;

	pr_notice("qede_init: %s\n", version);

	qed_ops = qed_get_eth_ops();
	if (!qed_ops) {
		pr_notice("Failed to get qed ethtool operations\n");
		return -EINVAL;
	}

	/* Must register notifier before pci ops, since we might miss
	 * interface rename after pci probe and netdev registration.
	 */
	ret = register_netdevice_notifier(&qede_netdev_notifier);
	if (ret) {
		pr_notice("Failed to register netdevice_notifier\n");
		qed_put_eth_ops();
		return -EINVAL;
	}

	ret = pci_register_driver(&qede_pci_driver);
	if (ret) {
		pr_notice("Failed to register driver\n");
		unregister_netdevice_notifier(&qede_netdev_notifier);
		qed_put_eth_ops();
		return -EINVAL;
	}

	return 0;
}

static void __exit qede_cleanup(void)
{
	pr_notice("qede_cleanup called\n");

	unregister_netdevice_notifier(&qede_netdev_notifier);
	pci_unregister_driver(&qede_pci_driver);
	qed_put_eth_ops();
}

module_init(qede_init);
module_exit(qede_cleanup);
/* -------------------------------------------------------------------------
 * START OF FAST-PATH
 * -------------------------------------------------------------------------
 */
/* Unmap the data and free skb */
static int qede_free_tx_pkt(struct qede_dev *edev,
			    struct qede_tx_queue *txq,
			    int *len)
{
	u16 idx = txq->sw_tx_cons & NUM_TX_BDS_MAX;
	struct sk_buff *skb = txq->sw_tx_ring[idx].skb;
	struct eth_tx_1st_bd *first_bd;
	struct eth_tx_bd *tx_data_bd;
	int bds_consumed = 0;
	int nbds;
	bool data_split = txq->sw_tx_ring[idx].flags & QEDE_TSO_SPLIT_BD;
	int i, split_bd_len = 0;

	if (unlikely(!skb)) {
		DP_ERR(edev,
		       "skb is null for txq idx=%d txq->sw_tx_cons=%d txq->sw_tx_prod=%d\n",
		       idx, txq->sw_tx_cons, txq->sw_tx_prod);
		return -1;
	}

	*len = skb->len;

	first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);

	bds_consumed++;

	nbds = first_bd->data.nbds;

	if (data_split) {
		struct eth_tx_bd *split = (struct eth_tx_bd *)
			qed_chain_consume(&txq->tx_pbl);
		split_bd_len = BD_UNMAP_LEN(split);
		bds_consumed++;
	}
	dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
		       BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);

	/* Unmap the data of the skb frags */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) {
		tx_data_bd = (struct eth_tx_bd *)
			qed_chain_consume(&txq->tx_pbl);
		dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
	}

	while (bds_consumed++ < nbds)
		qed_chain_consume(&txq->tx_pbl);

	/* Free skb */
	dev_kfree_skb_any(skb);
	txq->sw_tx_ring[idx].skb = NULL;
	txq->sw_tx_ring[idx].flags = 0;

	return 0;
}
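/* BD accounting sketch for the function above: a completed packet consumes
 * the first BD, an optional TSO split-header BD, one BD per skb frag, and
 * any remaining BDs up to first_bd->data.nbds (e.g. the 2nd/3rd parsing BDs
 * produced for LSO/IPv6-ext packets), all taken from the tx_pbl chain.
 */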
/* Unmap the data and free skb when mapping failed during start_xmit */
static void qede_free_failed_tx_pkt(struct qede_dev *edev,
				    struct qede_tx_queue *txq,
				    struct eth_tx_1st_bd *first_bd,
				    int nbd,
				    bool data_split)
{
	u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
	struct sk_buff *skb = txq->sw_tx_ring[idx].skb;
	struct eth_tx_bd *tx_data_bd;
	int i, split_bd_len = 0;

	/* Return prod to its position before this skb was handled */
	qed_chain_set_prod(&txq->tx_pbl,
			   le16_to_cpu(txq->tx_db.data.bd_prod),
			   first_bd);

	first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl);

	if (data_split) {
		struct eth_tx_bd *split = (struct eth_tx_bd *)
			qed_chain_produce(&txq->tx_pbl);
		split_bd_len = BD_UNMAP_LEN(split);
		nbd--;
	}

	dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
		       BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);

	/* Unmap the data of the skb frags */
	for (i = 0; i < nbd; i++) {
		tx_data_bd = (struct eth_tx_bd *)
			qed_chain_produce(&txq->tx_pbl);
		if (tx_data_bd->nbytes)
			dma_unmap_page(&edev->pdev->dev,
				       BD_UNMAP_ADDR(tx_data_bd),
				       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
	}

	/* Return again prod to its position before this skb was handled */
	qed_chain_set_prod(&txq->tx_pbl,
			   le16_to_cpu(txq->tx_db.data.bd_prod),
			   first_bd);

	/* Free skb */
	dev_kfree_skb_any(skb);
	txq->sw_tx_ring[idx].skb = NULL;
	txq->sw_tx_ring[idx].flags = 0;
}
static u32 qede_xmit_type(struct qede_dev *edev,
			  struct sk_buff *skb,
			  int *ipv6_ext)
{
	u32 rc = XMIT_L4_CSUM;
	__be16 l3_proto;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return XMIT_PLAIN;

	l3_proto = vlan_get_protocol(skb);
	if (l3_proto == htons(ETH_P_IPV6) &&
	    (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
		*ipv6_ext = 1;

	if (skb->encapsulation)
		rc |= XMIT_ENC;

	if (skb_is_gso(skb))
		rc |= XMIT_LSO;

	return rc;
}
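/* qede_xmit_type() returns a bitmask of XMIT_* flags (XMIT_L4_CSUM, XMIT_LSO,
 * XMIT_ENC, ...) describing the offloads requested for the skb; *ipv6_ext is
 * set when the IPv6 header carries extension headers, which the second/third
 * TX BDs must describe explicitly (see qede_set_params_for_ipv6_ext()).
 */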
static void qede_set_params_for_ipv6_ext(struct sk_buff *skb,
					 struct eth_tx_2nd_bd *second_bd,
					 struct eth_tx_3rd_bd *third_bd)
{
	u8 l4_proto;
	u16 bd2_bits1 = 0, bd2_bits2 = 0;

	bd2_bits1 |= (1 << ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT);

	bd2_bits2 |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) &
		     ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK)
		    << ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT;

	bd2_bits1 |= (ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH <<
		      ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT);

	if (vlan_get_protocol(skb) == htons(ETH_P_IPV6))
		l4_proto = ipv6_hdr(skb)->nexthdr;
	else
		l4_proto = ip_hdr(skb)->protocol;

	if (l4_proto == IPPROTO_UDP)
		bd2_bits1 |= 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT;

	if (third_bd)
		third_bd->data.bitfields |=
			cpu_to_le16(((tcp_hdrlen(skb) / 4) &
				ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK) <<
				ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT);

	second_bd->data.bitfields1 = cpu_to_le16(bd2_bits1);
	second_bd->data.bitfields2 = cpu_to_le16(bd2_bits2);
}
static int map_frag_to_bd(struct qede_dev *edev,
			  skb_frag_t *frag,
			  struct eth_tx_bd *bd)
{
	dma_addr_t mapping;

	/* Map skb non-linear frag data for DMA */
	mapping = skb_frag_dma_map(&edev->pdev->dev, frag, 0,
				   skb_frag_size(frag),
				   DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
		DP_NOTICE(edev, "Unable to map frag - dropping packet\n");
		return -ENOMEM;
	}

	/* Setup the data pointer of the frag data */
	BD_SET_UNMAP_ADDR_LEN(bd, mapping, skb_frag_size(frag));

	return 0;
}
static u16 qede_get_skb_hlen(struct sk_buff *skb, bool is_encap_pkt)
{
	if (is_encap_pkt)
		return (skb_inner_transport_header(skb) +
			inner_tcp_hdrlen(skb) - skb->data);

	return (skb_transport_header(skb) +
		tcp_hdrlen(skb) - skb->data);
}
/* +2 for 1st BD for headers and 2nd BD for headlen (if required) */
#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
static bool qede_pkt_req_lin(struct qede_dev *edev, struct sk_buff *skb,
			     u8 xmit_type)
{
	int allowed_frags = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET - 1;

	if (xmit_type & XMIT_LSO) {
		int hlen;

		hlen = qede_get_skb_hlen(skb, xmit_type & XMIT_ENC);

		/* linear payload would require its own BD */
		if (skb_headlen(skb) > hlen)
			allowed_frags--;
	}

	return (skb_shinfo(skb)->nr_frags > allowed_frags);
}
#endif
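/* Worked example for the check above (values are illustrative, not taken from
 * this file): with MAX_SKB_FRAGS == 17 a worst-case non-LSO skb needs
 * 17 + 2 BDs (headers + headlen), so if the firmware limit
 * ETH_TX_MAX_BDS_PER_NON_LSO_PACKET is smaller than that, the skb must be
 * linearized before transmission.
 */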
/* Main transmit function */
static netdev_tx_t qede_start_xmit(struct sk_buff *skb,
				   struct net_device *ndev)
{
	struct qede_dev *edev = netdev_priv(ndev);
	struct netdev_queue *netdev_txq;
	struct qede_tx_queue *txq;
	struct eth_tx_1st_bd *first_bd;
	struct eth_tx_2nd_bd *second_bd = NULL;
	struct eth_tx_3rd_bd *third_bd = NULL;
	struct eth_tx_bd *tx_data_bd = NULL;
	u16 txq_index;
	u8 nbd = 0;
	dma_addr_t mapping;
	int rc, frag_idx = 0, ipv6_ext = 0;
	u8 xmit_type;
	u16 idx;
	u16 hlen;
	bool data_split = false;

	/* Get tx-queue context and netdev index */
	txq_index = skb_get_queue_mapping(skb);
	WARN_ON(txq_index >= QEDE_TSS_CNT(edev));
	txq = QEDE_TX_QUEUE(edev, txq_index);
	netdev_txq = netdev_get_tx_queue(ndev, txq_index);

	WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) <
		       (MAX_SKB_FRAGS + 1));

	xmit_type = qede_xmit_type(edev, skb, &ipv6_ext);

#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
	if (qede_pkt_req_lin(edev, skb, xmit_type)) {
		if (skb_linearize(skb)) {
			DP_NOTICE(edev,
				  "SKB linearization failed - silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
#endif

	/* Fill the entry in the SW ring and the BDs in the FW ring */
	idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
	txq->sw_tx_ring[idx].skb = skb;
	first_bd = (struct eth_tx_1st_bd *)
		   qed_chain_produce(&txq->tx_pbl);
	memset(first_bd, 0, sizeof(*first_bd));
	first_bd->data.bd_flags.bitfields =
		1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;

	/* Map skb linear data for DMA and set in the first BD */
	mapping = dma_map_single(&edev->pdev->dev, skb->data,
				 skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
		DP_NOTICE(edev, "SKB mapping failed\n");
		qede_free_failed_tx_pkt(edev, txq, first_bd, 0, false);
		return NETDEV_TX_OK;
	}
	nbd++;
	BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb));

	/* In case there is IPv6 with extension headers or LSO we need 2nd and
	 * 3rd BDs.
	 */
	if (unlikely((xmit_type & XMIT_LSO) | ipv6_ext)) {
		second_bd = (struct eth_tx_2nd_bd *)
			qed_chain_produce(&txq->tx_pbl);
		memset(second_bd, 0, sizeof(*second_bd));

		nbd++;
		third_bd = (struct eth_tx_3rd_bd *)
			qed_chain_produce(&txq->tx_pbl);
		memset(third_bd, 0, sizeof(*third_bd));

		nbd++;
		/* We need to fill in additional data in second_bd... */
		tx_data_bd = (struct eth_tx_bd *)second_bd;
	}

	if (skb_vlan_tag_present(skb)) {
		first_bd->data.vlan = cpu_to_le16(skb_vlan_tag_get(skb));
		first_bd->data.bd_flags.bitfields |=
			1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
	}

	/* Fill the parsing flags & params according to the requested offload */
	if (xmit_type & XMIT_L4_CSUM) {
		u16 temp = 1 << ETH_TX_DATA_1ST_BD_TUNN_CFG_OVERRIDE_SHIFT;

		/* We don't re-calculate IP checksum as it is already done by
		 * the upper stack
		 */
		first_bd->data.bd_flags.bitfields |=
			1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;

		if (xmit_type & XMIT_ENC) {
			first_bd->data.bd_flags.bitfields |=
				1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
		} else {
			/* In cases when OS doesn't indicate for inner offloads
			 * when packet is tunnelled, we need to override the HW
			 * tunnel configuration so that packets are treated as
			 * regular non tunnelled packets and no inner offloads
			 * are done by the hardware.
			 */
			first_bd->data.bitfields |= cpu_to_le16(temp);
		}

		/* If the packet is IPv6 with extension header, indicate that
		 * to FW and pass few params, since the device cracker doesn't
		 * support parsing IPv6 with extension header/s.
		 */
		if (unlikely(ipv6_ext))
			qede_set_params_for_ipv6_ext(skb, second_bd, third_bd);
	}

	if (xmit_type & XMIT_LSO) {
		first_bd->data.bd_flags.bitfields |=
			(1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT);
		third_bd->data.lso_mss =
			cpu_to_le16(skb_shinfo(skb)->gso_size);

		if (unlikely(xmit_type & XMIT_ENC)) {
			first_bd->data.bd_flags.bitfields |=
				1 << ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;
			hlen = qede_get_skb_hlen(skb, true);
		} else {
			first_bd->data.bd_flags.bitfields |=
				1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
			hlen = qede_get_skb_hlen(skb, false);
		}

		/* @@@TBD - if will not be removed need to check */
		third_bd->data.bitfields |=
			cpu_to_le16((1 << ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT));

		/* Make life easier for FW guys who can't deal with header and
		 * data on same BD. If we need to split, use the second bd...
		 */
		if (unlikely(skb_headlen(skb) > hlen)) {
			DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
				   "TSO split header size is %d (%x:%x)\n",
				   first_bd->nbytes, first_bd->addr.hi,
				   first_bd->addr.lo);

			mapping = HILO_U64(le32_to_cpu(first_bd->addr.hi),
					   le32_to_cpu(first_bd->addr.lo)) +
					   hlen;

			BD_SET_UNMAP_ADDR_LEN(tx_data_bd, mapping,
					      le16_to_cpu(first_bd->nbytes) -
					      hlen);

			/* this marks the BD as one that has no
			 * individual mapping
			 */
			txq->sw_tx_ring[idx].flags |= QEDE_TSO_SPLIT_BD;

			first_bd->nbytes = cpu_to_le16(hlen);

			tx_data_bd = (struct eth_tx_bd *)third_bd;
			data_split = true;
		}
	}

	/* Handle fragmented skb */
	/* special handle for frags inside 2nd and 3rd bds.. */
	while (tx_data_bd && frag_idx < skb_shinfo(skb)->nr_frags) {
		rc = map_frag_to_bd(edev,
				    &skb_shinfo(skb)->frags[frag_idx],
				    tx_data_bd);
		if (rc) {
			qede_free_failed_tx_pkt(edev, txq, first_bd, nbd,
						data_split);
			return NETDEV_TX_OK;
		}

		if (tx_data_bd == (struct eth_tx_bd *)second_bd)
			tx_data_bd = (struct eth_tx_bd *)third_bd;
		else
			tx_data_bd = NULL;

		frag_idx++;
	}

	/* map last frags into 4th, 5th .... */
	for (; frag_idx < skb_shinfo(skb)->nr_frags; frag_idx++, nbd++) {
		tx_data_bd = (struct eth_tx_bd *)
			qed_chain_produce(&txq->tx_pbl);

		memset(tx_data_bd, 0, sizeof(*tx_data_bd));

		rc = map_frag_to_bd(edev,
				    &skb_shinfo(skb)->frags[frag_idx],
				    tx_data_bd);
		if (rc) {
			qede_free_failed_tx_pkt(edev, txq, first_bd, nbd,
						data_split);
			return NETDEV_TX_OK;
		}
	}

	/* update the first BD with the actual num BDs */
	first_bd->data.nbds = nbd;

	netdev_tx_sent_queue(netdev_txq, skb->len);

	skb_tx_timestamp(skb);

	/* Advance packet producer only before sending the packet since mapping
	 * of pages may fail.
	 */
	txq->sw_tx_prod++;

	/* 'next page' entries are counted in the producer value */
	txq->tx_db.data.bd_prod =
		cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));

	/* wmb makes sure that the BDs data is updated before updating the
	 * producer, otherwise FW may read old data from the BDs.
	 */
	wmb();
	barrier();
	writel(txq->tx_db.raw, txq->doorbell_addr);

	/* mmiowb is needed to synchronize doorbell writes from more than one
	 * processor. It guarantees that the write arrives to the device before
	 * the queue lock is released and another start_xmit is called (possibly
	 * on another CPU). Without this barrier, the next doorbell can bypass
	 * this doorbell. This is applicable to IA64/Altix systems.
	 */
	mmiowb();

	if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl)
		      < (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(netdev_txq);
		DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
			   "Stop queue was called\n");
		/* paired memory barrier is in qede_tx_int(), we have to keep
		 * ordering of set_bit() in netif_tx_stop_queue() and read of
		 * fp->bd_tx_cons
		 */
		smp_mb();

		if (qed_chain_get_elem_left(&txq->tx_pbl)
		     >= (MAX_SKB_FRAGS + 1) &&
		    (edev->state == QEDE_STATE_OPEN)) {
			netif_tx_wake_queue(netdev_txq);
			DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
				   "Wake queue was called\n");
		}
	}

	return NETDEV_TX_OK;
}
int qede_txq_has_work(struct qede_tx_queue *txq)
{
	u16 hw_bd_cons;

	/* Tell compiler that consumer and producer can change */
	barrier();
	hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
	if (qed_chain_get_cons_idx(&txq->tx_pbl) == hw_bd_cons + 1)
		return 0;

	return hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl);
}
static int qede_tx_int(struct qede_dev *edev,
		       struct qede_tx_queue *txq)
{
	struct netdev_queue *netdev_txq;
	u16 hw_bd_cons;
	unsigned int pkts_compl = 0, bytes_compl = 0;
	int rc;

	netdev_txq = netdev_get_tx_queue(edev->ndev, txq->index);

	hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
	barrier();

	while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
		int len = 0;

		rc = qede_free_tx_pkt(edev, txq, &len);
		if (rc) {
			DP_NOTICE(edev, "hw_bd_cons = %d, chain_cons=%d\n",
				  hw_bd_cons,
				  qed_chain_get_cons_idx(&txq->tx_pbl));
			break;
		}

		bytes_compl += len;
		pkts_compl++;
		txq->sw_tx_cons++;
	}

	netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl);

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 * On the other hand we need an rmb() here to ensure the proper
	 * ordering of bit testing in the following
	 * netif_tx_queue_stopped(txq) call.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(netdev_txq))) {
		/* Taking tx_lock is needed to prevent reenabling the queue
		 * while it's empty. This could have happened if rx_action() gets
		 * suspended in qede_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (qede_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */
		__netif_tx_lock(netdev_txq, smp_processor_id());

		if ((netif_tx_queue_stopped(netdev_txq)) &&
		    (edev->state == QEDE_STATE_OPEN) &&
		    (qed_chain_get_elem_left(&txq->tx_pbl)
		      >= (MAX_SKB_FRAGS + 1))) {
			netif_tx_wake_queue(netdev_txq);
			DP_VERBOSE(edev, NETIF_MSG_TX_DONE,
				   "Wake queue was called\n");
		}

		__netif_tx_unlock(netdev_txq);
	}

	return 0;
}
bool qede_has_rx_work(struct qede_rx_queue *rxq)
{
	u16 hw_comp_cons, sw_comp_cons;

	/* Tell compiler that status block fields can change */
	barrier();

	hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
	sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);

	return hw_comp_cons != sw_comp_cons;
}
static bool qede_has_tx_work(struct qede_fastpath *fp)
{
	u8 tc;

	for (tc = 0; tc < fp->edev->num_tc; tc++)
		if (qede_txq_has_work(&fp->txqs[tc]))
			return true;
	return false;
}

static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
{
	qed_chain_consume(&rxq->rx_bd_ring);
	rxq->sw_rx_cons++;
}
/* This function reuses the buffer(from an offset) from
 * consumer index to producer index in the bd ring
 */
static inline void qede_reuse_page(struct qede_dev *edev,
				   struct qede_rx_queue *rxq,
				   struct sw_rx_data *curr_cons)
{
	struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
	struct sw_rx_data *curr_prod;
	dma_addr_t new_mapping;

	curr_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
	*curr_prod = *curr_cons;

	new_mapping = curr_prod->mapping + curr_prod->page_offset;

	rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(new_mapping));
	rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(new_mapping));

	rxq->sw_rx_prod++;
	curr_cons->data = NULL;
}
/* In case of allocation failures reuse buffers
 * from consumer index to produce buffers for firmware
 */
void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
			     struct qede_dev *edev, u8 count)
{
	struct sw_rx_data *curr_cons;

	for (; count > 0; count--) {
		curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
		qede_reuse_page(edev, rxq, curr_cons);
		qede_rx_bd_ring_consume(rxq);
	}
}
static inline int qede_realloc_rx_buffer(struct qede_dev *edev,
					 struct qede_rx_queue *rxq,
					 struct sw_rx_data *curr_cons)
{
	/* Move to the next segment in the page */
	curr_cons->page_offset += rxq->rx_buf_seg_size;

	if (curr_cons->page_offset == PAGE_SIZE) {
		if (unlikely(qede_alloc_rx_buffer(edev, rxq))) {
			/* Since we failed to allocate new buffer
			 * current buffer can be used again.
			 */
			curr_cons->page_offset -= rxq->rx_buf_seg_size;

			return -ENOMEM;
		}

		dma_unmap_page(&edev->pdev->dev, curr_cons->mapping,
			       PAGE_SIZE, DMA_FROM_DEVICE);
	} else {
		/* Increment refcount of the page as we don't want
		 * network stack to take the ownership of the page
		 * which can be recycled multiple times by the driver.
		 */
		page_ref_inc(curr_cons->data);
		qede_reuse_page(edev, rxq, curr_cons);
	}

	return 0;
}
static inline void qede_update_rx_prod(struct qede_dev *edev,
				       struct qede_rx_queue *rxq)
{
	u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring);
	u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring);
	struct eth_rx_prod_data rx_prods = {0};

	/* Update producers */
	rx_prods.bd_prod = cpu_to_le16(bd_prod);
	rx_prods.cqe_prod = cpu_to_le16(cqe_prod);

	/* Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 */
	wmb();

	internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
			(u32 *)&rx_prods);

	/* mmiowb is needed to synchronize doorbell writes from more than one
	 * processor. It guarantees that the write arrives to the device before
	 * the napi lock is released and another qede_poll is called (possibly
	 * on another CPU). Without this barrier, the next doorbell can bypass
	 * this doorbell. This is applicable to IA64/Altix systems.
	 */
	mmiowb();
}
static u32 qede_get_rxhash(struct qede_dev *edev,
			   u8 bitfields,
			   __le32 rss_hash,
			   enum pkt_hash_types *rxhash_type)
{
	enum rss_hash_type htype;

	htype = GET_FIELD(bitfields, ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);

	if ((edev->ndev->features & NETIF_F_RXHASH) && htype) {
		*rxhash_type = ((htype == RSS_HASH_TYPE_IPV4) ||
				(htype == RSS_HASH_TYPE_IPV6)) ?
				PKT_HASH_TYPE_L3 : PKT_HASH_TYPE_L4;
		return le32_to_cpu(rss_hash);
	}
	*rxhash_type = PKT_HASH_TYPE_NONE;
	return 0;
}
static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag)
{
	skb_checksum_none_assert(skb);

	if (csum_flag & QEDE_CSUM_UNNECESSARY)
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (csum_flag & QEDE_TUNN_CSUM_UNNECESSARY)
		skb->csum_level = 1;
}
static inline void qede_skb_receive(struct qede_dev *edev,
				    struct qede_fastpath *fp,
				    struct sk_buff *skb,
				    u16 vlan_tag)
{
	if (vlan_tag)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       vlan_tag);

	napi_gro_receive(&fp->napi, skb);
}
static void qede_set_gro_params(struct qede_dev *edev,
				struct sk_buff *skb,
				struct eth_fast_path_rx_tpa_start_cqe *cqe)
{
	u16 parsing_flags = le16_to_cpu(cqe->pars_flags.flags);

	if (((parsing_flags >> PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) &
	    PARSING_AND_ERR_FLAGS_L3TYPE_MASK) == 2)
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
	else
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

	skb_shinfo(skb)->gso_size = __le16_to_cpu(cqe->len_on_first_bd) -
				    cqe->header_len;
}
static int qede_fill_frag_skb(struct qede_dev *edev,
			      struct qede_rx_queue *rxq,
			      u8 tpa_agg_index,
			      u16 len_on_bd)
{
	struct sw_rx_data *current_bd = &rxq->sw_rx_ring[rxq->sw_rx_cons &
							 NUM_RX_BDS_MAX];
	struct qede_agg_info *tpa_info = &rxq->tpa_info[tpa_agg_index];
	struct sk_buff *skb = tpa_info->skb;

	if (unlikely(tpa_info->agg_state != QEDE_AGG_STATE_START))
		goto out;

	/* Add one frag and update the appropriate fields in the skb */
	skb_fill_page_desc(skb, tpa_info->frag_id++,
			   current_bd->data, current_bd->page_offset,
			   len_on_bd);

	if (unlikely(qede_realloc_rx_buffer(edev, rxq, current_bd))) {
		/* Incr page ref count to reuse on allocation failure
		 * so that it doesn't get freed while freeing SKB.
		 */
		page_ref_inc(current_bd->data);
		goto out;
	}

	qed_chain_consume(&rxq->rx_bd_ring);
	rxq->sw_rx_cons++;

	skb->data_len += len_on_bd;
	skb->truesize += rxq->rx_buf_seg_size;
	skb->len += len_on_bd;

	return 0;

out:
	tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
	qede_recycle_rx_bd_ring(rxq, edev, 1);
	return -ENOMEM;
}
static void qede_tpa_start(struct qede_dev *edev,
			   struct qede_rx_queue *rxq,
			   struct eth_fast_path_rx_tpa_start_cqe *cqe)
{
	struct qede_agg_info *tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
	struct eth_rx_bd *rx_bd_cons = qed_chain_consume(&rxq->rx_bd_ring);
	struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
	struct sw_rx_data *replace_buf = &tpa_info->replace_buf;
	dma_addr_t mapping = tpa_info->replace_buf_mapping;
	struct sw_rx_data *sw_rx_data_cons;
	struct sw_rx_data *sw_rx_data_prod;
	enum pkt_hash_types rxhash_type;
	u32 rxhash;

	sw_rx_data_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
	sw_rx_data_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];

	/* Use pre-allocated replacement buffer - we can't release the agg.
	 * start until its over and we don't want to risk allocation failing
	 * here, so re-allocate when aggregation will be over.
	 */
	sw_rx_data_prod->mapping = replace_buf->mapping;

	sw_rx_data_prod->data = replace_buf->data;
	rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(mapping));
	rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(mapping));
	sw_rx_data_prod->page_offset = replace_buf->page_offset;

	rxq->sw_rx_prod++;

	/* move partial skb from cons to pool (don't unmap yet)
	 * save mapping, in case we drop the packet later on.
	 */
	tpa_info->start_buf = *sw_rx_data_cons;
	mapping = HILO_U64(le32_to_cpu(rx_bd_cons->addr.hi),
			   le32_to_cpu(rx_bd_cons->addr.lo));

	tpa_info->start_buf_mapping = mapping;
	rxq->sw_rx_cons++;

	/* set tpa state to start only if we are able to allocate skb
	 * for this aggregation, otherwise mark as error and aggregation will
	 * be dropped
	 */
	tpa_info->skb = netdev_alloc_skb(edev->ndev,
					 le16_to_cpu(cqe->len_on_first_bd));
	if (unlikely(!tpa_info->skb)) {
		DP_NOTICE(edev, "Failed to allocate SKB for gro\n");
		tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
		goto cons_buf;
	}

	skb_put(tpa_info->skb, le16_to_cpu(cqe->len_on_first_bd));
	memcpy(&tpa_info->start_cqe, cqe, sizeof(tpa_info->start_cqe));

	/* Start filling in the aggregation info */
	tpa_info->frag_id = 0;
	tpa_info->agg_state = QEDE_AGG_STATE_START;

	rxhash = qede_get_rxhash(edev, cqe->bitfields,
				 cqe->rss_hash, &rxhash_type);
	skb_set_hash(tpa_info->skb, rxhash, rxhash_type);
	if ((le16_to_cpu(cqe->pars_flags.flags) >>
	     PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT) &
	    PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK)
		tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
	else
		tpa_info->vlan_tag = 0;

	/* This is needed in order to enable forwarding support */
	qede_set_gro_params(edev, tpa_info->skb, cqe);

cons_buf: /* We still need to handle bd_len_list to consume buffers */
	if (likely(cqe->ext_bd_len_list[0]))
		qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
				   le16_to_cpu(cqe->ext_bd_len_list[0]));

	if (unlikely(cqe->ext_bd_len_list[1])) {
		DP_ERR(edev,
		       "Unlikely - got a TPA aggregation with more than one ext_bd_len_list entry in the TPA start\n");
		tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
	}
}
static void qede_gro_ip_csum(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct iphdr));
	th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);

	tcp_gro_complete(skb);
}
static void qede_gro_ipv6_csum(struct sk_buff *skb)
{
	struct ipv6hdr *iph = ipv6_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct ipv6hdr));
	th = tcp_hdr(skb);

	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
				  &iph->saddr, &iph->daddr, 0);
	tcp_gro_complete(skb);
}
static void qede_gro_receive(struct qede_dev *edev,
			     struct qede_fastpath *fp,
			     struct sk_buff *skb,
			     u16 vlan_tag)
{
	/* FW can send a single MTU sized packet from gro flow
	 * due to aggregation timeout/last segment etc. which
	 * is not expected to be a gro packet. If a skb has zero
	 * frags then simply push it in the stack as non gso skb.
	 */
	if (unlikely(!skb->data_len)) {
		skb_shinfo(skb)->gso_type = 0;
		skb_shinfo(skb)->gso_size = 0;
		goto send_skb;
	}

#ifdef CONFIG_INET
	if (skb_shinfo(skb)->gso_size) {
		skb_set_network_header(skb, 0);

		switch (skb->protocol) {
		case htons(ETH_P_IP):
			qede_gro_ip_csum(skb);
			break;
		case htons(ETH_P_IPV6):
			qede_gro_ipv6_csum(skb);
			break;
		default:
			DP_ERR(edev,
			       "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
			       ntohs(skb->protocol));
		}
	}
#endif

send_skb:
	skb_record_rx_queue(skb, fp->rss_id);
	qede_skb_receive(edev, fp, skb, vlan_tag);
}
static inline void qede_tpa_cont(struct qede_dev *edev,
				 struct qede_rx_queue *rxq,
				 struct eth_fast_path_rx_tpa_cont_cqe *cqe)
{
	int i;

	for (i = 0; cqe->len_list[i]; i++)
		qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
				   le16_to_cpu(cqe->len_list[i]));

	if (unlikely(i > 1))
		DP_ERR(edev,
		       "Strange - TPA cont with more than a single len_list entry\n");
}
static void qede_tpa_end(struct qede_dev *edev,
			 struct qede_fastpath *fp,
			 struct eth_fast_path_rx_tpa_end_cqe *cqe)
{
	struct qede_rx_queue *rxq = fp->rxq;
	struct qede_agg_info *tpa_info;
	struct sk_buff *skb;
	int i;

	tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
	skb = tpa_info->skb;

	for (i = 0; cqe->len_list[i]; i++)
		qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
				   le16_to_cpu(cqe->len_list[i]));
	if (unlikely(i > 1))
		DP_ERR(edev,
		       "Strange - TPA end with more than a single len_list entry\n");

	if (unlikely(tpa_info->agg_state != QEDE_AGG_STATE_START))
		goto err;

	if (unlikely(cqe->num_of_bds != tpa_info->frag_id + 1))
		DP_ERR(edev,
		       "Strange - TPA had %02x BDs, but SKB has only %d frags\n",
		       cqe->num_of_bds, tpa_info->frag_id);
	if (unlikely(skb->len != le16_to_cpu(cqe->total_packet_len)))
		DP_ERR(edev,
		       "Strange - total packet len [cqe] is %4x but SKB has len %04x\n",
		       le16_to_cpu(cqe->total_packet_len), skb->len);

	memcpy(skb->data,
	       page_address(tpa_info->start_buf.data) +
		tpa_info->start_cqe.placement_offset +
		tpa_info->start_buf.page_offset,
	       le16_to_cpu(tpa_info->start_cqe.len_on_first_bd));

	/* Recycle [mapped] start buffer for the next replacement */
	tpa_info->replace_buf = tpa_info->start_buf;
	tpa_info->replace_buf_mapping = tpa_info->start_buf_mapping;

	/* Finalize the SKB */
	skb->protocol = eth_type_trans(skb, edev->ndev);
	skb->ip_summed = CHECKSUM_UNNECESSARY;

	/* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
	 * to skb_shinfo(skb)->gso_segs
	 */
	NAPI_GRO_CB(skb)->count = le16_to_cpu(cqe->num_of_coalesced_segs);

	qede_gro_receive(edev, fp, skb, tpa_info->vlan_tag);

	tpa_info->agg_state = QEDE_AGG_STATE_NONE;

	return;
err:
	/* The BD starting the aggregation is still mapped; Re-use it for
	 * future aggregations [as replacement buffer]
	 */
	memcpy(&tpa_info->replace_buf, &tpa_info->start_buf,
	       sizeof(struct sw_rx_data));
	tpa_info->replace_buf_mapping = tpa_info->start_buf_mapping;
	tpa_info->start_buf.data = NULL;
	tpa_info->agg_state = QEDE_AGG_STATE_NONE;
	dev_kfree_skb_any(tpa_info->skb);
	tpa_info->skb = NULL;
}
static bool qede_tunn_exist(u16 flag)
{
	return !!(flag & (PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
			  PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT));
}
static u8 qede_check_tunn_csum(u16 flag)
{
	u16 csum_flag = 0;
	u8 tcsum = 0;

	if (flag & (PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK <<
		    PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT))
		csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<
			     PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT;

	if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
		    PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
		csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
			     PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
		tcsum = QEDE_TUNN_CSUM_UNNECESSARY;
	}

	csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK <<
		     PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT |
		     PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
		     PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;

	if (csum_flag & flag)
		return QEDE_CSUM_ERROR;

	return QEDE_CSUM_UNNECESSARY | tcsum;
}
static u8 qede_check_notunn_csum(u16 flag)
{
	u16 csum_flag = 0;
	u8 csum = 0;

	if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
		    PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
		csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
			     PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
		csum = QEDE_CSUM_UNNECESSARY;
	}

	csum_flag |= PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
		     PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;

	if (csum_flag & flag)
		return QEDE_CSUM_ERROR;

	return csum;
}
static u8 qede_check_csum(u16 flag)
{
	if (!qede_tunn_exist(flag))
		return qede_check_notunn_csum(flag);
	else
		return qede_check_tunn_csum(flag);
}
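/* Checksum-flag decision sketch: tunnelled packets are validated by
 * qede_check_tunn_csum() (outer + inner L4), everything else by
 * qede_check_notunn_csum(); both map parsing-error bits from the CQE into
 * the QEDE_CSUM_ERROR / QEDE_CSUM_UNNECESSARY values consumed by
 * qede_set_skb_csum().
 */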
static int qede_rx_int(struct qede_fastpath *fp, int budget)
{
	struct qede_dev *edev = fp->edev;
	struct qede_rx_queue *rxq = fp->rxq;

	u16 hw_comp_cons, sw_comp_cons, sw_rx_index, parse_flag;
	int rx_pkt = 0;
	u8 csum_flag;

	hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
	sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);

	/* Memory barrier to prevent the CPU from doing speculative reads of CQE
	 * / BD in the while-loop before reading hw_comp_cons. If the CQE is
	 * read before it is written by FW, then FW writes CQE and SB, and then
	 * the CPU reads the hw_comp_cons, it will use an old CQE.
	 */
	rmb();

	/* Loop to complete all indicated BDs */
	while (sw_comp_cons != hw_comp_cons) {
		struct eth_fast_path_rx_reg_cqe *fp_cqe;
		enum pkt_hash_types rxhash_type;
		enum eth_rx_cqe_type cqe_type;
		struct sw_rx_data *sw_rx_data;
		union eth_rx_cqe *cqe;
		struct sk_buff *skb;
		struct page *data;
		__le16 flags;
		u16 len, pad;
		u32 rx_hash;

		/* Get the CQE from the completion ring */
		cqe = (union eth_rx_cqe *)
			qed_chain_consume(&rxq->rx_comp_ring);
		cqe_type = cqe->fast_path_regular.type;

		if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) {
			edev->ops->eth_cqe_completion(
					edev->cdev, fp->rss_id,
					(struct eth_slow_path_rx_cqe *)cqe);
			goto next_cqe;
		}

		if (cqe_type != ETH_RX_CQE_TYPE_REGULAR) {
			switch (cqe_type) {
			case ETH_RX_CQE_TYPE_TPA_START:
				qede_tpa_start(edev, rxq,
					       &cqe->fast_path_tpa_start);
				goto next_cqe;
			case ETH_RX_CQE_TYPE_TPA_CONT:
				qede_tpa_cont(edev, rxq,
					      &cqe->fast_path_tpa_cont);
				goto next_cqe;
			case ETH_RX_CQE_TYPE_TPA_END:
				qede_tpa_end(edev, fp,
					     &cqe->fast_path_tpa_end);
				goto next_rx_only;
			default:
				break;
			}
		}

		/* Get the data from the SW ring */
		sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
		sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
		data = sw_rx_data->data;

		fp_cqe = &cqe->fast_path_regular;
		len = le16_to_cpu(fp_cqe->len_on_first_bd);
		pad = fp_cqe->placement_offset;
		flags = cqe->fast_path_regular.pars_flags.flags;

		/* If this is an error packet then drop it */
		parse_flag = le16_to_cpu(flags);

		csum_flag = qede_check_csum(parse_flag);
		if (unlikely(csum_flag == QEDE_CSUM_ERROR)) {
			DP_NOTICE(edev,
				  "CQE in CONS = %u has error, flags = %x, dropping incoming packet\n",
				  sw_comp_cons, parse_flag);
			rxq->rx_hw_errors++;
			qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num);
			goto next_cqe;
		}

		skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
		if (unlikely(!skb)) {
			DP_NOTICE(edev,
				  "Build_skb failed, dropping incoming packet\n");
			qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num);
			rxq->rx_alloc_errors++;
			goto next_cqe;
		}

		/* Copy data into SKB */
		if (len + pad <= QEDE_RX_HDR_SIZE) {
			memcpy(skb_put(skb, len),
			       page_address(data) + pad +
				sw_rx_data->page_offset, len);
			qede_reuse_page(edev, rxq, sw_rx_data);
		} else {
			struct skb_frag_struct *frag;
			unsigned int pull_len;
			unsigned char *va;

			frag = &skb_shinfo(skb)->frags[0];

			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, data,
					pad + sw_rx_data->page_offset,
					len, rxq->rx_buf_seg_size);

			va = skb_frag_address(frag);
			pull_len = eth_get_headlen(va, QEDE_RX_HDR_SIZE);

			/* Align the pull_len to optimize memcpy */
			memcpy(skb->data, va, ALIGN(pull_len, sizeof(long)));

			skb_frag_size_sub(frag, pull_len);
			frag->page_offset += pull_len;
			skb->data_len -= pull_len;
			skb->tail += pull_len;

			if (unlikely(qede_realloc_rx_buffer(edev, rxq,
							    sw_rx_data))) {
				DP_ERR(edev, "Failed to allocate rx buffer\n");
				/* Incr page ref count to reuse on allocation
				 * failure so that it doesn't get freed while
				 * freeing SKB.
				 */
				page_ref_inc(sw_rx_data->data);
				rxq->rx_alloc_errors++;
				qede_recycle_rx_bd_ring(rxq, edev,
							fp_cqe->bd_num);
				dev_kfree_skb_any(skb);
				goto next_cqe;
			}
		}

		qede_rx_bd_ring_consume(rxq);

		if (fp_cqe->bd_num != 1) {
			u16 pkt_len = le16_to_cpu(fp_cqe->pkt_len);
			u8 num_frags;

			pkt_len -= len;

			for (num_frags = fp_cqe->bd_num - 1; num_frags > 0;
			     num_frags--) {
				u16 cur_size = pkt_len > rxq->rx_buf_size ?
						rxq->rx_buf_size : pkt_len;
				if (unlikely(!cur_size)) {
					DP_ERR(edev,
					       "Still got %d BDs for mapping jumbo, but length became 0\n",
					       num_frags);
					qede_recycle_rx_bd_ring(rxq, edev,
								num_frags);
					dev_kfree_skb_any(skb);
					goto next_cqe;
				}

				if (unlikely(qede_alloc_rx_buffer(edev, rxq))) {
					qede_recycle_rx_bd_ring(rxq, edev,
								num_frags);
					dev_kfree_skb_any(skb);
					goto next_cqe;
				}

				sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
				sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
				qede_rx_bd_ring_consume(rxq);

				dma_unmap_page(&edev->pdev->dev,
					       sw_rx_data->mapping,
					       PAGE_SIZE, DMA_FROM_DEVICE);

				skb_fill_page_desc(skb,
						   skb_shinfo(skb)->nr_frags++,
						   sw_rx_data->data, 0,
						   cur_size);

				skb->truesize += PAGE_SIZE;
				skb->data_len += cur_size;
				skb->len += cur_size;
				pkt_len -= cur_size;
			}

			if (unlikely(pkt_len))
				DP_ERR(edev,
				       "Mapped all BDs of jumbo, but still have %d bytes\n",
				       pkt_len);
		}

		skb->protocol = eth_type_trans(skb, edev->ndev);

		rx_hash = qede_get_rxhash(edev, fp_cqe->bitfields,
					  fp_cqe->rss_hash, &rxhash_type);

		skb_set_hash(skb, rx_hash, rxhash_type);

		qede_set_skb_csum(skb, csum_flag);

		skb_record_rx_queue(skb, fp->rss_id);

		qede_skb_receive(edev, fp, skb, le16_to_cpu(fp_cqe->vlan_tag));

next_rx_only:
		rx_pkt++;

next_cqe: /* don't consume bd rx buffer */
		qed_chain_recycle_consumed(&rxq->rx_comp_ring);
		sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
		/* CR TPA - revisit how to handle budget in TPA perhaps
		 * increase on "end"
		 */
		if (rx_pkt == budget)
			break;
	} /* repeat while sw_comp_cons != hw_comp_cons... */

	/* Update producers */
	qede_update_rx_prod(edev, rxq);

	return rx_pkt;
}
static int qede_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;
	struct qede_fastpath *fp = container_of(napi, struct qede_fastpath,
						 napi);
	struct qede_dev *edev = fp->edev;
	u8 tc;

	while (1) {
		for (tc = 0; tc < edev->num_tc; tc++)
			if (qede_txq_has_work(&fp->txqs[tc]))
				qede_tx_int(edev, &fp->txqs[tc]);

		if (qede_has_rx_work(fp->rxq)) {
			work_done += qede_rx_int(fp, budget - work_done);

			/* must not complete if we consumed full budget */
			if (work_done >= budget)
				break;
		}

		/* Fall out from the NAPI loop if needed */
		if (!(qede_has_rx_work(fp->rxq) || qede_has_tx_work(fp))) {
			qed_sb_update_sb_idx(fp->sb_info);
			/* *_has_*_work() reads the status block,
			 * thus we need to ensure that status block indices
			 * have been actually read (qed_sb_update_sb_idx)
			 * prior to this check (*_has_*_work) so that
			 * we won't write the "newer" value of the status block
			 * to HW (if there was a DMA right after
			 * qede_has_rx_work and if there is no rmb, the memory
			 * reading (qed_sb_update_sb_idx) may be postponed
			 * to right before *_ack_sb). In this case there
			 * will never be another interrupt until there is
			 * another update of the status block, while there
			 * is still unhandled work.
			 */
			rmb();

			if (!(qede_has_rx_work(fp->rxq) ||
			      qede_has_tx_work(fp))) {
				napi_complete(napi);
				/* Update and reenable interrupts */
				qed_sb_ack(fp->sb_info, IGU_INT_ENABLE,
					   1 /*update*/);
				break;
			}
		}
	}

	return work_done;
}
static irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie)
{
	struct qede_fastpath *fp = fp_cookie;

	qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0 /*do not update*/);

	napi_schedule_irqoff(&fp->napi);
	return IRQ_HANDLED;
}
/* -------------------------------------------------------------------------
 * END OF FAST-PATH
 * -------------------------------------------------------------------------
 */

static int qede_open(struct net_device *ndev);
static int qede_close(struct net_device *ndev);
static int qede_set_mac_addr(struct net_device *ndev, void *p);
static void qede_set_rx_mode(struct net_device *ndev);
static void qede_config_rx_mode(struct net_device *ndev);
static int qede_set_ucast_rx_mac(struct qede_dev *edev,
				 enum qed_filter_xcast_params_type opcode,
				 unsigned char mac[ETH_ALEN])
{
	struct qed_filter_params filter_cmd;

	memset(&filter_cmd, 0, sizeof(filter_cmd));
	filter_cmd.type = QED_FILTER_TYPE_UCAST;
	filter_cmd.filter.ucast.type = opcode;
	filter_cmd.filter.ucast.mac_valid = 1;
	ether_addr_copy(filter_cmd.filter.ucast.mac, mac);

	return edev->ops->filter_config(edev->cdev, &filter_cmd);
}
static int qede_set_ucast_rx_vlan(struct qede_dev *edev,
				  enum qed_filter_xcast_params_type opcode,
				  u16 vid)
{
	struct qed_filter_params filter_cmd;

	memset(&filter_cmd, 0, sizeof(filter_cmd));
	filter_cmd.type = QED_FILTER_TYPE_UCAST;
	filter_cmd.filter.ucast.type = opcode;
	filter_cmd.filter.ucast.vlan_valid = 1;
	filter_cmd.filter.ucast.vlan = vid;

	return edev->ops->filter_config(edev->cdev, &filter_cmd);
}
void qede_fill_by_demand_stats(struct qede_dev *edev)
{
	struct qed_eth_stats stats;

	edev->ops->get_vport_stats(edev->cdev, &stats);
	edev->stats.no_buff_discards = stats.no_buff_discards;
	edev->stats.rx_ucast_bytes = stats.rx_ucast_bytes;
	edev->stats.rx_mcast_bytes = stats.rx_mcast_bytes;
	edev->stats.rx_bcast_bytes = stats.rx_bcast_bytes;
	edev->stats.rx_ucast_pkts = stats.rx_ucast_pkts;
	edev->stats.rx_mcast_pkts = stats.rx_mcast_pkts;
	edev->stats.rx_bcast_pkts = stats.rx_bcast_pkts;
	edev->stats.mftag_filter_discards = stats.mftag_filter_discards;
	edev->stats.mac_filter_discards = stats.mac_filter_discards;

	edev->stats.tx_ucast_bytes = stats.tx_ucast_bytes;
	edev->stats.tx_mcast_bytes = stats.tx_mcast_bytes;
	edev->stats.tx_bcast_bytes = stats.tx_bcast_bytes;
	edev->stats.tx_ucast_pkts = stats.tx_ucast_pkts;
	edev->stats.tx_mcast_pkts = stats.tx_mcast_pkts;
	edev->stats.tx_bcast_pkts = stats.tx_bcast_pkts;
	edev->stats.tx_err_drop_pkts = stats.tx_err_drop_pkts;
	edev->stats.coalesced_pkts = stats.tpa_coalesced_pkts;
	edev->stats.coalesced_events = stats.tpa_coalesced_events;
	edev->stats.coalesced_aborts_num = stats.tpa_aborts_num;
	edev->stats.non_coalesced_pkts = stats.tpa_not_coalesced_pkts;
	edev->stats.coalesced_bytes = stats.tpa_coalesced_bytes;

	edev->stats.rx_64_byte_packets = stats.rx_64_byte_packets;
	edev->stats.rx_65_to_127_byte_packets = stats.rx_65_to_127_byte_packets;
	edev->stats.rx_128_to_255_byte_packets =
				stats.rx_128_to_255_byte_packets;
	edev->stats.rx_256_to_511_byte_packets =
				stats.rx_256_to_511_byte_packets;
	edev->stats.rx_512_to_1023_byte_packets =
				stats.rx_512_to_1023_byte_packets;
	edev->stats.rx_1024_to_1518_byte_packets =
				stats.rx_1024_to_1518_byte_packets;
	edev->stats.rx_1519_to_1522_byte_packets =
				stats.rx_1519_to_1522_byte_packets;
	edev->stats.rx_1519_to_2047_byte_packets =
				stats.rx_1519_to_2047_byte_packets;
	edev->stats.rx_2048_to_4095_byte_packets =
				stats.rx_2048_to_4095_byte_packets;
	edev->stats.rx_4096_to_9216_byte_packets =
				stats.rx_4096_to_9216_byte_packets;
	edev->stats.rx_9217_to_16383_byte_packets =
				stats.rx_9217_to_16383_byte_packets;
	edev->stats.rx_crc_errors = stats.rx_crc_errors;
	edev->stats.rx_mac_crtl_frames = stats.rx_mac_crtl_frames;
	edev->stats.rx_pause_frames = stats.rx_pause_frames;
	edev->stats.rx_pfc_frames = stats.rx_pfc_frames;
	edev->stats.rx_align_errors = stats.rx_align_errors;
	edev->stats.rx_carrier_errors = stats.rx_carrier_errors;
	edev->stats.rx_oversize_packets = stats.rx_oversize_packets;
	edev->stats.rx_jabbers = stats.rx_jabbers;
	edev->stats.rx_undersize_packets = stats.rx_undersize_packets;
	edev->stats.rx_fragments = stats.rx_fragments;
	edev->stats.tx_64_byte_packets = stats.tx_64_byte_packets;
	edev->stats.tx_65_to_127_byte_packets = stats.tx_65_to_127_byte_packets;
	edev->stats.tx_128_to_255_byte_packets =
				stats.tx_128_to_255_byte_packets;
	edev->stats.tx_256_to_511_byte_packets =
				stats.tx_256_to_511_byte_packets;
	edev->stats.tx_512_to_1023_byte_packets =
				stats.tx_512_to_1023_byte_packets;
	edev->stats.tx_1024_to_1518_byte_packets =
				stats.tx_1024_to_1518_byte_packets;
	edev->stats.tx_1519_to_2047_byte_packets =
				stats.tx_1519_to_2047_byte_packets;
	edev->stats.tx_2048_to_4095_byte_packets =
				stats.tx_2048_to_4095_byte_packets;
	edev->stats.tx_4096_to_9216_byte_packets =
				stats.tx_4096_to_9216_byte_packets;
	edev->stats.tx_9217_to_16383_byte_packets =
				stats.tx_9217_to_16383_byte_packets;
	edev->stats.tx_pause_frames = stats.tx_pause_frames;
	edev->stats.tx_pfc_frames = stats.tx_pfc_frames;
	edev->stats.tx_lpi_entry_count = stats.tx_lpi_entry_count;
	edev->stats.tx_total_collisions = stats.tx_total_collisions;
	edev->stats.brb_truncates = stats.brb_truncates;
	edev->stats.brb_discards = stats.brb_discards;
	edev->stats.tx_mac_ctrl_frames = stats.tx_mac_ctrl_frames;
}
static struct rtnl_link_stats64 *qede_get_stats64(
			    struct net_device *dev,
			    struct rtnl_link_stats64 *stats)
{
	struct qede_dev *edev = netdev_priv(dev);

	qede_fill_by_demand_stats(edev);

	stats->rx_packets = edev->stats.rx_ucast_pkts +
			    edev->stats.rx_mcast_pkts +
			    edev->stats.rx_bcast_pkts;
	stats->tx_packets = edev->stats.tx_ucast_pkts +
			    edev->stats.tx_mcast_pkts +
			    edev->stats.tx_bcast_pkts;

	stats->rx_bytes = edev->stats.rx_ucast_bytes +
			  edev->stats.rx_mcast_bytes +
			  edev->stats.rx_bcast_bytes;

	stats->tx_bytes = edev->stats.tx_ucast_bytes +
			  edev->stats.tx_mcast_bytes +
			  edev->stats.tx_bcast_bytes;

	stats->tx_errors = edev->stats.tx_err_drop_pkts;
	stats->multicast = edev->stats.rx_mcast_pkts +
			   edev->stats.rx_bcast_pkts;

	stats->rx_fifo_errors = edev->stats.no_buff_discards;

	stats->collisions = edev->stats.tx_total_collisions;
	stats->rx_crc_errors = edev->stats.rx_crc_errors;
	stats->rx_frame_errors = edev->stats.rx_align_errors;

	return stats;
}
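/* Note: statistics are gathered "by demand" - each ndo_get_stats64() call
 * triggers a fresh read of the vport statistics from the device via
 * qede_fill_by_demand_stats(), rather than returning software counters.
 */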
#ifdef CONFIG_QED_SRIOV
static int qede_get_vf_config(struct net_device *dev, int vfidx,
			      struct ifla_vf_info *ivi)
{
	struct qede_dev *edev = netdev_priv(dev);

	return edev->ops->iov->get_config(edev->cdev, vfidx, ivi);
}
static int qede_set_vf_rate(struct net_device *dev, int vfidx,
			    int min_tx_rate, int max_tx_rate)
{
	struct qede_dev *edev = netdev_priv(dev);

	return edev->ops->iov->set_rate(edev->cdev, vfidx, max_tx_rate,
					max_tx_rate);
}
static int qede_set_vf_spoofchk(struct net_device *dev, int vfidx, bool val)
{
	struct qede_dev *edev = netdev_priv(dev);

	return edev->ops->iov->set_spoof(edev->cdev, vfidx, val);
}
static int qede_set_vf_link_state(struct net_device *dev, int vfidx,
				  int link_state)
{
	struct qede_dev *edev = netdev_priv(dev);

	return edev->ops->iov->set_link_state(edev->cdev, vfidx, link_state);
}
#endif
static void qede_config_accept_any_vlan(struct qede_dev *edev, bool action)
{
	struct qed_update_vport_params params;
	int rc;

	/* Proceed only if action actually needs to be performed */
	if (edev->accept_any_vlan == action)
		return;

	memset(&params, 0, sizeof(params));

	params.vport_id = 0;
	params.accept_any_vlan = action;
	params.update_accept_any_vlan_flg = 1;

	rc = edev->ops->vport_update(edev->cdev, &params);
	if (rc) {
		DP_ERR(edev, "Failed to %s accept-any-vlan\n",
		       action ? "enable" : "disable");
	} else {
		DP_INFO(edev, "%s accept-any-vlan\n",
			action ? "enabled" : "disabled");
		edev->accept_any_vlan = action;
	}
}
static int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qede_vlan *vlan, *tmp;
	int rc;

	DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan 0x%04x\n", vid);

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan) {
		DP_INFO(edev, "Failed to allocate struct for vlan\n");
		return -ENOMEM;
	}
	INIT_LIST_HEAD(&vlan->list);
	vlan->vid = vid;
	vlan->configured = false;

	/* Verify vlan isn't already configured */
	list_for_each_entry(tmp, &edev->vlan_list, list) {
		if (tmp->vid == vlan->vid) {
			DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
				   "vlan already configured\n");
			kfree(vlan);
			return -EEXIST;
		}
	}

	/* If interface is down, cache this VLAN ID and return */
	if (edev->state != QEDE_STATE_OPEN) {
		DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
			   "Interface is down, VLAN %d will be configured when interface is up\n",
			   vid);
		edev->non_configured_vlans++;
		list_add(&vlan->list, &edev->vlan_list);

		return 0;
	}

	/* Check for the filter limit.
	 * Note - vlan0 has a reserved filter and can be added without
	 * worrying about quota
	 */
	if ((edev->configured_vlans < edev->dev_info.num_vlan_filters) ||
	    (vlan->vid == 0)) {
		rc = qede_set_ucast_rx_vlan(edev,
					    QED_FILTER_XCAST_TYPE_ADD,
					    vlan->vid);
		if (rc) {
			DP_ERR(edev, "Failed to configure VLAN %d\n",
			       vlan->vid);
			kfree(vlan);
			return -EINVAL;
		}
		vlan->configured = true;

		/* vlan0 filter isn't consuming out of our quota */
		if (vlan->vid != 0)
			edev->configured_vlans++;
	} else {
		/* Out of quota; Activate accept-any-VLAN mode */
		if (!edev->non_configured_vlans)
			qede_config_accept_any_vlan(edev, true);

		edev->non_configured_vlans++;
	}

	list_add(&vlan->list, &edev->vlan_list);

	return 0;
}
static void qede_del_vlan_from_list(struct qede_dev *edev,
				    struct qede_vlan *vlan)
{
	/* vlan0 filter isn't consuming out of our quota */
	if (vlan->vid != 0) {
		if (vlan->configured)
			edev->configured_vlans--;
		else
			edev->non_configured_vlans--;
	}

	list_del(&vlan->list);
	kfree(vlan);
}
static int qede_configure_vlan_filters(struct qede_dev *edev)
{
	int rc = 0, real_rc = 0, accept_any_vlan = 0;
	struct qed_dev_eth_info *dev_info;
	struct qede_vlan *vlan = NULL;

	if (list_empty(&edev->vlan_list))
		return 0;

	dev_info = &edev->dev_info;

	/* Configure non-configured vlans */
	list_for_each_entry(vlan, &edev->vlan_list, list) {
		if (vlan->configured)
			continue;

		/* We have used all our credits, now enable accept_any_vlan */
		if ((vlan->vid != 0) &&
		    (edev->configured_vlans == dev_info->num_vlan_filters)) {
			accept_any_vlan = 1;
			continue;
		}

		DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan %d\n", vlan->vid);

		rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_ADD,
					    vlan->vid);
		if (rc) {
			DP_ERR(edev, "Failed to configure VLAN %u\n",
			       vlan->vid);
			real_rc = rc;
			continue;
		}

		vlan->configured = true;
		/* vlan0 filter doesn't consume our VLAN filter's quota */
		if (vlan->vid != 0) {
			edev->non_configured_vlans--;
			edev->configured_vlans++;
		}
	}

	/* enable accept_any_vlan mode if we have more VLANs than credits,
	 * or remove accept_any_vlan mode if we've actually removed
	 * a non-configured vlan, and all remaining vlans are truly configured.
	 */

	if (accept_any_vlan)
		qede_config_accept_any_vlan(edev, true);
	else if (!edev->non_configured_vlans)
		qede_config_accept_any_vlan(edev, false);

	return real_rc;
}
static int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
    struct qede_dev *edev = netdev_priv(dev);
    struct qede_vlan *vlan = NULL;
    int rc;

    DP_VERBOSE(edev, NETIF_MSG_IFDOWN, "Removing vlan 0x%04x\n", vid);

    /* Find whether entry exists */
    list_for_each_entry(vlan, &edev->vlan_list, list)
        if (vlan->vid == vid)
            break;

    if (!vlan || (vlan->vid != vid)) {
        DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
                   "Vlan isn't configured\n");
        return 0;
    }

    if (edev->state != QEDE_STATE_OPEN) {
        /* As interface is already down, we don't have a VPORT
         * instance to remove vlan filter. So just update vlan list
         */
        DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
                   "Interface is down, removing VLAN from list only\n");
        qede_del_vlan_from_list(edev, vlan);
        return 0;
    }

    /* Remove vlan */
    rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_DEL, vid);
    if (rc) {
        DP_ERR(edev, "Failed to remove VLAN %d\n", vid);
        return -EINVAL;
    }

    qede_del_vlan_from_list(edev, vlan);

    /* We have removed a VLAN - try to see if we can
     * configure non-configured VLAN from the list.
     */
    rc = qede_configure_vlan_filters(edev);

    return rc;
}
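/* Walk the driver's VLAN list and mark every entry as not programmed in HW;
 * used on unload so that a later load can re-program the filters through
 * qede_configure_vlan_filters().
 */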
static void qede_vlan_mark_nonconfigured(struct qede_dev *edev)
{
    struct qede_vlan *vlan = NULL;

    if (list_empty(&edev->vlan_list))
        return;

    list_for_each_entry(vlan, &edev->vlan_list, list) {
        if (!vlan->configured)
            continue;

        vlan->configured = false;

        /* vlan0 filter isn't consuming out of our quota */
        if (vlan->vid != 0) {
            edev->non_configured_vlans++;
            edev->configured_vlans--;
        }

        DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
                   "marked vlan %d as non-configured\n",
                   vlan->vid);
    }

    edev->accept_any_vlan = false;
}
#ifdef CONFIG_QEDE_VXLAN
static void qede_add_vxlan_port(struct net_device *dev,
                                sa_family_t sa_family, __be16 port)
{
    struct qede_dev *edev = netdev_priv(dev);
    u16 t_port = ntohs(port);

    if (edev->vxlan_dst_port)
        return;

    edev->vxlan_dst_port = t_port;

    DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d", t_port);

    set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
    schedule_delayed_work(&edev->sp_task, 0);
}

static void qede_del_vxlan_port(struct net_device *dev,
                                sa_family_t sa_family, __be16 port)
{
    struct qede_dev *edev = netdev_priv(dev);
    u16 t_port = ntohs(port);

    if (t_port != edev->vxlan_dst_port)
        return;

    edev->vxlan_dst_port = 0;

    DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d", t_port);

    set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
    schedule_delayed_work(&edev->sp_task, 0);
}
#endif
#ifdef CONFIG_QEDE_GENEVE
static void qede_add_geneve_port(struct net_device *dev,
                                 sa_family_t sa_family, __be16 port)
{
    struct qede_dev *edev = netdev_priv(dev);
    u16 t_port = ntohs(port);

    if (edev->geneve_dst_port)
        return;

    edev->geneve_dst_port = t_port;

    DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d", t_port);
    set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
    schedule_delayed_work(&edev->sp_task, 0);
}

static void qede_del_geneve_port(struct net_device *dev,
                                 sa_family_t sa_family, __be16 port)
{
    struct qede_dev *edev = netdev_priv(dev);
    u16 t_port = ntohs(port);

    if (t_port != edev->geneve_dst_port)
        return;

    edev->geneve_dst_port = 0;

    DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d", t_port);
    set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
    schedule_delayed_work(&edev->sp_task, 0);
}
#endif
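/* net_device callbacks exported to the stack; the SR-IOV and tunnel-port
 * entries are only compiled in when the matching Kconfig options are enabled.
 */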
static const struct net_device_ops qede_netdev_ops = {
    .ndo_open = qede_open,
    .ndo_stop = qede_close,
    .ndo_start_xmit = qede_start_xmit,
    .ndo_set_rx_mode = qede_set_rx_mode,
    .ndo_set_mac_address = qede_set_mac_addr,
    .ndo_validate_addr = eth_validate_addr,
    .ndo_change_mtu = qede_change_mtu,
#ifdef CONFIG_QED_SRIOV
    .ndo_set_vf_mac = qede_set_vf_mac,
    .ndo_set_vf_vlan = qede_set_vf_vlan,
#endif
    .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
    .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
    .ndo_get_stats64 = qede_get_stats64,
#ifdef CONFIG_QED_SRIOV
    .ndo_set_vf_link_state = qede_set_vf_link_state,
    .ndo_set_vf_spoofchk = qede_set_vf_spoofchk,
    .ndo_get_vf_config = qede_get_vf_config,
    .ndo_set_vf_rate = qede_set_vf_rate,
#endif
#ifdef CONFIG_QEDE_VXLAN
    .ndo_add_vxlan_port = qede_add_vxlan_port,
    .ndo_del_vxlan_port = qede_del_vxlan_port,
#endif
#ifdef CONFIG_QEDE_GENEVE
    .ndo_add_geneve_port = qede_add_geneve_port,
    .ndo_del_geneve_port = qede_del_geneve_port,
#endif
};
/* -------------------------------------------------------------------------
 * START OF PROBE / REMOVE
 * -------------------------------------------------------------------------
 */
static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
                                            struct pci_dev *pdev,
                                            struct qed_dev_eth_info *info,
                                            u32 dp_module,
                                            u8 dp_level)
{
    struct net_device *ndev;
    struct qede_dev *edev;

    ndev = alloc_etherdev_mqs(sizeof(*edev),
                              info->num_queues,
                              info->num_queues);
    if (!ndev) {
        pr_err("etherdev allocation failed\n");
        return NULL;
    }

    edev = netdev_priv(ndev);
    edev->ndev = ndev;
    edev->cdev = cdev;
    edev->pdev = pdev;
    edev->dp_module = dp_module;
    edev->dp_level = dp_level;
    edev->ops = qed_ops;
    edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
    edev->q_num_tx_buffers = NUM_TX_BDS_DEF;

    SET_NETDEV_DEV(ndev, &pdev->dev);

    memset(&edev->stats, 0, sizeof(edev->stats));
    memcpy(&edev->dev_info, info, sizeof(*info));

    edev->num_tc = edev->dev_info.num_tc;

    INIT_LIST_HEAD(&edev->vlan_list);

    return edev;
}
static void qede_init_ndev(struct qede_dev *edev)
{
    struct net_device *ndev = edev->ndev;
    struct pci_dev *pdev = edev->pdev;
    u32 hw_features;

    pci_set_drvdata(pdev, ndev);

    ndev->mem_start = edev->dev_info.common.pci_mem_start;
    ndev->base_addr = ndev->mem_start;
    ndev->mem_end = edev->dev_info.common.pci_mem_end;
    ndev->irq = edev->dev_info.common.pci_irq;

    ndev->watchdog_timeo = TX_TIMEOUT;

    ndev->netdev_ops = &qede_netdev_ops;

    qede_set_ethtool_ops(ndev);

    /* user-changeable features */
    hw_features = NETIF_F_GRO | NETIF_F_SG |
                  NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                  NETIF_F_TSO | NETIF_F_TSO6;

    /* Encap features */
    hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |
                   NETIF_F_TSO_ECN;
    ndev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                            NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO_ECN |
                            NETIF_F_TSO6 | NETIF_F_GSO_GRE |
                            NETIF_F_GSO_UDP_TUNNEL | NETIF_F_RXCSUM;

    ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
                          NETIF_F_HIGHDMA;
    ndev->features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
                     NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HIGHDMA |
                     NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_TX;

    ndev->hw_features = hw_features;

    /* Set network device HW mac */
    ether_addr_copy(edev->ndev->dev_addr, edev->dev_info.common.hw_mac);
}
/* This function converts from 32b param to two params of level and module
 * Input 32b decoding:
 * b31 - enable all NOTICE prints. NOTICE prints are for deviation from the
 * 'happy' flow, e.g. memory allocation failed.
 * b30 - enable all INFO prints. INFO prints are for major steps in the flow
 * and provide important parameters.
 * b29-b0 - per-module bitmap, where each bit enables VERBOSE prints of that
 * module. VERBOSE prints are for tracking the specific flow in low level.
 *
 * Notice that the level should be that of the lowest required logs.
 */
void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level)
{
    *p_dp_level = QED_LEVEL_NOTICE;
    *p_dp_module = 0;

    if (debug & QED_LOG_VERBOSE_MASK) {
        *p_dp_level = QED_LEVEL_VERBOSE;
        *p_dp_module = (debug & 0x3FFFFFFF);
    } else if (debug & QED_LOG_INFO_MASK) {
        *p_dp_level = QED_LEVEL_INFO;
    } else if (debug & QED_LOG_NOTICE_MASK) {
        *p_dp_level = QED_LEVEL_NOTICE;
    }
}
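/* Example of the decoding above (mask values assumed to follow the bit layout
 * described in the comment): debug=0x80000000 requests NOTICE only,
 * debug=0x40000000 requests INFO, and any value with bits 29-0 set
 * (e.g. debug=0x3) selects VERBOSE level with those per-module bits enabled.
 */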
static void qede_free_fp_array(struct qede_dev *edev)
{
    if (edev->fp_array) {
        struct qede_fastpath *fp;
        int i;

        for_each_rss(i) {
            fp = &edev->fp_array[i];

            kfree(fp->sb_info);
            kfree(fp->rxq);
            kfree(fp->txqs);
        }
        kfree(edev->fp_array);
    }
    edev->num_rss = 0;
}

static int qede_alloc_fp_array(struct qede_dev *edev)
{
    struct qede_fastpath *fp;
    int i;

    edev->fp_array = kcalloc(QEDE_RSS_CNT(edev),
                             sizeof(*edev->fp_array), GFP_KERNEL);
    if (!edev->fp_array) {
        DP_NOTICE(edev, "fp array allocation failed\n");
        goto err;
    }

    for_each_rss(i) {
        fp = &edev->fp_array[i];

        fp->sb_info = kcalloc(1, sizeof(*fp->sb_info), GFP_KERNEL);
        if (!fp->sb_info) {
            DP_NOTICE(edev, "sb info struct allocation failed\n");
            goto err;
        }

        fp->rxq = kcalloc(1, sizeof(*fp->rxq), GFP_KERNEL);
        if (!fp->rxq) {
            DP_NOTICE(edev, "RXQ struct allocation failed\n");
            goto err;
        }

        fp->txqs = kcalloc(edev->num_tc, sizeof(*fp->txqs), GFP_KERNEL);
        if (!fp->txqs) {
            DP_NOTICE(edev, "TXQ array allocation failed\n");
            goto err;
        }
    }

    return 0;
err:
    qede_free_fp_array(edev);
    return -ENOMEM;
}
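/* Deferred (slowpath) work item: bits in sp_flags are set by the requesting
 * contexts and consumed here under qede_lock, applying Rx-mode changes and
 * pushing updated VXLAN/GENEVE UDP destination ports to the qed core.
 */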
static void qede_sp_task(struct work_struct *work)
{
    struct qede_dev *edev = container_of(work, struct qede_dev,
                                         sp_task.work);
    struct qed_dev *cdev = edev->cdev;

    mutex_lock(&edev->qede_lock);

    if (edev->state == QEDE_STATE_OPEN) {
        if (test_and_clear_bit(QEDE_SP_RX_MODE, &edev->sp_flags))
            qede_config_rx_mode(edev->ndev);
    }

    if (test_and_clear_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags)) {
        struct qed_tunn_params tunn_params;

        memset(&tunn_params, 0, sizeof(tunn_params));
        tunn_params.update_vxlan_port = 1;
        tunn_params.vxlan_port = edev->vxlan_dst_port;
        qed_ops->tunn_config(cdev, &tunn_params);
    }

    if (test_and_clear_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags)) {
        struct qed_tunn_params tunn_params;

        memset(&tunn_params, 0, sizeof(tunn_params));
        tunn_params.update_geneve_port = 1;
        tunn_params.geneve_port = edev->geneve_dst_port;
        qed_ops->tunn_config(cdev, &tunn_params);
    }

    mutex_unlock(&edev->qede_lock);
}
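/* Reserve L2 connections with the qed core before the slowpath is started;
 * 128 connections cover the maximal Rx + Tx queue configuration.
 */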
static void qede_update_pf_params(struct qed_dev *cdev)
{
    struct qed_pf_params pf_params;

    memset(&pf_params, 0, sizeof(struct qed_pf_params));
    pf_params.eth_pf_params.num_cons = 128;
    qed_ops->common->update_pf_params(cdev, &pf_params);
}

enum qede_probe_mode {
    QEDE_PROBE_NORMAL,
};
static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
                        bool is_vf, enum qede_probe_mode mode)
{
    struct qed_probe_params probe_params;
    struct qed_slowpath_params params;
    struct qed_dev_eth_info dev_info;
    struct qede_dev *edev;
    struct qed_dev *cdev;
    int rc;

    if (unlikely(dp_level & QED_LEVEL_INFO))
        pr_notice("Starting qede probe\n");

    memset(&probe_params, 0, sizeof(probe_params));
    probe_params.protocol = QED_PROTOCOL_ETH;
    probe_params.dp_module = dp_module;
    probe_params.dp_level = dp_level;
    probe_params.is_vf = is_vf;
    cdev = qed_ops->common->probe(pdev, &probe_params);
    if (!cdev) {
        rc = -ENODEV;
        goto err0;
    }

    qede_update_pf_params(cdev);

    /* Start the Slowpath-process */
    memset(&params, 0, sizeof(struct qed_slowpath_params));
    params.int_mode = QED_INT_MODE_MSIX;
    params.drv_major = QEDE_MAJOR_VERSION;
    params.drv_minor = QEDE_MINOR_VERSION;
    params.drv_rev = QEDE_REVISION_VERSION;
    params.drv_eng = QEDE_ENGINEERING_VERSION;
    strlcpy(params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
    rc = qed_ops->common->slowpath_start(cdev, &params);
    if (rc) {
        pr_notice("Cannot start slowpath\n");
        goto err1;
    }

    /* Learn information crucial for qede to progress */
    rc = qed_ops->fill_dev_info(cdev, &dev_info);
    if (rc)
        goto err2;

    edev = qede_alloc_etherdev(cdev, pdev, &dev_info, dp_module,
                               dp_level);
    if (!edev) {
        rc = -ENOMEM;
        goto err2;
    }

    if (is_vf)
        edev->flags |= QEDE_FLAG_IS_VF;

    qede_init_ndev(edev);

    rc = register_netdev(edev->ndev);
    if (rc) {
        DP_NOTICE(edev, "Cannot register net-device\n");
        goto err3;
    }

    edev->ops->common->set_id(cdev, edev->ndev->name, DRV_MODULE_VERSION);

    edev->ops->register_ops(cdev, &qede_ll_ops, edev);

    INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
    mutex_init(&edev->qede_lock);

    DP_INFO(edev, "Ending successfully qede probe\n");

    return 0;

err3:
    free_netdev(edev->ndev);
err2:
    qed_ops->common->slowpath_stop(cdev);
err1:
    qed_ops->common->remove(cdev);
err0:
    return rc;
}
static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
    bool is_vf = false;
    u32 dp_module = 0;
    u8 dp_level = 0;

    switch ((enum qede_pci_private)id->driver_data) {
    case QEDE_PRIVATE_VF:
        if (debug & QED_LOG_VERBOSE_MASK)
            dev_err(&pdev->dev, "Probing a VF\n");
        is_vf = true;
        break;
    default:
        if (debug & QED_LOG_VERBOSE_MASK)
            dev_err(&pdev->dev, "Probing a PF\n");
    }

    qede_config_debug(debug, &dp_module, &dp_level);

    return __qede_probe(pdev, dp_module, dp_level, is_vf,
                        QEDE_PROBE_NORMAL);
}

enum qede_remove_mode {
    QEDE_REMOVE_NORMAL,
};
static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
{
    struct net_device *ndev = pci_get_drvdata(pdev);
    struct qede_dev *edev = netdev_priv(ndev);
    struct qed_dev *cdev = edev->cdev;

    DP_INFO(edev, "Starting qede_remove\n");

    cancel_delayed_work_sync(&edev->sp_task);
    unregister_netdev(ndev);

    edev->ops->common->set_power_state(cdev, PCI_D0);

    pci_set_drvdata(pdev, NULL);

    free_netdev(ndev);

    /* Use global ops since we've freed edev */
    qed_ops->common->slowpath_stop(cdev);
    qed_ops->common->remove(cdev);

    pr_notice("Ending successfully qede_remove\n");
}

static void qede_remove(struct pci_dev *pdev)
{
    __qede_remove(pdev, QEDE_REMOVE_NORMAL);
}
/* -------------------------------------------------------------------------
 * START OF LOAD / UNLOAD
 * -------------------------------------------------------------------------
 */
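/* Decide how many RSS queues to request: either the user-requested count or
 * the kernel default scaled by the number of HW functions, capped by what the
 * device supports; the qed core then reports how many interrupts it granted.
 */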
static int qede_set_num_queues(struct qede_dev *edev)
{
    int rc;
    u16 rss_num;

    /* Setup queues according to possible resources*/
    if (edev->req_rss)
        rss_num = edev->req_rss;
    else
        rss_num = netif_get_num_default_rss_queues() *
                  edev->dev_info.common.num_hwfns;

    rss_num = min_t(u16, QEDE_MAX_RSS_CNT(edev), rss_num);

    rc = edev->ops->common->set_fp_int(edev->cdev, rss_num);
    if (rc > 0) {
        /* Managed to request interrupts for our queues */
        edev->num_rss = rc;
        DP_INFO(edev, "Managed %d [of %d] RSS queues\n",
                QEDE_RSS_CNT(edev), rss_num);
        rc = 0;
    }
    return rc;
}
static void qede_free_mem_sb(struct qede_dev *edev,
                             struct qed_sb_info *sb_info)
{
    if (sb_info->sb_virt)
        dma_free_coherent(&edev->pdev->dev, sizeof(*sb_info->sb_virt),
                          (void *)sb_info->sb_virt, sb_info->sb_phys);
}
/* This function allocates fast-path status block memory */
static int qede_alloc_mem_sb(struct qede_dev *edev,
                             struct qed_sb_info *sb_info,
                             u16 sb_id)
{
    struct status_block *sb_virt;
    dma_addr_t sb_phys;
    int rc;

    sb_virt = dma_alloc_coherent(&edev->pdev->dev,
                                 sizeof(*sb_virt),
                                 &sb_phys, GFP_KERNEL);
    if (!sb_virt) {
        DP_ERR(edev, "Status block allocation failed\n");
        return -ENOMEM;
    }

    rc = edev->ops->common->sb_init(edev->cdev, sb_info,
                                    sb_virt, sb_phys, sb_id,
                                    QED_SB_TYPE_L2_QUEUE);
    if (rc) {
        DP_ERR(edev, "Status block initialization failed\n");
        dma_free_coherent(&edev->pdev->dev, sizeof(*sb_virt),
                          sb_virt, sb_phys);
        return rc;
    }

    return 0;
}
static void qede_free_rx_buffers(struct qede_dev *edev,
                                 struct qede_rx_queue *rxq)
{
    u16 i;

    for (i = rxq->sw_rx_cons; i != rxq->sw_rx_prod; i++) {
        struct sw_rx_data *rx_buf;
        struct page *data;

        rx_buf = &rxq->sw_rx_ring[i & NUM_RX_BDS_MAX];
        data = rx_buf->data;

        dma_unmap_page(&edev->pdev->dev,
                       rx_buf->mapping,
                       PAGE_SIZE, DMA_FROM_DEVICE);

        rx_buf->data = NULL;
        __free_page(data);
    }
}
static void qede_free_sge_mem(struct qede_dev *edev,
                              struct qede_rx_queue *rxq)
{
    int i;

    if (edev->gro_disable)
        return;

    for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
        struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
        struct sw_rx_data *replace_buf = &tpa_info->replace_buf;

        if (replace_buf->data) {
            dma_unmap_page(&edev->pdev->dev,
                           replace_buf->mapping,
                           PAGE_SIZE, DMA_FROM_DEVICE);
            __free_page(replace_buf->data);
        }
    }
}
static void qede_free_mem_rxq(struct qede_dev *edev,
                              struct qede_rx_queue *rxq)
{
    qede_free_sge_mem(edev, rxq);

    /* Free rx buffers */
    qede_free_rx_buffers(edev, rxq);

    /* Free the parallel SW ring */
    kfree(rxq->sw_rx_ring);

    /* Free the real RQ ring used by FW */
    edev->ops->common->chain_free(edev->cdev, &rxq->rx_bd_ring);
    edev->ops->common->chain_free(edev->cdev, &rxq->rx_comp_ring);
}
static int qede_alloc_rx_buffer(struct qede_dev *edev,
                                struct qede_rx_queue *rxq)
{
    struct sw_rx_data *sw_rx_data;
    struct eth_rx_bd *rx_bd;
    dma_addr_t mapping;
    struct page *data;
    u16 rx_buf_size;

    rx_buf_size = rxq->rx_buf_size;

    data = alloc_pages(GFP_ATOMIC, 0);
    if (unlikely(!data)) {
        DP_NOTICE(edev, "Failed to allocate Rx data [page]\n");
        return -ENOMEM;
    }

    /* Map the entire page as it would be used
     * for multiple RX buffer segment size mapping.
     */
    mapping = dma_map_page(&edev->pdev->dev, data, 0,
                           PAGE_SIZE, DMA_FROM_DEVICE);
    if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
        __free_page(data);
        DP_NOTICE(edev, "Failed to map Rx buffer\n");
        return -ENOMEM;
    }

    sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
    sw_rx_data->page_offset = 0;
    sw_rx_data->data = data;
    sw_rx_data->mapping = mapping;

    /* Advance PROD and get BD pointer */
    rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring);
    WARN_ON(!rx_bd);
    rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping));
    rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping));

    rxq->sw_rx_prod++;

    return 0;
}
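/* Pre-allocate the per-aggregation replacement buffers used by HW GRO (TPA).
 * If the MTU exceeds a page or any allocation fails, GRO is disabled and the
 * driver falls back to regular Rx buffers only.
 */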
static int qede_alloc_sge_mem(struct qede_dev *edev,
                              struct qede_rx_queue *rxq)
{
    dma_addr_t mapping;
    int i;

    if (edev->gro_disable)
        return 0;

    if (edev->ndev->mtu > PAGE_SIZE) {
        edev->gro_disable = 1;
        return 0;
    }

    for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
        struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
        struct sw_rx_data *replace_buf = &tpa_info->replace_buf;

        replace_buf->data = alloc_pages(GFP_ATOMIC, 0);
        if (unlikely(!replace_buf->data)) {
            DP_NOTICE(edev,
                      "Failed to allocate TPA skb pool [replacement buffer]\n");
            goto err;
        }

        mapping = dma_map_page(&edev->pdev->dev, replace_buf->data, 0,
                               rxq->rx_buf_size, DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
            DP_NOTICE(edev,
                      "Failed to map TPA replacement buffer\n");
            goto err;
        }

        replace_buf->mapping = mapping;
        tpa_info->replace_buf.page_offset = 0;

        tpa_info->replace_buf_mapping = mapping;
        tpa_info->agg_state = QEDE_AGG_STATE_NONE;
    }

    return 0;
err:
    qede_free_sge_mem(edev, rxq);
    edev->gro_disable = 1;
    return -ENOMEM;
}
/* This function allocates all memory needed per Rx queue */
static int qede_alloc_mem_rxq(struct qede_dev *edev,
                              struct qede_rx_queue *rxq)
{
    int i, rc, size;

    rxq->num_rx_buffers = edev->q_num_rx_buffers;

    rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD +
                       edev->ndev->mtu;
    if (rxq->rx_buf_size > PAGE_SIZE)
        rxq->rx_buf_size = PAGE_SIZE;

    /* Segment size to split a page in multiple equal parts */
    rxq->rx_buf_seg_size = roundup_pow_of_two(rxq->rx_buf_size);

    /* Allocate the parallel driver ring for Rx buffers */
    size = sizeof(*rxq->sw_rx_ring) * RX_RING_SIZE;
    rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL);
    if (!rxq->sw_rx_ring) {
        DP_ERR(edev, "Rx buffers ring allocation failed\n");
        rc = -ENOMEM;
        goto err;
    }

    /* Allocate FW Rx ring */
    rc = edev->ops->common->chain_alloc(edev->cdev,
                                        QED_CHAIN_USE_TO_CONSUME_PRODUCE,
                                        QED_CHAIN_MODE_NEXT_PTR,
                                        RX_RING_SIZE,
                                        sizeof(struct eth_rx_bd),
                                        &rxq->rx_bd_ring);
    if (rc)
        goto err;

    /* Allocate FW completion ring */
    rc = edev->ops->common->chain_alloc(edev->cdev,
                                        QED_CHAIN_USE_TO_CONSUME,
                                        QED_CHAIN_MODE_PBL,
                                        RX_RING_SIZE,
                                        sizeof(union eth_rx_cqe),
                                        &rxq->rx_comp_ring);
    if (rc)
        goto err;

    /* Allocate buffers for the Rx ring */
    for (i = 0; i < rxq->num_rx_buffers; i++) {
        rc = qede_alloc_rx_buffer(edev, rxq);
        if (rc) {
            DP_ERR(edev,
                   "Rx buffers allocation failed at index %d\n", i);
            goto err;
        }
    }

    rc = qede_alloc_sge_mem(edev, rxq);
err:
    return rc;
}
static void qede_free_mem_txq(struct qede_dev *edev,
                              struct qede_tx_queue *txq)
{
    /* Free the parallel SW ring */
    kfree(txq->sw_tx_ring);

    /* Free the real RQ ring used by FW */
    edev->ops->common->chain_free(edev->cdev, &txq->tx_pbl);
}

/* This function allocates all memory needed per Tx queue */
static int qede_alloc_mem_txq(struct qede_dev *edev,
                              struct qede_tx_queue *txq)
{
    union eth_tx_bd_types *p_virt;
    int size, rc;

    txq->num_tx_buffers = edev->q_num_tx_buffers;

    /* Allocate the parallel driver ring for Tx buffers */
    size = sizeof(*txq->sw_tx_ring) * NUM_TX_BDS_MAX;
    txq->sw_tx_ring = kzalloc(size, GFP_KERNEL);
    if (!txq->sw_tx_ring) {
        DP_NOTICE(edev, "Tx buffers ring allocation failed\n");
        goto err;
    }

    rc = edev->ops->common->chain_alloc(edev->cdev,
                                        QED_CHAIN_USE_TO_CONSUME_PRODUCE,
                                        QED_CHAIN_MODE_PBL,
                                        NUM_TX_BDS_MAX,
                                        sizeof(*p_virt),
                                        &txq->tx_pbl);
    if (rc)
        goto err;

    return 0;

err:
    qede_free_mem_txq(edev, txq);
    return -ENOMEM;
}
/* This function frees all memory of a single fp */
static void qede_free_mem_fp(struct qede_dev *edev,
                             struct qede_fastpath *fp)
{
    int tc;

    qede_free_mem_sb(edev, fp->sb_info);

    qede_free_mem_rxq(edev, fp->rxq);

    for (tc = 0; tc < edev->num_tc; tc++)
        qede_free_mem_txq(edev, &fp->txqs[tc]);
}

/* This function allocates all memory needed for a single fp (i.e. an entity
 * which contains status block, one rx queue and multiple per-TC tx queues.
 */
static int qede_alloc_mem_fp(struct qede_dev *edev,
                             struct qede_fastpath *fp)
{
    int rc, tc;

    rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->rss_id);
    if (rc)
        goto err;

    rc = qede_alloc_mem_rxq(edev, fp->rxq);
    if (rc)
        goto err;

    for (tc = 0; tc < edev->num_tc; tc++) {
        rc = qede_alloc_mem_txq(edev, &fp->txqs[tc]);
        if (rc)
            goto err;
    }

    return 0;

err:
    return rc;
}
static void qede_free_mem_load(struct qede_dev *edev)
{
    int i;

    for_each_rss(i) {
        struct qede_fastpath *fp = &edev->fp_array[i];

        qede_free_mem_fp(edev, fp);
    }
}

/* This function allocates all qede memory at NIC load. */
static int qede_alloc_mem_load(struct qede_dev *edev)
{
    int rc = 0, rss_id;

    for (rss_id = 0; rss_id < QEDE_RSS_CNT(edev); rss_id++) {
        struct qede_fastpath *fp = &edev->fp_array[rss_id];

        rc = qede_alloc_mem_fp(edev, fp);
        if (rc) {
            DP_ERR(edev,
                   "Failed to allocate memory for fastpath - rss id = %d\n",
                   rss_id);
            qede_free_mem_load(edev);
            return rc;
        }
    }

    return 0;
}
/* This function inits fp content and resets the SB, RXQ and TXQ structures */
static void qede_init_fp(struct qede_dev *edev)
{
    int rss_id, txq_index, tc;
    struct qede_fastpath *fp;

    for_each_rss(rss_id) {
        fp = &edev->fp_array[rss_id];

        fp->edev = edev;
        fp->rss_id = rss_id;

        memset((void *)&fp->napi, 0, sizeof(fp->napi));

        memset((void *)fp->sb_info, 0, sizeof(*fp->sb_info));

        memset((void *)fp->rxq, 0, sizeof(*fp->rxq));
        fp->rxq->rxq_id = rss_id;

        memset((void *)fp->txqs, 0, (edev->num_tc * sizeof(*fp->txqs)));
        for (tc = 0; tc < edev->num_tc; tc++) {
            txq_index = tc * QEDE_RSS_CNT(edev) + rss_id;
            fp->txqs[tc].index = txq_index;
        }

        snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
                 edev->ndev->name, rss_id);
    }

    edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO);
}
static int qede_set_real_num_queues(struct qede_dev *edev)
{
    int rc = 0;

    rc = netif_set_real_num_tx_queues(edev->ndev, QEDE_TSS_CNT(edev));
    if (rc) {
        DP_NOTICE(edev, "Failed to set real number of Tx queues\n");
        return rc;
    }
    rc = netif_set_real_num_rx_queues(edev->ndev, QEDE_RSS_CNT(edev));
    if (rc) {
        DP_NOTICE(edev, "Failed to set real number of Rx queues\n");
        return rc;
    }

    return 0;
}
static void qede_napi_disable_remove(struct qede_dev *edev)
{
    int i;

    for_each_rss(i) {
        napi_disable(&edev->fp_array[i].napi);

        netif_napi_del(&edev->fp_array[i].napi);
    }
}

static void qede_napi_add_enable(struct qede_dev *edev)
{
    int i;

    /* Add NAPI objects */
    for_each_rss(i) {
        netif_napi_add(edev->ndev, &edev->fp_array[i].napi,
                       qede_poll, NAPI_POLL_WEIGHT);
        napi_enable(&edev->fp_array[i].napi);
    }
}
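/* Release fastpath interrupts: for MSI-X, synchronize and free each vector;
 * otherwise detach the simd handlers registered with the qed core.
 */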
static void qede_sync_free_irqs(struct qede_dev *edev)
{
    int i;

    for (i = 0; i < edev->int_info.used_cnt; i++) {
        if (edev->int_info.msix_cnt) {
            synchronize_irq(edev->int_info.msix[i].vector);
            free_irq(edev->int_info.msix[i].vector,
                     &edev->fp_array[i]);
        } else {
            edev->ops->common->simd_handler_clean(edev->cdev, i);
        }
    }

    edev->int_info.used_cnt = 0;
}
static int qede_req_msix_irqs(struct qede_dev *edev)
{
    int i, rc;

    /* Sanitize number of interrupts == number of prepared RSS queues */
    if (QEDE_RSS_CNT(edev) > edev->int_info.msix_cnt) {
        DP_ERR(edev,
               "Interrupt mismatch: %d RSS queues > %d MSI-x vectors\n",
               QEDE_RSS_CNT(edev), edev->int_info.msix_cnt);
        return -EINVAL;
    }

    for (i = 0; i < QEDE_RSS_CNT(edev); i++) {
        rc = request_irq(edev->int_info.msix[i].vector,
                         qede_msix_fp_int, 0, edev->fp_array[i].name,
                         &edev->fp_array[i]);
        if (rc) {
            DP_ERR(edev, "Request fp %d irq failed\n", i);
            qede_sync_free_irqs(edev);
            return rc;
        }
        DP_VERBOSE(edev, NETIF_MSG_INTR,
                   "Requested fp irq for %s [entry %d]. Cookie is at %p\n",
                   edev->fp_array[i].name, i,
                   &edev->fp_array[i]);
        edev->int_info.used_cnt++;
    }

    return 0;
}

static void qede_simd_fp_handler(void *cookie)
{
    struct qede_fastpath *fp = (struct qede_fastpath *)cookie;

    napi_schedule_irqoff(&fp->napi);
}
static int qede_setup_irqs(struct qede_dev *edev)
{
    int i, rc = 0;

    /* Learn Interrupt configuration */
    rc = edev->ops->common->get_fp_int(edev->cdev, &edev->int_info);
    if (rc)
        return rc;

    if (edev->int_info.msix_cnt) {
        rc = qede_req_msix_irqs(edev);
        if (rc)
            return rc;
        edev->ndev->irq = edev->int_info.msix[0].vector;
    } else {
        const struct qed_common_ops *ops;

        /* qed should learn the RSS ids and callbacks */
        ops = edev->ops->common;
        for (i = 0; i < QEDE_RSS_CNT(edev); i++)
            ops->simd_handler_config(edev->cdev,
                                     &edev->fp_array[i], i,
                                     qede_simd_fp_handler);
        edev->int_info.used_cnt = QEDE_RSS_CNT(edev);
    }
    return 0;
}
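/* Poll a Tx queue until the FW has consumed all pending BDs. If the queue is
 * stuck and draining is allowed, ask the MCP to drain once and retry before
 * giving up with a timeout.
 */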
static int qede_drain_txq(struct qede_dev *edev,
                          struct qede_tx_queue *txq,
                          bool allow_drain)
{
    int rc, cnt = 1000;

    while (txq->sw_tx_cons != txq->sw_tx_prod) {
        if (!cnt) {
            if (allow_drain) {
                DP_NOTICE(edev,
                          "Tx queue[%d] is stuck, requesting MCP to drain\n",
                          txq->index);
                rc = edev->ops->common->drain(edev->cdev);
                if (rc)
                    return rc;
                return qede_drain_txq(edev, txq, false);
            }
            DP_NOTICE(edev,
                      "Timeout waiting for tx queue[%d]: PROD=%d, CONS=%d\n",
                      txq->index, txq->sw_tx_prod,
                      txq->sw_tx_cons);
            return -ENODEV;
        }
        cnt--;
        usleep_range(1000, 2000);
        barrier();
    }

    /* FW finished processing, wait for HW to transmit all tx packets */
    usleep_range(1000, 2000);

    return 0;
}
static int qede_stop_queues(struct qede_dev *edev)
{
    struct qed_update_vport_params vport_update_params;
    struct qed_dev *cdev = edev->cdev;
    int rc, tc, i;

    /* Disable the vport */
    memset(&vport_update_params, 0, sizeof(vport_update_params));
    vport_update_params.vport_id = 0;
    vport_update_params.update_vport_active_flg = 1;
    vport_update_params.vport_active_flg = 0;
    vport_update_params.update_rss_flg = 0;

    rc = edev->ops->vport_update(cdev, &vport_update_params);
    if (rc) {
        DP_ERR(edev, "Failed to update vport\n");
        return rc;
    }

    /* Flush Tx queues. If needed, request drain from MCP */
    for_each_rss(i) {
        struct qede_fastpath *fp = &edev->fp_array[i];

        for (tc = 0; tc < edev->num_tc; tc++) {
            struct qede_tx_queue *txq = &fp->txqs[tc];

            rc = qede_drain_txq(edev, txq, true);
            if (rc)
                return rc;
        }
    }

    /* Stop all Queues in reverse order*/
    for (i = QEDE_RSS_CNT(edev) - 1; i >= 0; i--) {
        struct qed_stop_rxq_params rx_params;

        /* Stop the Tx Queue(s)*/
        for (tc = 0; tc < edev->num_tc; tc++) {
            struct qed_stop_txq_params tx_params;

            tx_params.rss_id = i;
            tx_params.tx_queue_id = tc * QEDE_RSS_CNT(edev) + i;
            rc = edev->ops->q_tx_stop(cdev, &tx_params);
            if (rc) {
                DP_ERR(edev, "Failed to stop TXQ #%d\n",
                       tx_params.tx_queue_id);
                return rc;
            }
        }

        /* Stop the Rx Queue*/
        memset(&rx_params, 0, sizeof(rx_params));
        rx_params.rss_id = i;
        rx_params.rx_queue_id = i;

        rc = edev->ops->q_rx_stop(cdev, &rx_params);
        if (rc) {
            DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
            return rc;
        }
    }

    /* Stop the vport */
    rc = edev->ops->vport_stop(cdev, 0);
    if (rc)
        DP_ERR(edev, "Failed to stop VPORT\n");

    return rc;
}
static int qede_start_queues(struct qede_dev *edev)
{
    int rc, tc, i;
    int vlan_removal_en = 1;
    struct qed_dev *cdev = edev->cdev;
    struct qed_update_vport_params vport_update_params;
    struct qed_queue_start_common_params q_params;
    struct qed_dev_info *qed_info = &edev->dev_info.common;
    struct qed_start_vport_params start = {0};
    bool reset_rss_indir = false;

    if (!edev->num_rss) {
        DP_ERR(edev,
               "Cannot update V-VPORT as active as there are no Rx queues\n");
        return -EINVAL;
    }

    start.gro_enable = !edev->gro_disable;
    start.mtu = edev->ndev->mtu;
    start.vport_id = 0;
    start.drop_ttl0 = true;
    start.remove_inner_vlan = vlan_removal_en;

    rc = edev->ops->vport_start(cdev, &start);

    if (rc) {
        DP_ERR(edev, "Start V-PORT failed %d\n", rc);
        return rc;
    }

    DP_VERBOSE(edev, NETIF_MSG_IFUP,
               "Start vport ramrod passed, vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
               start.vport_id, edev->ndev->mtu + 0xe, vlan_removal_en);

    for_each_rss(i) {
        struct qede_fastpath *fp = &edev->fp_array[i];
        dma_addr_t phys_table = fp->rxq->rx_comp_ring.pbl.p_phys_table;

        memset(&q_params, 0, sizeof(q_params));
        q_params.rss_id = i;
        q_params.queue_id = i;
        q_params.vport_id = 0;
        q_params.sb = fp->sb_info->igu_sb_id;
        q_params.sb_idx = RX_PI;

        rc = edev->ops->q_rx_start(cdev, &q_params,
                                   fp->rxq->rx_buf_size,
                                   fp->rxq->rx_bd_ring.p_phys_addr,
                                   phys_table,
                                   fp->rxq->rx_comp_ring.page_cnt,
                                   &fp->rxq->hw_rxq_prod_addr);
        if (rc) {
            DP_ERR(edev, "Start RXQ #%d failed %d\n", i, rc);
            return rc;
        }

        fp->rxq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[RX_PI];

        qede_update_rx_prod(edev, fp->rxq);

        for (tc = 0; tc < edev->num_tc; tc++) {
            struct qede_tx_queue *txq = &fp->txqs[tc];
            int txq_index = tc * QEDE_RSS_CNT(edev) + i;

            memset(&q_params, 0, sizeof(q_params));
            q_params.rss_id = i;
            q_params.queue_id = txq_index;
            q_params.vport_id = 0;
            q_params.sb = fp->sb_info->igu_sb_id;
            q_params.sb_idx = TX_PI(tc);

            rc = edev->ops->q_tx_start(cdev, &q_params,
                                       txq->tx_pbl.pbl.p_phys_table,
                                       txq->tx_pbl.page_cnt,
                                       &txq->doorbell_addr);
            if (rc) {
                DP_ERR(edev, "Start TXQ #%d failed %d\n",
                       txq_index, rc);
                return rc;
            }

            txq->hw_cons_ptr =
                &fp->sb_info->sb_virt->pi_array[TX_PI(tc)];
            SET_FIELD(txq->tx_db.data.params,
                      ETH_DB_DATA_DEST, DB_DEST_XCM);
            SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
                      DB_AGG_CMD_SET);
            SET_FIELD(txq->tx_db.data.params,
                      ETH_DB_DATA_AGG_VAL_SEL,
                      DQ_XCM_ETH_TX_BD_PROD_CMD);

            txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
        }
    }

    /* Prepare and send the vport enable */
    memset(&vport_update_params, 0, sizeof(vport_update_params));
    vport_update_params.vport_id = start.vport_id;
    vport_update_params.update_vport_active_flg = 1;
    vport_update_params.vport_active_flg = 1;

    if ((qed_info->mf_mode == QED_MF_NPAR || pci_num_vf(edev->pdev)) &&
        qed_info->tx_switching) {
        vport_update_params.update_tx_switching_flg = 1;
        vport_update_params.tx_switching_flg = 1;
    }

    /* Fill struct with RSS params */
    if (QEDE_RSS_CNT(edev) > 1) {
        vport_update_params.update_rss_flg = 1;

        /* Need to validate current RSS config uses valid entries */
        for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
            if (edev->rss_params.rss_ind_table[i] >=
                edev->num_rss) {
                reset_rss_indir = true;
                break;
            }
        }

        if (!(edev->rss_params_inited & QEDE_RSS_INDIR_INITED) ||
            reset_rss_indir) {
            u16 val;

            for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
                u16 indir_val;

                val = QEDE_RSS_CNT(edev);
                indir_val = ethtool_rxfh_indir_default(i, val);
                edev->rss_params.rss_ind_table[i] = indir_val;
            }
            edev->rss_params_inited |= QEDE_RSS_INDIR_INITED;
        }

        if (!(edev->rss_params_inited & QEDE_RSS_KEY_INITED)) {
            netdev_rss_key_fill(edev->rss_params.rss_key,
                                sizeof(edev->rss_params.rss_key));
            edev->rss_params_inited |= QEDE_RSS_KEY_INITED;
        }

        if (!(edev->rss_params_inited & QEDE_RSS_CAPS_INITED)) {
            edev->rss_params.rss_caps = QED_RSS_IPV4 |
                                        QED_RSS_IPV6 |
                                        QED_RSS_IPV4_TCP |
                                        QED_RSS_IPV6_TCP;
            edev->rss_params_inited |= QEDE_RSS_CAPS_INITED;
        }

        memcpy(&vport_update_params.rss_params, &edev->rss_params,
               sizeof(vport_update_params.rss_params));
    } else {
        memset(&vport_update_params.rss_params, 0,
               sizeof(vport_update_params.rss_params));
    }

    rc = edev->ops->vport_update(cdev, &vport_update_params);
    if (rc) {
        DP_ERR(edev, "Update V-PORT failed %d\n", rc);
        return rc;
    }

    return 0;
}
static int qede_set_mcast_rx_mac(struct qede_dev *edev,
                                 enum qed_filter_xcast_params_type opcode,
                                 unsigned char *mac, int num_macs)
{
    struct qed_filter_params filter_cmd;
    int i;

    memset(&filter_cmd, 0, sizeof(filter_cmd));
    filter_cmd.type = QED_FILTER_TYPE_MCAST;
    filter_cmd.filter.mcast.type = opcode;
    filter_cmd.filter.mcast.num = num_macs;

    for (i = 0; i < num_macs; i++, mac += ETH_ALEN)
        ether_addr_copy(filter_cmd.filter.mcast.mac[i], mac);

    return edev->ops->filter_config(edev->cdev, &filter_cmd);
}
enum qede_unload_mode {
    QEDE_UNLOAD_NORMAL,
};

static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode)
{
    struct qed_link_params link_params;
    int rc;

    DP_INFO(edev, "Starting qede unload\n");

    mutex_lock(&edev->qede_lock);
    edev->state = QEDE_STATE_CLOSED;

    /* Close OS Tx */
    netif_tx_disable(edev->ndev);
    netif_carrier_off(edev->ndev);

    /* Reset the link */
    memset(&link_params, 0, sizeof(link_params));
    link_params.link_up = false;
    edev->ops->common->set_link(edev->cdev, &link_params);
    rc = qede_stop_queues(edev);
    if (rc) {
        qede_sync_free_irqs(edev);
        goto out;
    }

    DP_INFO(edev, "Stopped Queues\n");

    qede_vlan_mark_nonconfigured(edev);
    edev->ops->fastpath_stop(edev->cdev);

    /* Release the interrupts */
    qede_sync_free_irqs(edev);
    edev->ops->common->set_fp_int(edev->cdev, 0);

    qede_napi_disable_remove(edev);

    qede_free_mem_load(edev);
    qede_free_fp_array(edev);

out:
    mutex_unlock(&edev->qede_lock);
    DP_INFO(edev, "Ending qede unload\n");
}
enum qede_load_mode {
    QEDE_LOAD_NORMAL,
};

static int qede_load(struct qede_dev *edev, enum qede_load_mode mode)
{
    struct qed_link_params link_params;
    struct qed_link_output link_output;
    int rc;

    DP_INFO(edev, "Starting qede load\n");

    rc = qede_set_num_queues(edev);
    if (rc)
        goto err0;

    rc = qede_alloc_fp_array(edev);
    if (rc)
        goto err0;

    qede_init_fp(edev);

    rc = qede_alloc_mem_load(edev);
    if (rc)
        goto err1;
    DP_INFO(edev, "Allocated %d RSS queues on %d TC/s\n",
            QEDE_RSS_CNT(edev), edev->num_tc);

    rc = qede_set_real_num_queues(edev);
    if (rc)
        goto err2;

    qede_napi_add_enable(edev);
    DP_INFO(edev, "Napi added and enabled\n");

    rc = qede_setup_irqs(edev);
    if (rc)
        goto err3;
    DP_INFO(edev, "Setup IRQs succeeded\n");

    rc = qede_start_queues(edev);
    if (rc)
        goto err4;
    DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");

    /* Add primary mac and set Rx filters */
    ether_addr_copy(edev->primary_mac, edev->ndev->dev_addr);

    mutex_lock(&edev->qede_lock);
    edev->state = QEDE_STATE_OPEN;
    mutex_unlock(&edev->qede_lock);

    /* Program un-configured VLANs */
    qede_configure_vlan_filters(edev);

    /* Ask for link-up using current configuration */
    memset(&link_params, 0, sizeof(link_params));
    link_params.link_up = true;
    edev->ops->common->set_link(edev->cdev, &link_params);

    /* Query whether link is already-up */
    memset(&link_output, 0, sizeof(link_output));
    edev->ops->common->get_link(edev->cdev, &link_output);
    qede_link_update(edev, &link_output);

    DP_INFO(edev, "Ending successfully qede load\n");

    return 0;

err4:
    qede_sync_free_irqs(edev);
    memset(&edev->int_info.msix_cnt, 0, sizeof(struct qed_int_info));
err3:
    qede_napi_disable_remove(edev);
err2:
    qede_free_mem_load(edev);
err1:
    edev->ops->common->set_fp_int(edev->cdev, 0);
    qede_free_fp_array(edev);
err0:
    return rc;
}
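/* Tear the device down, let the caller-provided handler adjust parameters
 * while the device is down (e.g. ring sizes), and load it again, restoring
 * the Rx mode under qede_lock.
 */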
void qede_reload(struct qede_dev *edev,
                 void (*func)(struct qede_dev *, union qede_reload_args *),
                 union qede_reload_args *args)
{
    qede_unload(edev, QEDE_UNLOAD_NORMAL);

    /* Call function handler to update parameters
     * needed for function load.
     */
    if (func)
        func(edev, args);

    qede_load(edev, QEDE_LOAD_NORMAL);

    mutex_lock(&edev->qede_lock);
    qede_config_rx_mode(edev->ndev);
    mutex_unlock(&edev->qede_lock);
}
/* called with rtnl_lock */
static int qede_open(struct net_device *ndev)
{
    struct qede_dev *edev = netdev_priv(ndev);
    int rc;

    netif_carrier_off(ndev);

    edev->ops->common->set_power_state(edev->cdev, PCI_D0);

    rc = qede_load(edev, QEDE_LOAD_NORMAL);
    if (rc)
        return rc;

#ifdef CONFIG_QEDE_VXLAN
    vxlan_get_rx_port(ndev);
#endif
#ifdef CONFIG_QEDE_GENEVE
    geneve_get_rx_port(ndev);
#endif

    return rc;
}

static int qede_close(struct net_device *ndev)
{
    struct qede_dev *edev = netdev_priv(ndev);

    qede_unload(edev, QEDE_UNLOAD_NORMAL);

    return 0;
}
static void qede_link_update(void *dev, struct qed_link_output *link)
{
    struct qede_dev *edev = dev;

    if (!netif_running(edev->ndev)) {
        DP_VERBOSE(edev, NETIF_MSG_LINK, "Interface is not running\n");
        return;
    }

    if (link->link_up) {
        if (!netif_carrier_ok(edev->ndev)) {
            DP_NOTICE(edev, "Link is up\n");
            netif_tx_start_all_queues(edev->ndev);
            netif_carrier_on(edev->ndev);
        }
    } else {
        if (netif_carrier_ok(edev->ndev)) {
            DP_NOTICE(edev, "Link is down\n");
            netif_tx_disable(edev->ndev);
            netif_carrier_off(edev->ndev);
        }
    }
}
static int qede_set_mac_addr(struct net_device *ndev, void *p)
{
    struct qede_dev *edev = netdev_priv(ndev);
    struct sockaddr *addr = p;
    int rc;

    ASSERT_RTNL(); /* @@@TBD To be removed */

    DP_INFO(edev, "Set_mac_addr called\n");

    if (!is_valid_ether_addr(addr->sa_data)) {
        DP_NOTICE(edev, "The MAC address is not valid\n");
        return -EFAULT;
    }

    if (!edev->ops->check_mac(edev->cdev, addr->sa_data)) {
        DP_NOTICE(edev, "qed prevents setting MAC\n");
        return -EINVAL;
    }

    ether_addr_copy(ndev->dev_addr, addr->sa_data);

    if (!netif_running(ndev)) {
        DP_NOTICE(edev, "The device is currently down\n");
        return 0;
    }

    /* Remove the previous primary mac */
    rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
                               edev->primary_mac);
    if (rc)
        return rc;

    /* Add MAC filter according to the new unicast HW MAC address */
    ether_addr_copy(edev->primary_mac, ndev->dev_addr);
    return qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
                                 edev->primary_mac);
}
static int
qede_configure_mcast_filtering(struct net_device *ndev,
                               enum qed_filter_rx_mode_type *accept_flags)
{
    struct qede_dev *edev = netdev_priv(ndev);
    unsigned char *mc_macs, *temp;
    struct netdev_hw_addr *ha;
    int rc = 0, mc_count;
    size_t size;

    size = 64 * ETH_ALEN;

    mc_macs = kzalloc(size, GFP_KERNEL);
    if (!mc_macs) {
        DP_NOTICE(edev,
                  "Failed to allocate memory for multicast MACs\n");
        rc = -ENOMEM;
        goto exit;
    }

    temp = mc_macs;

    /* Remove all previously configured MAC filters */
    rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
                               mc_macs, 1);
    if (rc)
        goto exit;

    netif_addr_lock_bh(ndev);

    mc_count = netdev_mc_count(ndev);
    if (mc_count < 64) {
        netdev_for_each_mc_addr(ha, ndev) {
            ether_addr_copy(temp, ha->addr);
            temp += ETH_ALEN;
        }
    }

    netif_addr_unlock_bh(ndev);

    /* Check for all multicast @@@TBD resource allocation */
    if ((ndev->flags & IFF_ALLMULTI) ||
        (mc_count > 64)) {
        if (*accept_flags == QED_FILTER_RX_MODE_TYPE_REGULAR)
            *accept_flags = QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
    } else {
        /* Add all multicast MAC filters */
        rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
                                   mc_macs, mc_count);
    }

exit:
    kfree(mc_macs);
    return rc;
}
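/* ndo_set_rx_mode callback: it runs in atomic context, so when the interface
 * is up the actual filter reconfiguration is deferred to qede_sp_task() via
 * the QEDE_SP_RX_MODE flag.
 */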
static void qede_set_rx_mode(struct net_device *ndev)
{
    struct qede_dev *edev = netdev_priv(ndev);

    DP_INFO(edev, "qede_set_rx_mode called\n");

    if (edev->state != QEDE_STATE_OPEN) {
        DP_INFO(edev,
                "qede_set_rx_mode called while interface is down\n");
    } else {
        set_bit(QEDE_SP_RX_MODE, &edev->sp_flags);
        schedule_delayed_work(&edev->sp_task, 0);
    }
}
/* Must be called with qede_lock held */
static void qede_config_rx_mode(struct net_device *ndev)
{
    enum qed_filter_rx_mode_type accept_flags = QED_FILTER_TYPE_UCAST;
    struct qede_dev *edev = netdev_priv(ndev);
    struct qed_filter_params rx_mode;
    unsigned char *uc_macs, *temp;
    struct netdev_hw_addr *ha;
    int rc, uc_count;
    size_t size;

    netif_addr_lock_bh(ndev);

    uc_count = netdev_uc_count(ndev);
    size = uc_count * ETH_ALEN;

    uc_macs = kzalloc(size, GFP_ATOMIC);
    if (!uc_macs) {
        DP_NOTICE(edev, "Failed to allocate memory for unicast MACs\n");
        netif_addr_unlock_bh(ndev);
        return;
    }

    temp = uc_macs;
    netdev_for_each_uc_addr(ha, ndev) {
        ether_addr_copy(temp, ha->addr);
        temp += ETH_ALEN;
    }

    netif_addr_unlock_bh(ndev);

    /* Configure the struct for the Rx mode */
    memset(&rx_mode, 0, sizeof(struct qed_filter_params));
    rx_mode.type = QED_FILTER_TYPE_RX_MODE;

    /* Remove all previous unicast secondary macs and multicast macs
     * (configure / leave the primary mac)
     */
    rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_REPLACE,
                               edev->primary_mac);
    if (rc)
        goto out;

    /* Check for promiscuous */
    if ((ndev->flags & IFF_PROMISC) ||
        (uc_count > 15)) { /* @@@TBD resource allocation - 1 */
        accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
    } else {
        /* Add MAC filters according to the unicast secondary macs */
        int i;

        temp = uc_macs;
        for (i = 0; i < uc_count; i++) {
            rc = qede_set_ucast_rx_mac(edev,
                                       QED_FILTER_XCAST_TYPE_ADD,
                                       temp);
            if (rc)
                goto out;

            temp += ETH_ALEN;
        }

        rc = qede_configure_mcast_filtering(ndev, &accept_flags);
        if (rc)
            goto out;
    }

    /* take care of VLAN mode */
    if (ndev->flags & IFF_PROMISC) {
        qede_config_accept_any_vlan(edev, true);
    } else if (!edev->non_configured_vlans) {
        /* It's possible that accept_any_vlan mode is set due to a
         * previous setting of IFF_PROMISC. If vlan credits are
         * sufficient, disable accept_any_vlan.
         */
        qede_config_accept_any_vlan(edev, false);
    }

    rx_mode.filter.accept_flags = accept_flags;
    edev->ops->filter_config(edev->cdev, &rx_mode);