/* QLogic qede NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/version.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <asm/byteorder.h>
#include <asm/param.h>
#include <linux/io.h>
#include <linux/netdev_features.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <net/vxlan.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/pkt_sched.h>
#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/random.h>
#include <net/ip6_checksum.h>
#include <linux/bitops.h>

#include "qede.h"
static const char version[] =
	"QLogic QL4xxx 40G/100G Ethernet Driver qede " DRV_MODULE_VERSION "\n";

MODULE_DESCRIPTION("QLogic 40G/100G Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static uint debug;
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static const struct qed_eth_ops *qed_ops;

#define CHIP_NUM_57980S_40		0x1634
#define CHIP_NUM_57980S_10		0x1635
#define CHIP_NUM_57980S_MF		0x1636
#define CHIP_NUM_57980S_100		0x1644
#define CHIP_NUM_57980S_50		0x1654
#define CHIP_NUM_57980S_25		0x1656

#ifndef PCI_DEVICE_ID_NX2_57980E
#define PCI_DEVICE_ID_57980S_40		CHIP_NUM_57980S_40
#define PCI_DEVICE_ID_57980S_10		CHIP_NUM_57980S_10
#define PCI_DEVICE_ID_57980S_MF		CHIP_NUM_57980S_MF
#define PCI_DEVICE_ID_57980S_100	CHIP_NUM_57980S_100
#define PCI_DEVICE_ID_57980S_50		CHIP_NUM_57980S_50
#define PCI_DEVICE_ID_57980S_25		CHIP_NUM_57980S_25
#endif
static const struct pci_device_id qede_pci_tbl[] = {
	{ PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), 0 },
	{ PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), 0 },
	{ PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), 0 },
	{ PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), 0 },
	{ PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), 0 },
	{ PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), 0 },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, qede_pci_tbl);

static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id);

#define TX_TIMEOUT		(5 * HZ)

static void qede_remove(struct pci_dev *pdev);
static int qede_alloc_rx_buffer(struct qede_dev *edev,
				struct qede_rx_queue *rxq);
static void qede_link_update(void *dev, struct qed_link_output *link);
static struct pci_driver qede_pci_driver = {
	.name = "qede",
	.id_table = qede_pci_tbl,
	.probe = qede_probe,
	.remove = qede_remove,
};

static struct qed_eth_cb_ops qede_ll_ops = {
	{
		.link_update = qede_link_update,
	},
};

static int qede_netdev_event(struct notifier_block *this, unsigned long event,
			     void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct ethtool_drvinfo drvinfo;
	struct qede_dev *edev;

	/* Currently only support name change */
	if (event != NETDEV_CHANGENAME)
		goto done;

	/* Check whether this is a qede device */
	if (!ndev || !ndev->ethtool_ops || !ndev->ethtool_ops->get_drvinfo)
		goto done;

	memset(&drvinfo, 0, sizeof(drvinfo));
	ndev->ethtool_ops->get_drvinfo(ndev, &drvinfo);

	if (strcmp(drvinfo.driver, "qede"))
		goto done;

	edev = netdev_priv(ndev);

	/* Notify qed of the name change */
	if (!edev->ops || !edev->ops->common)
		goto done;

	edev->ops->common->set_id(edev->cdev, edev->ndev->name,
				  "qede");

done:
	return NOTIFY_DONE;
}

static struct notifier_block qede_netdev_notifier = {
	.notifier_call = qede_netdev_event,
};
int __init qede_init(void)
{
	int ret;
	u32 qed_ver;

	pr_notice("qede_init: %s\n", version);

	qed_ver = qed_get_protocol_version(QED_PROTOCOL_ETH);
	if (qed_ver != QEDE_ETH_INTERFACE_VERSION) {
		pr_notice("Version mismatch [%08x != %08x]\n",
			  qed_ver, QEDE_ETH_INTERFACE_VERSION);
		return -EINVAL;
	}

	qed_ops = qed_get_eth_ops(QEDE_ETH_INTERFACE_VERSION);
	if (!qed_ops) {
		pr_notice("Failed to get qed ethtool operations\n");
		return -EINVAL;
	}

	/* Must register notifier before pci ops, since we might miss
	 * interface rename after pci probe and netdev registration.
	 */
	ret = register_netdevice_notifier(&qede_netdev_notifier);
	if (ret) {
		pr_notice("Failed to register netdevice_notifier\n");
		return -EINVAL;
	}

	ret = pci_register_driver(&qede_pci_driver);
	if (ret) {
		pr_notice("Failed to register driver\n");
		unregister_netdevice_notifier(&qede_netdev_notifier);
		return -EINVAL;
	}

	return 0;
}

static void __exit qede_cleanup(void)
{
	pr_notice("qede_cleanup called\n");

	unregister_netdevice_notifier(&qede_netdev_notifier);
	pci_unregister_driver(&qede_pci_driver);
}

module_init(qede_init);
module_exit(qede_cleanup);

/* -------------------------------------------------------------------------
 * START OF FAST-PATH
 * -------------------------------------------------------------------------
 */
/* Unmap the data and free skb */
static int qede_free_tx_pkt(struct qede_dev *edev,
			    struct qede_tx_queue *txq,
			    int *len)
{
	u16 idx = txq->sw_tx_cons & NUM_TX_BDS_MAX;
	struct sk_buff *skb = txq->sw_tx_ring[idx].skb;
	struct eth_tx_1st_bd *first_bd;
	struct eth_tx_bd *tx_data_bd;
	int bds_consumed = 0;
	int nbds;
	bool data_split = txq->sw_tx_ring[idx].flags & QEDE_TSO_SPLIT_BD;
	int i, split_bd_len = 0;

	if (unlikely(!skb)) {
		DP_ERR(edev,
		       "skb is null for txq idx=%d txq->sw_tx_cons=%d txq->sw_tx_prod=%d\n",
		       idx, txq->sw_tx_cons, txq->sw_tx_prod);
		return -1;
	}

	*len = skb->len;

	first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);

	bds_consumed++;

	nbds = first_bd->data.nbds;

	if (data_split) {
		struct eth_tx_bd *split = (struct eth_tx_bd *)
			qed_chain_consume(&txq->tx_pbl);
		split_bd_len = BD_UNMAP_LEN(split);
		bds_consumed++;
	}
	dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
		       BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);

	/* Unmap the data of the skb frags */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) {
		tx_data_bd = (struct eth_tx_bd *)
			qed_chain_consume(&txq->tx_pbl);
		dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
	}

	while (bds_consumed++ < nbds)
		qed_chain_consume(&txq->tx_pbl);

	/* Free skb */
	dev_kfree_skb_any(skb);
	txq->sw_tx_ring[idx].skb = NULL;
	txq->sw_tx_ring[idx].flags = 0;

	return 0;
}

/* Unmap the data and free skb when mapping failed during start_xmit */
static void qede_free_failed_tx_pkt(struct qede_dev *edev,
				    struct qede_tx_queue *txq,
				    struct eth_tx_1st_bd *first_bd,
				    int nbd,
				    bool data_split)
{
	u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
	struct sk_buff *skb = txq->sw_tx_ring[idx].skb;
	struct eth_tx_bd *tx_data_bd;
	int i, split_bd_len = 0;

	/* Return prod to its position before this skb was handled */
	qed_chain_set_prod(&txq->tx_pbl,
			   le16_to_cpu(txq->tx_db.data.bd_prod),
			   first_bd);

	first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl);

	if (data_split) {
		struct eth_tx_bd *split = (struct eth_tx_bd *)
			qed_chain_produce(&txq->tx_pbl);
		split_bd_len = BD_UNMAP_LEN(split);
		nbd--;
	}

	dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
		       BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);

	/* Unmap the data of the skb frags */
	for (i = 0; i < nbd; i++) {
		tx_data_bd = (struct eth_tx_bd *)
			qed_chain_produce(&txq->tx_pbl);
		if (tx_data_bd->nbytes)
			dma_unmap_page(&edev->pdev->dev,
				       BD_UNMAP_ADDR(tx_data_bd),
				       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
	}

	/* Return again prod to its position before this skb was handled */
	qed_chain_set_prod(&txq->tx_pbl,
			   le16_to_cpu(txq->tx_db.data.bd_prod),
			   first_bd);

	/* Free skb */
	dev_kfree_skb_any(skb);
	txq->sw_tx_ring[idx].skb = NULL;
	txq->sw_tx_ring[idx].flags = 0;
}
static u32 qede_xmit_type(struct qede_dev *edev,
			  struct sk_buff *skb,
			  int *ipv6_ext)
{
	u32 rc = XMIT_L4_CSUM;
	__be16 l3_proto;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return XMIT_PLAIN;

	l3_proto = vlan_get_protocol(skb);
	if (l3_proto == htons(ETH_P_IPV6) &&
	    (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
		*ipv6_ext = 1;

	if (skb_is_gso(skb))
		rc |= XMIT_LSO;

	return rc;
}
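
/* Summary note (added commentary, hedged): the value built here is a bitmask -
 * XMIT_L4_CSUM requests L4 checksum offload for CHECKSUM_PARTIAL skbs and
 * XMIT_LSO is added for GSO skbs - while *ipv6_ext marks IPv6 packets whose
 * header chain carries extension headers so that the transmit path can
 * describe them to the firmware via qede_set_params_for_ipv6_ext().
 */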
static void qede_set_params_for_ipv6_ext(struct sk_buff *skb,
					 struct eth_tx_2nd_bd *second_bd,
					 struct eth_tx_3rd_bd *third_bd)
{
	u8 l4_proto;
	u16 bd2_bits = 0, bd2_bits2 = 0;

	bd2_bits2 |= (1 << ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT);

	bd2_bits |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) &
		     ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK)
		    << ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT;

	bd2_bits2 |= (ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH <<
		      ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT);

	if (vlan_get_protocol(skb) == htons(ETH_P_IPV6))
		l4_proto = ipv6_hdr(skb)->nexthdr;
	else
		l4_proto = ip_hdr(skb)->protocol;

	if (l4_proto == IPPROTO_UDP)
		bd2_bits2 |= 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT;

	if (third_bd)
		third_bd->data.bitfields |=
			((tcp_hdrlen(skb) / 4) &
			 ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK) <<
			ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT;

	second_bd->data.bitfields = cpu_to_le16(bd2_bits);
	second_bd->data.bitfields2 = cpu_to_le16(bd2_bits2);
}
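
/* Worked example (illustrative only): for an untagged IPv6+TCP frame whose
 * transport header starts 54 bytes after skb->data, the L4 header start
 * offset written into the 2nd BD is 54 >> 1 = 27 words, and a 20-byte TCP
 * header makes the 3rd BD carry tcp_hdrlen(skb) / 4 = 5 double-words.
 */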
static int map_frag_to_bd(struct qede_dev *edev,
			  skb_frag_t *frag,
			  struct eth_tx_bd *bd)
{
	dma_addr_t mapping;

	/* Map skb non-linear frag data for DMA */
	mapping = skb_frag_dma_map(&edev->pdev->dev, frag, 0,
				   skb_frag_size(frag),
				   DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
		DP_NOTICE(edev, "Unable to map frag - dropping packet\n");
		return -ENOMEM;
	}

	/* Setup the data pointer of the frag data */
	BD_SET_UNMAP_ADDR_LEN(bd, mapping, skb_frag_size(frag));

	return 0;
}
/* Main transmit function */
static netdev_tx_t qede_start_xmit(struct sk_buff *skb,
				   struct net_device *ndev)
{
	struct qede_dev *edev = netdev_priv(ndev);
	struct netdev_queue *netdev_txq;
	struct qede_tx_queue *txq;
	struct eth_tx_1st_bd *first_bd;
	struct eth_tx_2nd_bd *second_bd = NULL;
	struct eth_tx_3rd_bd *third_bd = NULL;
	struct eth_tx_bd *tx_data_bd = NULL;
	u16 txq_index, idx, hlen;
	u8 nbd = 0;
	u32 xmit_type;
	dma_addr_t mapping;
	bool data_split = false;
	int rc, frag_idx = 0, ipv6_ext = 0;

	/* Get tx-queue context and netdev index */
	txq_index = skb_get_queue_mapping(skb);
	WARN_ON(txq_index >= QEDE_TSS_CNT(edev));
	txq = QEDE_TX_QUEUE(edev, txq_index);
	netdev_txq = netdev_get_tx_queue(ndev, txq_index);

	/* Current code doesn't support SKB linearization, since the max number
	 * of skb frags can be passed in the FW HSI.
	 */
	BUILD_BUG_ON(MAX_SKB_FRAGS > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET);

	WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) <
		(MAX_SKB_FRAGS + 1));

	xmit_type = qede_xmit_type(edev, skb, &ipv6_ext);

	/* Fill the entry in the SW ring and the BDs in the FW ring */
	idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
	txq->sw_tx_ring[idx].skb = skb;
	first_bd = (struct eth_tx_1st_bd *)
		   qed_chain_produce(&txq->tx_pbl);
	memset(first_bd, 0, sizeof(*first_bd));
	first_bd->data.bd_flags.bitfields =
		1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;

	/* Map skb linear data for DMA and set in the first BD */
	mapping = dma_map_single(&edev->pdev->dev, skb->data,
				 skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
		DP_NOTICE(edev, "SKB mapping failed\n");
		qede_free_failed_tx_pkt(edev, txq, first_bd, 0, false);
		return NETDEV_TX_OK;
	}
	nbd++;
	BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb));

	/* In case there is IPv6 with extension headers or LSO we need 2nd and
	 * 3rd BDs.
	 */
	if (unlikely((xmit_type & XMIT_LSO) | ipv6_ext)) {
		second_bd = (struct eth_tx_2nd_bd *)
			qed_chain_produce(&txq->tx_pbl);
		memset(second_bd, 0, sizeof(*second_bd));
		nbd++;

		third_bd = (struct eth_tx_3rd_bd *)
			qed_chain_produce(&txq->tx_pbl);
		memset(third_bd, 0, sizeof(*third_bd));
		nbd++;

		/* We need to fill in additional data in second_bd... */
		tx_data_bd = (struct eth_tx_bd *)second_bd;
	}

	if (skb_vlan_tag_present(skb)) {
		first_bd->data.vlan = cpu_to_le16(skb_vlan_tag_get(skb));
		first_bd->data.bd_flags.bitfields |=
			1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
	}

	/* Fill the parsing flags & params according to the requested offload */
	if (xmit_type & XMIT_L4_CSUM) {
		/* We don't re-calculate IP checksum as it is already done by
		 * the upper stack
		 */
		first_bd->data.bd_flags.bitfields |=
			1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;

		/* If the packet is IPv6 with extension header, indicate that
		 * to FW and pass few params, since the device cracker doesn't
		 * support parsing IPv6 with extension header/s.
		 */
		if (unlikely(ipv6_ext))
			qede_set_params_for_ipv6_ext(skb, second_bd, third_bd);
	}

	if (xmit_type & XMIT_LSO) {
		first_bd->data.bd_flags.bitfields |=
			(1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT);
		third_bd->data.lso_mss =
			cpu_to_le16(skb_shinfo(skb)->gso_size);

		first_bd->data.bd_flags.bitfields |=
			1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
		hlen = skb_transport_header(skb) +
		       tcp_hdrlen(skb) - skb->data;

		/* @@@TBD - if will not be removed need to check */
		third_bd->data.bitfields |=
			(1 << ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);

		/* Make life easier for FW guys who can't deal with header and
		 * data on same BD. If we need to split, use the second bd...
		 */
		if (unlikely(skb_headlen(skb) > hlen)) {
			DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
				   "TSO split header size is %d (%x:%x)\n",
				   first_bd->nbytes, first_bd->addr.hi,
				   first_bd->addr.lo);

			mapping = HILO_U64(le32_to_cpu(first_bd->addr.hi),
					   le32_to_cpu(first_bd->addr.lo)) +
					   hlen;

			BD_SET_UNMAP_ADDR_LEN(tx_data_bd, mapping,
					      le16_to_cpu(first_bd->nbytes) -
					      hlen);

			/* this marks the BD as one that has no
			 * individual mapping
			 */
			txq->sw_tx_ring[idx].flags |= QEDE_TSO_SPLIT_BD;

			first_bd->nbytes = cpu_to_le16(hlen);

			tx_data_bd = (struct eth_tx_bd *)third_bd;
			data_split = true;
		}
	}

	/* Handle fragmented skb */
	/* special handle for frags inside 2nd and 3rd bds.. */
	while (tx_data_bd && frag_idx < skb_shinfo(skb)->nr_frags) {
		rc = map_frag_to_bd(edev,
				    &skb_shinfo(skb)->frags[frag_idx],
				    tx_data_bd);
		if (rc) {
			qede_free_failed_tx_pkt(edev, txq, first_bd, nbd,
						data_split);
			return NETDEV_TX_OK;
		}

		if (tx_data_bd == (struct eth_tx_bd *)second_bd)
			tx_data_bd = (struct eth_tx_bd *)third_bd;
		else
			tx_data_bd = NULL;

		frag_idx++;
	}

	/* map last frags into 4th, 5th .... */
	for (; frag_idx < skb_shinfo(skb)->nr_frags; frag_idx++, nbd++) {
		tx_data_bd = (struct eth_tx_bd *)
			     qed_chain_produce(&txq->tx_pbl);

		memset(tx_data_bd, 0, sizeof(*tx_data_bd));

		rc = map_frag_to_bd(edev,
				    &skb_shinfo(skb)->frags[frag_idx],
				    tx_data_bd);
		if (rc) {
			qede_free_failed_tx_pkt(edev, txq, first_bd, nbd,
						data_split);
			return NETDEV_TX_OK;
		}
	}

	/* update the first BD with the actual num BDs */
	first_bd->data.nbds = nbd;

	netdev_tx_sent_queue(netdev_txq, skb->len);

	skb_tx_timestamp(skb);

	/* Advance packet producer only before sending the packet since mapping
	 * of pages may fail.
	 */
	txq->sw_tx_prod++;

	/* 'next page' entries are counted in the producer value */
	txq->tx_db.data.bd_prod =
		cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));

	/* wmb makes sure that the BDs data is updated before updating the
	 * producer, otherwise FW may read old data from the BDs.
	 */
	wmb();
	barrier();
	writel(txq->tx_db.raw, txq->doorbell_addr);

	/* mmiowb is needed to synchronize doorbell writes from more than one
	 * processor. It guarantees that the write arrives to the device before
	 * the queue lock is released and another start_xmit is called (possibly
	 * on another CPU). Without this barrier, the next doorbell can bypass
	 * this doorbell. This is applicable to IA64/Altix systems.
	 */
	mmiowb();

	if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl)
		      < (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(netdev_txq);
		DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
			   "Stop queue was called\n");
		/* paired memory barrier is in qede_tx_int(), we have to keep
		 * ordering of set_bit() in netif_tx_stop_queue() and read of
		 * fp->bd_tx_cons
		 */
		smp_mb();

		if (qed_chain_get_elem_left(&txq->tx_pbl)
		     >= (MAX_SKB_FRAGS + 1) &&
		    (edev->state == QEDE_STATE_OPEN)) {
			netif_tx_wake_queue(netdev_txq);
			DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
				   "Wake queue was called\n");
		}
	}

	return NETDEV_TX_OK;
}
static int qede_txq_has_work(struct qede_tx_queue *txq)
{
	u16 hw_bd_cons;

	/* Tell compiler that consumer and producer can change */
	barrier();
	hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
	if (qed_chain_get_cons_idx(&txq->tx_pbl) == hw_bd_cons + 1)
		return 0;

	return hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl);
}

static int qede_tx_int(struct qede_dev *edev,
		       struct qede_tx_queue *txq)
{
	struct netdev_queue *netdev_txq;
	u16 hw_bd_cons;
	unsigned int pkts_compl = 0, bytes_compl = 0;
	int rc;

	netdev_txq = netdev_get_tx_queue(edev->ndev, txq->index);

	hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
	barrier();

	while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
		int len = 0;

		rc = qede_free_tx_pkt(edev, txq, &len);
		if (rc) {
			DP_NOTICE(edev, "hw_bd_cons = %d, chain_cons=%d\n",
				  hw_bd_cons,
				  qed_chain_get_cons_idx(&txq->tx_pbl));
			break;
		}

		bytes_compl += len;
		pkts_compl++;
		txq->sw_tx_cons++;
	}

	netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl);

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 * On the other hand we need an rmb() here to ensure the proper
	 * ordering of bit testing in the following
	 * netif_tx_queue_stopped(txq) call.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(netdev_txq))) {
		/* Taking tx_lock is needed to prevent reenabling the queue
		 * while it's empty. This could have happened if rx_action() gets
		 * suspended in qede_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (qede_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */
		__netif_tx_lock(netdev_txq, smp_processor_id());

		if ((netif_tx_queue_stopped(netdev_txq)) &&
		    (edev->state == QEDE_STATE_OPEN) &&
		    (qed_chain_get_elem_left(&txq->tx_pbl)
		      >= (MAX_SKB_FRAGS + 1))) {
			netif_tx_wake_queue(netdev_txq);
			DP_VERBOSE(edev, NETIF_MSG_TX_DONE,
				   "Wake queue was called\n");
		}

		__netif_tx_unlock(netdev_txq);
	}

	return 0;
}
static bool qede_has_rx_work(struct qede_rx_queue *rxq)
{
	u16 hw_comp_cons, sw_comp_cons;

	/* Tell compiler that status block fields can change */
	barrier();

	hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
	sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);

	return hw_comp_cons != sw_comp_cons;
}

static bool qede_has_tx_work(struct qede_fastpath *fp)
{
	u8 tc;

	for (tc = 0; tc < fp->edev->num_tc; tc++)
		if (qede_txq_has_work(&fp->txqs[tc]))
			return true;

	return false;
}
/* This function copies the Rx buffer from the CONS position to the PROD
 * position, since we failed to allocate a new Rx buffer.
 */
static void qede_reuse_rx_data(struct qede_rx_queue *rxq)
{
	struct eth_rx_bd *rx_bd_cons = qed_chain_consume(&rxq->rx_bd_ring);
	struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
	struct sw_rx_data *sw_rx_data_cons =
		&rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
	struct sw_rx_data *sw_rx_data_prod =
		&rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];

	dma_unmap_addr_set(sw_rx_data_prod, mapping,
			   dma_unmap_addr(sw_rx_data_cons, mapping));

	sw_rx_data_prod->data = sw_rx_data_cons->data;
	memcpy(rx_bd_prod, rx_bd_cons, sizeof(struct eth_rx_bd));

	rxq->sw_rx_cons++;
	rxq->sw_rx_prod++;
}
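
/* Usage note (descriptive): qede_rx_int() falls back to this helper whenever
 * qede_alloc_rx_buffer() fails, so the just-consumed buffer is immediately
 * re-posted at the PROD position; the BD ring therefore stays full and only
 * the received frame is dropped.
 */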
static inline void qede_update_rx_prod(struct qede_dev *edev,
				       struct qede_rx_queue *rxq)
{
	u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring);
	u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring);
	struct eth_rx_prod_data rx_prods = {0};

	/* Update producers */
	rx_prods.bd_prod = cpu_to_le16(bd_prod);
	rx_prods.cqe_prod = cpu_to_le16(cqe_prod);

	/* Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 */
	wmb();

	internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
			(u32 *)&rx_prods);

	/* mmiowb is needed to synchronize doorbell writes from more than one
	 * processor. It guarantees that the write arrives to the device before
	 * the napi lock is released and another qede_poll is called (possibly
	 * on another CPU). Without this barrier, the next doorbell can bypass
	 * this doorbell. This is applicable to IA64/Altix systems.
	 */
	mmiowb();
}

static u32 qede_get_rxhash(struct qede_dev *edev,
			   u8 bitfields,
			   __le32 rss_hash,
			   enum pkt_hash_types *rxhash_type)
{
	enum rss_hash_type htype;

	htype = GET_FIELD(bitfields, ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);

	if ((edev->ndev->features & NETIF_F_RXHASH) && htype) {
		*rxhash_type = ((htype == RSS_HASH_TYPE_IPV4) ||
				(htype == RSS_HASH_TYPE_IPV6)) ?
				PKT_HASH_TYPE_L3 : PKT_HASH_TYPE_L4;
		return le32_to_cpu(rss_hash);
	}

	*rxhash_type = PKT_HASH_TYPE_NONE;
	return 0;
}

static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag)
{
	skb_checksum_none_assert(skb);

	if (csum_flag & QEDE_CSUM_UNNECESSARY)
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}

static inline void qede_skb_receive(struct qede_dev *edev,
				    struct qede_fastpath *fp,
				    struct sk_buff *skb,
				    u16 vlan_tag)
{
	if (vlan_tag)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       vlan_tag);

	napi_gro_receive(&fp->napi, skb);
}
static u8 qede_check_csum(u16 flag)
{
	u16 csum_flag = 0;
	u8 csum = 0;

	if ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
	     PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag) {
		csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
			     PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
		csum = QEDE_CSUM_UNNECESSARY;
	}

	csum_flag |= PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
		     PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;

	if (csum_flag & flag)
		return QEDE_CSUM_ERROR;

	return csum;
}
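
/* Decode note (added commentary): QEDE_CSUM_UNNECESSARY is reported only when
 * the hardware actually calculated the L4 checksum and neither the L4
 * checksum error bit nor the IP header error bit is set in the parsing
 * flags; any set error bit yields QEDE_CSUM_ERROR, and a packet without a
 * calculated L4 checksum is simply left unverified.
 */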
static int qede_rx_int(struct qede_fastpath *fp, int budget)
{
	struct qede_dev *edev = fp->edev;
	struct qede_rx_queue *rxq = fp->rxq;

	u16 hw_comp_cons, sw_comp_cons, sw_rx_index, parse_flag;
	int rx_pkt = 0;
	u8 csum_flag;

	hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
	sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);

	/* Memory barrier to prevent the CPU from doing speculative reads of CQE
	 * / BD in the while-loop before reading hw_comp_cons. If the CQE is
	 * read before it is written by FW, then FW writes CQE and SB, and then
	 * the CPU reads the hw_comp_cons, it will use an old CQE.
	 */
	rmb();

	/* Loop to complete all indicated BDs */
	while (sw_comp_cons != hw_comp_cons) {
		struct eth_fast_path_rx_reg_cqe *fp_cqe;
		enum pkt_hash_types rxhash_type;
		enum eth_rx_cqe_type cqe_type;
		struct sw_rx_data *sw_rx_data;
		union eth_rx_cqe *cqe;
		struct sk_buff *skb;
		u16 len, pad;
		u32 rx_hash;
		u8 *data;

		/* Get the CQE from the completion ring */
		cqe = (union eth_rx_cqe *)
			qed_chain_consume(&rxq->rx_comp_ring);
		cqe_type = cqe->fast_path_regular.type;

		if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) {
			edev->ops->eth_cqe_completion(
					edev->cdev, fp->rss_id,
					(struct eth_slow_path_rx_cqe *)cqe);
			goto next_cqe;
		}

		/* Get the data from the SW ring */
		sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
		sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
		data = sw_rx_data->data;

		fp_cqe = &cqe->fast_path_regular;
		len = le16_to_cpu(fp_cqe->pkt_len);
		pad = fp_cqe->placement_offset;

		/* For every Rx BD consumed, we allocate a new BD so the BD ring
		 * is always with a fixed size. If allocation fails, we take the
		 * consumed BD and return it to the ring in the PROD position.
		 * The packet that was received on that BD will be dropped (and
		 * not passed to the upper stack).
		 */
		if (likely(qede_alloc_rx_buffer(edev, rxq) == 0)) {
			dma_unmap_single(&edev->pdev->dev,
					 dma_unmap_addr(sw_rx_data, mapping),
					 rxq->rx_buf_size, DMA_FROM_DEVICE);

			/* If this is an error packet then drop it */
			parse_flag =
			  le16_to_cpu(cqe->fast_path_regular.pars_flags.flags);
			csum_flag = qede_check_csum(parse_flag);
			if (csum_flag == QEDE_CSUM_ERROR) {
				DP_NOTICE(edev,
					  "CQE in CONS = %u has error, flags = %x, dropping incoming packet\n",
					  sw_comp_cons, parse_flag);
				kfree(data);
				goto next_rx;
			}

			skb = build_skb(data, 0);

			if (unlikely(!skb)) {
				DP_NOTICE(edev,
					  "Build_skb failed, dropping incoming packet\n");
				kfree(data);
				rxq->rx_alloc_errors++;
				goto next_rx;
			}

			skb_reserve(skb, pad);

		} else {
			DP_NOTICE(edev,
				  "New buffer allocation failed, dropping incoming packet and reusing its buffer\n");
			qede_reuse_rx_data(rxq);
			rxq->rx_alloc_errors++;
			goto next_cqe;
		}

		sw_rx_data->data = NULL;

		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, edev->ndev);

		rx_hash = qede_get_rxhash(edev, fp_cqe->bitfields,
					  fp_cqe->rss_hash,
					  &rxhash_type);

		skb_set_hash(skb, rx_hash, rxhash_type);

		qede_set_skb_csum(skb, csum_flag);

		skb_record_rx_queue(skb, fp->rss_id);

		qede_skb_receive(edev, fp, skb, le16_to_cpu(fp_cqe->vlan_tag));

		qed_chain_consume(&rxq->rx_bd_ring);

next_rx:
		rxq->sw_rx_cons++;
		rx_pkt++;

next_cqe: /* don't consume bd rx buffer */
		qed_chain_recycle_consumed(&rxq->rx_comp_ring);
		sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
		/* CR TPA - revisit how to handle budget in TPA perhaps
		 * increase on "end"
		 */
		if (rx_pkt == budget)
			break;
	} /* repeat while sw_comp_cons != hw_comp_cons... */

	/* Update producers */
	qede_update_rx_prod(edev, rxq);

	return rx_pkt;
}
static int qede_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;
	struct qede_fastpath *fp = container_of(napi, struct qede_fastpath,
						 napi);
	struct qede_dev *edev = fp->edev;

	while (1) {
		u8 tc;

		for (tc = 0; tc < edev->num_tc; tc++)
			if (qede_txq_has_work(&fp->txqs[tc]))
				qede_tx_int(edev, &fp->txqs[tc]);

		if (qede_has_rx_work(fp->rxq)) {
			work_done += qede_rx_int(fp, budget - work_done);

			/* must not complete if we consumed full budget */
			if (work_done >= budget)
				break;
		}

		/* Fall out from the NAPI loop if needed */
		if (!(qede_has_rx_work(fp->rxq) || qede_has_tx_work(fp))) {
			qed_sb_update_sb_idx(fp->sb_info);
			/* *_has_*_work() reads the status block,
			 * thus we need to ensure that status block indices
			 * have been actually read (qed_sb_update_sb_idx)
			 * prior to this check (*_has_*_work) so that
			 * we won't write the "newer" value of the status block
			 * to HW (if there was a DMA right after
			 * qede_has_rx_work and if there is no rmb, the memory
			 * reading (qed_sb_update_sb_idx) may be postponed
			 * to right before *_ack_sb). In this case there
			 * will never be another interrupt until there is
			 * another update of the status block, while there
			 * is still unhandled work.
			 */
			rmb();

			if (!(qede_has_rx_work(fp->rxq) ||
			      qede_has_tx_work(fp))) {
				napi_complete(napi);
				/* Update and reenable interrupts */
				qed_sb_ack(fp->sb_info, IGU_INT_ENABLE,
					   1 /*update*/);
				break;
			}
		}
	}

	return work_done;
}

static irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie)
{
	struct qede_fastpath *fp = fp_cookie;

	qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0 /*do not update*/);

	napi_schedule_irqoff(&fp->napi);
	return IRQ_HANDLED;
}
/* -------------------------------------------------------------------------
 * -------------------------------------------------------------------------
 */

static int qede_open(struct net_device *ndev);
static int qede_close(struct net_device *ndev);
static int qede_set_mac_addr(struct net_device *ndev, void *p);
static void qede_set_rx_mode(struct net_device *ndev);
static void qede_config_rx_mode(struct net_device *ndev);

static int qede_set_ucast_rx_mac(struct qede_dev *edev,
				 enum qed_filter_xcast_params_type opcode,
				 unsigned char mac[ETH_ALEN])
{
	struct qed_filter_params filter_cmd;

	memset(&filter_cmd, 0, sizeof(filter_cmd));
	filter_cmd.type = QED_FILTER_TYPE_UCAST;
	filter_cmd.filter.ucast.type = opcode;
	filter_cmd.filter.ucast.mac_valid = 1;
	ether_addr_copy(filter_cmd.filter.ucast.mac, mac);

	return edev->ops->filter_config(edev->cdev, &filter_cmd);
}
void qede_fill_by_demand_stats(struct qede_dev *edev)
{
	struct qed_eth_stats stats;

	edev->ops->get_vport_stats(edev->cdev, &stats);
	edev->stats.no_buff_discards = stats.no_buff_discards;
	edev->stats.rx_ucast_bytes = stats.rx_ucast_bytes;
	edev->stats.rx_mcast_bytes = stats.rx_mcast_bytes;
	edev->stats.rx_bcast_bytes = stats.rx_bcast_bytes;
	edev->stats.rx_ucast_pkts = stats.rx_ucast_pkts;
	edev->stats.rx_mcast_pkts = stats.rx_mcast_pkts;
	edev->stats.rx_bcast_pkts = stats.rx_bcast_pkts;
	edev->stats.mftag_filter_discards = stats.mftag_filter_discards;
	edev->stats.mac_filter_discards = stats.mac_filter_discards;

	edev->stats.tx_ucast_bytes = stats.tx_ucast_bytes;
	edev->stats.tx_mcast_bytes = stats.tx_mcast_bytes;
	edev->stats.tx_bcast_bytes = stats.tx_bcast_bytes;
	edev->stats.tx_ucast_pkts = stats.tx_ucast_pkts;
	edev->stats.tx_mcast_pkts = stats.tx_mcast_pkts;
	edev->stats.tx_bcast_pkts = stats.tx_bcast_pkts;
	edev->stats.tx_err_drop_pkts = stats.tx_err_drop_pkts;
	edev->stats.coalesced_pkts = stats.tpa_coalesced_pkts;
	edev->stats.coalesced_events = stats.tpa_coalesced_events;
	edev->stats.coalesced_aborts_num = stats.tpa_aborts_num;
	edev->stats.non_coalesced_pkts = stats.tpa_not_coalesced_pkts;
	edev->stats.coalesced_bytes = stats.tpa_coalesced_bytes;

	edev->stats.rx_64_byte_packets = stats.rx_64_byte_packets;
	edev->stats.rx_127_byte_packets = stats.rx_127_byte_packets;
	edev->stats.rx_255_byte_packets = stats.rx_255_byte_packets;
	edev->stats.rx_511_byte_packets = stats.rx_511_byte_packets;
	edev->stats.rx_1023_byte_packets = stats.rx_1023_byte_packets;
	edev->stats.rx_1518_byte_packets = stats.rx_1518_byte_packets;
	edev->stats.rx_1522_byte_packets = stats.rx_1522_byte_packets;
	edev->stats.rx_2047_byte_packets = stats.rx_2047_byte_packets;
	edev->stats.rx_4095_byte_packets = stats.rx_4095_byte_packets;
	edev->stats.rx_9216_byte_packets = stats.rx_9216_byte_packets;
	edev->stats.rx_16383_byte_packets = stats.rx_16383_byte_packets;
	edev->stats.rx_crc_errors = stats.rx_crc_errors;
	edev->stats.rx_mac_crtl_frames = stats.rx_mac_crtl_frames;
	edev->stats.rx_pause_frames = stats.rx_pause_frames;
	edev->stats.rx_pfc_frames = stats.rx_pfc_frames;
	edev->stats.rx_align_errors = stats.rx_align_errors;
	edev->stats.rx_carrier_errors = stats.rx_carrier_errors;
	edev->stats.rx_oversize_packets = stats.rx_oversize_packets;
	edev->stats.rx_jabbers = stats.rx_jabbers;
	edev->stats.rx_undersize_packets = stats.rx_undersize_packets;
	edev->stats.rx_fragments = stats.rx_fragments;
	edev->stats.tx_64_byte_packets = stats.tx_64_byte_packets;
	edev->stats.tx_65_to_127_byte_packets = stats.tx_65_to_127_byte_packets;
	edev->stats.tx_128_to_255_byte_packets =
				stats.tx_128_to_255_byte_packets;
	edev->stats.tx_256_to_511_byte_packets =
				stats.tx_256_to_511_byte_packets;
	edev->stats.tx_512_to_1023_byte_packets =
				stats.tx_512_to_1023_byte_packets;
	edev->stats.tx_1024_to_1518_byte_packets =
				stats.tx_1024_to_1518_byte_packets;
	edev->stats.tx_1519_to_2047_byte_packets =
				stats.tx_1519_to_2047_byte_packets;
	edev->stats.tx_2048_to_4095_byte_packets =
				stats.tx_2048_to_4095_byte_packets;
	edev->stats.tx_4096_to_9216_byte_packets =
				stats.tx_4096_to_9216_byte_packets;
	edev->stats.tx_9217_to_16383_byte_packets =
				stats.tx_9217_to_16383_byte_packets;
	edev->stats.tx_pause_frames = stats.tx_pause_frames;
	edev->stats.tx_pfc_frames = stats.tx_pfc_frames;
	edev->stats.tx_lpi_entry_count = stats.tx_lpi_entry_count;
	edev->stats.tx_total_collisions = stats.tx_total_collisions;
	edev->stats.brb_truncates = stats.brb_truncates;
	edev->stats.brb_discards = stats.brb_discards;
	edev->stats.tx_mac_ctrl_frames = stats.tx_mac_ctrl_frames;
}
static struct rtnl_link_stats64 *qede_get_stats64(
			    struct net_device *dev,
			    struct rtnl_link_stats64 *stats)
{
	struct qede_dev *edev = netdev_priv(dev);

	qede_fill_by_demand_stats(edev);

	stats->rx_packets = edev->stats.rx_ucast_pkts +
			    edev->stats.rx_mcast_pkts +
			    edev->stats.rx_bcast_pkts;
	stats->tx_packets = edev->stats.tx_ucast_pkts +
			    edev->stats.tx_mcast_pkts +
			    edev->stats.tx_bcast_pkts;

	stats->rx_bytes = edev->stats.rx_ucast_bytes +
			  edev->stats.rx_mcast_bytes +
			  edev->stats.rx_bcast_bytes;

	stats->tx_bytes = edev->stats.tx_ucast_bytes +
			  edev->stats.tx_mcast_bytes +
			  edev->stats.tx_bcast_bytes;

	stats->tx_errors = edev->stats.tx_err_drop_pkts;
	stats->multicast = edev->stats.rx_mcast_pkts +
			   edev->stats.rx_bcast_pkts;

	stats->rx_fifo_errors = edev->stats.no_buff_discards;

	stats->collisions = edev->stats.tx_total_collisions;
	stats->rx_crc_errors = edev->stats.rx_crc_errors;
	stats->rx_frame_errors = edev->stats.rx_align_errors;

	return stats;
}

static const struct net_device_ops qede_netdev_ops = {
	.ndo_open = qede_open,
	.ndo_stop = qede_close,
	.ndo_start_xmit = qede_start_xmit,
	.ndo_set_rx_mode = qede_set_rx_mode,
	.ndo_set_mac_address = qede_set_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = qede_change_mtu,
	.ndo_get_stats64 = qede_get_stats64,
};
/* -------------------------------------------------------------------------
 * START OF PROBE / REMOVE
 * -------------------------------------------------------------------------
 */

static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
					    struct pci_dev *pdev,
					    struct qed_dev_eth_info *info,
					    u32 dp_module,
					    u8 dp_level)
{
	struct net_device *ndev;
	struct qede_dev *edev;

	ndev = alloc_etherdev_mqs(sizeof(*edev),
				  info->num_queues,
				  info->num_queues);
	if (!ndev) {
		pr_err("etherdev allocation failed\n");
		return NULL;
	}

	edev = netdev_priv(ndev);
	edev->ndev = ndev;
	edev->cdev = cdev;
	edev->pdev = pdev;
	edev->dp_module = dp_module;
	edev->dp_level = dp_level;
	edev->ops = qed_ops;
	edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
	edev->q_num_tx_buffers = NUM_TX_BDS_DEF;

	DP_INFO(edev, "Allocated netdev with 64 tx queues and 64 rx queues\n");

	SET_NETDEV_DEV(ndev, &pdev->dev);

	memset(&edev->stats, 0, sizeof(edev->stats));
	memcpy(&edev->dev_info, info, sizeof(*info));

	edev->num_tc = edev->dev_info.num_tc;

	return edev;
}
static void qede_init_ndev(struct qede_dev *edev)
{
	struct net_device *ndev = edev->ndev;
	struct pci_dev *pdev = edev->pdev;
	u32 hw_features;

	pci_set_drvdata(pdev, ndev);

	ndev->mem_start = edev->dev_info.common.pci_mem_start;
	ndev->base_addr = ndev->mem_start;
	ndev->mem_end = edev->dev_info.common.pci_mem_end;
	ndev->irq = edev->dev_info.common.pci_irq;

	ndev->watchdog_timeo = TX_TIMEOUT;

	ndev->netdev_ops = &qede_netdev_ops;

	qede_set_ethtool_ops(ndev);

	/* user-changeable features */
	hw_features = NETIF_F_GRO | NETIF_F_SG |
		      NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		      NETIF_F_TSO | NETIF_F_TSO6;

	ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
			      NETIF_F_HIGHDMA;
	ndev->features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
			 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HIGHDMA |
			 NETIF_F_HW_VLAN_CTAG_TX;

	ndev->hw_features = hw_features;

	/* Set network device HW mac */
	ether_addr_copy(edev->ndev->dev_addr, edev->dev_info.common.hw_mac);
}
/* This function converts from 32b param to two params of level and module
 * Input 32b decoding:
 * b31 - enable all NOTICE prints. NOTICE prints are for deviation from the
 * 'happy' flow, e.g. memory allocation failed.
 * b30 - enable all INFO prints. INFO prints are for major steps in the flow
 * and provide important parameters.
 * b29-b0 - per-module bitmap, where each bit enables VERBOSE prints of that
 * module. VERBOSE prints are for tracking the specific flow in low level.
 *
 * Notice that the level should be that of the lowest required logs.
 */
void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level)
{
	*p_dp_level = QED_LEVEL_NOTICE;
	*p_dp_module = 0;

	if (debug & QED_LOG_VERBOSE_MASK) {
		*p_dp_level = QED_LEVEL_VERBOSE;
		*p_dp_module = (debug & 0x3FFFFFFF);
	} else if (debug & QED_LOG_INFO_MASK) {
		*p_dp_level = QED_LEVEL_INFO;
	} else if (debug & QED_LOG_NOTICE_MASK) {
		*p_dp_level = QED_LEVEL_NOTICE;
	}
}
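
/* Worked example (illustrative; the exact QED_LOG_*_MASK values live in the
 * qed headers): loading the module with debug=0x3 selects QED_LEVEL_VERBOSE
 * and enables the two lowest-numbered module bits (*p_dp_module = 0x3),
 * debug=0x40000000 selects QED_LEVEL_INFO, and debug=0x80000000 selects
 * QED_LEVEL_NOTICE.
 */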
static void qede_free_fp_array(struct qede_dev *edev)
{
	if (edev->fp_array) {
		struct qede_fastpath *fp;
		int i;

		for_each_rss(i) {
			fp = &edev->fp_array[i];

			kfree(fp->sb_info);
			kfree(fp->rxq);
			kfree(fp->txqs);
		}
		kfree(edev->fp_array);
	}
}

static int qede_alloc_fp_array(struct qede_dev *edev)
{
	struct qede_fastpath *fp;
	int i;

	edev->fp_array = kcalloc(QEDE_RSS_CNT(edev),
				 sizeof(*edev->fp_array), GFP_KERNEL);
	if (!edev->fp_array) {
		DP_NOTICE(edev, "fp array allocation failed\n");
		goto err;
	}

	for_each_rss(i) {
		fp = &edev->fp_array[i];

		fp->sb_info = kcalloc(1, sizeof(*fp->sb_info), GFP_KERNEL);
		if (!fp->sb_info) {
			DP_NOTICE(edev, "sb info struct allocation failed\n");
			goto err;
		}

		fp->rxq = kcalloc(1, sizeof(*fp->rxq), GFP_KERNEL);
		if (!fp->rxq) {
			DP_NOTICE(edev, "RXQ struct allocation failed\n");
			goto err;
		}

		fp->txqs = kcalloc(edev->num_tc, sizeof(*fp->txqs), GFP_KERNEL);
		if (!fp->txqs) {
			DP_NOTICE(edev, "TXQ array allocation failed\n");
			goto err;
		}
	}

	return 0;
err:
	qede_free_fp_array(edev);
	return -ENOMEM;
}
static void qede_sp_task(struct work_struct *work)
{
	struct qede_dev *edev = container_of(work, struct qede_dev,
					     sp_task.work);

	mutex_lock(&edev->qede_lock);

	if (edev->state == QEDE_STATE_OPEN) {
		if (test_and_clear_bit(QEDE_SP_RX_MODE, &edev->sp_flags))
			qede_config_rx_mode(edev->ndev);
	}

	mutex_unlock(&edev->qede_lock);
}

static void qede_update_pf_params(struct qed_dev *cdev)
{
	struct qed_pf_params pf_params;

	memset(&pf_params, 0, sizeof(struct qed_pf_params));
	pf_params.eth_pf_params.num_cons = 32;
	qed_ops->common->update_pf_params(cdev, &pf_params);
}

enum qede_probe_mode {
	QEDE_PROBE_NORMAL,
};
static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
			enum qede_probe_mode mode)
{
	struct qed_slowpath_params params;
	struct qed_dev_eth_info dev_info;
	struct qede_dev *edev;
	struct qed_dev *cdev;
	int rc;

	if (unlikely(dp_level & QED_LEVEL_INFO))
		pr_notice("Starting qede probe\n");

	cdev = qed_ops->common->probe(pdev, QED_PROTOCOL_ETH,
				      dp_module, dp_level);
	if (!cdev) {
		rc = -ENODEV;
		goto err0;
	}

	qede_update_pf_params(cdev);

	/* Start the Slowpath-process */
	memset(&params, 0, sizeof(struct qed_slowpath_params));
	params.int_mode = QED_INT_MODE_MSIX;
	params.drv_major = QEDE_MAJOR_VERSION;
	params.drv_minor = QEDE_MINOR_VERSION;
	params.drv_rev = QEDE_REVISION_VERSION;
	params.drv_eng = QEDE_ENGINEERING_VERSION;
	strlcpy(params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
	rc = qed_ops->common->slowpath_start(cdev, &params);
	if (rc) {
		pr_notice("Cannot start slowpath\n");
		goto err1;
	}

	/* Learn information crucial for qede to progress */
	rc = qed_ops->fill_dev_info(cdev, &dev_info);
	if (rc)
		goto err2;

	edev = qede_alloc_etherdev(cdev, pdev, &dev_info, dp_module,
				   dp_level);
	if (!edev) {
		rc = -ENOMEM;
		goto err2;
	}

	qede_init_ndev(edev);

	rc = register_netdev(edev->ndev);
	if (rc) {
		DP_NOTICE(edev, "Cannot register net-device\n");
		goto err3;
	}

	edev->ops->common->set_id(cdev, edev->ndev->name, DRV_MODULE_VERSION);

	edev->ops->register_ops(cdev, &qede_ll_ops, edev);

	INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
	mutex_init(&edev->qede_lock);

	DP_INFO(edev, "Ending successfully qede probe\n");

	return 0;

err3:
	free_netdev(edev->ndev);
err2:
	qed_ops->common->slowpath_stop(cdev);
err1:
	qed_ops->common->remove(cdev);
err0:
	return rc;
}

static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	u32 dp_module = 0;
	u8 dp_level = 0;

	qede_config_debug(debug, &dp_module, &dp_level);

	return __qede_probe(pdev, dp_module, dp_level,
			    QEDE_PROBE_NORMAL);
}
enum qede_remove_mode {
	QEDE_REMOVE_NORMAL,
};

static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct qede_dev *edev = netdev_priv(ndev);
	struct qed_dev *cdev = edev->cdev;

	DP_INFO(edev, "Starting qede_remove\n");

	cancel_delayed_work_sync(&edev->sp_task);
	unregister_netdev(ndev);

	edev->ops->common->set_power_state(cdev, PCI_D0);

	pci_set_drvdata(pdev, NULL);

	free_netdev(ndev);

	/* Use global ops since we've freed edev */
	qed_ops->common->slowpath_stop(cdev);
	qed_ops->common->remove(cdev);

	pr_notice("Ending successfully qede_remove\n");
}

static void qede_remove(struct pci_dev *pdev)
{
	__qede_remove(pdev, QEDE_REMOVE_NORMAL);
}

/* -------------------------------------------------------------------------
 * START OF LOAD / UNLOAD
 * -------------------------------------------------------------------------
 */
static int qede_set_num_queues(struct qede_dev *edev)
{
	int rc;
	u16 rss_num;

	/* Setup queues according to possible resources */
	if (edev->req_rss)
		rss_num = edev->req_rss;
	else
		rss_num = netif_get_num_default_rss_queues() *
			  edev->dev_info.common.num_hwfns;

	rss_num = min_t(u16, QEDE_MAX_RSS_CNT(edev), rss_num);

	rc = edev->ops->common->set_fp_int(edev->cdev, rss_num);
	if (rc > 0) {
		/* Managed to request interrupts for our queues */
		edev->num_rss = rc;
		DP_INFO(edev, "Managed %d [of %d] RSS queues\n",
			QEDE_RSS_CNT(edev), rss_num);
		rc = 0;
	}
	return rc;
}

static void qede_free_mem_sb(struct qede_dev *edev,
			     struct qed_sb_info *sb_info)
{
	if (sb_info->sb_virt)
		dma_free_coherent(&edev->pdev->dev, sizeof(*sb_info->sb_virt),
				  (void *)sb_info->sb_virt, sb_info->sb_phys);
}

/* This function allocates fast-path status block memory */
static int qede_alloc_mem_sb(struct qede_dev *edev,
			     struct qed_sb_info *sb_info,
			     u16 sb_id)
{
	struct status_block *sb_virt;
	dma_addr_t sb_phys;
	int rc;

	sb_virt = dma_alloc_coherent(&edev->pdev->dev,
				     sizeof(*sb_virt),
				     &sb_phys, GFP_KERNEL);
	if (!sb_virt) {
		DP_ERR(edev, "Status block allocation failed\n");
		return -ENOMEM;
	}

	rc = edev->ops->common->sb_init(edev->cdev, sb_info,
					sb_virt, sb_phys, sb_id,
					QED_SB_TYPE_L2_QUEUE);
	if (rc) {
		DP_ERR(edev, "Status block initialization failed\n");
		dma_free_coherent(&edev->pdev->dev, sizeof(*sb_virt),
				  sb_virt, sb_phys);
		return rc;
	}

	return 0;
}
static void qede_free_rx_buffers(struct qede_dev *edev,
				 struct qede_rx_queue *rxq)
{
	u16 i;

	for (i = rxq->sw_rx_cons; i != rxq->sw_rx_prod; i++) {
		struct sw_rx_data *rx_buf;
		u8 *data;

		rx_buf = &rxq->sw_rx_ring[i & NUM_RX_BDS_MAX];
		data = rx_buf->data;

		dma_unmap_single(&edev->pdev->dev,
				 dma_unmap_addr(rx_buf, mapping),
				 rxq->rx_buf_size, DMA_FROM_DEVICE);

		rx_buf->data = NULL;
		kfree(data);
	}
}

static void qede_free_mem_rxq(struct qede_dev *edev,
			      struct qede_rx_queue *rxq)
{
	/* Free rx buffers */
	qede_free_rx_buffers(edev, rxq);

	/* Free the parallel SW ring */
	kfree(rxq->sw_rx_ring);

	/* Free the real RQ ring used by FW */
	edev->ops->common->chain_free(edev->cdev, &rxq->rx_bd_ring);
	edev->ops->common->chain_free(edev->cdev, &rxq->rx_comp_ring);
}
static int qede_alloc_rx_buffer(struct qede_dev *edev,
				struct qede_rx_queue *rxq)
{
	struct sw_rx_data *sw_rx_data;
	struct eth_rx_bd *rx_bd;
	dma_addr_t mapping;
	u16 rx_buf_size;
	u8 *data;

	rx_buf_size = rxq->rx_buf_size;

	data = kmalloc(rx_buf_size, GFP_ATOMIC);
	if (unlikely(!data)) {
		DP_NOTICE(edev, "Failed to allocate Rx data\n");
		return -ENOMEM;
	}

	mapping = dma_map_single(&edev->pdev->dev, data,
				 rx_buf_size, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
		kfree(data);
		DP_NOTICE(edev, "Failed to map Rx buffer\n");
		return -ENOMEM;
	}

	sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
	sw_rx_data->data = data;

	dma_unmap_addr_set(sw_rx_data, mapping, mapping);

	/* Advance PROD and get BD pointer */
	rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring);
	rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping));
	rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping));

	rxq->sw_rx_prod++;

	return 0;
}
/* This function allocates all memory needed per Rx queue */
static int qede_alloc_mem_rxq(struct qede_dev *edev,
			      struct qede_rx_queue *rxq)
{
	int i, rc, size, num_allocated;

	rxq->num_rx_buffers = edev->q_num_rx_buffers;

	rxq->rx_buf_size = NET_IP_ALIGN +
			   ETH_OVERHEAD +
			   edev->ndev->mtu +
			   QEDE_FW_RX_ALIGN_END;

	/* Allocate the parallel driver ring for Rx buffers */
	size = sizeof(*rxq->sw_rx_ring) * NUM_RX_BDS_MAX;
	rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL);
	if (!rxq->sw_rx_ring) {
		DP_ERR(edev, "Rx buffers ring allocation failed\n");
		goto err;
	}

	/* Allocate FW Rx ring */
	rc = edev->ops->common->chain_alloc(edev->cdev,
					    QED_CHAIN_USE_TO_CONSUME_PRODUCE,
					    QED_CHAIN_MODE_NEXT_PTR,
					    NUM_RX_BDS_MAX,
					    sizeof(struct eth_rx_bd),
					    &rxq->rx_bd_ring);
	if (rc)
		goto err;

	/* Allocate FW completion ring */
	rc = edev->ops->common->chain_alloc(edev->cdev,
					    QED_CHAIN_USE_TO_CONSUME,
					    QED_CHAIN_MODE_PBL,
					    NUM_RX_BDS_MAX,
					    sizeof(union eth_rx_cqe),
					    &rxq->rx_comp_ring);
	if (rc)
		goto err;

	/* Allocate buffers for the Rx ring */
	for (i = 0; i < rxq->num_rx_buffers; i++) {
		rc = qede_alloc_rx_buffer(edev, rxq);
		if (rc)
			break;
	}
	num_allocated = i;
	if (!num_allocated) {
		DP_ERR(edev, "Rx buffers allocation failed\n");
		goto err;
	} else if (num_allocated < rxq->num_rx_buffers) {
		DP_NOTICE(edev,
			  "Allocated less buffers than desired (%d allocated)\n",
			  num_allocated);
	}

	return 0;

err:
	qede_free_mem_rxq(edev, rxq);
	return -ENOMEM;
}
static void qede_free_mem_txq(struct qede_dev *edev,
			      struct qede_tx_queue *txq)
{
	/* Free the parallel SW ring */
	kfree(txq->sw_tx_ring);

	/* Free the real RQ ring used by FW */
	edev->ops->common->chain_free(edev->cdev, &txq->tx_pbl);
}

/* This function allocates all memory needed per Tx queue */
static int qede_alloc_mem_txq(struct qede_dev *edev,
			      struct qede_tx_queue *txq)
{
	int size, rc;
	union eth_tx_bd_types *p_virt;

	txq->num_tx_buffers = edev->q_num_tx_buffers;

	/* Allocate the parallel driver ring for Tx buffers */
	size = sizeof(*txq->sw_tx_ring) * NUM_TX_BDS_MAX;
	txq->sw_tx_ring = kzalloc(size, GFP_KERNEL);
	if (!txq->sw_tx_ring) {
		DP_NOTICE(edev, "Tx buffers ring allocation failed\n");
		goto err;
	}

	rc = edev->ops->common->chain_alloc(edev->cdev,
					    QED_CHAIN_USE_TO_CONSUME_PRODUCE,
					    QED_CHAIN_MODE_PBL,
					    NUM_TX_BDS_MAX,
					    sizeof(*p_virt),
					    &txq->tx_pbl);
	if (rc)
		goto err;

	return 0;

err:
	qede_free_mem_txq(edev, txq);
	return -ENOMEM;
}

/* This function frees all memory of a single fp */
static void qede_free_mem_fp(struct qede_dev *edev,
			     struct qede_fastpath *fp)
{
	int tc;

	qede_free_mem_sb(edev, fp->sb_info);

	qede_free_mem_rxq(edev, fp->rxq);

	for (tc = 0; tc < edev->num_tc; tc++)
		qede_free_mem_txq(edev, &fp->txqs[tc]);
}
/* This function allocates all memory needed for a single fp (i.e. an entity
 * which contains status block, one rx queue and multiple per-TC tx queues.
 */
static int qede_alloc_mem_fp(struct qede_dev *edev,
			     struct qede_fastpath *fp)
{
	int rc, tc;

	rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->rss_id);
	if (rc)
		goto err;

	rc = qede_alloc_mem_rxq(edev, fp->rxq);
	if (rc)
		goto err;

	for (tc = 0; tc < edev->num_tc; tc++) {
		rc = qede_alloc_mem_txq(edev, &fp->txqs[tc]);
		if (rc)
			goto err;
	}

	return 0;

err:
	qede_free_mem_fp(edev, fp);
	return -ENOMEM;
}

static void qede_free_mem_load(struct qede_dev *edev)
{
	int i;

	for_each_rss(i) {
		struct qede_fastpath *fp = &edev->fp_array[i];

		qede_free_mem_fp(edev, fp);
	}
}

/* This function allocates all qede memory at NIC load. */
static int qede_alloc_mem_load(struct qede_dev *edev)
{
	int rc = 0, rss_id;

	for (rss_id = 0; rss_id < QEDE_RSS_CNT(edev); rss_id++) {
		struct qede_fastpath *fp = &edev->fp_array[rss_id];

		rc = qede_alloc_mem_fp(edev, fp);
		if (rc)
			break;
	}

	if (rss_id != QEDE_RSS_CNT(edev)) {
		/* Failed allocating memory for all the queues */
		if (!rss_id) {
			DP_ERR(edev,
			       "Failed to allocate memory for the leading queue\n");
			rc = -ENOMEM;
		} else {
			DP_NOTICE(edev,
				  "Failed to allocate memory for all of RSS queues\n Desired: %d queues, allocated: %d queues\n",
				  QEDE_RSS_CNT(edev), rss_id);
			rc = 0;
		}
		edev->num_rss = rss_id;
	}

	return rc;
}
/* This function inits fp content and resets the SB, RXQ and TXQ structures */
static void qede_init_fp(struct qede_dev *edev)
{
	int rss_id, txq_index, tc;
	struct qede_fastpath *fp;

	for_each_rss(rss_id) {
		fp = &edev->fp_array[rss_id];

		fp->edev = edev;
		fp->rss_id = rss_id;

		memset((void *)&fp->napi, 0, sizeof(fp->napi));

		memset((void *)fp->sb_info, 0, sizeof(*fp->sb_info));

		memset((void *)fp->rxq, 0, sizeof(*fp->rxq));
		fp->rxq->rxq_id = rss_id;

		memset((void *)fp->txqs, 0, (edev->num_tc * sizeof(*fp->txqs)));
		for (tc = 0; tc < edev->num_tc; tc++) {
			txq_index = tc * QEDE_RSS_CNT(edev) + rss_id;
			fp->txqs[tc].index = txq_index;
		}

		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
			 edev->ndev->name, rss_id);
	}
}
static int qede_set_real_num_queues(struct qede_dev *edev)
{
	int rc = 0;

	rc = netif_set_real_num_tx_queues(edev->ndev, QEDE_TSS_CNT(edev));
	if (rc) {
		DP_NOTICE(edev, "Failed to set real number of Tx queues\n");
		return rc;
	}
	rc = netif_set_real_num_rx_queues(edev->ndev, QEDE_RSS_CNT(edev));
	if (rc) {
		DP_NOTICE(edev, "Failed to set real number of Rx queues\n");
		return rc;
	}

	return 0;
}

static void qede_napi_disable_remove(struct qede_dev *edev)
{
	int i;

	for_each_rss(i) {
		napi_disable(&edev->fp_array[i].napi);

		netif_napi_del(&edev->fp_array[i].napi);
	}
}

static void qede_napi_add_enable(struct qede_dev *edev)
{
	int i;

	/* Add NAPI objects */
	for_each_rss(i) {
		netif_napi_add(edev->ndev, &edev->fp_array[i].napi,
			       qede_poll, NAPI_POLL_WEIGHT);
		napi_enable(&edev->fp_array[i].napi);
	}
}
static void qede_sync_free_irqs(struct qede_dev *edev)
{
	int i;

	for (i = 0; i < edev->int_info.used_cnt; i++) {
		if (edev->int_info.msix_cnt) {
			synchronize_irq(edev->int_info.msix[i].vector);
			free_irq(edev->int_info.msix[i].vector,
				 &edev->fp_array[i]);
		} else {
			edev->ops->common->simd_handler_clean(edev->cdev, i);
		}
	}

	edev->int_info.used_cnt = 0;
}

static int qede_req_msix_irqs(struct qede_dev *edev)
{
	int i, rc;

	/* Sanitize number of interrupts == number of prepared RSS queues */
	if (QEDE_RSS_CNT(edev) > edev->int_info.msix_cnt) {
		DP_ERR(edev,
		       "Interrupt mismatch: %d RSS queues > %d MSI-x vectors\n",
		       QEDE_RSS_CNT(edev), edev->int_info.msix_cnt);
		return -EINVAL;
	}

	for (i = 0; i < QEDE_RSS_CNT(edev); i++) {
		rc = request_irq(edev->int_info.msix[i].vector,
				 qede_msix_fp_int, 0, edev->fp_array[i].name,
				 &edev->fp_array[i]);
		if (rc) {
			DP_ERR(edev, "Request fp %d irq failed\n", i);
			qede_sync_free_irqs(edev);
			return rc;
		}
		DP_VERBOSE(edev, NETIF_MSG_INTR,
			   "Requested fp irq for %s [entry %d]. Cookie is at %p\n",
			   edev->fp_array[i].name, i,
			   &edev->fp_array[i]);
		edev->int_info.used_cnt++;
	}

	return 0;
}

static void qede_simd_fp_handler(void *cookie)
{
	struct qede_fastpath *fp = (struct qede_fastpath *)cookie;

	napi_schedule_irqoff(&fp->napi);
}
static int qede_setup_irqs(struct qede_dev *edev)
{
	int i, rc = 0;

	/* Learn Interrupt configuration */
	rc = edev->ops->common->get_fp_int(edev->cdev, &edev->int_info);
	if (rc)
		return rc;

	if (edev->int_info.msix_cnt) {
		rc = qede_req_msix_irqs(edev);
		if (rc)
			return rc;
		edev->ndev->irq = edev->int_info.msix[0].vector;
	} else {
		const struct qed_common_ops *ops;

		/* qed should learn the RSS ids and callbacks */
		ops = edev->ops->common;
		for (i = 0; i < QEDE_RSS_CNT(edev); i++)
			ops->simd_handler_config(edev->cdev,
						 &edev->fp_array[i], i,
						 qede_simd_fp_handler);
		edev->int_info.used_cnt = QEDE_RSS_CNT(edev);
	}

	return 0;
}
static int qede_drain_txq(struct qede_dev *edev,
			  struct qede_tx_queue *txq,
			  bool allow_drain)
{
	int rc, cnt = 1000;

	while (txq->sw_tx_cons != txq->sw_tx_prod) {
		if (!cnt) {
			if (allow_drain) {
				DP_NOTICE(edev,
					  "Tx queue[%d] is stuck, requesting MCP to drain\n",
					  txq->index);
				rc = edev->ops->common->drain(edev->cdev);
				if (rc)
					return rc;
				return qede_drain_txq(edev, txq, false);
			}
			DP_NOTICE(edev,
				  "Timeout waiting for tx queue[%d]: PROD=%d, CONS=%d\n",
				  txq->index, txq->sw_tx_prod,
				  txq->sw_tx_cons);
			return -ENODEV;
		}
		cnt--;
		usleep_range(1000, 2000);
		barrier();
	}

	/* FW finished processing, wait for HW to transmit all tx packets */
	usleep_range(1000, 2000);

	return 0;
}
static int qede_stop_queues(struct qede_dev *edev)
{
	struct qed_update_vport_params vport_update_params;
	struct qed_dev *cdev = edev->cdev;
	int rc, tc, i;

	/* Disable the vport */
	memset(&vport_update_params, 0, sizeof(vport_update_params));
	vport_update_params.vport_id = 0;
	vport_update_params.update_vport_active_flg = 1;
	vport_update_params.vport_active_flg = 0;
	vport_update_params.update_rss_flg = 0;

	rc = edev->ops->vport_update(cdev, &vport_update_params);
	if (rc) {
		DP_ERR(edev, "Failed to update vport\n");
		return rc;
	}

	/* Flush Tx queues. If needed, request drain from MCP */
	for_each_rss(i) {
		struct qede_fastpath *fp = &edev->fp_array[i];

		for (tc = 0; tc < edev->num_tc; tc++) {
			struct qede_tx_queue *txq = &fp->txqs[tc];

			rc = qede_drain_txq(edev, txq, true);
			if (rc)
				return rc;
		}
	}

	/* Stop all Queues in reverse order */
	for (i = QEDE_RSS_CNT(edev) - 1; i >= 0; i--) {
		struct qed_stop_rxq_params rx_params;

		/* Stop the Tx Queue(s) */
		for (tc = 0; tc < edev->num_tc; tc++) {
			struct qed_stop_txq_params tx_params;

			tx_params.rss_id = i;
			tx_params.tx_queue_id = tc * QEDE_RSS_CNT(edev) + i;
			rc = edev->ops->q_tx_stop(cdev, &tx_params);
			if (rc) {
				DP_ERR(edev, "Failed to stop TXQ #%d\n",
				       tx_params.tx_queue_id);
				return rc;
			}
		}

		/* Stop the Rx Queue */
		memset(&rx_params, 0, sizeof(rx_params));
		rx_params.rss_id = i;
		rx_params.rx_queue_id = i;

		rc = edev->ops->q_rx_stop(cdev, &rx_params);
		if (rc) {
			DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
			return rc;
		}
	}

	/* Stop the vport */
	rc = edev->ops->vport_stop(cdev, 0);
	if (rc)
		DP_ERR(edev, "Failed to stop VPORT\n");

	return rc;
}
2088 static int qede_start_queues(struct qede_dev
*edev
)
2091 int vport_id
= 0, drop_ttl0_flg
= 1, vlan_removal_en
= 1;
2092 struct qed_dev
*cdev
= edev
->cdev
;
2093 struct qed_update_vport_rss_params
*rss_params
= &edev
->rss_params
;
2094 struct qed_update_vport_params vport_update_params
;
2095 struct qed_queue_start_common_params q_params
;
2097 if (!edev
->num_rss
) {
2099 "Cannot update V-VPORT as active as there are no Rx queues\n");
2103 rc
= edev
->ops
->vport_start(cdev
, vport_id
,
2109 DP_ERR(edev
, "Start V-PORT failed %d\n", rc
);
2113 DP_VERBOSE(edev
, NETIF_MSG_IFUP
,
2114 "Start vport ramrod passed, vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
2115 vport_id
, edev
->ndev
->mtu
+ 0xe, vlan_removal_en
);
2118 struct qede_fastpath
*fp
= &edev
->fp_array
[i
];
2119 dma_addr_t phys_table
= fp
->rxq
->rx_comp_ring
.pbl
.p_phys_table
;
2121 memset(&q_params
, 0, sizeof(q_params
));
2122 q_params
.rss_id
= i
;
2123 q_params
.queue_id
= i
;
2124 q_params
.vport_id
= 0;
2125 q_params
.sb
= fp
->sb_info
->igu_sb_id
;
2126 q_params
.sb_idx
= RX_PI
;
2128 rc
= edev
->ops
->q_rx_start(cdev
, &q_params
,
2129 fp
->rxq
->rx_buf_size
,
2130 fp
->rxq
->rx_bd_ring
.p_phys_addr
,
2132 fp
->rxq
->rx_comp_ring
.page_cnt
,
2133 &fp
->rxq
->hw_rxq_prod_addr
);
2135 DP_ERR(edev
, "Start RXQ #%d failed %d\n", i
, rc
);
2139 fp
->rxq
->hw_cons_ptr
= &fp
->sb_info
->sb_virt
->pi_array
[RX_PI
];
2141 qede_update_rx_prod(edev
, fp
->rxq
);
2143 for (tc
= 0; tc
< edev
->num_tc
; tc
++) {
2144 struct qede_tx_queue
*txq
= &fp
->txqs
[tc
];
2145 int txq_index
= tc
* QEDE_RSS_CNT(edev
) + i
;
2147 memset(&q_params
, 0, sizeof(q_params
));
2148 q_params
.rss_id
= i
;
2149 q_params
.queue_id
= txq_index
;
2150 q_params
.vport_id
= 0;
2151 q_params
.sb
= fp
->sb_info
->igu_sb_id
;
2152 q_params
.sb_idx
= TX_PI(tc
);
2154 rc
= edev
->ops
->q_tx_start(cdev
, &q_params
,
2155 txq
->tx_pbl
.pbl
.p_phys_table
,
2156 txq
->tx_pbl
.page_cnt
,
2157 &txq
->doorbell_addr
);
2159 DP_ERR(edev
, "Start TXQ #%d failed %d\n",
2165 &fp
->sb_info
->sb_virt
->pi_array
[TX_PI(tc
)];
2166 SET_FIELD(txq
->tx_db
.data
.params
,
2167 ETH_DB_DATA_DEST
, DB_DEST_XCM
);
2168 SET_FIELD(txq
->tx_db
.data
.params
, ETH_DB_DATA_AGG_CMD
,
2170 SET_FIELD(txq
->tx_db
.data
.params
,
2171 ETH_DB_DATA_AGG_VAL_SEL
,
2172 DQ_XCM_ETH_TX_BD_PROD_CMD
);
2174 txq
->tx_db
.data
.agg_flags
= DQ_XCM_ETH_DQ_CF_CMD
;
	/* Prepare and send the vport enable */
	memset(&vport_update_params, 0, sizeof(vport_update_params));
	vport_update_params.vport_id = vport_id;
	vport_update_params.update_vport_active_flg = 1;
	vport_update_params.vport_active_flg = 1;

	/* Fill struct with RSS params */
	if (QEDE_RSS_CNT(edev) > 1) {
		vport_update_params.update_rss_flg = 1;
		for (i = 0; i < 128; i++)
			rss_params->rss_ind_table[i] =
				ethtool_rxfh_indir_default(i, QEDE_RSS_CNT(edev));
		netdev_rss_key_fill(rss_params->rss_key,
				    sizeof(rss_params->rss_key));
	} else {
		memset(rss_params, 0, sizeof(*rss_params));
	}
	memcpy(&vport_update_params.rss_params, rss_params,
	       sizeof(*rss_params));

	rc = edev->ops->vport_update(cdev, &vport_update_params);
	if (rc) {
		DP_ERR(edev, "Update V-PORT failed %d\n", rc);
		return rc;
	}

	return 0;
}
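
/* Build one QED_FILTER_TYPE_MCAST command carrying num_macs addresses
 * (mac points at a packed array of ETH_ALEN-byte entries) and pass it to
 * the qed core; opcode selects whether the batch is added or deleted.
 */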
static int qede_set_mcast_rx_mac(struct qede_dev *edev,
				 enum qed_filter_xcast_params_type opcode,
				 unsigned char *mac, int num_macs)
{
	struct qed_filter_params filter_cmd;
	int i;

	memset(&filter_cmd, 0, sizeof(filter_cmd));
	filter_cmd.type = QED_FILTER_TYPE_MCAST;
	filter_cmd.filter.mcast.type = opcode;
	filter_cmd.filter.mcast.num = num_macs;

	for (i = 0; i < num_macs; i++, mac += ETH_ALEN)
		ether_addr_copy(filter_cmd.filter.mcast.mac[i], mac);

	return edev->ops->filter_config(edev->cdev, &filter_cmd);
}
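
/* Unload mode selector; QEDE_UNLOAD_NORMAL is currently the only mode and
 * drives the common teardown path below.
 */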
enum qede_unload_mode {
	QEDE_UNLOAD_NORMAL,
};
static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode)
{
	struct qed_link_params link_params;
	int rc;

	DP_INFO(edev, "Starting qede unload\n");

	mutex_lock(&edev->qede_lock);
	edev->state = QEDE_STATE_CLOSED;

	netif_tx_disable(edev->ndev);
	netif_carrier_off(edev->ndev);

	/* Reset the link */
	memset(&link_params, 0, sizeof(link_params));
	link_params.link_up = false;
	edev->ops->common->set_link(edev->cdev, &link_params);
	rc = qede_stop_queues(edev);
	if (rc) {
		qede_sync_free_irqs(edev);
		goto out;
	}

	DP_INFO(edev, "Stopped Queues\n");

	edev->ops->fastpath_stop(edev->cdev);

	/* Release the interrupts */
	qede_sync_free_irqs(edev);
	edev->ops->common->set_fp_int(edev->cdev, 0);

	qede_napi_disable_remove(edev);

	qede_free_mem_load(edev);
	qede_free_fp_array(edev);

out:
	mutex_unlock(&edev->qede_lock);
	DP_INFO(edev, "Ending qede unload\n");
}
enum qede_load_mode {
	QEDE_LOAD_NORMAL,
};
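
/* Interface bring-up: allocate the fastpath array and queue memory, set
 * the real number of Tx/Rx queues, add and enable NAPI, request IRQs,
 * start the vport/queues, then ask the qed core for link-up and sync the
 * carrier state from the current link output. Errors unwind in reverse
 * order through the err* labels.
 */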
static int qede_load(struct qede_dev *edev, enum qede_load_mode mode)
{
	struct qed_link_params link_params;
	struct qed_link_output link_output;
	int rc;

	DP_INFO(edev, "Starting qede load\n");

	rc = qede_set_num_queues(edev);
	if (rc)
		goto err0;

	rc = qede_alloc_fp_array(edev);
	if (rc)
		goto err0;

	rc = qede_alloc_mem_load(edev);
	if (rc)
		goto err1;
	DP_INFO(edev, "Allocated %d RSS queues on %d TC/s\n",
		QEDE_RSS_CNT(edev), edev->num_tc);

	rc = qede_set_real_num_queues(edev);
	if (rc)
		goto err2;

	qede_napi_add_enable(edev);
	DP_INFO(edev, "Napi added and enabled\n");

	rc = qede_setup_irqs(edev);
	if (rc)
		goto err3;
	DP_INFO(edev, "Setup IRQs succeeded\n");

	rc = qede_start_queues(edev);
	if (rc)
		goto err4;
	DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");

	/* Add primary mac and set Rx filters */
	ether_addr_copy(edev->primary_mac, edev->ndev->dev_addr);

	mutex_lock(&edev->qede_lock);
	edev->state = QEDE_STATE_OPEN;
	mutex_unlock(&edev->qede_lock);

	/* Ask for link-up using current configuration */
	memset(&link_params, 0, sizeof(link_params));
	link_params.link_up = true;
	edev->ops->common->set_link(edev->cdev, &link_params);

	/* Query whether link is already-up */
	memset(&link_output, 0, sizeof(link_output));
	edev->ops->common->get_link(edev->cdev, &link_output);
	qede_link_update(edev, &link_output);

	DP_INFO(edev, "Ending successfully qede load\n");

	return 0;

err4:
	qede_sync_free_irqs(edev);
	memset(&edev->int_info.msix_cnt, 0, sizeof(struct qed_int_info));
err3:
	qede_napi_disable_remove(edev);
err2:
	qede_free_mem_load(edev);
err1:
	edev->ops->common->set_fp_int(edev->cdev, 0);
	qede_free_fp_array(edev);
err0:
	return rc;
}
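
/* Tear the datapath down, let the caller-supplied handler tweak whatever
 * parameters the next load should pick up (func may be NULL), then load
 * again and re-apply the Rx mode under qede_lock.
 */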
void qede_reload(struct qede_dev *edev,
		 void (*func)(struct qede_dev *, union qede_reload_args *),
		 union qede_reload_args *args)
{
	qede_unload(edev, QEDE_UNLOAD_NORMAL);

	/* Call function handler to update parameters
	 * needed for function load.
	 */
	if (func)
		func(edev, args);

	qede_load(edev, QEDE_LOAD_NORMAL);

	mutex_lock(&edev->qede_lock);
	qede_config_rx_mode(edev->ndev);
	mutex_unlock(&edev->qede_lock);
}
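
/* ndo_open/ndo_stop entry points: wake the PCI function to D0 and run the
 * normal load sequence on open, and the normal unload sequence on close.
 */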
/* called with rtnl_lock */
static int qede_open(struct net_device *ndev)
{
	struct qede_dev *edev = netdev_priv(ndev);

	netif_carrier_off(ndev);

	edev->ops->common->set_power_state(edev->cdev, PCI_D0);

	return qede_load(edev, QEDE_LOAD_NORMAL);
}
static int qede_close(struct net_device *ndev)
{
	struct qede_dev *edev = netdev_priv(ndev);

	qede_unload(edev, QEDE_UNLOAD_NORMAL);

	return 0;
}
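
/* Link-state notification from the qed core: mirror the reported link
 * into the netdev carrier state and start/stop the Tx queues accordingly.
 */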
static void qede_link_update(void *dev, struct qed_link_output *link)
{
	struct qede_dev *edev = dev;

	if (!netif_running(edev->ndev)) {
		DP_VERBOSE(edev, NETIF_MSG_LINK, "Interface is not running\n");
		return;
	}

	if (link->link_up) {
		DP_NOTICE(edev, "Link is up\n");
		netif_tx_start_all_queues(edev->ndev);
		netif_carrier_on(edev->ndev);
	} else {
		DP_NOTICE(edev, "Link is down\n");
		netif_tx_disable(edev->ndev);
		netif_carrier_off(edev->ndev);
	}
}
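
/* ndo_set_mac_address handler: validate the new address, and if the
 * interface is running, delete the old primary MAC filter and add the
 * new one.
 */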
static int qede_set_mac_addr(struct net_device *ndev, void *p)
{
	struct qede_dev *edev = netdev_priv(ndev);
	struct sockaddr *addr = p;
	int rc;

	ASSERT_RTNL(); /* @@@TBD To be removed */

	DP_INFO(edev, "Set_mac_addr called\n");

	if (!is_valid_ether_addr(addr->sa_data)) {
		DP_NOTICE(edev, "The MAC address is not valid\n");
		return -EFAULT;
	}

	ether_addr_copy(ndev->dev_addr, addr->sa_data);

	if (!netif_running(ndev)) {
		DP_NOTICE(edev, "The device is currently down\n");
		return 0;
	}

	/* Remove the previous primary mac */
	rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
				   edev->primary_mac);
	if (rc)
		return rc;

	/* Add MAC filter according to the new unicast HW MAC address */
	ether_addr_copy(edev->primary_mac, ndev->dev_addr);
	return qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
				     edev->primary_mac);
}
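
/* Rebuild the multicast filter configuration: drop the previously
 * configured MACs, snapshot the current list under the addr lock, then
 * either program exact filters or fall back to multicast-promiscuous when
 * IFF_ALLMULTI is set or the list exceeds the 64-entry limit.
 */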
static int
qede_configure_mcast_filtering(struct net_device *ndev,
			       enum qed_filter_rx_mode_type *accept_flags)
{
	struct qede_dev *edev = netdev_priv(ndev);
	unsigned char *mc_macs, *temp;
	struct netdev_hw_addr *ha;
	int rc = 0, mc_count;
	size_t size;

	size = 64 * ETH_ALEN;

	mc_macs = kzalloc(size, GFP_KERNEL);
	if (!mc_macs) {
		DP_NOTICE(edev,
			  "Failed to allocate memory for multicast MACs\n");
		rc = -ENOMEM;
		goto exit;
	}

	temp = mc_macs;

	/* Remove all previously configured MAC filters */
	rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
				   mc_macs, 1);
	if (rc)
		goto exit;

	netif_addr_lock_bh(ndev);

	mc_count = netdev_mc_count(ndev);
	if (mc_count < 64) {
		netdev_for_each_mc_addr(ha, ndev) {
			ether_addr_copy(temp, ha->addr);
			temp += ETH_ALEN;
		}
	}

	netif_addr_unlock_bh(ndev);

	/* Check for all multicast @@@TBD resource allocation */
	if ((ndev->flags & IFF_ALLMULTI) ||
	    (mc_count > 64)) {
		if (*accept_flags == QED_FILTER_RX_MODE_TYPE_REGULAR)
			*accept_flags = QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
	} else {
		/* Add all multicast MAC filters */
		rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
					   mc_macs, mc_count);
	}

exit:
	kfree(mc_macs);
	return rc;
}
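
/* ndo_set_rx_mode callback; it may be called in atomic context, so the
 * actual filter reconfiguration is deferred to the sp_task worker via the
 * QEDE_SP_RX_MODE flag, which is expected to end up in
 * qede_config_rx_mode() under qede_lock.
 */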
static void qede_set_rx_mode(struct net_device *ndev)
{
	struct qede_dev *edev = netdev_priv(ndev);

	DP_INFO(edev, "qede_set_rx_mode called\n");

	if (edev->state != QEDE_STATE_OPEN) {
		DP_INFO(edev,
			"qede_set_rx_mode called while interface is down\n");
	} else {
		set_bit(QEDE_SP_RX_MODE, &edev->sp_flags);
		schedule_delayed_work(&edev->sp_task, 0);
	}
}
/* Must be called with qede_lock held */
static void qede_config_rx_mode(struct net_device *ndev)
{
	enum qed_filter_rx_mode_type accept_flags = QED_FILTER_RX_MODE_TYPE_REGULAR;
	struct qede_dev *edev = netdev_priv(ndev);
	struct qed_filter_params rx_mode;
	unsigned char *uc_macs, *temp;
	struct netdev_hw_addr *ha;
	int rc, uc_count;
	size_t size;

	netif_addr_lock_bh(ndev);

	uc_count = netdev_uc_count(ndev);
	size = uc_count * ETH_ALEN;

	uc_macs = kzalloc(size, GFP_ATOMIC);
	if (!uc_macs) {
		DP_NOTICE(edev, "Failed to allocate memory for unicast MACs\n");
		netif_addr_unlock_bh(ndev);
		return;
	}

	temp = uc_macs;
	netdev_for_each_uc_addr(ha, ndev) {
		ether_addr_copy(temp, ha->addr);
		temp += ETH_ALEN;
	}

	netif_addr_unlock_bh(ndev);

	/* Configure the struct for the Rx mode */
	memset(&rx_mode, 0, sizeof(struct qed_filter_params));
	rx_mode.type = QED_FILTER_TYPE_RX_MODE;

	/* Remove all previous unicast secondary macs and multicast macs
	 * (configure / leave the primary mac)
	 */
	rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_REPLACE,
				   edev->primary_mac);
	if (rc)
		goto out;

	/* Check for promiscuous */
	if ((ndev->flags & IFF_PROMISC) ||
	    (uc_count > 15)) { /* @@@TBD resource allocation - 1 */
		accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
	} else {
		/* Add MAC filters according to the unicast secondary macs */
		int i;

		temp = uc_macs;
		for (i = 0; i < uc_count; i++) {
			rc = qede_set_ucast_rx_mac(edev,
						   QED_FILTER_XCAST_TYPE_ADD,
						   temp);
			if (rc)
				goto out;

			temp += ETH_ALEN;
		}

		rc = qede_configure_mcast_filtering(ndev, &accept_flags);
		if (rc)
			goto out;
	}

	rx_mode.filter.accept_flags = accept_flags;
	edev->ops->filter_config(edev->cdev, &rx_mode);
out:
	kfree(uc_macs);
}