qede: Add basic network device support
drivers/net/ethernet/qlogic/qede/qede_main.c
1/* QLogic qede NIC Driver
2* Copyright (c) 2015 QLogic Corporation
3*
4* This software is available under the terms of the GNU General Public License
5* (GPL) Version 2, available from the file COPYING in the main directory of
6* this source tree.
7*/
8
9#include <linux/module.h>
10#include <linux/pci.h>
11#include <linux/version.h>
12#include <linux/device.h>
13#include <linux/netdevice.h>
14#include <linux/etherdevice.h>
15#include <linux/skbuff.h>
16#include <linux/errno.h>
17#include <linux/list.h>
18#include <linux/string.h>
19#include <linux/dma-mapping.h>
20#include <linux/interrupt.h>
21#include <asm/byteorder.h>
22#include <asm/param.h>
23#include <linux/io.h>
24#include <linux/netdev_features.h>
25#include <linux/udp.h>
26#include <linux/tcp.h>
27#include <net/vxlan.h>
28#include <linux/ip.h>
29#include <net/ipv6.h>
30#include <net/tcp.h>
31#include <linux/if_ether.h>
32#include <linux/if_vlan.h>
33#include <linux/pkt_sched.h>
34#include <linux/ethtool.h>
35#include <linux/in.h>
36#include <linux/random.h>
37#include <net/ip6_checksum.h>
38#include <linux/bitops.h>
39
40#include "qede.h"
41
42static const char version[] = "QLogic QL4xxx 40G/100G Ethernet Driver qede "
43 DRV_MODULE_VERSION "\n";
44
45MODULE_DESCRIPTION("QLogic 40G/100G Ethernet Driver");
46MODULE_LICENSE("GPL");
47MODULE_VERSION(DRV_MODULE_VERSION);
48
49static uint debug;
50module_param(debug, uint, 0);
51MODULE_PARM_DESC(debug, " Default debug msglevel");
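/* The 32-bit debug value is decoded by qede_config_debug() below: b31 enables
 * NOTICE prints, b30 enables INFO prints and b29-b0 select per-module VERBOSE
 * prints.
 */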
52
53static const struct qed_eth_ops *qed_ops;
54
55#define CHIP_NUM_57980S_40 0x1634
56#define CHIP_NUM_57980S_10 0x1635
57#define CHIP_NUM_57980S_MF 0x1636
58#define CHIP_NUM_57980S_100 0x1644
59#define CHIP_NUM_57980S_50 0x1654
60#define CHIP_NUM_57980S_25 0x1656
61
62#ifndef PCI_DEVICE_ID_NX2_57980E
63#define PCI_DEVICE_ID_57980S_40 CHIP_NUM_57980S_40
64#define PCI_DEVICE_ID_57980S_10 CHIP_NUM_57980S_10
65#define PCI_DEVICE_ID_57980S_MF CHIP_NUM_57980S_MF
66#define PCI_DEVICE_ID_57980S_100 CHIP_NUM_57980S_100
67#define PCI_DEVICE_ID_57980S_50 CHIP_NUM_57980S_50
68#define PCI_DEVICE_ID_57980S_25 CHIP_NUM_57980S_25
69#endif
70
71static const struct pci_device_id qede_pci_tbl[] = {
72 { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), 0 },
73 { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), 0 },
74 { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), 0 },
75 { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), 0 },
76 { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), 0 },
77 { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), 0 },
78 { 0 }
79};
80
81MODULE_DEVICE_TABLE(pci, qede_pci_tbl);
82
83static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id);
84
85#define TX_TIMEOUT (5 * HZ)
86
87static void qede_remove(struct pci_dev *pdev);
88static int qede_alloc_rx_buffer(struct qede_dev *edev,
89 struct qede_rx_queue *rxq);
90
91static struct pci_driver qede_pci_driver = {
92 .name = "qede",
93 .id_table = qede_pci_tbl,
94 .probe = qede_probe,
95 .remove = qede_remove,
96};
97
98static int qede_netdev_event(struct notifier_block *this, unsigned long event,
99 void *ptr)
100{
101 struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
102 struct ethtool_drvinfo drvinfo;
103 struct qede_dev *edev;
104
105 /* Currently only support name change */
106 if (event != NETDEV_CHANGENAME)
107 goto done;
108
109 /* Check whether this is a qede device */
110 if (!ndev || !ndev->ethtool_ops || !ndev->ethtool_ops->get_drvinfo)
111 goto done;
112
113 memset(&drvinfo, 0, sizeof(drvinfo));
114 ndev->ethtool_ops->get_drvinfo(ndev, &drvinfo);
115 if (strcmp(drvinfo.driver, "qede"))
116 goto done;
117 edev = netdev_priv(ndev);
118
119 /* Notify qed of the name change */
120 if (!edev->ops || !edev->ops->common)
121 goto done;
122 edev->ops->common->set_id(edev->cdev, edev->ndev->name,
123 "qede");
124
125done:
126 return NOTIFY_DONE;
127}
128
129static struct notifier_block qede_netdev_notifier = {
130 .notifier_call = qede_netdev_event,
131};
132
133static
134int __init qede_init(void)
135{
136 int ret;
137 u32 qed_ver;
138
139 pr_notice("qede_init: %s\n", version);
140
141 qed_ver = qed_get_protocol_version(QED_PROTOCOL_ETH);
142 if (qed_ver != QEDE_ETH_INTERFACE_VERSION) {
143 pr_notice("Version mismatch [%08x != %08x]\n",
144 qed_ver,
145 QEDE_ETH_INTERFACE_VERSION);
146 return -EINVAL;
147 }
148
149 qed_ops = qed_get_eth_ops(QEDE_ETH_INTERFACE_VERSION);
150 if (!qed_ops) {
151 pr_notice("Failed to get qed ethtool operations\n");
152 return -EINVAL;
153 }
154
155 /* Must register notifier before pci ops, since we might miss
156 * interface rename after pci probe and netdev registration.
157 */
158 ret = register_netdevice_notifier(&qede_netdev_notifier);
159 if (ret) {
160 pr_notice("Failed to register netdevice_notifier\n");
161 qed_put_eth_ops();
162 return -EINVAL;
163 }
164
165 ret = pci_register_driver(&qede_pci_driver);
166 if (ret) {
167 pr_notice("Failed to register driver\n");
168 unregister_netdevice_notifier(&qede_netdev_notifier);
169 qed_put_eth_ops();
170 return -EINVAL;
171 }
172
173 return 0;
174}
175
176static void __exit qede_cleanup(void)
177{
178 pr_notice("qede_cleanup called\n");
179
180 unregister_netdevice_notifier(&qede_netdev_notifier);
181 pci_unregister_driver(&qede_pci_driver);
182 qed_put_eth_ops();
183}
184
185module_init(qede_init);
186module_exit(qede_cleanup);
187
188/* -------------------------------------------------------------------------
189 * START OF FAST-PATH
190 * -------------------------------------------------------------------------
191 */
192
193/* Unmap the data and free skb */
194static int qede_free_tx_pkt(struct qede_dev *edev,
195 struct qede_tx_queue *txq,
196 int *len)
197{
198 u16 idx = txq->sw_tx_cons & NUM_TX_BDS_MAX;
199 struct sk_buff *skb = txq->sw_tx_ring[idx].skb;
200 struct eth_tx_1st_bd *first_bd;
201 struct eth_tx_bd *tx_data_bd;
202 int bds_consumed = 0;
203 int nbds;
204 bool data_split = txq->sw_tx_ring[idx].flags & QEDE_TSO_SPLIT_BD;
205 int i, split_bd_len = 0;
206
207 if (unlikely(!skb)) {
208 DP_ERR(edev,
209 "skb is null for txq idx=%d txq->sw_tx_cons=%d txq->sw_tx_prod=%d\n",
210 idx, txq->sw_tx_cons, txq->sw_tx_prod);
211 return -1;
212 }
213
214 *len = skb->len;
215
216 first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
217
218 bds_consumed++;
219
220 nbds = first_bd->data.nbds;
221
222 if (data_split) {
223 struct eth_tx_bd *split = (struct eth_tx_bd *)
224 qed_chain_consume(&txq->tx_pbl);
225 split_bd_len = BD_UNMAP_LEN(split);
226 bds_consumed++;
227 }
228 dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
229 BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
230
231 /* Unmap the data of the skb frags */
232 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) {
233 tx_data_bd = (struct eth_tx_bd *)
234 qed_chain_consume(&txq->tx_pbl);
235 dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
236 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
237 }
238
239 while (bds_consumed++ < nbds)
240 qed_chain_consume(&txq->tx_pbl);
241
242 /* Free skb */
243 dev_kfree_skb_any(skb);
244 txq->sw_tx_ring[idx].skb = NULL;
245 txq->sw_tx_ring[idx].flags = 0;
246
247 return 0;
248}
249
250/* Unmap the data and free skb when mapping failed during start_xmit */
251static void qede_free_failed_tx_pkt(struct qede_dev *edev,
252 struct qede_tx_queue *txq,
253 struct eth_tx_1st_bd *first_bd,
254 int nbd,
255 bool data_split)
256{
257 u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
258 struct sk_buff *skb = txq->sw_tx_ring[idx].skb;
259 struct eth_tx_bd *tx_data_bd;
260 int i, split_bd_len = 0;
261
262 /* Return prod to its position before this skb was handled */
263 qed_chain_set_prod(&txq->tx_pbl,
264 le16_to_cpu(txq->tx_db.data.bd_prod),
265 first_bd);
266
267 first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl);
268
269 if (data_split) {
270 struct eth_tx_bd *split = (struct eth_tx_bd *)
271 qed_chain_produce(&txq->tx_pbl);
272 split_bd_len = BD_UNMAP_LEN(split);
273 nbd--;
274 }
275
276 dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
277 BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
278
279 /* Unmap the data of the skb frags */
280 for (i = 0; i < nbd; i++) {
281 tx_data_bd = (struct eth_tx_bd *)
282 qed_chain_produce(&txq->tx_pbl);
283 if (tx_data_bd->nbytes)
284 dma_unmap_page(&edev->pdev->dev,
285 BD_UNMAP_ADDR(tx_data_bd),
286 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
287 }
288
289 /* Return again prod to its position before this skb was handled */
290 qed_chain_set_prod(&txq->tx_pbl,
291 le16_to_cpu(txq->tx_db.data.bd_prod),
292 first_bd);
293
294 /* Free skb */
295 dev_kfree_skb_any(skb);
296 txq->sw_tx_ring[idx].skb = NULL;
297 txq->sw_tx_ring[idx].flags = 0;
298}
299
300static u32 qede_xmit_type(struct qede_dev *edev,
301 struct sk_buff *skb,
302 int *ipv6_ext)
303{
304 u32 rc = XMIT_L4_CSUM;
305 __be16 l3_proto;
306
307 if (skb->ip_summed != CHECKSUM_PARTIAL)
308 return XMIT_PLAIN;
309
310 l3_proto = vlan_get_protocol(skb);
311 if (l3_proto == htons(ETH_P_IPV6) &&
312 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
313 *ipv6_ext = 1;
314
315 if (skb_is_gso(skb))
316 rc |= XMIT_LSO;
317
318 return rc;
319}
320
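/* For IPv6 packets carrying extension headers the device cannot locate the L4
 * header on its own, so the 2nd/3rd Tx BDs are programmed with the L4 header
 * offset, the pseudo-checksum mode, a UDP indication and (for TCP) the TCP
 * header length.
 */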
321static void qede_set_params_for_ipv6_ext(struct sk_buff *skb,
322 struct eth_tx_2nd_bd *second_bd,
323 struct eth_tx_3rd_bd *third_bd)
324{
325 u8 l4_proto;
326 u16 bd2_bits = 0, bd2_bits2 = 0;
327
328 bd2_bits2 |= (1 << ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT);
329
330 bd2_bits |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) &
331 ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK)
332 << ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT;
333
334 bd2_bits2 |= (ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH <<
335 ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT);
336
337 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6))
338 l4_proto = ipv6_hdr(skb)->nexthdr;
339 else
340 l4_proto = ip_hdr(skb)->protocol;
341
342 if (l4_proto == IPPROTO_UDP)
343 bd2_bits2 |= 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT;
344
345 if (third_bd) {
346 third_bd->data.bitfields |=
347 ((tcp_hdrlen(skb) / 4) &
348 ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK) <<
349 ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT;
350 }
351
352 second_bd->data.bitfields = cpu_to_le16(bd2_bits);
353 second_bd->data.bitfields2 = cpu_to_le16(bd2_bits2);
354}
355
356static int map_frag_to_bd(struct qede_dev *edev,
357 skb_frag_t *frag,
358 struct eth_tx_bd *bd)
359{
360 dma_addr_t mapping;
361
362 /* Map skb non-linear frag data for DMA */
363 mapping = skb_frag_dma_map(&edev->pdev->dev, frag, 0,
364 skb_frag_size(frag),
365 DMA_TO_DEVICE);
366 if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
367 DP_NOTICE(edev, "Unable to map frag - dropping packet\n");
368 return -ENOMEM;
369 }
370
371 /* Setup the data pointer of the frag data */
372 BD_SET_UNMAP_ADDR_LEN(bd, mapping, skb_frag_size(frag));
373
374 return 0;
375}
376
377/* Main transmit function */
378static
379netdev_tx_t qede_start_xmit(struct sk_buff *skb,
380 struct net_device *ndev)
381{
382 struct qede_dev *edev = netdev_priv(ndev);
383 struct netdev_queue *netdev_txq;
384 struct qede_tx_queue *txq;
385 struct eth_tx_1st_bd *first_bd;
386 struct eth_tx_2nd_bd *second_bd = NULL;
387 struct eth_tx_3rd_bd *third_bd = NULL;
388 struct eth_tx_bd *tx_data_bd = NULL;
389 u16 txq_index;
390 u8 nbd = 0;
391 dma_addr_t mapping;
392 int rc, frag_idx = 0, ipv6_ext = 0;
393 u8 xmit_type;
394 u16 idx;
395 u16 hlen;
396 bool data_split = false;
397
398 /* Get tx-queue context and netdev index */
399 txq_index = skb_get_queue_mapping(skb);
400 WARN_ON(txq_index >= QEDE_TSS_CNT(edev));
401 txq = QEDE_TX_QUEUE(edev, txq_index);
402 netdev_txq = netdev_get_tx_queue(ndev, txq_index);
403
404 /* Current code doesn't support SKB linearization, since the max number
405 * of skb frags can be passed in the FW HSI.
406 */
407 BUILD_BUG_ON(MAX_SKB_FRAGS > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET);
408
409 WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) <
410 (MAX_SKB_FRAGS + 1));
411
412 xmit_type = qede_xmit_type(edev, skb, &ipv6_ext);
413
414 /* Fill the entry in the SW ring and the BDs in the FW ring */
415 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
416 txq->sw_tx_ring[idx].skb = skb;
417 first_bd = (struct eth_tx_1st_bd *)
418 qed_chain_produce(&txq->tx_pbl);
419 memset(first_bd, 0, sizeof(*first_bd));
420 first_bd->data.bd_flags.bitfields =
421 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
422
423 /* Map skb linear data for DMA and set in the first BD */
424 mapping = dma_map_single(&edev->pdev->dev, skb->data,
425 skb_headlen(skb), DMA_TO_DEVICE);
426 if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
427 DP_NOTICE(edev, "SKB mapping failed\n");
428 qede_free_failed_tx_pkt(edev, txq, first_bd, 0, false);
429 return NETDEV_TX_OK;
430 }
431 nbd++;
432 BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb));
433
434 /* In case there is IPv6 with extension headers or LSO we need 2nd and
435 * 3rd BDs.
436 */
437 if (unlikely((xmit_type & XMIT_LSO) | ipv6_ext)) {
438 second_bd = (struct eth_tx_2nd_bd *)
439 qed_chain_produce(&txq->tx_pbl);
440 memset(second_bd, 0, sizeof(*second_bd));
441
442 nbd++;
443 third_bd = (struct eth_tx_3rd_bd *)
444 qed_chain_produce(&txq->tx_pbl);
445 memset(third_bd, 0, sizeof(*third_bd));
446
447 nbd++;
448 /* We need to fill in additional data in second_bd... */
449 tx_data_bd = (struct eth_tx_bd *)second_bd;
450 }
451
452 if (skb_vlan_tag_present(skb)) {
453 first_bd->data.vlan = cpu_to_le16(skb_vlan_tag_get(skb));
454 first_bd->data.bd_flags.bitfields |=
455 1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
456 }
457
458 /* Fill the parsing flags & params according to the requested offload */
459 if (xmit_type & XMIT_L4_CSUM) {
460 /* We don't re-calculate IP checksum as it is already done by
461 * the upper stack
462 */
463 first_bd->data.bd_flags.bitfields |=
464 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
465
466 /* If the packet is IPv6 with extension header, indicate that
467 * to FW and pass few params, since the device cracker doesn't
468 * support parsing IPv6 with extension header/s.
469 */
470 if (unlikely(ipv6_ext))
471 qede_set_params_for_ipv6_ext(skb, second_bd, third_bd);
472 }
473
474 if (xmit_type & XMIT_LSO) {
475 first_bd->data.bd_flags.bitfields |=
476 (1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT);
477 third_bd->data.lso_mss =
478 cpu_to_le16(skb_shinfo(skb)->gso_size);
479
480 first_bd->data.bd_flags.bitfields |=
481 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
482 hlen = skb_transport_header(skb) +
483 tcp_hdrlen(skb) - skb->data;
484
485 /* @@@TBD - if will not be removed need to check */
486 third_bd->data.bitfields |=
487 (1 << ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
488
489 /* Make life easier for FW guys who can't deal with header and
490 * data on same BD. If we need to split, use the second bd...
491 */
492 if (unlikely(skb_headlen(skb) > hlen)) {
493 DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
494 "TSO split header size is %d (%x:%x)\n",
495 first_bd->nbytes, first_bd->addr.hi,
496 first_bd->addr.lo);
497
498 mapping = HILO_U64(le32_to_cpu(first_bd->addr.hi),
499 le32_to_cpu(first_bd->addr.lo)) +
500 hlen;
501
502 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, mapping,
503 le16_to_cpu(first_bd->nbytes) -
504 hlen);
505
506 /* this marks the BD as one that has no
507 * individual mapping
508 */
509 txq->sw_tx_ring[idx].flags |= QEDE_TSO_SPLIT_BD;
510
511 first_bd->nbytes = cpu_to_le16(hlen);
512
513 tx_data_bd = (struct eth_tx_bd *)third_bd;
514 data_split = true;
515 }
516 }
517
518 /* Handle fragmented skb */
519 /* special handle for frags inside 2nd and 3rd bds.. */
520 while (tx_data_bd && frag_idx < skb_shinfo(skb)->nr_frags) {
521 rc = map_frag_to_bd(edev,
522 &skb_shinfo(skb)->frags[frag_idx],
523 tx_data_bd);
524 if (rc) {
525 qede_free_failed_tx_pkt(edev, txq, first_bd, nbd,
526 data_split);
527 return NETDEV_TX_OK;
528 }
529
530 if (tx_data_bd == (struct eth_tx_bd *)second_bd)
531 tx_data_bd = (struct eth_tx_bd *)third_bd;
532 else
533 tx_data_bd = NULL;
534
535 frag_idx++;
536 }
537
538 /* map last frags into 4th, 5th .... */
539 for (; frag_idx < skb_shinfo(skb)->nr_frags; frag_idx++, nbd++) {
540 tx_data_bd = (struct eth_tx_bd *)
541 qed_chain_produce(&txq->tx_pbl);
542
543 memset(tx_data_bd, 0, sizeof(*tx_data_bd));
544
545 rc = map_frag_to_bd(edev,
546 &skb_shinfo(skb)->frags[frag_idx],
547 tx_data_bd);
548 if (rc) {
549 qede_free_failed_tx_pkt(edev, txq, first_bd, nbd,
550 data_split);
551 return NETDEV_TX_OK;
552 }
553 }
554
555 /* update the first BD with the actual num BDs */
556 first_bd->data.nbds = nbd;
557
558 netdev_tx_sent_queue(netdev_txq, skb->len);
559
560 skb_tx_timestamp(skb);
561
562 /* Advance packet producer only before sending the packet since mapping
563 * of pages may fail.
564 */
565 txq->sw_tx_prod++;
566
567 /* 'next page' entries are counted in the producer value */
568 txq->tx_db.data.bd_prod =
569 cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));
570
571 /* wmb makes sure that the BDs data is updated before updating the
572 * producer, otherwise FW may read old data from the BDs.
573 */
574 wmb();
575 barrier();
576 writel(txq->tx_db.raw, txq->doorbell_addr);
577
578 /* mmiowb is needed to synchronize doorbell writes from more than one
579 * processor. It guarantees that the write arrives to the device before
580 * the queue lock is released and another start_xmit is called (possibly
581 * on another CPU). Without this barrier, the next doorbell can bypass
582 * this doorbell. This is applicable to IA64/Altix systems.
583 */
584 mmiowb();
585
586 if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl)
587 < (MAX_SKB_FRAGS + 1))) {
588 netif_tx_stop_queue(netdev_txq);
589 DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
590 "Stop queue was called\n");
591 /* paired memory barrier is in qede_tx_int(), we have to keep
592 * ordering of set_bit() in netif_tx_stop_queue() and read of
593 * fp->bd_tx_cons
594 */
595 smp_mb();
596
597 if (qed_chain_get_elem_left(&txq->tx_pbl)
598 >= (MAX_SKB_FRAGS + 1) &&
599 (edev->state == QEDE_STATE_OPEN)) {
600 netif_tx_wake_queue(netdev_txq);
601 DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
602 "Wake queue was called\n");
603 }
604 }
605
606 return NETDEV_TX_OK;
607}
608
609static int qede_txq_has_work(struct qede_tx_queue *txq)
610{
611 u16 hw_bd_cons;
612
613 /* Tell compiler that consumer and producer can change */
614 barrier();
615 hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
616 if (qed_chain_get_cons_idx(&txq->tx_pbl) == hw_bd_cons + 1)
617 return 0;
618
619 return hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl);
620}
621
622static int qede_tx_int(struct qede_dev *edev,
623 struct qede_tx_queue *txq)
624{
625 struct netdev_queue *netdev_txq;
626 u16 hw_bd_cons;
627 unsigned int pkts_compl = 0, bytes_compl = 0;
628 int rc;
629
630 netdev_txq = netdev_get_tx_queue(edev->ndev, txq->index);
631
632 hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
633 barrier();
634
635 while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
636 int len = 0;
637
638 rc = qede_free_tx_pkt(edev, txq, &len);
639 if (rc) {
640 DP_NOTICE(edev, "hw_bd_cons = %d, chain_cons=%d\n",
641 hw_bd_cons,
642 qed_chain_get_cons_idx(&txq->tx_pbl));
643 break;
644 }
645
646 bytes_compl += len;
647 pkts_compl++;
648 txq->sw_tx_cons++;
649 }
650
651 netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl);
652
653 /* Need to make the tx_bd_cons update visible to start_xmit()
654 * before checking for netif_tx_queue_stopped(). Without the
655 * memory barrier, there is a small possibility that
656 * start_xmit() will miss it and cause the queue to be stopped
657 * forever.
658 * On the other hand we need an rmb() here to ensure the proper
659 * ordering of bit testing in the following
660 * netif_tx_queue_stopped(txq) call.
661 */
662 smp_mb();
663
664 if (unlikely(netif_tx_queue_stopped(netdev_txq))) {
665 /* Taking tx_lock is needed to prevent reenabling the queue
666 * while it's empty. This could have happened if rx_action() gets
667 * suspended in qede_tx_int() after the condition before
668 * netif_tx_wake_queue(), while tx_action (qede_start_xmit()):
669 *
670 * stops the queue->sees fresh tx_bd_cons->releases the queue->
671 * sends some packets consuming the whole queue again->
672 * stops the queue
673 */
674
675 __netif_tx_lock(netdev_txq, smp_processor_id());
676
677 if ((netif_tx_queue_stopped(netdev_txq)) &&
678 (edev->state == QEDE_STATE_OPEN) &&
679 (qed_chain_get_elem_left(&txq->tx_pbl)
680 >= (MAX_SKB_FRAGS + 1))) {
681 netif_tx_wake_queue(netdev_txq);
682 DP_VERBOSE(edev, NETIF_MSG_TX_DONE,
683 "Wake queue was called\n");
684 }
685
686 __netif_tx_unlock(netdev_txq);
687 }
688
689 return 0;
690}
691
692static bool qede_has_rx_work(struct qede_rx_queue *rxq)
693{
694 u16 hw_comp_cons, sw_comp_cons;
695
696 /* Tell compiler that status block fields can change */
697 barrier();
698
699 hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
700 sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
701
702 return hw_comp_cons != sw_comp_cons;
703}
704
705static bool qede_has_tx_work(struct qede_fastpath *fp)
706{
707 u8 tc;
708
709 for (tc = 0; tc < fp->edev->num_tc; tc++)
710 if (qede_txq_has_work(&fp->txqs[tc]))
711 return true;
712 return false;
713}
714
715/* This function copies the Rx buffer from the CONS position to the PROD
716 * position, since we failed to allocate a new Rx buffer.
717 */
718static void qede_reuse_rx_data(struct qede_rx_queue *rxq)
719{
720 struct eth_rx_bd *rx_bd_cons = qed_chain_consume(&rxq->rx_bd_ring);
721 struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
722 struct sw_rx_data *sw_rx_data_cons =
723 &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
724 struct sw_rx_data *sw_rx_data_prod =
725 &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
726
727 dma_unmap_addr_set(sw_rx_data_prod, mapping,
728 dma_unmap_addr(sw_rx_data_cons, mapping));
729
730 sw_rx_data_prod->data = sw_rx_data_cons->data;
731 memcpy(rx_bd_prod, rx_bd_cons, sizeof(struct eth_rx_bd));
732
733 rxq->sw_rx_cons++;
734 rxq->sw_rx_prod++;
735}
736
737static inline void qede_update_rx_prod(struct qede_dev *edev,
738 struct qede_rx_queue *rxq)
739{
740 u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring);
741 u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring);
742 struct eth_rx_prod_data rx_prods = {0};
743
744 /* Update producers */
745 rx_prods.bd_prod = cpu_to_le16(bd_prod);
746 rx_prods.cqe_prod = cpu_to_le16(cqe_prod);
747
748 /* Make sure that the BD and SGE data is updated before updating the
749 * producers since FW might read the BD/SGE right after the producer
750 * is updated.
751 */
752 wmb();
753
754 internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
755 (u32 *)&rx_prods);
756
757 /* mmiowb is needed to synchronize doorbell writes from more than one
758 * processor. It guarantees that the write arrives to the device before
759 * the napi lock is released and another qede_poll is called (possibly
760 * on another CPU). Without this barrier, the next doorbell can bypass
761 * this doorbell. This is applicable to IA64/Altix systems.
762 */
763 mmiowb();
764}
765
766static u32 qede_get_rxhash(struct qede_dev *edev,
767 u8 bitfields,
768 __le32 rss_hash,
769 enum pkt_hash_types *rxhash_type)
770{
771 enum rss_hash_type htype;
772
773 htype = GET_FIELD(bitfields, ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);
774
775 if ((edev->ndev->features & NETIF_F_RXHASH) && htype) {
776 *rxhash_type = ((htype == RSS_HASH_TYPE_IPV4) ||
777 (htype == RSS_HASH_TYPE_IPV6)) ?
778 PKT_HASH_TYPE_L3 : PKT_HASH_TYPE_L4;
779 return le32_to_cpu(rss_hash);
780 }
781 *rxhash_type = PKT_HASH_TYPE_NONE;
782 return 0;
783}
784
785static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag)
786{
787 skb_checksum_none_assert(skb);
788
789 if (csum_flag & QEDE_CSUM_UNNECESSARY)
790 skb->ip_summed = CHECKSUM_UNNECESSARY;
791}
792
793static inline void qede_skb_receive(struct qede_dev *edev,
794 struct qede_fastpath *fp,
795 struct sk_buff *skb,
796 u16 vlan_tag)
797{
798 if (vlan_tag)
799 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
800 vlan_tag);
801
802 napi_gro_receive(&fp->napi, skb);
803}
804
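/* Translate the CQE parsing flags into a checksum result: an IP header error,
 * or an L4 checksum error when the L4 checksum was actually calculated, yields
 * QEDE_CSUM_ERROR; a calculated, error-free L4 checksum yields
 * QEDE_CSUM_UNNECESSARY; otherwise 0 (no checksum information).
 */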
805static u8 qede_check_csum(u16 flag)
806{
807 u16 csum_flag = 0;
808 u8 csum = 0;
809
810 if ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
811 PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag) {
812 csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
813 PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
814 csum = QEDE_CSUM_UNNECESSARY;
815 }
816
817 csum_flag |= PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
818 PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
819
820 if (csum_flag & flag)
821 return QEDE_CSUM_ERROR;
822
823 return csum;
824}
825
826static int qede_rx_int(struct qede_fastpath *fp, int budget)
827{
828 struct qede_dev *edev = fp->edev;
829 struct qede_rx_queue *rxq = fp->rxq;
830
831 u16 hw_comp_cons, sw_comp_cons, sw_rx_index, parse_flag;
832 int rx_pkt = 0;
833 u8 csum_flag;
834
835 hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
836 sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
837
838 /* Memory barrier to prevent the CPU from doing speculative reads of CQE
839 * / BD in the while-loop before reading hw_comp_cons. If the CQE is
840 * read before it is written by FW, then FW writes CQE and SB, and then
841 * the CPU reads the hw_comp_cons, it will use an old CQE.
842 */
843 rmb();
844
845 /* Loop to complete all indicated BDs */
846 while (sw_comp_cons != hw_comp_cons) {
847 struct eth_fast_path_rx_reg_cqe *fp_cqe;
848 enum pkt_hash_types rxhash_type;
849 enum eth_rx_cqe_type cqe_type;
850 struct sw_rx_data *sw_rx_data;
851 union eth_rx_cqe *cqe;
852 struct sk_buff *skb;
853 u16 len, pad;
854 u32 rx_hash;
855 u8 *data;
856
857 /* Get the CQE from the completion ring */
858 cqe = (union eth_rx_cqe *)
859 qed_chain_consume(&rxq->rx_comp_ring);
860 cqe_type = cqe->fast_path_regular.type;
861
862 if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) {
863 edev->ops->eth_cqe_completion(
864 edev->cdev, fp->rss_id,
865 (struct eth_slow_path_rx_cqe *)cqe);
866 goto next_cqe;
867 }
868
869 /* Get the data from the SW ring */
870 sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
871 sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
872 data = sw_rx_data->data;
873
874 fp_cqe = &cqe->fast_path_regular;
875 len = le16_to_cpu(fp_cqe->pkt_len);
876 pad = fp_cqe->placement_offset;
877
878 /* For every Rx BD consumed, we allocate a new BD so the BD ring
879 * is always with a fixed size. If allocation fails, we take the
880 * consumed BD and return it to the ring in the PROD position.
881 * The packet that was received on that BD will be dropped (and
882 * not passed to the upper stack).
883 */
884 if (likely(qede_alloc_rx_buffer(edev, rxq) == 0)) {
885 dma_unmap_single(&edev->pdev->dev,
886 dma_unmap_addr(sw_rx_data, mapping),
887 rxq->rx_buf_size, DMA_FROM_DEVICE);
888
889 /* If this is an error packet then drop it */
890 parse_flag =
891 le16_to_cpu(cqe->fast_path_regular.pars_flags.flags);
892 csum_flag = qede_check_csum(parse_flag);
893 if (csum_flag == QEDE_CSUM_ERROR) {
894 DP_NOTICE(edev,
895 "CQE in CONS = %u has error, flags = %x, dropping incoming packet\n",
896 sw_comp_cons, parse_flag);
897 rxq->rx_hw_errors++;
898 kfree(data);
899 goto next_rx;
900 }
901
902 skb = build_skb(data, 0);
903
904 if (unlikely(!skb)) {
905 DP_NOTICE(edev,
906 "Build_skb failed, dropping incoming packet\n");
907 kfree(data);
908 rxq->rx_alloc_errors++;
909 goto next_rx;
910 }
911
912 skb_reserve(skb, pad);
913
914 } else {
915 DP_NOTICE(edev,
916 "New buffer allocation failed, dropping incoming packet and reusing its buffer\n");
917 qede_reuse_rx_data(rxq);
918 rxq->rx_alloc_errors++;
919 goto next_cqe;
920 }
921
922 sw_rx_data->data = NULL;
923
924 skb_put(skb, len);
925
926 skb->protocol = eth_type_trans(skb, edev->ndev);
927
928 rx_hash = qede_get_rxhash(edev, fp_cqe->bitfields,
929 fp_cqe->rss_hash,
930 &rxhash_type);
931
932 skb_set_hash(skb, rx_hash, rxhash_type);
933
934 qede_set_skb_csum(skb, csum_flag);
935
936 skb_record_rx_queue(skb, fp->rss_id);
937
938 qede_skb_receive(edev, fp, skb, le16_to_cpu(fp_cqe->vlan_tag));
939
940 qed_chain_consume(&rxq->rx_bd_ring);
941
942next_rx:
943 rxq->sw_rx_cons++;
944 rx_pkt++;
945
946next_cqe: /* don't consume bd rx buffer */
947 qed_chain_recycle_consumed(&rxq->rx_comp_ring);
948 sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
949 /* CR TPA - revisit how to handle budget in TPA perhaps
950 * increase on "end"
951 */
952 if (rx_pkt == budget)
953 break;
954 } /* repeat while sw_comp_cons != hw_comp_cons... */
955
956 /* Update producers */
957 qede_update_rx_prod(edev, rxq);
958
959 return rx_pkt;
960}
961
962static int qede_poll(struct napi_struct *napi, int budget)
963{
964 int work_done = 0;
965 struct qede_fastpath *fp = container_of(napi, struct qede_fastpath,
966 napi);
967 struct qede_dev *edev = fp->edev;
968
969 while (1) {
970 u8 tc;
971
972 for (tc = 0; tc < edev->num_tc; tc++)
973 if (qede_txq_has_work(&fp->txqs[tc]))
974 qede_tx_int(edev, &fp->txqs[tc]);
975
976 if (qede_has_rx_work(fp->rxq)) {
977 work_done += qede_rx_int(fp, budget - work_done);
978
979 /* must not complete if we consumed full budget */
980 if (work_done >= budget)
981 break;
982 }
983
984 /* Fall out from the NAPI loop if needed */
985 if (!(qede_has_rx_work(fp->rxq) || qede_has_tx_work(fp))) {
986 qed_sb_update_sb_idx(fp->sb_info);
987 /* *_has_*_work() reads the status block,
988 * thus we need to ensure that status block indices
989 * have been actually read (qed_sb_update_sb_idx)
990 * prior to this check (*_has_*_work) so that
991 * we won't write the "newer" value of the status block
992 * to HW (if there was a DMA right after
993 * qede_has_rx_work and if there is no rmb, the memory
994 * reading (qed_sb_update_sb_idx) may be postponed
995 * to right before *_ack_sb). In this case there
996 * will never be another interrupt until there is
997 * another update of the status block, while there
998 * is still unhandled work.
999 */
1000 rmb();
1001
1002 if (!(qede_has_rx_work(fp->rxq) ||
1003 qede_has_tx_work(fp))) {
1004 napi_complete(napi);
1005 /* Update and reenable interrupts */
1006 qed_sb_ack(fp->sb_info, IGU_INT_ENABLE,
1007 1 /*update*/);
1008 break;
1009 }
1010 }
1011 }
1012
1013 return work_done;
1014}
1015
1016static irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie)
1017{
1018 struct qede_fastpath *fp = fp_cookie;
1019
1020 qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0 /*do not update*/);
1021
1022 napi_schedule_irqoff(&fp->napi);
1023 return IRQ_HANDLED;
1024}
1025
1026/* -------------------------------------------------------------------------
1027 * END OF FAST-PATH
1028 * -------------------------------------------------------------------------
1029 */
1030
1031static int qede_open(struct net_device *ndev);
1032static int qede_close(struct net_device *ndev);
1033static const struct net_device_ops qede_netdev_ops = {
1034 .ndo_open = qede_open,
1035 .ndo_stop = qede_close,
1036 .ndo_start_xmit = qede_start_xmit,
1037 .ndo_validate_addr = eth_validate_addr,
1038};
1039
1040/* -------------------------------------------------------------------------
1041 * START OF PROBE / REMOVE
1042 * -------------------------------------------------------------------------
1043 */
1044
1045static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
1046 struct pci_dev *pdev,
1047 struct qed_dev_eth_info *info,
1048 u32 dp_module,
1049 u8 dp_level)
1050{
1051 struct net_device *ndev;
1052 struct qede_dev *edev;
1053
1054 ndev = alloc_etherdev_mqs(sizeof(*edev),
1055 info->num_queues,
1056 info->num_queues);
1057 if (!ndev) {
1058 pr_err("etherdev allocation failed\n");
1059 return NULL;
1060 }
1061
1062 edev = netdev_priv(ndev);
1063 edev->ndev = ndev;
1064 edev->cdev = cdev;
1065 edev->pdev = pdev;
1066 edev->dp_module = dp_module;
1067 edev->dp_level = dp_level;
1068 edev->ops = qed_ops;
1069 edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
1070 edev->q_num_tx_buffers = NUM_TX_BDS_DEF;
1071
1072 DP_INFO(edev, "Allocated netdev with %d tx queues and %d rx queues\n", info->num_queues, info->num_queues);
1073
1074 SET_NETDEV_DEV(ndev, &pdev->dev);
1075
1076 memcpy(&edev->dev_info, info, sizeof(*info));
1077
1078 edev->num_tc = edev->dev_info.num_tc;
1079
1080 return edev;
1081}
1082
1083static void qede_init_ndev(struct qede_dev *edev)
1084{
1085 struct net_device *ndev = edev->ndev;
1086 struct pci_dev *pdev = edev->pdev;
1087 u32 hw_features;
1088
1089 pci_set_drvdata(pdev, ndev);
1090
1091 ndev->mem_start = edev->dev_info.common.pci_mem_start;
1092 ndev->base_addr = ndev->mem_start;
1093 ndev->mem_end = edev->dev_info.common.pci_mem_end;
1094 ndev->irq = edev->dev_info.common.pci_irq;
1095
1096 ndev->watchdog_timeo = TX_TIMEOUT;
1097
1098 ndev->netdev_ops = &qede_netdev_ops;
1099
1100 /* user-changeable features */
1101 hw_features = NETIF_F_GRO | NETIF_F_SG |
1102 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1103 NETIF_F_TSO | NETIF_F_TSO6;
1104
1105 ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
1106 NETIF_F_HIGHDMA;
1107 ndev->features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
1108 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HIGHDMA |
1109 NETIF_F_HW_VLAN_CTAG_TX;
1110
1111 ndev->hw_features = hw_features;
1112
1113 /* Set network device HW mac */
1114 ether_addr_copy(edev->ndev->dev_addr, edev->dev_info.common.hw_mac);
1115}
1116
1117/* This function converts from 32b param to two params of level and module
1118 * Input 32b decoding:
1119 * b31 - enable all NOTICE prints. NOTICE prints are for deviation from the
1120 * 'happy' flow, e.g. memory allocation failed.
1121 * b30 - enable all INFO prints. INFO prints are for major steps in the flow
1122 * and provide important parameters.
1123 * b29-b0 - per-module bitmap, where each bit enables VERBOSE prints of that
1124 * module. VERBOSE prints are for tracking the specific flow in low level.
1125 *
1126 * Notice that the level should be that of the lowest required logs.
1127 */
1128static void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level)
1129{
1130 *p_dp_level = QED_LEVEL_NOTICE;
1131 *p_dp_module = 0;
1132
1133 if (debug & QED_LOG_VERBOSE_MASK) {
1134 *p_dp_level = QED_LEVEL_VERBOSE;
1135 *p_dp_module = (debug & 0x3FFFFFFF);
1136 } else if (debug & QED_LOG_INFO_MASK) {
1137 *p_dp_level = QED_LEVEL_INFO;
1138 } else if (debug & QED_LOG_NOTICE_MASK) {
1139 *p_dp_level = QED_LEVEL_NOTICE;
1140 }
1141}
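/* Illustrative examples (assuming QED_LOG_NOTICE/INFO/VERBOSE_MASK correspond
 * to b31, b30 and b29-b0 as described above):
 *   debug=0x80000000 -> QED_LEVEL_NOTICE, no verbose modules
 *   debug=0x40000000 -> QED_LEVEL_INFO, no verbose modules
 *   debug=0x00000003 -> QED_LEVEL_VERBOSE, verbose prints for modules 0 and 1
 */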
1142
1143static void qede_free_fp_array(struct qede_dev *edev)
1144{
1145 if (edev->fp_array) {
1146 struct qede_fastpath *fp;
1147 int i;
1148
1149 for_each_rss(i) {
1150 fp = &edev->fp_array[i];
1151
1152 kfree(fp->sb_info);
1153 kfree(fp->rxq);
1154 kfree(fp->txqs);
1155 }
1156 kfree(edev->fp_array);
1157 }
1158 edev->num_rss = 0;
1159}
1160
1161static int qede_alloc_fp_array(struct qede_dev *edev)
1162{
1163 struct qede_fastpath *fp;
1164 int i;
1165
1166 edev->fp_array = kcalloc(QEDE_RSS_CNT(edev),
1167 sizeof(*edev->fp_array), GFP_KERNEL);
1168 if (!edev->fp_array) {
1169 DP_NOTICE(edev, "fp array allocation failed\n");
1170 goto err;
1171 }
1172
1173 for_each_rss(i) {
1174 fp = &edev->fp_array[i];
1175
1176 fp->sb_info = kcalloc(1, sizeof(*fp->sb_info), GFP_KERNEL);
1177 if (!fp->sb_info) {
1178 DP_NOTICE(edev, "sb info struct allocation failed\n");
1179 goto err;
1180 }
1181
1182 fp->rxq = kcalloc(1, sizeof(*fp->rxq), GFP_KERNEL);
1183 if (!fp->rxq) {
1184 DP_NOTICE(edev, "RXQ struct allocation failed\n");
1185 goto err;
1186 }
1187
1188 fp->txqs = kcalloc(edev->num_tc, sizeof(*fp->txqs), GFP_KERNEL);
1189 if (!fp->txqs) {
1190 DP_NOTICE(edev, "TXQ array allocation failed\n");
1191 goto err;
1192 }
1193 }
1194
1195 return 0;
1196err:
1197 qede_free_fp_array(edev);
1198 return -ENOMEM;
1199}
1200
1201static void qede_update_pf_params(struct qed_dev *cdev)
1202{
1203 struct qed_pf_params pf_params;
1204
1205 /* 16 rx + 16 tx */
1206 memset(&pf_params, 0, sizeof(struct qed_pf_params));
1207 pf_params.eth_pf_params.num_cons = 32;
1208 qed_ops->common->update_pf_params(cdev, &pf_params);
1209}
1210
1211enum qede_probe_mode {
1212 QEDE_PROBE_NORMAL,
1213};
1214
1215static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
1216 enum qede_probe_mode mode)
1217{
1218 struct qed_slowpath_params params;
1219 struct qed_dev_eth_info dev_info;
1220 struct qede_dev *edev;
1221 struct qed_dev *cdev;
1222 int rc;
1223
1224 if (unlikely(dp_level & QED_LEVEL_INFO))
1225 pr_notice("Starting qede probe\n");
1226
1227 cdev = qed_ops->common->probe(pdev, QED_PROTOCOL_ETH,
1228 dp_module, dp_level);
1229 if (!cdev) {
1230 rc = -ENODEV;
1231 goto err0;
1232 }
1233
1234 qede_update_pf_params(cdev);
1235
1236 /* Start the Slowpath-process */
1237 memset(&params, 0, sizeof(struct qed_slowpath_params));
1238 params.int_mode = QED_INT_MODE_MSIX;
1239 params.drv_major = QEDE_MAJOR_VERSION;
1240 params.drv_minor = QEDE_MINOR_VERSION;
1241 params.drv_rev = QEDE_REVISION_VERSION;
1242 params.drv_eng = QEDE_ENGINEERING_VERSION;
1243 strlcpy(params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
1244 rc = qed_ops->common->slowpath_start(cdev, &params);
1245 if (rc) {
1246 pr_notice("Cannot start slowpath\n");
1247 goto err1;
1248 }
1249
1250 /* Learn information crucial for qede to progress */
1251 rc = qed_ops->fill_dev_info(cdev, &dev_info);
1252 if (rc)
1253 goto err2;
1254
1255 edev = qede_alloc_etherdev(cdev, pdev, &dev_info, dp_module,
1256 dp_level);
1257 if (!edev) {
1258 rc = -ENOMEM;
1259 goto err2;
1260 }
1261
1262 qede_init_ndev(edev);
1263
1264 rc = register_netdev(edev->ndev);
1265 if (rc) {
1266 DP_NOTICE(edev, "Cannot register net-device\n");
1267 goto err3;
1268 }
1269
1270 edev->ops->common->set_id(cdev, edev->ndev->name, DRV_MODULE_VERSION);
1271
1272 DP_INFO(edev, "Ending successfully qede probe\n");
1273
1274 return 0;
1275
1276err3:
1277 free_netdev(edev->ndev);
1278err2:
1279 qed_ops->common->slowpath_stop(cdev);
1280err1:
1281 qed_ops->common->remove(cdev);
1282err0:
1283 return rc;
1284}
1285
1286static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1287{
1288 u32 dp_module = 0;
1289 u8 dp_level = 0;
1290
1291 qede_config_debug(debug, &dp_module, &dp_level);
1292
1293 return __qede_probe(pdev, dp_module, dp_level,
1294 QEDE_PROBE_NORMAL);
1295}
1296
1297enum qede_remove_mode {
1298 QEDE_REMOVE_NORMAL,
1299};
1300
1301static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
1302{
1303 struct net_device *ndev = pci_get_drvdata(pdev);
1304 struct qede_dev *edev = netdev_priv(ndev);
1305 struct qed_dev *cdev = edev->cdev;
1306
1307 DP_INFO(edev, "Starting qede_remove\n");
1308
1309 unregister_netdev(ndev);
1310
1311 edev->ops->common->set_power_state(cdev, PCI_D0);
1312
1313 pci_set_drvdata(pdev, NULL);
1314
1315 free_netdev(ndev);
1316
1317 /* Use global ops since we've freed edev */
1318 qed_ops->common->slowpath_stop(cdev);
1319 qed_ops->common->remove(cdev);
1320
1321 pr_notice("Ending successfully qede_remove\n");
1322}
1323
1324static void qede_remove(struct pci_dev *pdev)
1325{
1326 __qede_remove(pdev, QEDE_REMOVE_NORMAL);
1327}
1328
1329/* -------------------------------------------------------------------------
1330 * START OF LOAD / UNLOAD
1331 * -------------------------------------------------------------------------
1332 */
1333
1334static int qede_set_num_queues(struct qede_dev *edev)
1335{
1336 int rc;
1337 u16 rss_num;
1338
1339 /* Setup queues according to possible resources */
1340 rss_num = netif_get_num_default_rss_queues() *
1341 edev->dev_info.common.num_hwfns;
1342
1343 rss_num = min_t(u16, QEDE_MAX_RSS_CNT(edev), rss_num);
1344
1345 rc = edev->ops->common->set_fp_int(edev->cdev, rss_num);
1346 if (rc > 0) {
1347 /* Managed to request interrupts for our queues */
1348 edev->num_rss = rc;
1349 DP_INFO(edev, "Managed %d [of %d] RSS queues\n",
1350 QEDE_RSS_CNT(edev), rss_num);
1351 rc = 0;
1352 }
1353 return rc;
1354}
1355
1356static void qede_free_mem_sb(struct qede_dev *edev,
1357 struct qed_sb_info *sb_info)
1358{
1359 if (sb_info->sb_virt)
1360 dma_free_coherent(&edev->pdev->dev, sizeof(*sb_info->sb_virt),
1361 (void *)sb_info->sb_virt, sb_info->sb_phys);
1362}
1363
1364/* This function allocates fast-path status block memory */
1365static int qede_alloc_mem_sb(struct qede_dev *edev,
1366 struct qed_sb_info *sb_info,
1367 u16 sb_id)
1368{
1369 struct status_block *sb_virt;
1370 dma_addr_t sb_phys;
1371 int rc;
1372
1373 sb_virt = dma_alloc_coherent(&edev->pdev->dev,
1374 sizeof(*sb_virt),
1375 &sb_phys, GFP_KERNEL);
1376 if (!sb_virt) {
1377 DP_ERR(edev, "Status block allocation failed\n");
1378 return -ENOMEM;
1379 }
1380
1381 rc = edev->ops->common->sb_init(edev->cdev, sb_info,
1382 sb_virt, sb_phys, sb_id,
1383 QED_SB_TYPE_L2_QUEUE);
1384 if (rc) {
1385 DP_ERR(edev, "Status block initialization failed\n");
1386 dma_free_coherent(&edev->pdev->dev, sizeof(*sb_virt),
1387 sb_virt, sb_phys);
1388 return rc;
1389 }
1390
1391 return 0;
1392}
1393
1394static void qede_free_rx_buffers(struct qede_dev *edev,
1395 struct qede_rx_queue *rxq)
1396{
1397 u16 i;
1398
1399 for (i = rxq->sw_rx_cons; i != rxq->sw_rx_prod; i++) {
1400 struct sw_rx_data *rx_buf;
1401 u8 *data;
1402
1403 rx_buf = &rxq->sw_rx_ring[i & NUM_RX_BDS_MAX];
1404 data = rx_buf->data;
1405
1406 dma_unmap_single(&edev->pdev->dev,
1407 dma_unmap_addr(rx_buf, mapping),
1408 rxq->rx_buf_size, DMA_FROM_DEVICE);
1409
1410 rx_buf->data = NULL;
1411 kfree(data);
1412 }
1413}
1414
1415static void qede_free_mem_rxq(struct qede_dev *edev,
1416 struct qede_rx_queue *rxq)
1417{
1418 /* Free rx buffers */
1419 qede_free_rx_buffers(edev, rxq);
1420
1421 /* Free the parallel SW ring */
1422 kfree(rxq->sw_rx_ring);
1423
1424 /* Free the real RQ ring used by FW */
1425 edev->ops->common->chain_free(edev->cdev, &rxq->rx_bd_ring);
1426 edev->ops->common->chain_free(edev->cdev, &rxq->rx_comp_ring);
1427}
1428
1429static int qede_alloc_rx_buffer(struct qede_dev *edev,
1430 struct qede_rx_queue *rxq)
1431{
1432 struct sw_rx_data *sw_rx_data;
1433 struct eth_rx_bd *rx_bd;
1434 dma_addr_t mapping;
1435 u16 rx_buf_size;
1436 u8 *data;
1437
1438 rx_buf_size = rxq->rx_buf_size;
1439
1440 data = kmalloc(rx_buf_size, GFP_ATOMIC);
1441 if (unlikely(!data)) {
1442 DP_NOTICE(edev, "Failed to allocate Rx data\n");
1443 return -ENOMEM;
1444 }
1445
1446 mapping = dma_map_single(&edev->pdev->dev, data,
1447 rx_buf_size, DMA_FROM_DEVICE);
1448 if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
1449 kfree(data);
1450 DP_NOTICE(edev, "Failed to map Rx buffer\n");
1451 return -ENOMEM;
1452 }
1453
1454 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
1455 sw_rx_data->data = data;
1456
1457 dma_unmap_addr_set(sw_rx_data, mapping, mapping);
1458
1459 /* Advance PROD and get BD pointer */
1460 rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring);
1461 WARN_ON(!rx_bd);
1462 rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping));
1463 rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping));
1464
1465 rxq->sw_rx_prod++;
1466
1467 return 0;
1468}
1469
1470/* This function allocates all memory needed per Rx queue */
1471static int qede_alloc_mem_rxq(struct qede_dev *edev,
1472 struct qede_rx_queue *rxq)
1473{
1474 int i, rc, size, num_allocated;
1475
1476 rxq->num_rx_buffers = edev->q_num_rx_buffers;
1477
1478 rxq->rx_buf_size = NET_IP_ALIGN +
1479 ETH_OVERHEAD +
1480 edev->ndev->mtu +
1481 QEDE_FW_RX_ALIGN_END;
1482
1483 /* Allocate the parallel driver ring for Rx buffers */
1484 size = sizeof(*rxq->sw_rx_ring) * NUM_RX_BDS_MAX;
1485 rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL);
1486 if (!rxq->sw_rx_ring) {
1487 DP_ERR(edev, "Rx buffers ring allocation failed\n");
1488 goto err;
1489 }
1490
1491 /* Allocate FW Rx ring */
1492 rc = edev->ops->common->chain_alloc(edev->cdev,
1493 QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1494 QED_CHAIN_MODE_NEXT_PTR,
1495 NUM_RX_BDS_MAX,
1496 sizeof(struct eth_rx_bd),
1497 &rxq->rx_bd_ring);
1498
1499 if (rc)
1500 goto err;
1501
1502 /* Allocate FW completion ring */
1503 rc = edev->ops->common->chain_alloc(edev->cdev,
1504 QED_CHAIN_USE_TO_CONSUME,
1505 QED_CHAIN_MODE_PBL,
1506 NUM_RX_BDS_MAX,
1507 sizeof(union eth_rx_cqe),
1508 &rxq->rx_comp_ring);
1509 if (rc)
1510 goto err;
1511
1512 /* Allocate buffers for the Rx ring */
1513 for (i = 0; i < rxq->num_rx_buffers; i++) {
1514 rc = qede_alloc_rx_buffer(edev, rxq);
1515 if (rc)
1516 break;
1517 }
1518 num_allocated = i;
1519 if (!num_allocated) {
1520 DP_ERR(edev, "Rx buffers allocation failed\n");
1521 goto err;
1522 } else if (num_allocated < rxq->num_rx_buffers) {
1523 DP_NOTICE(edev,
1524 "Allocated less buffers than desired (%d allocated)\n",
1525 num_allocated);
1526 }
1527
1528 return 0;
1529
1530err:
1531 qede_free_mem_rxq(edev, rxq);
1532 return -ENOMEM;
1533}
1534
1535static void qede_free_mem_txq(struct qede_dev *edev,
1536 struct qede_tx_queue *txq)
1537{
1538 /* Free the parallel SW ring */
1539 kfree(txq->sw_tx_ring);
1540
1541 /* Free the real Tx ring (PBL) used by FW */
1542 edev->ops->common->chain_free(edev->cdev, &txq->tx_pbl);
1543}
1544
1545/* This function allocates all memory needed per Tx queue */
1546static int qede_alloc_mem_txq(struct qede_dev *edev,
1547 struct qede_tx_queue *txq)
1548{
1549 int size, rc;
1550 union eth_tx_bd_types *p_virt;
1551
1552 txq->num_tx_buffers = edev->q_num_tx_buffers;
1553
1554 /* Allocate the parallel driver ring for Tx buffers */
1555 size = sizeof(*txq->sw_tx_ring) * NUM_TX_BDS_MAX;
1556 txq->sw_tx_ring = kzalloc(size, GFP_KERNEL);
1557 if (!txq->sw_tx_ring) {
1558 DP_NOTICE(edev, "Tx buffers ring allocation failed\n");
1559 goto err;
1560 }
1561
1562 rc = edev->ops->common->chain_alloc(edev->cdev,
1563 QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1564 QED_CHAIN_MODE_PBL,
1565 NUM_TX_BDS_MAX,
1566 sizeof(*p_virt),
1567 &txq->tx_pbl);
1568 if (rc)
1569 goto err;
1570
1571 return 0;
1572
1573err:
1574 qede_free_mem_txq(edev, txq);
1575 return -ENOMEM;
1576}
1577
1578/* This function frees all memory of a single fp */
1579static void qede_free_mem_fp(struct qede_dev *edev,
1580 struct qede_fastpath *fp)
1581{
1582 int tc;
1583
1584 qede_free_mem_sb(edev, fp->sb_info);
1585
1586 qede_free_mem_rxq(edev, fp->rxq);
1587
1588 for (tc = 0; tc < edev->num_tc; tc++)
1589 qede_free_mem_txq(edev, &fp->txqs[tc]);
1590}
1591
1592/* This function allocates all memory needed for a single fp (i.e. an entity
1593 * which contains status block, one rx queue and multiple per-TC tx queues).
1594 */
1595static int qede_alloc_mem_fp(struct qede_dev *edev,
1596 struct qede_fastpath *fp)
1597{
1598 int rc, tc;
1599
1600 rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->rss_id);
1601 if (rc)
1602 goto err;
1603
1604 rc = qede_alloc_mem_rxq(edev, fp->rxq);
1605 if (rc)
1606 goto err;
1607
1608 for (tc = 0; tc < edev->num_tc; tc++) {
1609 rc = qede_alloc_mem_txq(edev, &fp->txqs[tc]);
1610 if (rc)
1611 goto err;
1612 }
1613
1614 return 0;
1615
1616err:
1617 qede_free_mem_fp(edev, fp);
1618 return -ENOMEM;
1619}
1620
1621static void qede_free_mem_load(struct qede_dev *edev)
1622{
1623 int i;
1624
1625 for_each_rss(i) {
1626 struct qede_fastpath *fp = &edev->fp_array[i];
1627
1628 qede_free_mem_fp(edev, fp);
1629 }
1630}
1631
1632/* This function allocates all qede memory at NIC load. */
1633static int qede_alloc_mem_load(struct qede_dev *edev)
1634{
1635 int rc = 0, rss_id;
1636
1637 for (rss_id = 0; rss_id < QEDE_RSS_CNT(edev); rss_id++) {
1638 struct qede_fastpath *fp = &edev->fp_array[rss_id];
1639
1640 rc = qede_alloc_mem_fp(edev, fp);
1641 if (rc)
1642 break;
1643 }
1644
1645 if (rss_id != QEDE_RSS_CNT(edev)) {
1646 /* Failed allocating memory for all the queues */
1647 if (!rss_id) {
1648 DP_ERR(edev,
1649 "Failed to allocate memory for the leading queue\n");
1650 rc = -ENOMEM;
1651 } else {
1652 DP_NOTICE(edev,
1653 "Failed to allocate memory for all of RSS queues\n Desired: %d queues, allocated: %d queues\n",
1654 QEDE_RSS_CNT(edev), rss_id);
1655 }
1656 edev->num_rss = rss_id;
1657 }
1658
1659 return 0;
1660}
1661
1662/* This function inits fp content and resets the SB, RXQ and TXQ structures */
1663static void qede_init_fp(struct qede_dev *edev)
1664{
1665 int rss_id, txq_index, tc;
1666 struct qede_fastpath *fp;
1667
1668 for_each_rss(rss_id) {
1669 fp = &edev->fp_array[rss_id];
1670
1671 fp->edev = edev;
1672 fp->rss_id = rss_id;
1673
1674 memset((void *)&fp->napi, 0, sizeof(fp->napi));
1675
1676 memset((void *)fp->sb_info, 0, sizeof(*fp->sb_info));
1677
1678 memset((void *)fp->rxq, 0, sizeof(*fp->rxq));
1679 fp->rxq->rxq_id = rss_id;
1680
1681 memset((void *)fp->txqs, 0, (edev->num_tc * sizeof(*fp->txqs)));
1682 for (tc = 0; tc < edev->num_tc; tc++) {
1683 txq_index = tc * QEDE_RSS_CNT(edev) + rss_id;
1684 fp->txqs[tc].index = txq_index;
1685 }
1686
1687 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1688 edev->ndev->name, rss_id);
1689 }
1690}
1691
1692static int qede_set_real_num_queues(struct qede_dev *edev)
1693{
1694 int rc = 0;
1695
1696 rc = netif_set_real_num_tx_queues(edev->ndev, QEDE_TSS_CNT(edev));
1697 if (rc) {
1698 DP_NOTICE(edev, "Failed to set real number of Tx queues\n");
1699 return rc;
1700 }
1701 rc = netif_set_real_num_rx_queues(edev->ndev, QEDE_RSS_CNT(edev));
1702 if (rc) {
1703 DP_NOTICE(edev, "Failed to set real number of Rx queues\n");
1704 return rc;
1705 }
1706
1707 return 0;
1708}
1709
1710static void qede_napi_disable_remove(struct qede_dev *edev)
1711{
1712 int i;
1713
1714 for_each_rss(i) {
1715 napi_disable(&edev->fp_array[i].napi);
1716
1717 netif_napi_del(&edev->fp_array[i].napi);
1718 }
1719}
1720
1721static void qede_napi_add_enable(struct qede_dev *edev)
1722{
1723 int i;
1724
1725 /* Add NAPI objects */
1726 for_each_rss(i) {
1727 netif_napi_add(edev->ndev, &edev->fp_array[i].napi,
1728 qede_poll, NAPI_POLL_WEIGHT);
1729 napi_enable(&edev->fp_array[i].napi);
1730 }
1731}
1732
1733static void qede_sync_free_irqs(struct qede_dev *edev)
1734{
1735 int i;
1736
1737 for (i = 0; i < edev->int_info.used_cnt; i++) {
1738 if (edev->int_info.msix_cnt) {
1739 synchronize_irq(edev->int_info.msix[i].vector);
1740 free_irq(edev->int_info.msix[i].vector,
1741 &edev->fp_array[i]);
1742 } else {
1743 edev->ops->common->simd_handler_clean(edev->cdev, i);
1744 }
1745 }
1746
1747 edev->int_info.used_cnt = 0;
1748}
1749
1750static int qede_req_msix_irqs(struct qede_dev *edev)
1751{
1752 int i, rc;
1753
1754 /* Sanitize number of interrupts == number of prepared RSS queues */
1755 if (QEDE_RSS_CNT(edev) > edev->int_info.msix_cnt) {
1756 DP_ERR(edev,
1757 "Interrupt mismatch: %d RSS queues > %d MSI-x vectors\n",
1758 QEDE_RSS_CNT(edev), edev->int_info.msix_cnt);
1759 return -EINVAL;
1760 }
1761
1762 for (i = 0; i < QEDE_RSS_CNT(edev); i++) {
1763 rc = request_irq(edev->int_info.msix[i].vector,
1764 qede_msix_fp_int, 0, edev->fp_array[i].name,
1765 &edev->fp_array[i]);
1766 if (rc) {
1767 DP_ERR(edev, "Request fp %d irq failed\n", i);
1768 qede_sync_free_irqs(edev);
1769 return rc;
1770 }
1771 DP_VERBOSE(edev, NETIF_MSG_INTR,
1772 "Requested fp irq for %s [entry %d]. Cookie is at %p\n",
1773 edev->fp_array[i].name, i,
1774 &edev->fp_array[i]);
1775 edev->int_info.used_cnt++;
1776 }
1777
1778 return 0;
1779}
1780
1781static void qede_simd_fp_handler(void *cookie)
1782{
1783 struct qede_fastpath *fp = (struct qede_fastpath *)cookie;
1784
1785 napi_schedule_irqoff(&fp->napi);
1786}
1787
1788static int qede_setup_irqs(struct qede_dev *edev)
1789{
1790 int i, rc = 0;
1791
1792 /* Learn Interrupt configuration */
1793 rc = edev->ops->common->get_fp_int(edev->cdev, &edev->int_info);
1794 if (rc)
1795 return rc;
1796
1797 if (edev->int_info.msix_cnt) {
1798 rc = qede_req_msix_irqs(edev);
1799 if (rc)
1800 return rc;
1801 edev->ndev->irq = edev->int_info.msix[0].vector;
1802 } else {
1803 const struct qed_common_ops *ops;
1804
1805 /* qed should receive the RSS ids and callbacks */
1806 ops = edev->ops->common;
1807 for (i = 0; i < QEDE_RSS_CNT(edev); i++)
1808 ops->simd_handler_config(edev->cdev,
1809 &edev->fp_array[i], i,
1810 qede_simd_fp_handler);
1811 edev->int_info.used_cnt = QEDE_RSS_CNT(edev);
1812 }
1813 return 0;
1814}
1815
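/* Wait (roughly one to two seconds, in 1-2ms steps) for the Tx queue's SW
 * consumer to catch up with the producer; if it does not and allow_drain is
 * set, ask the MCP to drain the queue and retry once.
 */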
1816static int qede_drain_txq(struct qede_dev *edev,
1817 struct qede_tx_queue *txq,
1818 bool allow_drain)
1819{
1820 int rc, cnt = 1000;
1821
1822 while (txq->sw_tx_cons != txq->sw_tx_prod) {
1823 if (!cnt) {
1824 if (allow_drain) {
1825 DP_NOTICE(edev,
1826 "Tx queue[%d] is stuck, requesting MCP to drain\n",
1827 txq->index);
1828 rc = edev->ops->common->drain(edev->cdev);
1829 if (rc)
1830 return rc;
1831 return qede_drain_txq(edev, txq, false);
1832 }
1833 DP_NOTICE(edev,
1834 "Timeout waiting for tx queue[%d]: PROD=%d, CONS=%d\n",
1835 txq->index, txq->sw_tx_prod,
1836 txq->sw_tx_cons);
1837 return -ENODEV;
1838 }
1839 cnt--;
1840 usleep_range(1000, 2000);
1841 barrier();
1842 }
1843
1844 /* FW finished processing, wait for HW to transmit all tx packets */
1845 usleep_range(1000, 2000);
1846
1847 return 0;
1848}
1849
1850static int qede_stop_queues(struct qede_dev *edev)
1851{
1852 struct qed_update_vport_params vport_update_params;
1853 struct qed_dev *cdev = edev->cdev;
1854 int rc, tc, i;
1855
1856 /* Disable the vport */
1857 memset(&vport_update_params, 0, sizeof(vport_update_params));
1858 vport_update_params.vport_id = 0;
1859 vport_update_params.update_vport_active_flg = 1;
1860 vport_update_params.vport_active_flg = 0;
1861 vport_update_params.update_rss_flg = 0;
1862
1863 rc = edev->ops->vport_update(cdev, &vport_update_params);
1864 if (rc) {
1865 DP_ERR(edev, "Failed to update vport\n");
1866 return rc;
1867 }
1868
1869 /* Flush Tx queues. If needed, request drain from MCP */
1870 for_each_rss(i) {
1871 struct qede_fastpath *fp = &edev->fp_array[i];
1872
1873 for (tc = 0; tc < edev->num_tc; tc++) {
1874 struct qede_tx_queue *txq = &fp->txqs[tc];
1875
1876 rc = qede_drain_txq(edev, txq, true);
1877 if (rc)
1878 return rc;
1879 }
1880 }
1881
1882 /* Stop all Queues in reverse order */
1883 for (i = QEDE_RSS_CNT(edev) - 1; i >= 0; i--) {
1884 struct qed_stop_rxq_params rx_params;
1885
1886 /* Stop the Tx Queue(s)*/
1887 for (tc = 0; tc < edev->num_tc; tc++) {
1888 struct qed_stop_txq_params tx_params;
1889
1890 tx_params.rss_id = i;
1891 tx_params.tx_queue_id = tc * QEDE_RSS_CNT(edev) + i;
1892 rc = edev->ops->q_tx_stop(cdev, &tx_params);
1893 if (rc) {
1894 DP_ERR(edev, "Failed to stop TXQ #%d\n",
1895 tx_params.tx_queue_id);
1896 return rc;
1897 }
1898 }
1899
1900 /* Stop the Rx Queue*/
1901 memset(&rx_params, 0, sizeof(rx_params));
1902 rx_params.rss_id = i;
1903 rx_params.rx_queue_id = i;
1904
1905 rc = edev->ops->q_rx_stop(cdev, &rx_params);
1906 if (rc) {
1907 DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
1908 return rc;
1909 }
1910 }
1911
1912 /* Stop the vport */
1913 rc = edev->ops->vport_stop(cdev, 0);
1914 if (rc)
1915 DP_ERR(edev, "Failed to stop VPORT\n");
1916
1917 return rc;
1918}
1919
1920static int qede_start_queues(struct qede_dev *edev)
1921{
1922 int rc, tc, i;
1923 int vport_id = 0, drop_ttl0_flg = 1, vlan_removal_en = 1;
1924 struct qed_dev *cdev = edev->cdev;
1925 struct qed_update_vport_rss_params *rss_params = &edev->rss_params;
1926 struct qed_update_vport_params vport_update_params;
1927 struct qed_queue_start_common_params q_params;
1928
1929 if (!edev->num_rss) {
1930 DP_ERR(edev,
1931 "Cannot update V-VPORT as active as there are no Rx queues\n");
1932 return -EINVAL;
1933 }
1934
1935 rc = edev->ops->vport_start(cdev, vport_id,
1936 edev->ndev->mtu,
1937 drop_ttl0_flg,
1938 vlan_removal_en);
1939
1940 if (rc) {
1941 DP_ERR(edev, "Start V-PORT failed %d\n", rc);
1942 return rc;
1943 }
1944
1945 DP_VERBOSE(edev, NETIF_MSG_IFUP,
1946 "Start vport ramrod passed, vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
1947 vport_id, edev->ndev->mtu + 0xe, vlan_removal_en);
1948
1949 for_each_rss(i) {
1950 struct qede_fastpath *fp = &edev->fp_array[i];
1951 dma_addr_t phys_table = fp->rxq->rx_comp_ring.pbl.p_phys_table;
1952
1953 memset(&q_params, 0, sizeof(q_params));
1954 q_params.rss_id = i;
1955 q_params.queue_id = i;
1956 q_params.vport_id = 0;
1957 q_params.sb = fp->sb_info->igu_sb_id;
1958 q_params.sb_idx = RX_PI;
1959
1960 rc = edev->ops->q_rx_start(cdev, &q_params,
1961 fp->rxq->rx_buf_size,
1962 fp->rxq->rx_bd_ring.p_phys_addr,
1963 phys_table,
1964 fp->rxq->rx_comp_ring.page_cnt,
1965 &fp->rxq->hw_rxq_prod_addr);
1966 if (rc) {
1967 DP_ERR(edev, "Start RXQ #%d failed %d\n", i, rc);
1968 return rc;
1969 }
1970
1971 fp->rxq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[RX_PI];
1972
1973 qede_update_rx_prod(edev, fp->rxq);
1974
1975 for (tc = 0; tc < edev->num_tc; tc++) {
1976 struct qede_tx_queue *txq = &fp->txqs[tc];
1977 int txq_index = tc * QEDE_RSS_CNT(edev) + i;
1978
1979 memset(&q_params, 0, sizeof(q_params));
1980 q_params.rss_id = i;
1981 q_params.queue_id = txq_index;
1982 q_params.vport_id = 0;
1983 q_params.sb = fp->sb_info->igu_sb_id;
1984 q_params.sb_idx = TX_PI(tc);
1985
1986 rc = edev->ops->q_tx_start(cdev, &q_params,
1987 txq->tx_pbl.pbl.p_phys_table,
1988 txq->tx_pbl.page_cnt,
1989 &txq->doorbell_addr);
1990 if (rc) {
1991 DP_ERR(edev, "Start TXQ #%d failed %d\n",
1992 txq_index, rc);
1993 return rc;
1994 }
1995
1996 txq->hw_cons_ptr =
1997 &fp->sb_info->sb_virt->pi_array[TX_PI(tc)];
1998 SET_FIELD(txq->tx_db.data.params,
1999 ETH_DB_DATA_DEST, DB_DEST_XCM);
2000 SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
2001 DB_AGG_CMD_SET);
2002 SET_FIELD(txq->tx_db.data.params,
2003 ETH_DB_DATA_AGG_VAL_SEL,
2004 DQ_XCM_ETH_TX_BD_PROD_CMD);
2005
2006 txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
2007 }
2008 }
2009
2010 /* Prepare and send the vport enable */
2011 memset(&vport_update_params, 0, sizeof(vport_update_params));
2012 vport_update_params.vport_id = vport_id;
2013 vport_update_params.update_vport_active_flg = 1;
2014 vport_update_params.vport_active_flg = 1;
2015
2016 /* Fill struct with RSS params */
2017 if (QEDE_RSS_CNT(edev) > 1) {
2018 vport_update_params.update_rss_flg = 1;
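		/* Spread the 128-entry RSS indirection table evenly across the
		 * active Rx queues using the default ethtool mapping.
		 */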
2019 for (i = 0; i < 128; i++)
2020 rss_params->rss_ind_table[i] =
2021 ethtool_rxfh_indir_default(i, QEDE_RSS_CNT(edev));
2022 netdev_rss_key_fill(rss_params->rss_key,
2023 sizeof(rss_params->rss_key));
2024 } else {
2025 memset(rss_params, 0, sizeof(*rss_params));
2026 }
2027 memcpy(&vport_update_params.rss_params, rss_params,
2028 sizeof(*rss_params));
2029
2030 rc = edev->ops->vport_update(cdev, &vport_update_params);
2031 if (rc) {
2032 DP_ERR(edev, "Update V-PORT failed %d\n", rc);
2033 return rc;
2034 }
2035
2036 return 0;
2037}
2038
2039enum qede_unload_mode {
2040 QEDE_UNLOAD_NORMAL,
2041};
2042
2043static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode)
2044{
2045 int rc;
2046
2047 DP_INFO(edev, "Starting qede unload\n");
2048
2049 /* Close OS Tx */
2050 netif_tx_disable(edev->ndev);
2051 netif_carrier_off(edev->ndev);
2052
2053 rc = qede_stop_queues(edev);
2054 if (rc) {
2055 qede_sync_free_irqs(edev);
2056 goto out;
2057 }
2058
2059 DP_INFO(edev, "Stopped Queues\n");
2060
2061 edev->ops->fastpath_stop(edev->cdev);
2062
2063 /* Release the interrupts */
2064 qede_sync_free_irqs(edev);
2065 edev->ops->common->set_fp_int(edev->cdev, 0);
2066
2067 qede_napi_disable_remove(edev);
2068
2069 qede_free_mem_load(edev);
2070 qede_free_fp_array(edev);
2071
2072out:
2073 mutex_unlock(&edev->qede_lock);
2074 DP_INFO(edev, "Ending qede unload\n");
2075}
2076
2077enum qede_load_mode {
2078 QEDE_LOAD_NORMAL,
2079};
2080
2081static int qede_load(struct qede_dev *edev, enum qede_load_mode mode)
2082{
2083 int rc;
2084
2085 DP_INFO(edev, "Starting qede load\n");
2086
2087 rc = qede_set_num_queues(edev);
2088 if (rc)
2089 goto err0;
2090
2091 rc = qede_alloc_fp_array(edev);
2092 if (rc)
2093 goto err0;
2094
2095 qede_init_fp(edev);
2096
2097 rc = qede_alloc_mem_load(edev);
2098 if (rc)
2099 goto err1;
2100 DP_INFO(edev, "Allocated %d RSS queues on %d TC/s\n",
2101 QEDE_RSS_CNT(edev), edev->num_tc);
2102
2103 rc = qede_set_real_num_queues(edev);
2104 if (rc)
2105 goto err2;
2106
2107 qede_napi_add_enable(edev);
2108 DP_INFO(edev, "Napi added and enabled\n");
2109
2110 rc = qede_setup_irqs(edev);
2111 if (rc)
2112 goto err3;
2113 DP_INFO(edev, "Setup IRQs succeeded\n");
2114
2115 rc = qede_start_queues(edev);
2116 if (rc)
2117 goto err4;
2118 DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");
2119
2120 /* Add primary mac and set Rx filters */
2121 ether_addr_copy(edev->primary_mac, edev->ndev->dev_addr);
2122
2123 DP_INFO(edev, "Ending successfully qede load\n");
2124
2125 return 0;
2126
2127err4:
2128 qede_sync_free_irqs(edev);
2129 memset(&edev->int_info.msix_cnt, 0, sizeof(struct qed_int_info));
2130err3:
2131 qede_napi_disable_remove(edev);
2132err2:
2133 qede_free_mem_load(edev);
2134err1:
2135 edev->ops->common->set_fp_int(edev->cdev, 0);
2136 qede_free_fp_array(edev);
2137 edev->num_rss = 0;
2138err0:
2139 return rc;
2140}
2141
2142/* called with rtnl_lock */
2143static int qede_open(struct net_device *ndev)
2144{
2145 struct qede_dev *edev = netdev_priv(ndev);
2146
2147 netif_carrier_off(ndev);
2148
2149 edev->ops->common->set_power_state(edev->cdev, PCI_D0);
2150
2151 return qede_load(edev, QEDE_LOAD_NORMAL);
2152}
2153
2154static int qede_close(struct net_device *ndev)
2155{
2156 struct qede_dev *edev = netdev_priv(ndev);
2157
2158 qede_unload(edev, QEDE_UNLOAD_NORMAL);
2159
2160 return 0;
2161}