net: thunderx: Receive hashing HW offload support
drivers/net/ethernet/cavium/thunder/nicvf_main.c
1 /*
2 * Copyright (C) 2015 Cavium, Inc.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of version 2 of the GNU General Public License
6 * as published by the Free Software Foundation.
7 */
8
9 #include <linux/module.h>
10 #include <linux/interrupt.h>
11 #include <linux/pci.h>
12 #include <linux/netdevice.h>
13 #include <linux/etherdevice.h>
14 #include <linux/ethtool.h>
15 #include <linux/log2.h>
16 #include <linux/prefetch.h>
17 #include <linux/irq.h>
18
19 #include "nic_reg.h"
20 #include "nic.h"
21 #include "nicvf_queues.h"
22 #include "thunder_bgx.h"
23
24 #define DRV_NAME "thunder-nicvf"
25 #define DRV_VERSION "1.0"
26
27 /* Supported devices */
28 static const struct pci_device_id nicvf_id_table[] = {
29 { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
30 PCI_DEVICE_ID_THUNDER_NIC_VF,
31 PCI_VENDOR_ID_CAVIUM, 0xA11E) },
32 { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
33 PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF,
34 PCI_VENDOR_ID_CAVIUM, 0xA11E) },
35 { 0, } /* end of table */
36 };
37
38 MODULE_AUTHOR("Sunil Goutham");
39 MODULE_DESCRIPTION("Cavium Thunder NIC Virtual Function Driver");
40 MODULE_LICENSE("GPL v2");
41 MODULE_VERSION(DRV_VERSION);
42 MODULE_DEVICE_TABLE(pci, nicvf_id_table);
43
44 static int debug = 0x00;
45 module_param(debug, int, 0644);
46 MODULE_PARM_DESC(debug, "Debug message level bitmap");
47
48 static int cpi_alg = CPI_ALG_NONE;
49 module_param(cpi_alg, int, S_IRUGO);
50 MODULE_PARM_DESC(cpi_alg,
51 "PFC algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)");
52
53 static inline void nicvf_set_rx_frame_cnt(struct nicvf *nic,
54 struct sk_buff *skb)
55 {
56 if (skb->len <= 64)
57 nic->drv_stats.rx_frames_64++;
58 else if (skb->len <= 127)
59 nic->drv_stats.rx_frames_127++;
60 else if (skb->len <= 255)
61 nic->drv_stats.rx_frames_255++;
62 else if (skb->len <= 511)
63 nic->drv_stats.rx_frames_511++;
64 else if (skb->len <= 1023)
65 nic->drv_stats.rx_frames_1023++;
66 else if (skb->len <= 1518)
67 nic->drv_stats.rx_frames_1518++;
68 else
69 nic->drv_stats.rx_frames_jumbo++;
70 }
71
72 /* The Cavium ThunderX network controller can *only* be found in SoCs
73 * containing the ThunderX ARM64 CPU implementation. All accesses to the device
74 * registers on this platform are implicitly strongly ordered with respect
75 * to memory accesses. So writeq_relaxed() and readq_relaxed() are safe to use
76 * with no memory barriers in this driver. The readq()/writeq() functions add
77 * explicit ordering operations, which in this case are redundant and only
78 * add overhead.
79 */
80
81 /* Register read/write APIs */
82 void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val)
83 {
84 writeq_relaxed(val, nic->reg_base + offset);
85 }
86
87 u64 nicvf_reg_read(struct nicvf *nic, u64 offset)
88 {
89 return readq_relaxed(nic->reg_base + offset);
90 }
91
92 void nicvf_queue_reg_write(struct nicvf *nic, u64 offset,
93 u64 qidx, u64 val)
94 {
95 void __iomem *addr = nic->reg_base + offset;
96
97 writeq_relaxed(val, addr + (qidx << NIC_Q_NUM_SHIFT));
98 }
99
100 u64 nicvf_queue_reg_read(struct nicvf *nic, u64 offset, u64 qidx)
101 {
102 void __iomem *addr = nic->reg_base + offset;
103
104 return readq_relaxed(addr + (qidx << NIC_Q_NUM_SHIFT));
105 }
106
107 /* VF -> PF mailbox communication */
108 static void nicvf_write_to_mbx(struct nicvf *nic, union nic_mbx *mbx)
109 {
110 u64 *msg = (u64 *)mbx;
111
112 nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 0, msg[0]);
113 nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 8, msg[1]);
114 }
115
116 int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
117 {
118 int timeout = NIC_MBOX_MSG_TIMEOUT;
119 int sleep = 10;
120
121 nic->pf_acked = false;
122 nic->pf_nacked = false;
123
124 nicvf_write_to_mbx(nic, mbx);
125
126 /* Wait for the message just written to be acked; timeout is 2 sec */
127 while (!nic->pf_acked) {
128 if (nic->pf_nacked)
129 return -EINVAL;
130 msleep(sleep);
131 if (nic->pf_acked)
132 break;
133 timeout -= sleep;
134 if (!timeout) {
135 netdev_err(nic->netdev,
136 "PF didn't ack to mbox msg %d from VF%d\n",
137 (mbx->msg.msg & 0xFF), nic->vf_id);
138 return -EBUSY;
139 }
140 }
141 return 0;
142 }
143
144 /* Checks if the VF is able to communicate with the PF
145 * and also gets the VNIC number this VF is associated with.
146 */
147 static int nicvf_check_pf_ready(struct nicvf *nic)
148 {
149 union nic_mbx mbx = {};
150
151 mbx.msg.msg = NIC_MBOX_MSG_READY;
152 if (nicvf_send_msg_to_pf(nic, &mbx)) {
153 netdev_err(nic->netdev,
154 "PF didn't respond to READY msg\n");
155 return 0;
156 }
157
158 return 1;
159 }
160
161 static void nicvf_read_bgx_stats(struct nicvf *nic, struct bgx_stats_msg *bgx)
162 {
163 if (bgx->rx)
164 nic->bgx_stats.rx_stats[bgx->idx] = bgx->stats;
165 else
166 nic->bgx_stats.tx_stats[bgx->idx] = bgx->stats;
167 }
168
169 static void nicvf_handle_mbx_intr(struct nicvf *nic)
170 {
171 union nic_mbx mbx = {};
172 u64 *mbx_data;
173 u64 mbx_addr;
174 int i;
175
176 mbx_addr = NIC_VF_PF_MAILBOX_0_1;
177 mbx_data = (u64 *)&mbx;
178
179 for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
180 *mbx_data = nicvf_reg_read(nic, mbx_addr);
181 mbx_data++;
182 mbx_addr += sizeof(u64);
183 }
184
185 netdev_dbg(nic->netdev, "Mbox message: msg: 0x%x\n", mbx.msg.msg);
186 switch (mbx.msg.msg) {
187 case NIC_MBOX_MSG_READY:
188 nic->pf_acked = true;
189 nic->vf_id = mbx.nic_cfg.vf_id & 0x7F;
190 nic->tns_mode = mbx.nic_cfg.tns_mode & 0x7F;
191 nic->node = mbx.nic_cfg.node_id;
192 if (!nic->set_mac_pending)
193 ether_addr_copy(nic->netdev->dev_addr,
194 mbx.nic_cfg.mac_addr);
195 nic->link_up = false;
196 nic->duplex = 0;
197 nic->speed = 0;
198 break;
199 case NIC_MBOX_MSG_ACK:
200 nic->pf_acked = true;
201 break;
202 case NIC_MBOX_MSG_NACK:
203 nic->pf_nacked = true;
204 break;
205 case NIC_MBOX_MSG_RSS_SIZE:
206 nic->rss_info.rss_size = mbx.rss_size.ind_tbl_size;
207 nic->pf_acked = true;
208 break;
209 case NIC_MBOX_MSG_BGX_STATS:
210 nicvf_read_bgx_stats(nic, &mbx.bgx_stats);
211 nic->pf_acked = true;
212 break;
213 case NIC_MBOX_MSG_BGX_LINK_CHANGE:
214 nic->pf_acked = true;
215 nic->link_up = mbx.link_status.link_up;
216 nic->duplex = mbx.link_status.duplex;
217 nic->speed = mbx.link_status.speed;
218 if (nic->link_up) {
219 netdev_info(nic->netdev, "%s: Link is Up %d Mbps %s\n",
220 nic->netdev->name, nic->speed,
221 nic->duplex == DUPLEX_FULL ?
222 "Full duplex" : "Half duplex");
223 netif_carrier_on(nic->netdev);
224 netif_tx_start_all_queues(nic->netdev);
225 } else {
226 netdev_info(nic->netdev, "%s: Link is Down\n",
227 nic->netdev->name);
228 netif_carrier_off(nic->netdev);
229 netif_tx_stop_all_queues(nic->netdev);
230 }
231 break;
232 default:
233 netdev_err(nic->netdev,
234 "Invalid message from PF, msg 0x%x\n", mbx.msg.msg);
235 break;
236 }
237 nicvf_clear_intr(nic, NICVF_INTR_MBOX, 0);
238 }
239
240 static int nicvf_hw_set_mac_addr(struct nicvf *nic, struct net_device *netdev)
241 {
242 union nic_mbx mbx = {};
243
244 mbx.mac.msg = NIC_MBOX_MSG_SET_MAC;
245 mbx.mac.vf_id = nic->vf_id;
246 ether_addr_copy(mbx.mac.mac_addr, netdev->dev_addr);
247
248 return nicvf_send_msg_to_pf(nic, &mbx);
249 }
250
251 static void nicvf_config_cpi(struct nicvf *nic)
252 {
253 union nic_mbx mbx = {};
254
255 mbx.cpi_cfg.msg = NIC_MBOX_MSG_CPI_CFG;
256 mbx.cpi_cfg.vf_id = nic->vf_id;
257 mbx.cpi_cfg.cpi_alg = nic->cpi_alg;
258 mbx.cpi_cfg.rq_cnt = nic->qs->rq_cnt;
259
260 nicvf_send_msg_to_pf(nic, &mbx);
261 }
262
263 static void nicvf_get_rss_size(struct nicvf *nic)
264 {
265 union nic_mbx mbx = {};
266
267 mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE;
268 mbx.rss_size.vf_id = nic->vf_id;
269 nicvf_send_msg_to_pf(nic, &mbx);
270 }
271
272 void nicvf_config_rss(struct nicvf *nic)
273 {
274 union nic_mbx mbx = {};
275 struct nicvf_rss_info *rss = &nic->rss_info;
276 int ind_tbl_len = rss->rss_size;
277 int i, nextq = 0;
278
279 mbx.rss_cfg.vf_id = nic->vf_id;
280 mbx.rss_cfg.hash_bits = rss->hash_bits;
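/* The whole indirection table may not fit in a single mailbox message,
 * so push it to the PF in chunks of up to RSS_IND_TBL_LEN_PER_MBX_MSG
 * entries; chunks after the first use the RSS_CFG_CONT message type.
 */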
281 while (ind_tbl_len) {
282 mbx.rss_cfg.tbl_offset = nextq;
283 mbx.rss_cfg.tbl_len = min(ind_tbl_len,
284 RSS_IND_TBL_LEN_PER_MBX_MSG);
285 mbx.rss_cfg.msg = mbx.rss_cfg.tbl_offset ?
286 NIC_MBOX_MSG_RSS_CFG_CONT : NIC_MBOX_MSG_RSS_CFG;
287
288 for (i = 0; i < mbx.rss_cfg.tbl_len; i++)
289 mbx.rss_cfg.ind_tbl[i] = rss->ind_tbl[nextq++];
290
291 nicvf_send_msg_to_pf(nic, &mbx);
292
293 ind_tbl_len -= mbx.rss_cfg.tbl_len;
294 }
295 }
296
297 void nicvf_set_rss_key(struct nicvf *nic)
298 {
299 struct nicvf_rss_info *rss = &nic->rss_info;
300 u64 key_addr = NIC_VNIC_RSS_KEY_0_4;
301 int idx;
302
303 for (idx = 0; idx < RSS_HASH_KEY_SIZE; idx++) {
304 nicvf_reg_write(nic, key_addr, rss->key[idx]);
305 key_addr += sizeof(u64);
306 }
307 }
308
309 static int nicvf_rss_init(struct nicvf *nic)
310 {
311 struct nicvf_rss_info *rss = &nic->rss_info;
312 int idx;
313
314 nicvf_get_rss_size(nic);
315
316 if (cpi_alg != CPI_ALG_NONE) {
317 rss->enable = false;
318 rss->hash_bits = 0;
319 return 0;
320 }
321
322 rss->enable = true;
323
324 /* Using the HW reset value for now */
325 rss->key[0] = 0xFEED0BADFEED0BADULL;
326 rss->key[1] = 0xFEED0BADFEED0BADULL;
327 rss->key[2] = 0xFEED0BADFEED0BADULL;
328 rss->key[3] = 0xFEED0BADFEED0BADULL;
329 rss->key[4] = 0xFEED0BADFEED0BADULL;
330
331 nicvf_set_rss_key(nic);
332
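/* Enable hashing of IP, TCP and UDP flows; the hash computed by the HW
 * is reported per packet in the RX CQE and passed to the stack in
 * nicvf_set_rxhash().
 */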
333 rss->cfg = RSS_IP_HASH_ENA | RSS_TCP_HASH_ENA | RSS_UDP_HASH_ENA;
334 nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, rss->cfg);
335
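/* hash_bits = log2(indirection table size); it is sent to the PF in
 * nicvf_config_rss() and presumably tells the HW how many hash bits to
 * use when indexing the table.
 */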
336 rss->hash_bits = ilog2(rounddown_pow_of_two(rss->rss_size));
337
338 for (idx = 0; idx < rss->rss_size; idx++)
339 rss->ind_tbl[idx] = ethtool_rxfh_indir_default(idx,
340 nic->qs->rq_cnt);
341 nicvf_config_rss(nic);
342 return 1;
343 }
344
345 int nicvf_set_real_num_queues(struct net_device *netdev,
346 int tx_queues, int rx_queues)
347 {
348 int err = 0;
349
350 err = netif_set_real_num_tx_queues(netdev, tx_queues);
351 if (err) {
352 netdev_err(netdev,
353 "Failed to set no of Tx queues: %d\n", tx_queues);
354 return err;
355 }
356
357 err = netif_set_real_num_rx_queues(netdev, rx_queues);
358 if (err)
359 netdev_err(netdev,
360 "Failed to set no of Rx queues: %d\n", rx_queues);
361 return err;
362 }
363
364 static int nicvf_init_resources(struct nicvf *nic)
365 {
366 int err;
367 union nic_mbx mbx = {};
368
369 mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;
370
371 /* Enable Qset */
372 nicvf_qset_config(nic, true);
373
374 /* Initialize queues and HW for data transfer */
375 err = nicvf_config_data_transfer(nic, true);
376 if (err) {
377 netdev_err(nic->netdev,
378 "Failed to alloc/config VF's QSet resources\n");
379 return err;
380 }
381
382 /* Send VF config done msg to PF */
383 nicvf_write_to_mbx(nic, &mbx);
384
385 return 0;
386 }
387
388 static void nicvf_snd_pkt_handler(struct net_device *netdev,
389 struct cmp_queue *cq,
390 struct cqe_send_t *cqe_tx, int cqe_type)
391 {
392 struct sk_buff *skb = NULL;
393 struct nicvf *nic = netdev_priv(netdev);
394 struct snd_queue *sq;
395 struct sq_hdr_subdesc *hdr;
396
397 sq = &nic->qs->sq[cqe_tx->sq_idx];
398
399 hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, cqe_tx->sqe_ptr);
400 if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER)
401 return;
402
403 netdev_dbg(nic->netdev,
404 "%s Qset #%d SQ #%d SQ ptr #%d subdesc count %d\n",
405 __func__, cqe_tx->sq_qs, cqe_tx->sq_idx,
406 cqe_tx->sqe_ptr, hdr->subdesc_cnt);
407
408 nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
409 nicvf_check_cqe_tx_errs(nic, cq, cqe_tx);
410 skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr];
411 /* For TSO offloaded packets only one head SKB needs to be freed */
412 if (skb) {
413 prefetch(skb);
414 dev_consume_skb_any(skb);
415 sq->skbuff[cqe_tx->sqe_ptr] = (u64)NULL;
416 }
417 }
418
419 static inline void nicvf_set_rxhash(struct net_device *netdev,
420 struct cqe_rx_t *cqe_rx,
421 struct sk_buff *skb)
422 {
423 u8 hash_type;
424 u32 hash;
425
426 if (!(netdev->features & NETIF_F_RXHASH))
427 return;
428
429 switch (cqe_rx->rss_alg) {
430 case RSS_ALG_TCP_IP:
431 case RSS_ALG_UDP_IP:
432 hash_type = PKT_HASH_TYPE_L4;
433 hash = cqe_rx->rss_tag;
434 break;
435 case RSS_ALG_IP:
436 hash_type = PKT_HASH_TYPE_L3;
437 hash = cqe_rx->rss_tag;
438 break;
439 default:
440 hash_type = PKT_HASH_TYPE_NONE;
441 hash = 0;
442 }
443
444 skb_set_hash(skb, hash, hash_type);
445 }
446
447 static void nicvf_rcv_pkt_handler(struct net_device *netdev,
448 struct napi_struct *napi,
449 struct cmp_queue *cq,
450 struct cqe_rx_t *cqe_rx, int cqe_type)
451 {
452 struct sk_buff *skb;
453 struct nicvf *nic = netdev_priv(netdev);
454 int err = 0;
455
456 /* Check for errors */
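/* An errored CQE that carries no receive buffers has nothing to free,
 * so it can be dropped right away; otherwise the buffers are picked up
 * via nicvf_get_rcv_skb() and the skb is freed below.
 */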
457 err = nicvf_check_cqe_rx_errs(nic, cq, cqe_rx);
458 if (err && !cqe_rx->rb_cnt)
459 return;
460
461 skb = nicvf_get_rcv_skb(nic, cqe_rx);
462 if (!skb) {
463 netdev_dbg(nic->netdev, "Packet not received\n");
464 return;
465 }
466
467 if (netif_msg_pktdata(nic)) {
468 netdev_info(nic->netdev, "%s: skb 0x%p, len=%d\n", netdev->name,
469 skb, skb->len);
470 print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1,
471 skb->data, skb->len, true);
472 }
473
474 /* If error packet, drop it here */
475 if (err) {
476 dev_kfree_skb_any(skb);
477 return;
478 }
479
480 nicvf_set_rx_frame_cnt(nic, skb);
481
482 nicvf_set_rxhash(netdev, cqe_rx, skb);
483
484 skb_record_rx_queue(skb, cqe_rx->rq_idx);
485 if (netdev->hw_features & NETIF_F_RXCSUM) {
486 /* HW by default verifies TCP/UDP/SCTP checksums */
487 skb->ip_summed = CHECKSUM_UNNECESSARY;
488 } else {
489 skb_checksum_none_assert(skb);
490 }
491
492 skb->protocol = eth_type_trans(skb, netdev);
493
494 if (napi && (netdev->features & NETIF_F_GRO))
495 napi_gro_receive(napi, skb);
496 else
497 netif_receive_skb(skb);
498 }
499
500 static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx,
501 struct napi_struct *napi, int budget)
502 {
503 int processed_cqe, work_done = 0, tx_done = 0;
504 int cqe_count, cqe_head;
505 struct nicvf *nic = netdev_priv(netdev);
506 struct queue_set *qs = nic->qs;
507 struct cmp_queue *cq = &qs->cq[cq_idx];
508 struct cqe_rx_t *cq_desc;
509 struct netdev_queue *txq;
510
511 spin_lock_bh(&cq->lock);
512 loop:
513 processed_cqe = 0;
514 /* Get the number of valid CQ entries to process */
515 cqe_count = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, cq_idx);
516 cqe_count &= CQ_CQE_COUNT;
517 if (!cqe_count)
518 goto done;
519
520 /* Get head of the valid CQ entries */
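/* The head index sits in bits [24:9] of the HEAD register, hence the
 * shift by 9 and the 16-bit mask below.
 */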
521 cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9;
522 cqe_head &= 0xFFFF;
523
524 netdev_dbg(nic->netdev, "%s CQ%d cqe_count %d cqe_head %d\n",
525 __func__, cq_idx, cqe_count, cqe_head);
526 while (processed_cqe < cqe_count) {
527 /* Get the CQ descriptor */
528 cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head);
529 cqe_head++;
530 cqe_head &= (cq->dmem.q_len - 1);
531 /* Initiate prefetch for next descriptor */
532 prefetch((struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head));
533
534 if ((work_done >= budget) && napi &&
535 (cq_desc->cqe_type != CQE_TYPE_SEND)) {
536 break;
537 }
538
539 netdev_dbg(nic->netdev, "CQ%d cq_desc->cqe_type %d\n",
540 cq_idx, cq_desc->cqe_type);
541 switch (cq_desc->cqe_type) {
542 case CQE_TYPE_RX:
543 nicvf_rcv_pkt_handler(netdev, napi, cq,
544 cq_desc, CQE_TYPE_RX);
545 work_done++;
546 break;
547 case CQE_TYPE_SEND:
548 nicvf_snd_pkt_handler(netdev, cq,
549 (void *)cq_desc, CQE_TYPE_SEND);
550 tx_done++;
551 break;
552 case CQE_TYPE_INVALID:
553 case CQE_TYPE_RX_SPLIT:
554 case CQE_TYPE_RX_TCP:
555 case CQE_TYPE_SEND_PTP:
556 /* Ignore for now */
557 break;
558 }
559 processed_cqe++;
560 }
561 netdev_dbg(nic->netdev,
562 "%s CQ%d processed_cqe %d work_done %d budget %d\n",
563 __func__, cq_idx, processed_cqe, work_done, budget);
564
565 /* Ring doorbell to inform H/W to reuse processed CQEs */
566 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR,
567 cq_idx, processed_cqe);
568
569 if ((work_done < budget) && napi)
570 goto loop;
571
572 done:
573 /* Wake up the TXQ if it was stopped earlier due to the SQ being full */
574 if (tx_done) {
575 txq = netdev_get_tx_queue(netdev, cq_idx);
576 if (netif_tx_queue_stopped(txq)) {
577 netif_tx_start_queue(txq);
578 nic->drv_stats.txq_wake++;
579 if (netif_msg_tx_err(nic))
580 netdev_warn(netdev,
581 "%s: Transmit queue wakeup SQ%d\n",
582 netdev->name, cq_idx);
583 }
584 }
585
586 spin_unlock_bh(&cq->lock);
587 return work_done;
588 }
589
590 static int nicvf_poll(struct napi_struct *napi, int budget)
591 {
592 u64 cq_head;
593 int work_done = 0;
594 struct net_device *netdev = napi->dev;
595 struct nicvf *nic = netdev_priv(netdev);
596 struct nicvf_cq_poll *cq;
597
598 cq = container_of(napi, struct nicvf_cq_poll, napi);
599 work_done = nicvf_cq_intr_handler(netdev, cq->cq_idx, napi, budget);
600
601 if (work_done < budget) {
602 /* Slow packet rate, exit polling */
603 napi_complete(napi);
604 /* Re-enable interrupts */
605 cq_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD,
606 cq->cq_idx);
607 nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->cq_idx);
608 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_HEAD,
609 cq->cq_idx, cq_head);
610 nicvf_enable_intr(nic, NICVF_INTR_CQ, cq->cq_idx);
611 }
612 return work_done;
613 }
614
615 /* Qset error interrupt handler
616 *
617 * As of now only CQ errors are handled
618 */
619 static void nicvf_handle_qs_err(unsigned long data)
620 {
621 struct nicvf *nic = (struct nicvf *)data;
622 struct queue_set *qs = nic->qs;
623 int qidx;
624 u64 status;
625
626 netif_tx_disable(nic->netdev);
627
628 /* Check if it is CQ err */
629 for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
630 status = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS,
631 qidx);
632 if (!(status & CQ_ERR_MASK))
633 continue;
634 /* Process already queued CQEs and reconfig CQ */
635 nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
636 nicvf_sq_disable(nic, qidx);
637 nicvf_cq_intr_handler(nic->netdev, qidx, NULL, 0);
638 nicvf_cmp_queue_config(nic, qs, qidx, true);
639 nicvf_sq_free_used_descs(nic->netdev, &qs->sq[qidx], qidx);
640 nicvf_sq_enable(nic, &qs->sq[qidx], qidx);
641
642 nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);
643 }
644
645 netif_tx_start_all_queues(nic->netdev);
646 /* Re-enable Qset error interrupt */
647 nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);
648 }
649
650 static irqreturn_t nicvf_misc_intr_handler(int irq, void *nicvf_irq)
651 {
652 struct nicvf *nic = (struct nicvf *)nicvf_irq;
653 u64 intr;
654
655 intr = nicvf_reg_read(nic, NIC_VF_INT);
656 /* Check for spurious interrupt */
657 if (!(intr & NICVF_INTR_MBOX_MASK))
658 return IRQ_HANDLED;
659
660 nicvf_handle_mbx_intr(nic);
661
662 return IRQ_HANDLED;
663 }
664
665 static irqreturn_t nicvf_intr_handler(int irq, void *nicvf_irq)
666 {
667 u64 qidx, intr, clear_intr = 0;
668 u64 cq_intr, rbdr_intr, qs_err_intr;
669 struct nicvf *nic = (struct nicvf *)nicvf_irq;
670 struct queue_set *qs = nic->qs;
671 struct nicvf_cq_poll *cq_poll = NULL;
672
673 intr = nicvf_reg_read(nic, NIC_VF_INT);
674 if (netif_msg_intr(nic))
675 netdev_info(nic->netdev, "%s: interrupt status 0x%llx\n",
676 nic->netdev->name, intr);
677
678 qs_err_intr = intr & NICVF_INTR_QS_ERR_MASK;
679 if (qs_err_intr) {
680 /* Disable Qset err interrupt and schedule softirq */
681 nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
682 tasklet_hi_schedule(&nic->qs_err_task);
683 clear_intr |= qs_err_intr;
684 }
685
686 /* Disable interrupts and start polling */
687 cq_intr = (intr & NICVF_INTR_CQ_MASK) >> NICVF_INTR_CQ_SHIFT;
688 for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
689 if (!(cq_intr & (1 << qidx)))
690 continue;
691 if (!nicvf_is_intr_enabled(nic, NICVF_INTR_CQ, qidx))
692 continue;
693
694 nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
695 clear_intr |= ((1 << qidx) << NICVF_INTR_CQ_SHIFT);
696
697 cq_poll = nic->napi[qidx];
698 /* Schedule NAPI */
699 if (cq_poll)
700 napi_schedule(&cq_poll->napi);
701 }
702
703 /* Handle RBDR interrupts */
704 rbdr_intr = (intr & NICVF_INTR_RBDR_MASK) >> NICVF_INTR_RBDR_SHIFT;
705 if (rbdr_intr) {
706 /* Disable RBDR interrupt and schedule softirq */
707 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
708 if (!nicvf_is_intr_enabled(nic, NICVF_INTR_RBDR, qidx))
709 continue;
710 nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
711 tasklet_hi_schedule(&nic->rbdr_task);
712 clear_intr |= ((1 << qidx) << NICVF_INTR_RBDR_SHIFT);
713 }
714 }
715
716 /* Clear interrupts */
717 nicvf_reg_write(nic, NIC_VF_INT, clear_intr);
718 return IRQ_HANDLED;
719 }
720
721 static int nicvf_enable_msix(struct nicvf *nic)
722 {
723 int ret, vec;
724
725 nic->num_vec = NIC_VF_MSIX_VECTORS;
726
727 for (vec = 0; vec < nic->num_vec; vec++)
728 nic->msix_entries[vec].entry = vec;
729
730 ret = pci_enable_msix(nic->pdev, nic->msix_entries, nic->num_vec);
731 if (ret) {
732 netdev_err(nic->netdev,
733 "Req for #%d msix vectors failed\n", nic->num_vec);
734 return 0;
735 }
736 nic->msix_enabled = 1;
737 return 1;
738 }
739
740 static void nicvf_disable_msix(struct nicvf *nic)
741 {
742 if (nic->msix_enabled) {
743 pci_disable_msix(nic->pdev);
744 nic->msix_enabled = 0;
745 nic->num_vec = 0;
746 }
747 }
748
749 static int nicvf_register_interrupts(struct nicvf *nic)
750 {
751 int irq, free, ret = 0;
752 int vector;
753
754 for_each_cq_irq(irq)
755 sprintf(nic->irq_name[irq], "NICVF%d CQ%d",
756 nic->vf_id, irq);
757
758 for_each_sq_irq(irq)
759 sprintf(nic->irq_name[irq], "NICVF%d SQ%d",
760 nic->vf_id, irq - NICVF_INTR_ID_SQ);
761
762 for_each_rbdr_irq(irq)
763 sprintf(nic->irq_name[irq], "NICVF%d RBDR%d",
764 nic->vf_id, irq - NICVF_INTR_ID_RBDR);
765
766 /* Register all interrupts except mailbox */
767 for (irq = 0; irq < NICVF_INTR_ID_SQ; irq++) {
768 vector = nic->msix_entries[irq].vector;
769 ret = request_irq(vector, nicvf_intr_handler,
770 0, nic->irq_name[irq], nic);
771 if (ret)
772 break;
773 nic->irq_allocated[irq] = true;
774 }
775
776 for (irq = NICVF_INTR_ID_SQ; irq < NICVF_INTR_ID_MISC; irq++) {
777 vector = nic->msix_entries[irq].vector;
778 ret = request_irq(vector, nicvf_intr_handler,
779 0, nic->irq_name[irq], nic);
780 if (ret)
781 break;
782 nic->irq_allocated[irq] = true;
783 }
784
785 sprintf(nic->irq_name[NICVF_INTR_ID_QS_ERR],
786 "NICVF%d Qset error", nic->vf_id);
787 if (!ret) {
788 vector = nic->msix_entries[NICVF_INTR_ID_QS_ERR].vector;
789 irq = NICVF_INTR_ID_QS_ERR;
790 ret = request_irq(vector, nicvf_intr_handler,
791 0, nic->irq_name[irq], nic);
792 if (!ret)
793 nic->irq_allocated[irq] = true;
794 }
795
796 if (ret) {
797 netdev_err(nic->netdev, "Request irq failed\n");
798 for (free = 0; free < irq; free++)
799 free_irq(nic->msix_entries[free].vector, nic);
800 return ret;
801 }
802
803 return 0;
804 }
805
806 static void nicvf_unregister_interrupts(struct nicvf *nic)
807 {
808 int irq;
809
810 /* Free registered interrupts */
811 for (irq = 0; irq < nic->num_vec; irq++) {
812 if (nic->irq_allocated[irq])
813 free_irq(nic->msix_entries[irq].vector, nic);
814 nic->irq_allocated[irq] = false;
815 }
816
817 /* Disable MSI-X */
818 nicvf_disable_msix(nic);
819 }
820
821 /* Initialize MSI-X vectors and register the MISC interrupt.
822 * Send a READY message to the PF to check if it's alive.
823 */
824 static int nicvf_register_misc_interrupt(struct nicvf *nic)
825 {
826 int ret = 0;
827 int irq = NICVF_INTR_ID_MISC;
828
829 /* Return if mailbox interrupt is already registered */
830 if (nic->msix_enabled)
831 return 0;
832
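/* Note: some failure paths below return a bare 1 rather than a -errno;
 * callers such as nicvf_open() and nicvf_probe() only check for non-zero.
 */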
833 /* Enable MSI-X */
834 if (!nicvf_enable_msix(nic))
835 return 1;
836
837 sprintf(nic->irq_name[irq], "%s Mbox", "NICVF");
838 /* Register Misc interrupt */
839 ret = request_irq(nic->msix_entries[irq].vector,
840 nicvf_misc_intr_handler, 0, nic->irq_name[irq], nic);
841
842 if (ret)
843 return ret;
844 nic->irq_allocated[irq] = true;
845
846 /* Enable mailbox interrupt */
847 nicvf_enable_intr(nic, NICVF_INTR_MBOX, 0);
848
849 /* Check if VF is able to communicate with PF */
850 if (!nicvf_check_pf_ready(nic)) {
851 nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
852 nicvf_unregister_interrupts(nic);
853 return 1;
854 }
855
856 return 0;
857 }
858
859 static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev)
860 {
861 struct nicvf *nic = netdev_priv(netdev);
862 int qid = skb_get_queue_mapping(skb);
863 struct netdev_queue *txq = netdev_get_tx_queue(netdev, qid);
864
865 /* Check for minimum packet length */
866 if (skb->len <= ETH_HLEN) {
867 dev_kfree_skb(skb);
868 return NETDEV_TX_OK;
869 }
870
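/* Append failed (typically the SQ has no free descriptors): stop the
 * queue and return NETDEV_TX_BUSY so the stack retries this skb later.
 */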
871 if (!netif_tx_queue_stopped(txq) && !nicvf_sq_append_skb(nic, skb)) {
872 netif_tx_stop_queue(txq);
873 nic->drv_stats.txq_stop++;
874 if (netif_msg_tx_err(nic))
875 netdev_warn(netdev,
876 "%s: Transmit ring full, stopping SQ%d\n",
877 netdev->name, qid);
878
879 return NETDEV_TX_BUSY;
880 }
881
882 return NETDEV_TX_OK;
883 }
884
885 int nicvf_stop(struct net_device *netdev)
886 {
887 int irq, qidx;
888 struct nicvf *nic = netdev_priv(netdev);
889 struct queue_set *qs = nic->qs;
890 struct nicvf_cq_poll *cq_poll = NULL;
891 union nic_mbx mbx = {};
892
893 mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN;
894 nicvf_send_msg_to_pf(nic, &mbx);
895
896 netif_carrier_off(netdev);
897
898 /* Disable RBDR & QS error interrupts */
899 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
900 nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
901 nicvf_clear_intr(nic, NICVF_INTR_RBDR, qidx);
902 }
903 nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
904 nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0);
905
906 /* Wait for pending IRQ handlers to finish */
907 for (irq = 0; irq < nic->num_vec; irq++)
908 synchronize_irq(nic->msix_entries[irq].vector);
909
910 tasklet_kill(&nic->rbdr_task);
911 tasklet_kill(&nic->qs_err_task);
912 if (nic->rb_work_scheduled)
913 cancel_delayed_work_sync(&nic->rbdr_work);
914
915 for (qidx = 0; qidx < nic->qs->cq_cnt; qidx++) {
916 cq_poll = nic->napi[qidx];
917 if (!cq_poll)
918 continue;
919 nic->napi[qidx] = NULL;
920 napi_synchronize(&cq_poll->napi);
921 /* The CQ interrupt gets re-enabled on the napi_complete() path,
922 * so disable it now
923 */
924 nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
925 nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx);
926 napi_disable(&cq_poll->napi);
927 netif_napi_del(&cq_poll->napi);
928 kfree(cq_poll);
929 }
930
931 netif_tx_disable(netdev);
932
933 /* Free resources */
934 nicvf_config_data_transfer(nic, false);
935
936 /* Disable HW Qset */
937 nicvf_qset_config(nic, false);
938
939 /* disable mailbox interrupt */
940 nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
941
942 nicvf_unregister_interrupts(nic);
943
944 return 0;
945 }
946
947 int nicvf_open(struct net_device *netdev)
948 {
949 int err, qidx;
950 struct nicvf *nic = netdev_priv(netdev);
951 struct queue_set *qs = nic->qs;
952 struct nicvf_cq_poll *cq_poll = NULL;
953
954 nic->mtu = netdev->mtu;
955
956 netif_carrier_off(netdev);
957
958 err = nicvf_register_misc_interrupt(nic);
959 if (err)
960 return err;
961
962 /* Register NAPI handler for processing CQEs */
963 for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
964 cq_poll = kzalloc(sizeof(*cq_poll), GFP_KERNEL);
965 if (!cq_poll) {
966 err = -ENOMEM;
967 goto napi_del;
968 }
969 cq_poll->cq_idx = qidx;
970 netif_napi_add(netdev, &cq_poll->napi, nicvf_poll,
971 NAPI_POLL_WEIGHT);
972 napi_enable(&cq_poll->napi);
973 nic->napi[qidx] = cq_poll;
974 }
975
976 /* Check if we got a MAC address from the PF, else generate a random MAC */
977 if (is_zero_ether_addr(netdev->dev_addr)) {
978 eth_hw_addr_random(netdev);
979 nicvf_hw_set_mac_addr(nic, netdev);
980 }
981
982 if (nic->set_mac_pending) {
983 nic->set_mac_pending = false;
984 nicvf_hw_set_mac_addr(nic, netdev);
985 }
986
987 /* Init tasklet for handling Qset err interrupt */
988 tasklet_init(&nic->qs_err_task, nicvf_handle_qs_err,
989 (unsigned long)nic);
990
991 /* Init RBDR tasklet which will refill RBDR */
992 tasklet_init(&nic->rbdr_task, nicvf_rbdr_task,
993 (unsigned long)nic);
994 INIT_DELAYED_WORK(&nic->rbdr_work, nicvf_rbdr_work);
995
996 /* Configure CPI algorithm */
997 nic->cpi_alg = cpi_alg;
998 nicvf_config_cpi(nic);
999
1000 /* Configure receive side scaling */
1001 nicvf_rss_init(nic);
1002
1003 err = nicvf_register_interrupts(nic);
1004 if (err)
1005 goto cleanup;
1006
1007 /* Initialize the queues */
1008 err = nicvf_init_resources(nic);
1009 if (err)
1010 goto cleanup;
1011
1012 /* Make sure queue initialization is written */
1013 wmb();
1014
1015 nicvf_reg_write(nic, NIC_VF_INT, -1);
1016 /* Enable Qset err interrupt */
1017 nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);
1018
1019 /* Enable completion queue interrupt */
1020 for (qidx = 0; qidx < qs->cq_cnt; qidx++)
1021 nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);
1022
1023 /* Enable RBDR threshold interrupt */
1024 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
1025 nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx);
1026
1027 nic->drv_stats.txq_stop = 0;
1028 nic->drv_stats.txq_wake = 0;
1029
1030 netif_carrier_on(netdev);
1031 netif_tx_start_all_queues(netdev);
1032
1033 return 0;
1034 cleanup:
1035 nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
1036 nicvf_unregister_interrupts(nic);
1037 napi_del:
1038 for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
1039 cq_poll = nic->napi[qidx];
1040 if (!cq_poll)
1041 continue;
1042 napi_disable(&cq_poll->napi);
1043 netif_napi_del(&cq_poll->napi);
1044 kfree(cq_poll);
1045 nic->napi[qidx] = NULL;
1046 }
1047 return err;
1048 }
1049
1050 static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu)
1051 {
1052 union nic_mbx mbx = {};
1053
1054 mbx.frs.msg = NIC_MBOX_MSG_SET_MAX_FRS;
1055 mbx.frs.max_frs = mtu;
1056 mbx.frs.vf_id = nic->vf_id;
1057
1058 return nicvf_send_msg_to_pf(nic, &mbx);
1059 }
1060
1061 static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
1062 {
1063 struct nicvf *nic = netdev_priv(netdev);
1064
1065 if (new_mtu > NIC_HW_MAX_FRS)
1066 return -EINVAL;
1067
1068 if (new_mtu < NIC_HW_MIN_FRS)
1069 return -EINVAL;
1070
1071 if (nicvf_update_hw_max_frs(nic, new_mtu))
1072 return -EINVAL;
1073 netdev->mtu = new_mtu;
1074 nic->mtu = new_mtu;
1075
1076 return 0;
1077 }
1078
1079 static int nicvf_set_mac_address(struct net_device *netdev, void *p)
1080 {
1081 struct sockaddr *addr = p;
1082 struct nicvf *nic = netdev_priv(netdev);
1083
1084 if (!is_valid_ether_addr(addr->sa_data))
1085 return -EADDRNOTAVAIL;
1086
1087 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1088
1089 if (nic->msix_enabled) {
1090 if (nicvf_hw_set_mac_addr(nic, netdev))
1091 return -EBUSY;
1092 } else {
1093 nic->set_mac_pending = true;
1094 }
1095
1096 return 0;
1097 }
1098
1099 void nicvf_update_lmac_stats(struct nicvf *nic)
1100 {
1101 int stat = 0;
1102 union nic_mbx mbx = {};
1103
1104 if (!netif_running(nic->netdev))
1105 return;
1106
1107 mbx.bgx_stats.msg = NIC_MBOX_MSG_BGX_STATS;
1108 mbx.bgx_stats.vf_id = nic->vf_id;
1109 /* Rx stats */
1110 mbx.bgx_stats.rx = 1;
1111 while (stat < BGX_RX_STATS_COUNT) {
1112 mbx.bgx_stats.idx = stat;
1113 if (nicvf_send_msg_to_pf(nic, &mbx))
1114 return;
1115 stat++;
1116 }
1117
1118 stat = 0;
1119
1120 /* Tx stats */
1121 mbx.bgx_stats.rx = 0;
1122 while (stat < BGX_TX_STATS_COUNT) {
1123 mbx.bgx_stats.idx = stat;
1124 if (nicvf_send_msg_to_pf(nic, &mbx))
1125 return;
1126 stat++;
1127 }
1128 }
1129
1130 void nicvf_update_stats(struct nicvf *nic)
1131 {
1132 int qidx;
1133 struct nicvf_hw_stats *stats = &nic->hw_stats;
1134 struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
1135 struct queue_set *qs = nic->qs;
1136
1137 #define GET_RX_STATS(reg) \
1138 nicvf_reg_read(nic, NIC_VNIC_RX_STAT_0_13 | (reg << 3))
1139 #define GET_TX_STATS(reg) \
1140 nicvf_reg_read(nic, NIC_VNIC_TX_STAT_0_4 | (reg << 3))
1141
1142 stats->rx_bytes = GET_RX_STATS(RX_OCTS);
1143 stats->rx_ucast_frames = GET_RX_STATS(RX_UCAST);
1144 stats->rx_bcast_frames = GET_RX_STATS(RX_BCAST);
1145 stats->rx_mcast_frames = GET_RX_STATS(RX_MCAST);
1146 stats->rx_fcs_errors = GET_RX_STATS(RX_FCS);
1147 stats->rx_l2_errors = GET_RX_STATS(RX_L2ERR);
1148 stats->rx_drop_red = GET_RX_STATS(RX_RED);
1149 stats->rx_drop_red_bytes = GET_RX_STATS(RX_RED_OCTS);
1150 stats->rx_drop_overrun = GET_RX_STATS(RX_ORUN);
1151 stats->rx_drop_overrun_bytes = GET_RX_STATS(RX_ORUN_OCTS);
1152 stats->rx_drop_bcast = GET_RX_STATS(RX_DRP_BCAST);
1153 stats->rx_drop_mcast = GET_RX_STATS(RX_DRP_MCAST);
1154 stats->rx_drop_l3_bcast = GET_RX_STATS(RX_DRP_L3BCAST);
1155 stats->rx_drop_l3_mcast = GET_RX_STATS(RX_DRP_L3MCAST);
1156
1157 stats->tx_bytes_ok = GET_TX_STATS(TX_OCTS);
1158 stats->tx_ucast_frames_ok = GET_TX_STATS(TX_UCAST);
1159 stats->tx_bcast_frames_ok = GET_TX_STATS(TX_BCAST);
1160 stats->tx_mcast_frames_ok = GET_TX_STATS(TX_MCAST);
1161 stats->tx_drops = GET_TX_STATS(TX_DROP);
1162
1163 drv_stats->tx_frames_ok = stats->tx_ucast_frames_ok +
1164 stats->tx_bcast_frames_ok +
1165 stats->tx_mcast_frames_ok;
1166 drv_stats->rx_drops = stats->rx_drop_red +
1167 stats->rx_drop_overrun;
1168 drv_stats->tx_drops = stats->tx_drops;
1169
1170 /* Update RQ and SQ stats */
1171 for (qidx = 0; qidx < qs->rq_cnt; qidx++)
1172 nicvf_update_rq_stats(nic, qidx);
1173 for (qidx = 0; qidx < qs->sq_cnt; qidx++)
1174 nicvf_update_sq_stats(nic, qidx);
1175 }
1176
1177 static struct rtnl_link_stats64 *nicvf_get_stats64(struct net_device *netdev,
1178 struct rtnl_link_stats64 *stats)
1179 {
1180 struct nicvf *nic = netdev_priv(netdev);
1181 struct nicvf_hw_stats *hw_stats = &nic->hw_stats;
1182 struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
1183
1184 nicvf_update_stats(nic);
1185
1186 stats->rx_bytes = hw_stats->rx_bytes;
1187 stats->rx_packets = drv_stats->rx_frames_ok;
1188 stats->rx_dropped = drv_stats->rx_drops;
1189 stats->multicast = hw_stats->rx_mcast_frames;
1190
1191 stats->tx_bytes = hw_stats->tx_bytes_ok;
1192 stats->tx_packets = drv_stats->tx_frames_ok;
1193 stats->tx_dropped = drv_stats->tx_drops;
1194
1195 return stats;
1196 }
1197
1198 static void nicvf_tx_timeout(struct net_device *dev)
1199 {
1200 struct nicvf *nic = netdev_priv(dev);
1201
1202 if (netif_msg_tx_err(nic))
1203 netdev_warn(dev, "%s: Transmit timed out, resetting\n",
1204 dev->name);
1205
1206 schedule_work(&nic->reset_task);
1207 }
1208
1209 static void nicvf_reset_task(struct work_struct *work)
1210 {
1211 struct nicvf *nic;
1212
1213 nic = container_of(work, struct nicvf, reset_task);
1214
1215 if (!netif_running(nic->netdev))
1216 return;
1217
1218 nicvf_stop(nic->netdev);
1219 nicvf_open(nic->netdev);
1220 nic->netdev->trans_start = jiffies;
1221 }
1222
1223 static const struct net_device_ops nicvf_netdev_ops = {
1224 .ndo_open = nicvf_open,
1225 .ndo_stop = nicvf_stop,
1226 .ndo_start_xmit = nicvf_xmit,
1227 .ndo_change_mtu = nicvf_change_mtu,
1228 .ndo_set_mac_address = nicvf_set_mac_address,
1229 .ndo_get_stats64 = nicvf_get_stats64,
1230 .ndo_tx_timeout = nicvf_tx_timeout,
1231 };
1232
1233 static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1234 {
1235 struct device *dev = &pdev->dev;
1236 struct net_device *netdev;
1237 struct nicvf *nic;
1238 struct queue_set *qs;
1239 int err;
1240
1241 err = pci_enable_device(pdev);
1242 if (err) {
1243 dev_err(dev, "Failed to enable PCI device\n");
1244 return err;
1245 }
1246
1247 err = pci_request_regions(pdev, DRV_NAME);
1248 if (err) {
1249 dev_err(dev, "PCI request regions failed 0x%x\n", err);
1250 goto err_disable_device;
1251 }
1252
1253 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
1254 if (err) {
1255 dev_err(dev, "Unable to get usable DMA configuration\n");
1256 goto err_release_regions;
1257 }
1258
1259 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
1260 if (err) {
1261 dev_err(dev, "unable to get 48-bit DMA for consistent allocations\n");
1262 goto err_release_regions;
1263 }
1264
1265 netdev = alloc_etherdev_mqs(sizeof(struct nicvf),
1266 MAX_RCV_QUEUES_PER_QS,
1267 MAX_SND_QUEUES_PER_QS);
1268 if (!netdev) {
1269 err = -ENOMEM;
1270 goto err_release_regions;
1271 }
1272
1273 pci_set_drvdata(pdev, netdev);
1274
1275 SET_NETDEV_DEV(netdev, &pdev->dev);
1276
1277 nic = netdev_priv(netdev);
1278 nic->netdev = netdev;
1279 nic->pdev = pdev;
1280
1281 /* MAP VF's configuration registers */
1282 nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
1283 if (!nic->reg_base) {
1284 dev_err(dev, "Cannot map config register space, aborting\n");
1285 err = -ENOMEM;
1286 goto err_free_netdev;
1287 }
1288
1289 err = nicvf_set_qset_resources(nic);
1290 if (err)
1291 goto err_free_netdev;
1292
1293 qs = nic->qs;
1294
1295 err = nicvf_set_real_num_queues(netdev, qs->sq_cnt, qs->rq_cnt);
1296 if (err)
1297 goto err_free_netdev;
1298
1299 /* Check if PF is alive and get MAC address for this VF */
1300 err = nicvf_register_misc_interrupt(nic);
1301 if (err)
1302 goto err_free_netdev;
1303
1304 netdev->features |= (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SG |
1305 NETIF_F_TSO | NETIF_F_GRO | NETIF_F_RXHASH);
1306
1307 netdev->hw_features = netdev->features;
1308
1309 netdev->netdev_ops = &nicvf_netdev_ops;
1310 netdev->watchdog_timeo = NICVF_TX_TIMEOUT;
1311
1312 INIT_WORK(&nic->reset_task, nicvf_reset_task);
1313
1314 err = register_netdev(netdev);
1315 if (err) {
1316 dev_err(dev, "Failed to register netdevice\n");
1317 goto err_unregister_interrupts;
1318 }
1319
1320 nic->msg_enable = debug;
1321
1322 nicvf_set_ethtool_ops(netdev);
1323
1324 return 0;
1325
1326 err_unregister_interrupts:
1327 nicvf_unregister_interrupts(nic);
1328 err_free_netdev:
1329 pci_set_drvdata(pdev, NULL);
1330 free_netdev(netdev);
1331 err_release_regions:
1332 pci_release_regions(pdev);
1333 err_disable_device:
1334 pci_disable_device(pdev);
1335 return err;
1336 }
1337
1338 static void nicvf_remove(struct pci_dev *pdev)
1339 {
1340 struct net_device *netdev = pci_get_drvdata(pdev);
1341 struct nicvf *nic = netdev_priv(netdev);
1342
1343 unregister_netdev(netdev);
1344 nicvf_unregister_interrupts(nic);
1345 pci_set_drvdata(pdev, NULL);
1346 free_netdev(netdev);
1347 pci_release_regions(pdev);
1348 pci_disable_device(pdev);
1349 }
1350
1351 static void nicvf_shutdown(struct pci_dev *pdev)
1352 {
1353 nicvf_remove(pdev);
1354 }
1355
1356 static struct pci_driver nicvf_driver = {
1357 .name = DRV_NAME,
1358 .id_table = nicvf_id_table,
1359 .probe = nicvf_probe,
1360 .remove = nicvf_remove,
1361 .shutdown = nicvf_shutdown,
1362 };
1363
1364 static int __init nicvf_init_module(void)
1365 {
1366 pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);
1367
1368 return pci_register_driver(&nicvf_driver);
1369 }
1370
1371 static void __exit nicvf_cleanup_module(void)
1372 {
1373 pci_unregister_driver(&nicvf_driver);
1374 }
1375
1376 module_init(nicvf_init_module);
1377 module_exit(nicvf_cleanup_module);