/*
 * Copyright (C) 2015 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/log2.h>
#include <linux/prefetch.h>
#include <linux/irq.h>

#include "nic_reg.h"
#include "nic.h"
#include "nicvf_queues.h"
#include "thunder_bgx.h"

#define DRV_NAME	"thunder-nicvf"
#define DRV_VERSION	"1.0"

/* Supported devices */
static const struct pci_device_id nicvf_id_table[] = {
        { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
                         PCI_DEVICE_ID_THUNDER_NIC_VF,
                         PCI_VENDOR_ID_CAVIUM, 0xA11E) },
        { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
                         PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF,
                         PCI_VENDOR_ID_CAVIUM, 0xA11E) },
        { 0, }  /* end of table */
};

MODULE_AUTHOR("Sunil Goutham");
MODULE_DESCRIPTION("Cavium Thunder NIC Virtual Function Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, nicvf_id_table);

static int debug = 0x00;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Debug message level bitmap");

static int cpi_alg = CPI_ALG_NONE;
module_param(cpi_alg, int, S_IRUGO);
MODULE_PARM_DESC(cpi_alg,
                 "PFC algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)");

static inline void nicvf_set_rx_frame_cnt(struct nicvf *nic,
                                          struct sk_buff *skb)
{
        if (skb->len <= 64)
                nic->drv_stats.rx_frames_64++;
        else if (skb->len <= 127)
                nic->drv_stats.rx_frames_127++;
        else if (skb->len <= 255)
                nic->drv_stats.rx_frames_255++;
        else if (skb->len <= 511)
                nic->drv_stats.rx_frames_511++;
        else if (skb->len <= 1023)
                nic->drv_stats.rx_frames_1023++;
        else if (skb->len <= 1518)
                nic->drv_stats.rx_frames_1518++;
        else
                nic->drv_stats.rx_frames_jumbo++;
}
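
/* Note: the size buckets above (64/127/255/511/1023/1518) follow the
 * usual RMON-style frame-size histogram; anything larger is counted
 * as a jumbo frame.
 */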

/* The Cavium ThunderX network controller can *only* be found in SoCs
 * containing the ThunderX ARM64 CPU implementation. All accesses to the device
 * registers on this platform are implicitly strongly ordered with respect
 * to memory accesses. So writeq_relaxed() and readq_relaxed() are safe to use
 * with no memory barriers in this driver. The readq()/writeq() functions add
 * explicit ordering operations which in this case are redundant, and only
 * add overhead.
 */

/* Register read/write APIs */
void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val)
{
        writeq_relaxed(val, nic->reg_base + offset);
}

u64 nicvf_reg_read(struct nicvf *nic, u64 offset)
{
        return readq_relaxed(nic->reg_base + offset);
}

void nicvf_queue_reg_write(struct nicvf *nic, u64 offset,
                           u64 qidx, u64 val)
{
        void __iomem *addr = nic->reg_base + offset;

        writeq_relaxed(val, addr + (qidx << NIC_Q_NUM_SHIFT));
}

u64 nicvf_queue_reg_read(struct nicvf *nic, u64 offset, u64 qidx)
{
        void __iomem *addr = nic->reg_base + offset;

        return readq_relaxed(addr + (qidx << NIC_Q_NUM_SHIFT));
}
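
/* Note on the *_queue_reg_* variants above: per-queue register copies
 * are laid out at a fixed stride, so the queue index is folded into the
 * address as (qidx << NIC_Q_NUM_SHIFT) on top of the register's base
 * offset.
 */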

/* VF -> PF mailbox communication */
static void nicvf_write_to_mbx(struct nicvf *nic, union nic_mbx *mbx)
{
        u64 *msg = (u64 *)mbx;

        nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 0, msg[0]);
        nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 8, msg[1]);
}
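
/* Note: a mailbox message is 16 bytes wide, carried by two consecutive
 * 64-bit mailbox registers, hence the two writes at offsets +0 and +8
 * above.
 */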

int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
{
        int timeout = NIC_MBOX_MSG_TIMEOUT;
        int sleep = 10;

        nic->pf_acked = false;
        nic->pf_nacked = false;

        nicvf_write_to_mbx(nic, mbx);

        /* Wait for previous message to be acked, timeout 2sec */
        while (!nic->pf_acked) {
                if (nic->pf_nacked)
                        return -EINVAL;
                msleep(sleep);
                if (nic->pf_acked)
                        break;
                timeout -= sleep;
                if (!timeout) {
                        netdev_err(nic->netdev,
                                   "PF didn't ack to mbox msg %d from VF%d\n",
                                   (mbx->msg.msg & 0xFF), nic->vf_id);
                        return -EBUSY;
                }
        }
        return 0;
}
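
/* Typical caller pattern (see nicvf_hw_set_mac_addr() below): zero a
 * union nic_mbx on the stack, set the message id and payload, then
 * send it and check the result:
 *
 *	union nic_mbx mbx = {};
 *
 *	mbx.mac.msg = NIC_MBOX_MSG_SET_MAC;
 *	mbx.mac.vf_id = nic->vf_id;
 *	return nicvf_send_msg_to_pf(nic, &mbx);
 */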

/* Checks if VF is able to communicate with PF
 * and also gets the VNIC number this VF is associated to.
 */
static int nicvf_check_pf_ready(struct nicvf *nic)
{
        int timeout = 5000, sleep = 20;
        union nic_mbx mbx = {};

        mbx.msg.msg = NIC_MBOX_MSG_READY;

        nic->pf_ready_to_rcv_msg = false;

        nicvf_write_to_mbx(nic, &mbx);

        while (!nic->pf_ready_to_rcv_msg) {
                msleep(sleep);
                if (nic->pf_ready_to_rcv_msg)
                        break;
                timeout -= sleep;
                if (!timeout) {
                        netdev_err(nic->netdev,
                                   "PF didn't respond to READY msg\n");
                        return 0;
                }
        }
        return 1;
}

static void nicvf_read_bgx_stats(struct nicvf *nic, struct bgx_stats_msg *bgx)
{
        if (bgx->rx)
                nic->bgx_stats.rx_stats[bgx->idx] = bgx->stats;
        else
                nic->bgx_stats.tx_stats[bgx->idx] = bgx->stats;
}

static void nicvf_handle_mbx_intr(struct nicvf *nic)
{
        union nic_mbx mbx = {};
        u64 *mbx_data;
        u64 mbx_addr;
        int i;

        mbx_addr = NIC_VF_PF_MAILBOX_0_1;
        mbx_data = (u64 *)&mbx;

        for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
                *mbx_data = nicvf_reg_read(nic, mbx_addr);
                mbx_data++;
                mbx_addr += sizeof(u64);
        }

        netdev_dbg(nic->netdev, "Mbox message: msg: 0x%x\n", mbx.msg.msg);
        switch (mbx.msg.msg) {
        case NIC_MBOX_MSG_READY:
                nic->pf_ready_to_rcv_msg = true;
                nic->vf_id = mbx.nic_cfg.vf_id & 0x7F;
                nic->tns_mode = mbx.nic_cfg.tns_mode & 0x7F;
                nic->node = mbx.nic_cfg.node_id;
                if (!nic->set_mac_pending)
                        ether_addr_copy(nic->netdev->dev_addr,
                                        mbx.nic_cfg.mac_addr);
                nic->link_up = false;
                break;
        case NIC_MBOX_MSG_ACK:
                nic->pf_acked = true;
                break;
        case NIC_MBOX_MSG_NACK:
                nic->pf_nacked = true;
                break;
        case NIC_MBOX_MSG_RSS_SIZE:
                nic->rss_info.rss_size = mbx.rss_size.ind_tbl_size;
                nic->pf_acked = true;
                break;
        case NIC_MBOX_MSG_BGX_STATS:
                nicvf_read_bgx_stats(nic, &mbx.bgx_stats);
                nic->pf_acked = true;
                nic->bgx_stats_acked = true;
                break;
        case NIC_MBOX_MSG_BGX_LINK_CHANGE:
                nic->pf_acked = true;
                nic->link_up = mbx.link_status.link_up;
                nic->duplex = mbx.link_status.duplex;
                nic->speed = mbx.link_status.speed;
                if (nic->link_up) {
                        netdev_info(nic->netdev, "%s: Link is Up %d Mbps %s\n",
                                    nic->netdev->name, nic->speed,
                                    nic->duplex == DUPLEX_FULL ?
                                    "Full duplex" : "Half duplex");
                        netif_carrier_on(nic->netdev);
                        netif_tx_start_all_queues(nic->netdev);
                } else {
                        netdev_info(nic->netdev, "%s: Link is Down\n",
                                    nic->netdev->name);
                        netif_carrier_off(nic->netdev);
                        netif_tx_stop_all_queues(nic->netdev);
                }
                break;
        default:
                netdev_err(nic->netdev,
                           "Invalid message from PF, msg 0x%x\n", mbx.msg.msg);
                break;
        }
        nicvf_clear_intr(nic, NICVF_INTR_MBOX, 0);
}
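
/* Note: NIC_MBOX_MSG_READY doubles as the VF's initial configuration
 * download; besides flagging that the PF is alive it delivers the VF id,
 * TNS mode, node id and, unless a MAC change is already pending, the
 * MAC address assigned by the PF.
 */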

static int nicvf_hw_set_mac_addr(struct nicvf *nic, struct net_device *netdev)
{
        union nic_mbx mbx = {};

        mbx.mac.msg = NIC_MBOX_MSG_SET_MAC;
        mbx.mac.vf_id = nic->vf_id;
        ether_addr_copy(mbx.mac.mac_addr, netdev->dev_addr);

        return nicvf_send_msg_to_pf(nic, &mbx);
}

static void nicvf_config_cpi(struct nicvf *nic)
{
        union nic_mbx mbx = {};

        mbx.cpi_cfg.msg = NIC_MBOX_MSG_CPI_CFG;
        mbx.cpi_cfg.vf_id = nic->vf_id;
        mbx.cpi_cfg.cpi_alg = nic->cpi_alg;
        mbx.cpi_cfg.rq_cnt = nic->qs->rq_cnt;

        nicvf_send_msg_to_pf(nic, &mbx);
}

static void nicvf_get_rss_size(struct nicvf *nic)
{
        union nic_mbx mbx = {};

        mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE;
        mbx.rss_size.vf_id = nic->vf_id;
        nicvf_send_msg_to_pf(nic, &mbx);
}

void nicvf_config_rss(struct nicvf *nic)
{
        union nic_mbx mbx = {};
        struct nicvf_rss_info *rss = &nic->rss_info;
        int ind_tbl_len = rss->rss_size;
        int i, nextq = 0;

        mbx.rss_cfg.vf_id = nic->vf_id;
        mbx.rss_cfg.hash_bits = rss->hash_bits;
        while (ind_tbl_len) {
                mbx.rss_cfg.tbl_offset = nextq;
                mbx.rss_cfg.tbl_len = min(ind_tbl_len,
                                          RSS_IND_TBL_LEN_PER_MBX_MSG);
                mbx.rss_cfg.msg = mbx.rss_cfg.tbl_offset ?
                          NIC_MBOX_MSG_RSS_CFG_CONT : NIC_MBOX_MSG_RSS_CFG;

                for (i = 0; i < mbx.rss_cfg.tbl_len; i++)
                        mbx.rss_cfg.ind_tbl[i] = rss->ind_tbl[nextq++];

                nicvf_send_msg_to_pf(nic, &mbx);

                ind_tbl_len -= mbx.rss_cfg.tbl_len;
        }
}
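
/* Note: the RSS indirection table does not fit in a single 16-byte
 * mailbox message, so nicvf_config_rss() above streams it in chunks of
 * RSS_IND_TBL_LEN_PER_MBX_MSG entries; the first chunk goes out as
 * NIC_MBOX_MSG_RSS_CFG and the remainder as NIC_MBOX_MSG_RSS_CFG_CONT.
 */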

void nicvf_set_rss_key(struct nicvf *nic)
{
        struct nicvf_rss_info *rss = &nic->rss_info;
        u64 key_addr = NIC_VNIC_RSS_KEY_0_4;
        int idx;

        for (idx = 0; idx < RSS_HASH_KEY_SIZE; idx++) {
                nicvf_reg_write(nic, key_addr, rss->key[idx]);
                key_addr += sizeof(u64);
        }
}

static int nicvf_rss_init(struct nicvf *nic)
{
        struct nicvf_rss_info *rss = &nic->rss_info;
        int idx;

        nicvf_get_rss_size(nic);

        if ((nic->qs->rq_cnt <= 1) || (cpi_alg != CPI_ALG_NONE)) {
                rss->enable = false;
                rss->hash_bits = 0;
                return 0;
        }

        rss->enable = true;

        /* Using the HW reset value for now */
        rss->key[0] = 0xFEED0BADFEED0BADULL;
        rss->key[1] = 0xFEED0BADFEED0BADULL;
        rss->key[2] = 0xFEED0BADFEED0BADULL;
        rss->key[3] = 0xFEED0BADFEED0BADULL;
        rss->key[4] = 0xFEED0BADFEED0BADULL;

        nicvf_set_rss_key(nic);

        rss->cfg = RSS_IP_HASH_ENA | RSS_TCP_HASH_ENA | RSS_UDP_HASH_ENA;
        nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, rss->cfg);

        rss->hash_bits = ilog2(rounddown_pow_of_two(rss->rss_size));

        for (idx = 0; idx < rss->rss_size; idx++)
                rss->ind_tbl[idx] = ethtool_rxfh_indir_default(idx,
                                                               nic->qs->rq_cnt);
        nicvf_config_rss(nic);
        return 1;
}
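
/* Note: hash_bits is ilog2(rounddown_pow_of_two(rss_size)), i.e. the
 * number of hash bits needed to index the indirection table; a
 * 128-entry table, for example, yields hash_bits = 7.
 */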

int nicvf_set_real_num_queues(struct net_device *netdev,
                              int tx_queues, int rx_queues)
{
        int err = 0;

        err = netif_set_real_num_tx_queues(netdev, tx_queues);
        if (err) {
                netdev_err(netdev,
                           "Failed to set no of Tx queues: %d\n", tx_queues);
                return err;
        }

        err = netif_set_real_num_rx_queues(netdev, rx_queues);
        if (err)
                netdev_err(netdev,
                           "Failed to set no of Rx queues: %d\n", rx_queues);
        return err;
}

static int nicvf_init_resources(struct nicvf *nic)
{
        int err;
        union nic_mbx mbx = {};

        mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;

        /* Enable Qset */
        nicvf_qset_config(nic, true);

        /* Initialize queues and HW for data transfer */
        err = nicvf_config_data_transfer(nic, true);
        if (err) {
                netdev_err(nic->netdev,
                           "Failed to alloc/config VF's QSet resources\n");
                return err;
        }

        /* Send VF config done msg to PF */
        nicvf_write_to_mbx(nic, &mbx);

        return 0;
}

static void nicvf_snd_pkt_handler(struct net_device *netdev,
                                  struct cmp_queue *cq,
                                  struct cqe_send_t *cqe_tx, int cqe_type)
{
        struct sk_buff *skb = NULL;
        struct nicvf *nic = netdev_priv(netdev);
        struct snd_queue *sq;
        struct sq_hdr_subdesc *hdr;

        sq = &nic->qs->sq[cqe_tx->sq_idx];

        hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, cqe_tx->sqe_ptr);
        if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER)
                return;

        netdev_dbg(nic->netdev,
                   "%s Qset #%d SQ #%d SQ ptr #%d subdesc count %d\n",
                   __func__, cqe_tx->sq_qs, cqe_tx->sq_idx,
                   cqe_tx->sqe_ptr, hdr->subdesc_cnt);

        nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
        nicvf_check_cqe_tx_errs(nic, cq, cqe_tx);
        skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr];
        /* For TSO offloaded packets only one head SKB needs to be freed */
        if (skb) {
                dev_consume_skb_any(skb);
                sq->skbuff[cqe_tx->sqe_ptr] = (u64)NULL;
        }
}

static void nicvf_rcv_pkt_handler(struct net_device *netdev,
                                  struct napi_struct *napi,
                                  struct cmp_queue *cq,
                                  struct cqe_rx_t *cqe_rx, int cqe_type)
{
        struct sk_buff *skb;
        struct nicvf *nic = netdev_priv(netdev);
        int err = 0;

        /* Check for errors */
        err = nicvf_check_cqe_rx_errs(nic, cq, cqe_rx);
        if (err && !cqe_rx->rb_cnt)
                return;

        skb = nicvf_get_rcv_skb(nic, cqe_rx);
        if (!skb) {
                netdev_dbg(nic->netdev, "Packet not received\n");
                return;
        }

        if (netif_msg_pktdata(nic)) {
                netdev_info(nic->netdev, "%s: skb 0x%p, len=%d\n", netdev->name,
                            skb, skb->len);
                print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1,
                               skb->data, skb->len, true);
        }

        nicvf_set_rx_frame_cnt(nic, skb);

        skb_record_rx_queue(skb, cqe_rx->rq_idx);
        if (netdev->hw_features & NETIF_F_RXCSUM) {
                /* HW by default verifies TCP/UDP/SCTP checksums */
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        } else {
                skb_checksum_none_assert(skb);
        }

        skb->protocol = eth_type_trans(skb, netdev);

        if (napi && (netdev->features & NETIF_F_GRO))
                napi_gro_receive(napi, skb);
        else
                netif_receive_skb(skb);
}

static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx,
                                 struct napi_struct *napi, int budget)
{
        int processed_cqe, work_done = 0, tx_done = 0;
        int cqe_count, cqe_head;
        struct nicvf *nic = netdev_priv(netdev);
        struct queue_set *qs = nic->qs;
        struct cmp_queue *cq = &qs->cq[cq_idx];
        struct cqe_rx_t *cq_desc;
        struct netdev_queue *txq;

        spin_lock_bh(&cq->lock);
loop:
        processed_cqe = 0;
        /* Get no of valid CQ entries to process */
        cqe_count = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, cq_idx);
        cqe_count &= CQ_CQE_COUNT;
        if (!cqe_count)
                goto done;

        /* Get head of the valid CQ entries */
        cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9;

        netdev_dbg(nic->netdev, "%s CQ%d cqe_count %d cqe_head %d\n",
                   __func__, cq_idx, cqe_count, cqe_head);
        while (processed_cqe < cqe_count) {
                /* Get the CQ descriptor */
                cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head);
                cqe_head++;
                cqe_head &= (cq->dmem.q_len - 1);
                /* Initiate prefetch for next descriptor */
                prefetch((struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head));

                if ((work_done >= budget) && napi &&
                    (cq_desc->cqe_type != CQE_TYPE_SEND)) {
                        break;
                }

                netdev_dbg(nic->netdev, "CQ%d cq_desc->cqe_type %d\n",
                           cq_idx, cq_desc->cqe_type);
                switch (cq_desc->cqe_type) {
                case CQE_TYPE_RX:
                        nicvf_rcv_pkt_handler(netdev, napi, cq,
                                              cq_desc, CQE_TYPE_RX);
                        work_done++;
                        break;
                case CQE_TYPE_SEND:
                        nicvf_snd_pkt_handler(netdev, cq,
                                              (void *)cq_desc, CQE_TYPE_SEND);
                        tx_done++;
                        break;
                case CQE_TYPE_INVALID:
                case CQE_TYPE_RX_SPLIT:
                case CQE_TYPE_RX_TCP:
                case CQE_TYPE_SEND_PTP:
                        /* Ignore for now */
                        break;
                }
                processed_cqe++;
        }
        netdev_dbg(nic->netdev,
                   "%s CQ%d processed_cqe %d work_done %d budget %d\n",
                   __func__, cq_idx, processed_cqe, work_done, budget);

        /* Ring doorbell to inform H/W to reuse processed CQEs */
        nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR,
                              cq_idx, processed_cqe);

        if ((work_done < budget) && napi)
                goto loop;

done:
        /* Wakeup TXQ if it was stopped earlier due to SQ full */
        if (tx_done) {
                txq = netdev_get_tx_queue(netdev, cq_idx);
                if (netif_tx_queue_stopped(txq)) {
                        netif_tx_start_queue(txq);
                        nic->drv_stats.txq_wake++;
                        if (netif_msg_tx_err(nic))
                                netdev_warn(netdev,
                                            "%s: Transmit queue wakeup SQ%d\n",
                                            netdev->name, cq_idx);
                }
        }

        spin_unlock_bh(&cq->lock);
        return work_done;
}
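
/* Note: the CQ ring length is a power of two, so masking cqe_head with
 * (cq->dmem.q_len - 1) above is a cheap modulo. Tx completions are
 * deliberately processed even after the NAPI budget is exhausted so
 * that a stopped send queue can still be drained and woken.
 */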

static int nicvf_poll(struct napi_struct *napi, int budget)
{
        u64 cq_head;
        int work_done = 0;
        struct net_device *netdev = napi->dev;
        struct nicvf *nic = netdev_priv(netdev);
        struct nicvf_cq_poll *cq;

        cq = container_of(napi, struct nicvf_cq_poll, napi);
        work_done = nicvf_cq_intr_handler(netdev, cq->cq_idx, napi, budget);

        if (work_done < budget) {
                /* Slow packet rate, exit polling */
                napi_complete(napi);
                /* Re-enable interrupts */
                cq_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD,
                                               cq->cq_idx);
                nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->cq_idx);
                nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_HEAD,
                                      cq->cq_idx, cq_head);
                nicvf_enable_intr(nic, NICVF_INTR_CQ, cq->cq_idx);
        }
        return work_done;
}
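
/* Note: before re-enabling the CQ interrupt, nicvf_poll() above clears
 * any latched CQ interrupt and writes the current head pointer back to
 * NIC_QSET_CQ_0_7_HEAD, which appears to re-arm the CQ so that entries
 * arriving during the poll still raise a fresh interrupt.
 */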

/* Qset error interrupt handler
 *
 * As of now only CQ errors are handled
 */
static void nicvf_handle_qs_err(unsigned long data)
{
        struct nicvf *nic = (struct nicvf *)data;
        struct queue_set *qs = nic->qs;
        int qidx;
        u64 status;

        netif_tx_disable(nic->netdev);

        /* Check if it is CQ err */
        for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
                status = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS,
                                              qidx);
                if (!(status & CQ_ERR_MASK))
                        continue;

                /* Process already queued CQEs and reconfig CQ */
                nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
                nicvf_sq_disable(nic, qidx);
                nicvf_cq_intr_handler(nic->netdev, qidx, NULL, 0);
                nicvf_cmp_queue_config(nic, qs, qidx, true);
                nicvf_sq_free_used_descs(nic->netdev, &qs->sq[qidx], qidx);
                nicvf_sq_enable(nic, &qs->sq[qidx], qidx);

                nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);
        }

        netif_tx_start_all_queues(nic->netdev);
        /* Re-enable Qset error interrupt */
        nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);
}

static irqreturn_t nicvf_misc_intr_handler(int irq, void *nicvf_irq)
{
        struct nicvf *nic = (struct nicvf *)nicvf_irq;
        u64 intr;

        intr = nicvf_reg_read(nic, NIC_VF_INT);
        /* Check for spurious interrupt */
        if (!(intr & NICVF_INTR_MBOX_MASK))
                return IRQ_NONE;

        nicvf_handle_mbx_intr(nic);

        return IRQ_HANDLED;
}

static irqreturn_t nicvf_intr_handler(int irq, void *nicvf_irq)
{
        u64 qidx, intr, clear_intr = 0;
        u64 cq_intr, rbdr_intr, qs_err_intr;
        struct nicvf *nic = (struct nicvf *)nicvf_irq;
        struct queue_set *qs = nic->qs;
        struct nicvf_cq_poll *cq_poll = NULL;

        intr = nicvf_reg_read(nic, NIC_VF_INT);
        if (netif_msg_intr(nic))
                netdev_info(nic->netdev, "%s: interrupt status 0x%llx\n",
                            nic->netdev->name, intr);

        qs_err_intr = intr & NICVF_INTR_QS_ERR_MASK;
        if (qs_err_intr) {
                /* Disable Qset err interrupt and schedule softirq */
                nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
                tasklet_hi_schedule(&nic->qs_err_task);
                clear_intr |= qs_err_intr;
        }

        /* Disable interrupts and start polling */
        cq_intr = (intr & NICVF_INTR_CQ_MASK) >> NICVF_INTR_CQ_SHIFT;
        for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
                if (!(cq_intr & (1 << qidx)))
                        continue;
                if (!nicvf_is_intr_enabled(nic, NICVF_INTR_CQ, qidx))
                        continue;

                nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
                clear_intr |= ((1 << qidx) << NICVF_INTR_CQ_SHIFT);

                cq_poll = nic->napi[qidx];
                /* Schedule NAPI */
                if (cq_poll)
                        napi_schedule(&cq_poll->napi);
        }

        /* Handle RBDR interrupts */
        rbdr_intr = (intr & NICVF_INTR_RBDR_MASK) >> NICVF_INTR_RBDR_SHIFT;
        if (rbdr_intr) {
                /* Disable RBDR interrupt and schedule softirq */
                for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
                        if (!nicvf_is_intr_enabled(nic, NICVF_INTR_RBDR, qidx))
                                continue;
                        nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
                        tasklet_hi_schedule(&nic->rbdr_task);
                        clear_intr |= ((1 << qidx) << NICVF_INTR_RBDR_SHIFT);
                }
        }

        /* Clear interrupts */
        nicvf_reg_write(nic, NIC_VF_INT, clear_intr);
        return IRQ_HANDLED;
}
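
/* Note: NIC_VF_INT looks to be write-1-to-clear; every source handled
 * above is accumulated into clear_intr and acknowledged with the single
 * register write at the end.
 */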

static int nicvf_enable_msix(struct nicvf *nic)
{
        int ret, vec;

        nic->num_vec = NIC_VF_MSIX_VECTORS;

        for (vec = 0; vec < nic->num_vec; vec++)
                nic->msix_entries[vec].entry = vec;

        ret = pci_enable_msix(nic->pdev, nic->msix_entries, nic->num_vec);
        if (ret) {
                netdev_err(nic->netdev,
                           "Req for #%d msix vectors failed\n", nic->num_vec);
                return 0;
        }
        nic->msix_enabled = 1;
        return 1;
}

static void nicvf_disable_msix(struct nicvf *nic)
{
        if (nic->msix_enabled) {
                pci_disable_msix(nic->pdev);
                nic->msix_enabled = 0;
                nic->num_vec = 0;
        }
}

static int nicvf_register_interrupts(struct nicvf *nic)
{
        int irq, free, ret = 0;
        int vector;

        for_each_cq_irq(irq)
                sprintf(nic->irq_name[irq], "NICVF%d CQ%d",
                        nic->vf_id, irq);

        for_each_sq_irq(irq)
                sprintf(nic->irq_name[irq], "NICVF%d SQ%d",
                        nic->vf_id, irq - NICVF_INTR_ID_SQ);

        for_each_rbdr_irq(irq)
                sprintf(nic->irq_name[irq], "NICVF%d RBDR%d",
                        nic->vf_id, irq - NICVF_INTR_ID_RBDR);

        /* Register all interrupts except mailbox */
        for (irq = 0; irq < NICVF_INTR_ID_SQ; irq++) {
                vector = nic->msix_entries[irq].vector;
                ret = request_irq(vector, nicvf_intr_handler,
                                  0, nic->irq_name[irq], nic);
                if (ret)
                        break;
                nic->irq_allocated[irq] = true;
        }

        for (irq = NICVF_INTR_ID_SQ; irq < NICVF_INTR_ID_MISC; irq++) {
                vector = nic->msix_entries[irq].vector;
                ret = request_irq(vector, nicvf_intr_handler,
                                  0, nic->irq_name[irq], nic);
                if (ret)
                        break;
                nic->irq_allocated[irq] = true;
        }

        sprintf(nic->irq_name[NICVF_INTR_ID_QS_ERR],
                "NICVF%d Qset error", nic->vf_id);
        if (!ret) {
                vector = nic->msix_entries[NICVF_INTR_ID_QS_ERR].vector;
                irq = NICVF_INTR_ID_QS_ERR;
                ret = request_irq(vector, nicvf_intr_handler,
                                  0, nic->irq_name[irq], nic);
                if (!ret)
                        nic->irq_allocated[irq] = true;
        }

        if (ret) {
                netdev_err(nic->netdev, "Request irq failed\n");
                for (free = 0; free < irq; free++)
                        free_irq(nic->msix_entries[free].vector, nic);
                return ret;
        }

        return 0;
}

static void nicvf_unregister_interrupts(struct nicvf *nic)
{
        int irq;

        /* Free registered interrupts */
        for (irq = 0; irq < nic->num_vec; irq++) {
                if (nic->irq_allocated[irq])
                        free_irq(nic->msix_entries[irq].vector, nic);
                nic->irq_allocated[irq] = false;
        }

        /* Disable MSI-X */
        nicvf_disable_msix(nic);
}

/* Initialize MSIX vectors and register MISC interrupt.
 * Send READY message to PF to check if it is alive
 */
static int nicvf_register_misc_interrupt(struct nicvf *nic)
{
        int ret = 0;
        int irq = NICVF_INTR_ID_MISC;

        /* Return if mailbox interrupt is already registered */
        if (nic->msix_enabled)
                return 0;

        /* Enable MSI-X */
        if (!nicvf_enable_msix(nic))
                return 1;

        sprintf(nic->irq_name[irq], "%s Mbox", "NICVF");
        /* Register Misc interrupt */
        ret = request_irq(nic->msix_entries[irq].vector,
                          nicvf_misc_intr_handler, 0, nic->irq_name[irq], nic);
        if (ret)
                return ret;
        nic->irq_allocated[irq] = true;

        /* Enable mailbox interrupt */
        nicvf_enable_intr(nic, NICVF_INTR_MBOX, 0);

        /* Check if VF is able to communicate with PF */
        if (!nicvf_check_pf_ready(nic)) {
                nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
                nicvf_unregister_interrupts(nic);
                return 1;
        }

        return 0;
}

static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev)
{
        struct nicvf *nic = netdev_priv(netdev);
        int qid = skb_get_queue_mapping(skb);
        struct netdev_queue *txq = netdev_get_tx_queue(netdev, qid);

        /* Check for minimum packet length */
        if (skb->len <= ETH_HLEN) {
                dev_kfree_skb(skb);
                return NETDEV_TX_OK;
        }

        if (!netif_tx_queue_stopped(txq) && !nicvf_sq_append_skb(nic, skb)) {
                netif_tx_stop_queue(txq);
                nic->drv_stats.txq_stop++;
                if (netif_msg_tx_err(nic))
                        netdev_warn(netdev,
                                    "%s: Transmit ring full, stopping SQ%d\n",
                                    netdev->name, qid);

                return NETDEV_TX_BUSY;
        }

        return NETDEV_TX_OK;
}
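
/* Note: Tx flow control is stop-and-wake: nicvf_xmit() stops the queue
 * (bumping txq_stop) when nicvf_sq_append_skb() reports the SQ full,
 * and the Tx-completion path in nicvf_cq_intr_handler() restarts it
 * (bumping txq_wake) once descriptors have been reclaimed.
 */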

int nicvf_stop(struct net_device *netdev)
{
        int irq, qidx;
        struct nicvf *nic = netdev_priv(netdev);
        struct queue_set *qs = nic->qs;
        struct nicvf_cq_poll *cq_poll = NULL;
        union nic_mbx mbx = {};

        mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN;
        nicvf_send_msg_to_pf(nic, &mbx);

        netif_carrier_off(netdev);

        /* Disable RBDR & QS error interrupts */
        for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
                nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
                nicvf_clear_intr(nic, NICVF_INTR_RBDR, qidx);
        }
        nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
        nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0);

        /* Wait for pending IRQ handlers to finish */
        for (irq = 0; irq < nic->num_vec; irq++)
                synchronize_irq(nic->msix_entries[irq].vector);

        tasklet_kill(&nic->rbdr_task);
        tasklet_kill(&nic->qs_err_task);
        if (nic->rb_work_scheduled)
                cancel_delayed_work_sync(&nic->rbdr_work);

        for (qidx = 0; qidx < nic->qs->cq_cnt; qidx++) {
                cq_poll = nic->napi[qidx];
                if (!cq_poll)
                        continue;
                nic->napi[qidx] = NULL;
                napi_synchronize(&cq_poll->napi);
                /* CQ intr is enabled while napi_complete,
                 * so disable it now
                 */
                nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
                nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx);
                napi_disable(&cq_poll->napi);
                netif_napi_del(&cq_poll->napi);
                kfree(cq_poll);
        }

        netif_tx_disable(netdev);

        /* Free resources */
        nicvf_config_data_transfer(nic, false);

        /* Disable HW Qset */
        nicvf_qset_config(nic, false);

        /* disable mailbox interrupt */
        nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);

        nicvf_unregister_interrupts(nic);

        return 0;
}

int nicvf_open(struct net_device *netdev)
{
        int err, qidx;
        struct nicvf *nic = netdev_priv(netdev);
        struct queue_set *qs = nic->qs;
        struct nicvf_cq_poll *cq_poll = NULL;

        nic->mtu = netdev->mtu;

        netif_carrier_off(netdev);

        err = nicvf_register_misc_interrupt(nic);
        if (err)
                return err;

        /* Register NAPI handler for processing CQEs */
        for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
                cq_poll = kzalloc(sizeof(*cq_poll), GFP_KERNEL);
                if (!cq_poll) {
                        err = -ENOMEM;
                        goto napi_del;
                }
                cq_poll->cq_idx = qidx;
                netif_napi_add(netdev, &cq_poll->napi, nicvf_poll,
                               NAPI_POLL_WEIGHT);
                napi_enable(&cq_poll->napi);
                nic->napi[qidx] = cq_poll;
        }

        /* Check if we got MAC address from PF or else generate a random MAC */
        if (is_zero_ether_addr(netdev->dev_addr)) {
                eth_hw_addr_random(netdev);
                nicvf_hw_set_mac_addr(nic, netdev);
        }

        if (nic->set_mac_pending) {
                nic->set_mac_pending = false;
                nicvf_hw_set_mac_addr(nic, netdev);
        }

        /* Init tasklet for handling Qset err interrupt */
        tasklet_init(&nic->qs_err_task, nicvf_handle_qs_err,
                     (unsigned long)nic);

        /* Init RBDR tasklet which will refill RBDR */
        tasklet_init(&nic->rbdr_task, nicvf_rbdr_task,
                     (unsigned long)nic);
        INIT_DELAYED_WORK(&nic->rbdr_work, nicvf_rbdr_work);

        /* Configure CPI algorithm */
        nic->cpi_alg = cpi_alg;
        nicvf_config_cpi(nic);

        /* Configure receive side scaling */
        nicvf_rss_init(nic);

        err = nicvf_register_interrupts(nic);
        if (err)
                goto cleanup;

        /* Initialize the queues */
        err = nicvf_init_resources(nic);
        if (err)
                goto cleanup;

        /* Make sure queue initialization is written */
        wmb();

        nicvf_reg_write(nic, NIC_VF_INT, -1);
        /* Enable Qset err interrupt */
        nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);

        /* Enable completion queue interrupt */
        for (qidx = 0; qidx < qs->cq_cnt; qidx++)
                nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);

        /* Enable RBDR threshold interrupt */
        for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
                nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx);

        nic->drv_stats.txq_stop = 0;
        nic->drv_stats.txq_wake = 0;

        netif_carrier_on(netdev);
        netif_tx_start_all_queues(netdev);

        return 0;
cleanup:
        nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
        nicvf_unregister_interrupts(nic);
napi_del:
        for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
                cq_poll = nic->napi[qidx];
                if (!cq_poll)
                        continue;
                napi_disable(&cq_poll->napi);
                netif_napi_del(&cq_poll->napi);
                kfree(cq_poll);
                nic->napi[qidx] = NULL;
        }
        return err;
}

static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu)
{
        union nic_mbx mbx = {};

        mbx.frs.msg = NIC_MBOX_MSG_SET_MAX_FRS;
        mbx.frs.max_frs = mtu;
        mbx.frs.vf_id = nic->vf_id;

        return nicvf_send_msg_to_pf(nic, &mbx);
}

static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct nicvf *nic = netdev_priv(netdev);

        if (new_mtu > NIC_HW_MAX_FRS)
                return -EINVAL;

        if (new_mtu < NIC_HW_MIN_FRS)
                return -EINVAL;

        if (nicvf_update_hw_max_frs(nic, new_mtu))
                return -EINVAL;
        netdev->mtu = new_mtu;
        nic->mtu = new_mtu;

        return 0;
}

static int nicvf_set_mac_address(struct net_device *netdev, void *p)
{
        struct sockaddr *addr = p;
        struct nicvf *nic = netdev_priv(netdev);

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

        if (nic->msix_enabled) {
                if (nicvf_hw_set_mac_addr(nic, netdev))
                        return -EBUSY;
        } else {
                nic->set_mac_pending = true;
        }

        return 0;
}

void nicvf_update_lmac_stats(struct nicvf *nic)
{
        int stat = 0;
        union nic_mbx mbx = {};
        int timeout;

        if (!netif_running(nic->netdev))
                return;

        mbx.bgx_stats.msg = NIC_MBOX_MSG_BGX_STATS;
        mbx.bgx_stats.vf_id = nic->vf_id;
        /* Rx stats */
        mbx.bgx_stats.rx = 1;
        while (stat < BGX_RX_STATS_COUNT) {
                nic->bgx_stats_acked = 0;
                mbx.bgx_stats.idx = stat;
                nicvf_send_msg_to_pf(nic, &mbx);
                timeout = 0;
                while ((!nic->bgx_stats_acked) && (timeout < 10)) {
                        msleep(2);
                        timeout++;
                }
                stat++;
        }

        stat = 0;

        /* Tx stats */
        mbx.bgx_stats.rx = 0;
        while (stat < BGX_TX_STATS_COUNT) {
                nic->bgx_stats_acked = 0;
                mbx.bgx_stats.idx = stat;
                nicvf_send_msg_to_pf(nic, &mbx);
                timeout = 0;
                while ((!nic->bgx_stats_acked) && (timeout < 10)) {
                        msleep(2);
                        timeout++;
                }
                stat++;
        }
}

void nicvf_update_stats(struct nicvf *nic)
{
        int qidx;
        struct nicvf_hw_stats *stats = &nic->stats;
        struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
        struct queue_set *qs = nic->qs;

#define GET_RX_STATS(reg) \
        nicvf_reg_read(nic, NIC_VNIC_RX_STAT_0_13 | (reg << 3))
#define GET_TX_STATS(reg) \
        nicvf_reg_read(nic, NIC_VNIC_TX_STAT_0_4 | (reg << 3))
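
        /* Each statistic lives in its own 64-bit register, so the helper
         * macros above turn a register index into a byte offset with
         * (reg << 3).
         */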

        stats->rx_bytes_ok = GET_RX_STATS(RX_OCTS);
        stats->rx_ucast_frames_ok = GET_RX_STATS(RX_UCAST);
        stats->rx_bcast_frames_ok = GET_RX_STATS(RX_BCAST);
        stats->rx_mcast_frames_ok = GET_RX_STATS(RX_MCAST);
        stats->rx_fcs_errors = GET_RX_STATS(RX_FCS);
        stats->rx_l2_errors = GET_RX_STATS(RX_L2ERR);
        stats->rx_drop_red = GET_RX_STATS(RX_RED);
        stats->rx_drop_overrun = GET_RX_STATS(RX_ORUN);
        stats->rx_drop_bcast = GET_RX_STATS(RX_DRP_BCAST);
        stats->rx_drop_mcast = GET_RX_STATS(RX_DRP_MCAST);
        stats->rx_drop_l3_bcast = GET_RX_STATS(RX_DRP_L3BCAST);
        stats->rx_drop_l3_mcast = GET_RX_STATS(RX_DRP_L3MCAST);

        stats->tx_bytes_ok = GET_TX_STATS(TX_OCTS);
        stats->tx_ucast_frames_ok = GET_TX_STATS(TX_UCAST);
        stats->tx_bcast_frames_ok = GET_TX_STATS(TX_BCAST);
        stats->tx_mcast_frames_ok = GET_TX_STATS(TX_MCAST);
        stats->tx_drops = GET_TX_STATS(TX_DROP);

        drv_stats->rx_frames_ok = stats->rx_ucast_frames_ok +
                                  stats->rx_bcast_frames_ok +
                                  stats->rx_mcast_frames_ok;
        drv_stats->tx_frames_ok = stats->tx_ucast_frames_ok +
                                  stats->tx_bcast_frames_ok +
                                  stats->tx_mcast_frames_ok;
        drv_stats->rx_drops = stats->rx_drop_red +
                              stats->rx_drop_overrun;
        drv_stats->tx_drops = stats->tx_drops;

        /* Update RQ and SQ stats */
        for (qidx = 0; qidx < qs->rq_cnt; qidx++)
                nicvf_update_rq_stats(nic, qidx);
        for (qidx = 0; qidx < qs->sq_cnt; qidx++)
                nicvf_update_sq_stats(nic, qidx);
}

static struct rtnl_link_stats64 *nicvf_get_stats64(struct net_device *netdev,
                                            struct rtnl_link_stats64 *stats)
{
        struct nicvf *nic = netdev_priv(netdev);
        struct nicvf_hw_stats *hw_stats = &nic->stats;
        struct nicvf_drv_stats *drv_stats = &nic->drv_stats;

        nicvf_update_stats(nic);

        stats->rx_bytes = hw_stats->rx_bytes_ok;
        stats->rx_packets = drv_stats->rx_frames_ok;
        stats->rx_dropped = drv_stats->rx_drops;

        stats->tx_bytes = hw_stats->tx_bytes_ok;
        stats->tx_packets = drv_stats->tx_frames_ok;
        stats->tx_dropped = drv_stats->tx_drops;

        return stats;
}

static void nicvf_tx_timeout(struct net_device *dev)
{
        struct nicvf *nic = netdev_priv(dev);

        if (netif_msg_tx_err(nic))
                netdev_warn(dev, "%s: Transmit timed out, resetting\n",
                            dev->name);

        schedule_work(&nic->reset_task);
}

static void nicvf_reset_task(struct work_struct *work)
{
        struct nicvf *nic;

        nic = container_of(work, struct nicvf, reset_task);

        if (!netif_running(nic->netdev))
                return;

        nicvf_stop(nic->netdev);
        nicvf_open(nic->netdev);
        nic->netdev->trans_start = jiffies;
}

static const struct net_device_ops nicvf_netdev_ops = {
        .ndo_open		= nicvf_open,
        .ndo_stop		= nicvf_stop,
        .ndo_start_xmit		= nicvf_xmit,
        .ndo_change_mtu		= nicvf_change_mtu,
        .ndo_set_mac_address	= nicvf_set_mac_address,
        .ndo_get_stats64	= nicvf_get_stats64,
        .ndo_tx_timeout		= nicvf_tx_timeout,
};

static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        struct device *dev = &pdev->dev;
        struct net_device *netdev;
        struct nicvf *nic;
        struct queue_set *qs;
        int err;

        err = pci_enable_device(pdev);
        if (err) {
                dev_err(dev, "Failed to enable PCI device\n");
                return err;
        }

        err = pci_request_regions(pdev, DRV_NAME);
        if (err) {
                dev_err(dev, "PCI request regions failed 0x%x\n", err);
                goto err_disable_device;
        }

        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
        if (err) {
                dev_err(dev, "Unable to get usable DMA configuration\n");
                goto err_release_regions;
        }

        err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
        if (err) {
                dev_err(dev, "unable to get 48-bit DMA for consistent allocations\n");
                goto err_release_regions;
        }

        netdev = alloc_etherdev_mqs(sizeof(struct nicvf),
                                    MAX_RCV_QUEUES_PER_QS,
                                    MAX_SND_QUEUES_PER_QS);
        if (!netdev) {
                err = -ENOMEM;
                goto err_release_regions;
        }

        pci_set_drvdata(pdev, netdev);

        SET_NETDEV_DEV(netdev, &pdev->dev);

        nic = netdev_priv(netdev);
        nic->netdev = netdev;
        nic->pdev = pdev;

        /* MAP VF's configuration registers */
        nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
        if (!nic->reg_base) {
                dev_err(dev, "Cannot map config register space, aborting\n");
                err = -ENOMEM;
                goto err_free_netdev;
        }

        err = nicvf_set_qset_resources(nic);
        if (err)
                goto err_free_netdev;

        qs = nic->qs;

        err = nicvf_set_real_num_queues(netdev, qs->sq_cnt, qs->rq_cnt);
        if (err)
                goto err_free_netdev;

        /* Check if PF is alive and get MAC address for this VF */
        err = nicvf_register_misc_interrupt(nic);
        if (err)
                goto err_free_netdev;

        netdev->features |= (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SG |
                             NETIF_F_TSO | NETIF_F_GRO);
        netdev->hw_features = netdev->features;

        netdev->netdev_ops = &nicvf_netdev_ops;
        netdev->watchdog_timeo = NICVF_TX_TIMEOUT;

        INIT_WORK(&nic->reset_task, nicvf_reset_task);

        err = register_netdev(netdev);
        if (err) {
                dev_err(dev, "Failed to register netdevice\n");
                goto err_unregister_interrupts;
        }

        nic->msg_enable = debug;

        nicvf_set_ethtool_ops(netdev);

        return 0;

err_unregister_interrupts:
        nicvf_unregister_interrupts(nic);
err_free_netdev:
        pci_set_drvdata(pdev, NULL);
        free_netdev(netdev);
err_release_regions:
        pci_release_regions(pdev);
err_disable_device:
        pci_disable_device(pdev);
        return err;
}

static void nicvf_remove(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct nicvf *nic = netdev_priv(netdev);

        unregister_netdev(netdev);
        nicvf_unregister_interrupts(nic);
        pci_set_drvdata(pdev, NULL);
        free_netdev(netdev);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
}

static void nicvf_shutdown(struct pci_dev *pdev)
{
        nicvf_remove(pdev);
}

static struct pci_driver nicvf_driver = {
        .name = DRV_NAME,
        .id_table = nicvf_id_table,
        .probe = nicvf_probe,
        .remove = nicvf_remove,
        .shutdown = nicvf_shutdown,
};

static int __init nicvf_init_module(void)
{
        pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);

        return pci_register_driver(&nicvf_driver);
}

static void __exit nicvf_cleanup_module(void)
{
        pci_unregister_driver(&nicvf_driver);
}

module_init(nicvf_init_module);
module_exit(nicvf_cleanup_module);