/*
 * Copyright (C) 2015 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/log2.h>
#include <linux/prefetch.h>
#include <linux/irq.h>

#include "nic_reg.h"
#include "nic.h"
#include "nicvf_queues.h"
#include "thunder_bgx.h"
#define DRV_NAME	"thunder-nicvf"
#define DRV_VERSION	"1.0"

/* Supported devices */
static const struct pci_device_id nicvf_id_table[] = {
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
			 PCI_DEVICE_ID_THUNDER_NIC_VF,
			 PCI_VENDOR_ID_CAVIUM, 0xA11E) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
			 PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF,
			 PCI_VENDOR_ID_CAVIUM, 0xA11E) },
	{ 0, }	/* end of table */
};

MODULE_AUTHOR("Sunil Goutham");
MODULE_DESCRIPTION("Cavium Thunder NIC Virtual Function Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, nicvf_id_table);
static int debug = 0x00;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Debug message level bitmap");

static int cpi_alg = CPI_ALG_NONE;
module_param(cpi_alg, int, S_IRUGO);
MODULE_PARM_DESC(cpi_alg,
		 "CPI algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)");
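/* Bucket received frames by length into the driver's RMON-style
 * histogram counters (64/127/255/511/1023/1518/jumbo bins).
 */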
static inline void nicvf_set_rx_frame_cnt(struct nicvf *nic,
					  struct sk_buff *skb)
{
	if (skb->len <= 64)
		nic->drv_stats.rx_frames_64++;
	else if (skb->len <= 127)
		nic->drv_stats.rx_frames_127++;
	else if (skb->len <= 255)
		nic->drv_stats.rx_frames_255++;
	else if (skb->len <= 511)
		nic->drv_stats.rx_frames_511++;
	else if (skb->len <= 1023)
		nic->drv_stats.rx_frames_1023++;
	else if (skb->len <= 1518)
		nic->drv_stats.rx_frames_1518++;
	else
		nic->drv_stats.rx_frames_jumbo++;
}
/* The Cavium ThunderX network controller can *only* be found in SoCs
 * containing the ThunderX ARM64 CPU implementation. All accesses to the device
 * registers on this platform are implicitly strongly ordered with respect
 * to memory accesses. So writeq_relaxed() and readq_relaxed() are safe to use
 * with no memory barriers in this driver. The readq()/writeq() functions add
 * an explicit ordering operation which in this case is redundant, and only
 * adds overhead.
 */
/* Register read/write APIs */
void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val)
{
	writeq_relaxed(val, nic->reg_base + offset);
}

u64 nicvf_reg_read(struct nicvf *nic, u64 offset)
{
	return readq_relaxed(nic->reg_base + offset);
}

void nicvf_queue_reg_write(struct nicvf *nic, u64 offset,
			   u64 qidx, u64 val)
{
	void __iomem *addr = nic->reg_base + offset;

	writeq_relaxed(val, addr + (qidx << NIC_Q_NUM_SHIFT));
}

u64 nicvf_queue_reg_read(struct nicvf *nic, u64 offset, u64 qidx)
{
	void __iomem *addr = nic->reg_base + offset;

	return readq_relaxed(addr + (qidx << NIC_Q_NUM_SHIFT));
}
/* VF -> PF mailbox communication */
static void nicvf_write_to_mbx(struct nicvf *nic, union nic_mbx *mbx)
{
	u64 *msg = (u64 *)mbx;

	nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 0, msg[0]);
	nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 8, msg[1]);
}
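/* Synchronously post a 16-byte message (two 64-bit words at
 * NIC_VF_PF_MAILBOX_0_1) to the PF and poll the pf_acked/pf_nacked flags,
 * which are set by the mailbox interrupt handler, until the PF answers or
 * the ~2 second timeout expires. All configuration paths below go through
 * this helper.
 */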
int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
{
	int timeout = NIC_MBOX_MSG_TIMEOUT;
	int sleep = 10;

	nic->pf_acked = false;
	nic->pf_nacked = false;

	nicvf_write_to_mbx(nic, mbx);

	/* Wait for previous message to be acked, timeout 2sec */
	while (!nic->pf_acked) {
		if (nic->pf_nacked)
			return -EINVAL;
		msleep(sleep);
		if (nic->pf_acked)
			break;
		timeout -= sleep;
		if (!timeout) {
			netdev_err(nic->netdev,
				   "PF didn't ack to mbox msg %d from VF%d\n",
				   (mbx->msg.msg & 0xFF), nic->vf_id);
			return -EBUSY;
		}
	}

	return 0;
}
/* Checks if VF is able to communicate with PF
 * and also gets the VNIC number this VF is associated to.
 */
static int nicvf_check_pf_ready(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_READY;
	if (nicvf_send_msg_to_pf(nic, &mbx)) {
		netdev_err(nic->netdev,
			   "PF didn't respond to READY msg\n");
		return 0;
	}

	return 1;
}
static void nicvf_read_bgx_stats(struct nicvf *nic, struct bgx_stats_msg *bgx)
{
	if (bgx->rx)
		nic->bgx_stats.rx_stats[bgx->idx] = bgx->stats;
	else
		nic->bgx_stats.tx_stats[bgx->idx] = bgx->stats;
}
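/* Mailbox interrupt handler: copy the message out of the mailbox
 * registers and dispatch on the message type. READY carries this VF's
 * identity, node and MAC address; ACK/NACK complete a pending
 * nicvf_send_msg_to_pf(); link-change messages drive the carrier state.
 */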
static void nicvf_handle_mbx_intr(struct nicvf *nic)
{
	union nic_mbx mbx = {};
	u64 *mbx_data;
	u64 mbx_addr;
	int i;

	mbx_addr = NIC_VF_PF_MAILBOX_0_1;
	mbx_data = (u64 *)&mbx;

	for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
		*mbx_data = nicvf_reg_read(nic, mbx_addr);
		mbx_data++;
		mbx_addr += sizeof(u64);
	}

	netdev_dbg(nic->netdev, "Mbox message: msg: 0x%x\n", mbx.msg.msg);
	switch (mbx.msg.msg) {
	case NIC_MBOX_MSG_READY:
		nic->pf_acked = true;
		nic->vf_id = mbx.nic_cfg.vf_id & 0x7F;
		nic->tns_mode = mbx.nic_cfg.tns_mode & 0x7F;
		nic->node = mbx.nic_cfg.node_id;
		if (!nic->set_mac_pending)
			ether_addr_copy(nic->netdev->dev_addr,
					mbx.nic_cfg.mac_addr);
		nic->link_up = false;
		nic->duplex = 0;
		nic->speed = 0;
		break;
	case NIC_MBOX_MSG_ACK:
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_NACK:
		nic->pf_nacked = true;
		break;
	case NIC_MBOX_MSG_RSS_SIZE:
		nic->rss_info.rss_size = mbx.rss_size.ind_tbl_size;
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_BGX_STATS:
		nicvf_read_bgx_stats(nic, &mbx.bgx_stats);
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_BGX_LINK_CHANGE:
		nic->pf_acked = true;
		nic->link_up = mbx.link_status.link_up;
		nic->duplex = mbx.link_status.duplex;
		nic->speed = mbx.link_status.speed;
		if (nic->link_up) {
			netdev_info(nic->netdev, "%s: Link is Up %d Mbps %s\n",
				    nic->netdev->name, nic->speed,
				    nic->duplex == DUPLEX_FULL ?
				    "Full duplex" : "Half duplex");
			netif_carrier_on(nic->netdev);
			netif_tx_start_all_queues(nic->netdev);
		} else {
			netdev_info(nic->netdev, "%s: Link is Down\n",
				    nic->netdev->name);
			netif_carrier_off(nic->netdev);
			netif_tx_stop_all_queues(nic->netdev);
		}
		break;
	default:
		netdev_err(nic->netdev,
			   "Invalid message from PF, msg 0x%x\n", mbx.msg.msg);
		break;
	}
	nicvf_clear_intr(nic, NICVF_INTR_MBOX, 0);
}
static int nicvf_hw_set_mac_addr(struct nicvf *nic, struct net_device *netdev)
{
	union nic_mbx mbx = {};

	mbx.mac.msg = NIC_MBOX_MSG_SET_MAC;
	mbx.mac.vf_id = nic->vf_id;
	ether_addr_copy(mbx.mac.mac_addr, netdev->dev_addr);

	return nicvf_send_msg_to_pf(nic, &mbx);
}
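/* Ask the PF to program the channel-to-CPI (channel parse index) mapping
 * for this VF's receive queues, using the algorithm selected by the
 * cpi_alg module parameter.
 */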
static void nicvf_config_cpi(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.cpi_cfg.msg = NIC_MBOX_MSG_CPI_CFG;
	mbx.cpi_cfg.vf_id = nic->vf_id;
	mbx.cpi_cfg.cpi_alg = nic->cpi_alg;
	mbx.cpi_cfg.rq_cnt = nic->qs->rq_cnt;

	nicvf_send_msg_to_pf(nic, &mbx);
}
static void nicvf_get_rss_size(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE;
	mbx.rss_size.vf_id = nic->vf_id;
	nicvf_send_msg_to_pf(nic, &mbx);
}
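/* Push the RSS indirection table to the PF. The table can be larger than
 * one mailbox message, so it is sent in chunks of
 * RSS_IND_TBL_LEN_PER_MBX_MSG entries: the first chunk as RSS_CFG and
 * the rest as RSS_CFG_CONT, distinguished by a non-zero table offset.
 */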
void nicvf_config_rss(struct nicvf *nic)
{
	union nic_mbx mbx = {};
	struct nicvf_rss_info *rss = &nic->rss_info;
	int ind_tbl_len = rss->rss_size;
	int i, nextq = 0;

	mbx.rss_cfg.vf_id = nic->vf_id;
	mbx.rss_cfg.hash_bits = rss->hash_bits;
	while (ind_tbl_len) {
		mbx.rss_cfg.tbl_offset = nextq;
		mbx.rss_cfg.tbl_len = min(ind_tbl_len,
					  RSS_IND_TBL_LEN_PER_MBX_MSG);
		mbx.rss_cfg.msg = mbx.rss_cfg.tbl_offset ?
			  NIC_MBOX_MSG_RSS_CFG_CONT : NIC_MBOX_MSG_RSS_CFG;

		for (i = 0; i < mbx.rss_cfg.tbl_len; i++)
			mbx.rss_cfg.ind_tbl[i] = rss->ind_tbl[nextq++];

		nicvf_send_msg_to_pf(nic, &mbx);

		ind_tbl_len -= mbx.rss_cfg.tbl_len;
	}
}
void nicvf_set_rss_key(struct nicvf *nic)
{
	struct nicvf_rss_info *rss = &nic->rss_info;
	u64 key_addr = NIC_VNIC_RSS_KEY_0_4;
	int idx;

	for (idx = 0; idx < RSS_HASH_KEY_SIZE; idx++) {
		nicvf_reg_write(nic, key_addr, rss->key[idx]);
		key_addr += sizeof(u64);
	}
}
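/* One-time RSS setup: query the indirection table size from the PF,
 * program the hash key and enabled hash types, derive hash_bits from the
 * table size (e.g. rss_size 128 -> hash_bits 7, since
 * ilog2(rounddown_pow_of_two(128)) == 7), and spread the receive queues
 * across the table with ethtool_rxfh_indir_default().
 */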
static int nicvf_rss_init(struct nicvf *nic)
{
	struct nicvf_rss_info *rss = &nic->rss_info;
	int idx;

	nicvf_get_rss_size(nic);

	if (cpi_alg != CPI_ALG_NONE) {
		rss->enable = false;
		rss->hash_bits = 0;
		return 0;
	}

	rss->enable = true;

	/* Using the HW reset value for now */
	rss->key[0] = 0xFEED0BADFEED0BADULL;
	rss->key[1] = 0xFEED0BADFEED0BADULL;
	rss->key[2] = 0xFEED0BADFEED0BADULL;
	rss->key[3] = 0xFEED0BADFEED0BADULL;
	rss->key[4] = 0xFEED0BADFEED0BADULL;

	nicvf_set_rss_key(nic);

	rss->cfg = RSS_IP_HASH_ENA | RSS_TCP_HASH_ENA | RSS_UDP_HASH_ENA;
	nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, rss->cfg);

	rss->hash_bits = ilog2(rounddown_pow_of_two(rss->rss_size));

	for (idx = 0; idx < rss->rss_size; idx++)
		rss->ind_tbl[idx] = ethtool_rxfh_indir_default(idx,
							       nic->qs->rq_cnt);
	nicvf_config_rss(nic);
	return 1;
}
int nicvf_set_real_num_queues(struct net_device *netdev,
			      int tx_queues, int rx_queues)
{
	int err = 0;

	err = netif_set_real_num_tx_queues(netdev, tx_queues);
	if (err) {
		netdev_err(netdev,
			   "Failed to set no of Tx queues: %d\n", tx_queues);
		return err;
	}

	err = netif_set_real_num_rx_queues(netdev, rx_queues);
	if (err)
		netdev_err(netdev,
			   "Failed to set no of Rx queues: %d\n", rx_queues);

	return err;
}
static int nicvf_init_resources(struct nicvf *nic)
{
	int err;
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;

	/* Enable Qset */
	nicvf_qset_config(nic, true);

	/* Initialize queues and HW for data transfer */
	err = nicvf_config_data_transfer(nic, true);
	if (err) {
		netdev_err(nic->netdev,
			   "Failed to alloc/config VF's QSet resources\n");
		return err;
	}

	/* Send VF config done msg to PF */
	nicvf_write_to_mbx(nic, &mbx);

	return 0;
}
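/* TX completion: walk back from the CQE to the send-queue header
 * subdescriptor, release all subdescriptors of the packet, and free the
 * skb that was stashed in sq->skbuff[] when the packet was queued.
 */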
static void nicvf_snd_pkt_handler(struct net_device *netdev,
				  struct cmp_queue *cq,
				  struct cqe_send_t *cqe_tx, int cqe_type)
{
	struct sk_buff *skb = NULL;
	struct nicvf *nic = netdev_priv(netdev);
	struct snd_queue *sq;
	struct sq_hdr_subdesc *hdr;

	sq = &nic->qs->sq[cqe_tx->sq_idx];

	hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, cqe_tx->sqe_ptr);
	if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER)
		return;

	netdev_dbg(nic->netdev,
		   "%s Qset #%d SQ #%d SQ ptr #%d subdesc count %d\n",
		   __func__, cqe_tx->sq_qs, cqe_tx->sq_idx,
		   cqe_tx->sqe_ptr, hdr->subdesc_cnt);

	nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
	nicvf_check_cqe_tx_errs(nic, cq, cqe_tx);
	skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr];
	/* For TSO offloaded packets only one head SKB needs to be freed */
	if (skb) {
		prefetch(skb);
		dev_consume_skb_any(skb);
		sq->skbuff[cqe_tx->sqe_ptr] = (u64)NULL;
	}
}
static inline void nicvf_set_rxhash(struct net_device *netdev,
				    struct cqe_rx_t *cqe_rx,
				    struct sk_buff *skb)
{
	u8 hash_type;
	u32 hash;

	if (!(netdev->features & NETIF_F_RXHASH))
		return;

	switch (cqe_rx->rss_alg) {
	case RSS_ALG_TCP_IP:
	case RSS_ALG_UDP_IP:
		hash_type = PKT_HASH_TYPE_L4;
		hash = cqe_rx->rss_tag;
		break;
	case RSS_ALG_IP:
		hash_type = PKT_HASH_TYPE_L3;
		hash = cqe_rx->rss_tag;
		break;
	default:
		hash_type = PKT_HASH_TYPE_NONE;
		hash = 0;
	}

	skb_set_hash(skb, hash, hash_type);
}
static void nicvf_rcv_pkt_handler(struct net_device *netdev,
				  struct napi_struct *napi,
				  struct cmp_queue *cq,
				  struct cqe_rx_t *cqe_rx, int cqe_type)
{
	struct sk_buff *skb;
	struct nicvf *nic = netdev_priv(netdev);
	int err = 0;

	/* Check for errors */
	err = nicvf_check_cqe_rx_errs(nic, cq, cqe_rx);
	if (err && !cqe_rx->rb_cnt)
		return;

	skb = nicvf_get_rcv_skb(nic, cqe_rx);
	if (!skb) {
		netdev_dbg(nic->netdev, "Packet not received\n");
		return;
	}

	if (netif_msg_pktdata(nic)) {
		netdev_info(nic->netdev, "%s: skb 0x%p, len=%d\n", netdev->name,
			    skb, skb->len);
		print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1,
			       skb->data, skb->len, true);
	}

	/* If error packet, drop it here */
	if (err) {
		dev_kfree_skb_any(skb);
		return;
	}

	nicvf_set_rx_frame_cnt(nic, skb);

	nicvf_set_rxhash(netdev, cqe_rx, skb);

	skb_record_rx_queue(skb, cqe_rx->rq_idx);
	if (netdev->hw_features & NETIF_F_RXCSUM) {
		/* HW by default verifies TCP/UDP/SCTP checksums */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else {
		skb_checksum_none_assert(skb);
	}

	skb->protocol = eth_type_trans(skb, netdev);

	if (napi && (netdev->features & NETIF_F_GRO))
		napi_gro_receive(napi, skb);
	else
		netif_receive_skb(skb);
}
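/* Core completion-queue service routine, shared by NAPI poll and the
 * Qset-error path (which calls it with napi == NULL). It reads the count
 * and head of valid CQEs from the queue registers, handles RX and SEND
 * completions up to the NAPI budget, then rings the doorbell so the
 * hardware can reuse the processed CQEs.
 */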
static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx,
				 struct napi_struct *napi, int budget)
{
	int processed_cqe, work_done = 0, tx_done = 0;
	int cqe_count, cqe_head;
	struct nicvf *nic = netdev_priv(netdev);
	struct queue_set *qs = nic->qs;
	struct cmp_queue *cq = &qs->cq[cq_idx];
	struct cqe_rx_t *cq_desc;
	struct netdev_queue *txq;

	spin_lock_bh(&cq->lock);
loop:
	processed_cqe = 0;
	/* Get no of valid CQ entries to process */
	cqe_count = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, cq_idx);
	cqe_count &= CQ_CQE_COUNT;
	if (!cqe_count)
		goto done;

	/* Get head of the valid CQ entries */
	cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9;
	cqe_head &= 0xFFFF;

	netdev_dbg(nic->netdev, "%s CQ%d cqe_count %d cqe_head %d\n",
		   __func__, cq_idx, cqe_count, cqe_head);
	while (processed_cqe < cqe_count) {
		/* Get the CQ descriptor */
		cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head);
		cqe_head++;
		cqe_head &= (cq->dmem.q_len - 1);
		/* Initiate prefetch for next descriptor */
		prefetch((struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head));

		if ((work_done >= budget) && napi &&
		    (cq_desc->cqe_type != CQE_TYPE_SEND)) {
			break;
		}

		netdev_dbg(nic->netdev, "CQ%d cq_desc->cqe_type %d\n",
			   cq_idx, cq_desc->cqe_type);
		switch (cq_desc->cqe_type) {
		case CQE_TYPE_RX:
			nicvf_rcv_pkt_handler(netdev, napi, cq,
					      cq_desc, CQE_TYPE_RX);
			work_done++;
			break;
		case CQE_TYPE_SEND:
			nicvf_snd_pkt_handler(netdev, cq,
					      (void *)cq_desc, CQE_TYPE_SEND);
			tx_done++;
			break;
		case CQE_TYPE_INVALID:
		case CQE_TYPE_RX_SPLIT:
		case CQE_TYPE_RX_TCP:
		case CQE_TYPE_SEND_PTP:
			/* Ignore for now */
			break;
		}
		processed_cqe++;
	}
	netdev_dbg(nic->netdev,
		   "%s CQ%d processed_cqe %d work_done %d budget %d\n",
		   __func__, cq_idx, processed_cqe, work_done, budget);

	/* Ring doorbell to inform H/W to reuse processed CQEs */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR,
			      cq_idx, processed_cqe);

	if ((work_done < budget) && napi)
		goto loop;

done:
	/* Wake up TXQ if it was stopped earlier due to SQ full */
	if (tx_done) {
		txq = netdev_get_tx_queue(netdev, cq_idx);
		if (netif_tx_queue_stopped(txq)) {
			netif_tx_start_queue(txq);
			nic->drv_stats.txq_wake++;
			if (netif_msg_tx_err(nic))
				netdev_warn(netdev,
					    "%s: Transmit queue wakeup SQ%d\n",
					    netdev->name, cq_idx);
		}
	}

	spin_unlock_bh(&cq->lock);
	return work_done;
}
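/* NAPI poll handler. If the budget was not exhausted, exit polling and
 * re-arm the CQ interrupt; the head register is read and written back
 * around the re-arm, presumably so CQEs that landed during
 * napi_complete() still raise a fresh interrupt.
 */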
static int nicvf_poll(struct napi_struct *napi, int budget)
{
	u64 cq_head;
	int work_done = 0;
	struct net_device *netdev = napi->dev;
	struct nicvf *nic = netdev_priv(netdev);
	struct nicvf_cq_poll *cq;

	cq = container_of(napi, struct nicvf_cq_poll, napi);
	work_done = nicvf_cq_intr_handler(netdev, cq->cq_idx, napi, budget);

	if (work_done < budget) {
		/* Slow packet rate, exit polling */
		napi_complete(napi);
		/* Re-enable interrupts */
		cq_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD,
					       cq->cq_idx);
		nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->cq_idx);
		nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_HEAD,
				      cq->cq_idx, cq_head);
		nicvf_enable_intr(nic, NICVF_INTR_CQ, cq->cq_idx);
	}
	return work_done;
}
/* Qset error interrupt handler
 *
 * As of now only CQ errors are handled
 */
static void nicvf_handle_qs_err(unsigned long data)
{
	struct nicvf *nic = (struct nicvf *)data;
	struct queue_set *qs = nic->qs;
	int qidx;
	u64 status;

	netif_tx_disable(nic->netdev);

	/* Check if it is CQ err */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		status = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS,
					      qidx);
		if (!(status & CQ_ERR_MASK))
			continue;
		/* Process already queued CQEs and reconfig CQ */
		nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
		nicvf_sq_disable(nic, qidx);
		nicvf_cq_intr_handler(nic->netdev, qidx, NULL, 0);
		nicvf_cmp_queue_config(nic, qs, qidx, true);
		nicvf_sq_free_used_descs(nic->netdev, &qs->sq[qidx], qidx);
		nicvf_sq_enable(nic, &qs->sq[qidx], qidx);

		nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);
	}

	netif_tx_start_all_queues(nic->netdev);
	/* Re-enable Qset error interrupt */
	nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);
}
static irqreturn_t nicvf_misc_intr_handler(int irq, void *nicvf_irq)
{
	struct nicvf *nic = (struct nicvf *)nicvf_irq;
	u64 intr;

	intr = nicvf_reg_read(nic, NIC_VF_INT);
	/* Check for spurious interrupt */
	if (!(intr & NICVF_INTR_MBOX_MASK))
		return IRQ_NONE;

	nicvf_handle_mbx_intr(nic);

	return IRQ_HANDLED;
}
static irqreturn_t nicvf_intr_handler(int irq, void *nicvf_irq)
{
	u64 qidx, intr, clear_intr = 0;
	u64 cq_intr, rbdr_intr, qs_err_intr;
	struct nicvf *nic = (struct nicvf *)nicvf_irq;
	struct queue_set *qs = nic->qs;
	struct nicvf_cq_poll *cq_poll = NULL;

	intr = nicvf_reg_read(nic, NIC_VF_INT);
	if (netif_msg_intr(nic))
		netdev_info(nic->netdev, "%s: interrupt status 0x%llx\n",
			    nic->netdev->name, intr);

	qs_err_intr = intr & NICVF_INTR_QS_ERR_MASK;
	if (qs_err_intr) {
		/* Disable Qset err interrupt and schedule softirq */
		nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
		tasklet_hi_schedule(&nic->qs_err_task);
		clear_intr |= qs_err_intr;
	}

	/* Disable interrupts and start polling */
	cq_intr = (intr & NICVF_INTR_CQ_MASK) >> NICVF_INTR_CQ_SHIFT;
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		if (!(cq_intr & (1 << qidx)))
			continue;
		if (!nicvf_is_intr_enabled(nic, NICVF_INTR_CQ, qidx))
			continue;

		nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
		clear_intr |= ((1 << qidx) << NICVF_INTR_CQ_SHIFT);

		cq_poll = nic->napi[qidx];
		/* Schedule NAPI */
		if (cq_poll)
			napi_schedule(&cq_poll->napi);
	}

	/* Handle RBDR interrupts */
	rbdr_intr = (intr & NICVF_INTR_RBDR_MASK) >> NICVF_INTR_RBDR_SHIFT;
	if (rbdr_intr) {
		/* Disable RBDR interrupt and schedule softirq */
		for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
			if (!nicvf_is_intr_enabled(nic, NICVF_INTR_RBDR, qidx))
				continue;
			nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
			tasklet_hi_schedule(&nic->rbdr_task);
			clear_intr |= ((1 << qidx) << NICVF_INTR_RBDR_SHIFT);
		}
	}

	/* Clear interrupts */
	nicvf_reg_write(nic, NIC_VF_INT, clear_intr);
	return IRQ_HANDLED;
}
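/* The VF owns a fixed block of MSI-X vectors (NIC_VF_MSIX_VECTORS), laid
 * out by the NICVF_INTR_ID_* offsets: CQ vectors first, then SQ, RBDR,
 * the PF mailbox (MISC), and finally the Qset-error vector.
 */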
static int nicvf_enable_msix(struct nicvf *nic)
{
	int ret, vec;

	nic->num_vec = NIC_VF_MSIX_VECTORS;

	for (vec = 0; vec < nic->num_vec; vec++)
		nic->msix_entries[vec].entry = vec;

	ret = pci_enable_msix(nic->pdev, nic->msix_entries, nic->num_vec);
	if (ret) {
		netdev_err(nic->netdev,
			   "Req for #%d msix vectors failed\n", nic->num_vec);
		return 0;
	}
	nic->msix_enabled = 1;
	return 1;
}
static void nicvf_disable_msix(struct nicvf *nic)
{
	if (nic->msix_enabled) {
		pci_disable_msix(nic->pdev);
		nic->msix_enabled = 0;
		nic->num_vec = 0;
	}
}
static int nicvf_register_interrupts(struct nicvf *nic)
{
	int irq, free, ret = 0;
	int vector;

	for_each_cq_irq(irq)
		sprintf(nic->irq_name[irq], "NICVF%d CQ%d",
			nic->vf_id, irq);

	for_each_sq_irq(irq)
		sprintf(nic->irq_name[irq], "NICVF%d SQ%d",
			nic->vf_id, irq - NICVF_INTR_ID_SQ);

	for_each_rbdr_irq(irq)
		sprintf(nic->irq_name[irq], "NICVF%d RBDR%d",
			nic->vf_id, irq - NICVF_INTR_ID_RBDR);

	/* Register all interrupts except mailbox */
	for (irq = 0; irq < NICVF_INTR_ID_SQ; irq++) {
		vector = nic->msix_entries[irq].vector;
		ret = request_irq(vector, nicvf_intr_handler,
				  0, nic->irq_name[irq], nic);
		if (ret)
			break;
		nic->irq_allocated[irq] = true;
	}

	for (irq = NICVF_INTR_ID_SQ; irq < NICVF_INTR_ID_MISC; irq++) {
		vector = nic->msix_entries[irq].vector;
		ret = request_irq(vector, nicvf_intr_handler,
				  0, nic->irq_name[irq], nic);
		if (ret)
			break;
		nic->irq_allocated[irq] = true;
	}

	sprintf(nic->irq_name[NICVF_INTR_ID_QS_ERR],
		"NICVF%d Qset error", nic->vf_id);
	if (!ret) {
		vector = nic->msix_entries[NICVF_INTR_ID_QS_ERR].vector;
		irq = NICVF_INTR_ID_QS_ERR;
		ret = request_irq(vector, nicvf_intr_handler,
				  0, nic->irq_name[irq], nic);
		if (!ret)
			nic->irq_allocated[irq] = true;
	}

	if (ret) {
		netdev_err(nic->netdev, "Request irq failed\n");
		for (free = 0; free < irq; free++)
			free_irq(nic->msix_entries[free].vector, nic);
		return ret;
	}

	return 0;
}
static void nicvf_unregister_interrupts(struct nicvf *nic)
{
	int irq;

	/* Free registered interrupts */
	for (irq = 0; irq < nic->num_vec; irq++) {
		if (nic->irq_allocated[irq])
			free_irq(nic->msix_entries[irq].vector, nic);
		nic->irq_allocated[irq] = false;
	}

	/* Disable MSI-X */
	nicvf_disable_msix(nic);
}
/* Initialize MSIX vectors and register MISC interrupt.
 * Send READY message to PF to check if it is alive
 */
static int nicvf_register_misc_interrupt(struct nicvf *nic)
{
	int ret = 0;
	int irq = NICVF_INTR_ID_MISC;

	/* Return if mailbox interrupt is already registered */
	if (nic->msix_enabled)
		return 0;

	/* Enable MSI-X */
	if (!nicvf_enable_msix(nic))
		return 1;

	sprintf(nic->irq_name[irq], "%s Mbox", "NICVF");
	/* Register Misc interrupt */
	ret = request_irq(nic->msix_entries[irq].vector,
			  nicvf_misc_intr_handler, 0, nic->irq_name[irq], nic);

	if (ret)
		return ret;
	nic->irq_allocated[irq] = true;

	/* Enable mailbox interrupt */
	nicvf_enable_intr(nic, NICVF_INTR_MBOX, 0);

	/* Check if VF is able to communicate with PF */
	if (!nicvf_check_pf_ready(nic)) {
		nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
		nicvf_unregister_interrupts(nic);
		return 1;
	}

	return 0;
}
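/* Transmit path: queue the skb on the send queue mapped to this TX queue
 * index. If the SQ has no room, stop the queue and return NETDEV_TX_BUSY;
 * the matching netif_tx_start_queue() happens in nicvf_cq_intr_handler()
 * once send completions free up descriptors.
 */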
static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct nicvf *nic = netdev_priv(netdev);
	int qid = skb_get_queue_mapping(skb);
	struct netdev_queue *txq = netdev_get_tx_queue(netdev, qid);

	/* Check for minimum packet length */
	if (skb->len <= ETH_HLEN) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (!netif_tx_queue_stopped(txq) && !nicvf_sq_append_skb(nic, skb)) {
		netif_tx_stop_queue(txq);
		nic->drv_stats.txq_stop++;
		if (netif_msg_tx_err(nic))
			netdev_warn(netdev,
				    "%s: Transmit ring full, stopping SQ%d\n",
				    netdev->name, qid);

		return NETDEV_TX_BUSY;
	}

	return NETDEV_TX_OK;
}
int nicvf_stop(struct net_device *netdev)
{
	int irq, qidx;
	struct nicvf *nic = netdev_priv(netdev);
	struct queue_set *qs = nic->qs;
	struct nicvf_cq_poll *cq_poll = NULL;
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN;
	nicvf_send_msg_to_pf(nic, &mbx);

	netif_carrier_off(netdev);

	/* Disable RBDR & QS error interrupts */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
		nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
		nicvf_clear_intr(nic, NICVF_INTR_RBDR, qidx);
	}
	nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
	nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0);

	/* Wait for pending IRQ handlers to finish */
	for (irq = 0; irq < nic->num_vec; irq++)
		synchronize_irq(nic->msix_entries[irq].vector);

	tasklet_kill(&nic->rbdr_task);
	tasklet_kill(&nic->qs_err_task);
	if (nic->rb_work_scheduled)
		cancel_delayed_work_sync(&nic->rbdr_work);

	for (qidx = 0; qidx < nic->qs->cq_cnt; qidx++) {
		cq_poll = nic->napi[qidx];
		if (!cq_poll)
			continue;
		nic->napi[qidx] = NULL;
		napi_synchronize(&cq_poll->napi);
		/* CQ intr is enabled while napi_complete,
		 * so disable it now
		 */
		nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
		nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx);
		napi_disable(&cq_poll->napi);
		netif_napi_del(&cq_poll->napi);
		kfree(cq_poll);
	}

	netif_tx_disable(netdev);

	/* Free resources */
	nicvf_config_data_transfer(nic, false);

	/* Disable HW Qset */
	nicvf_qset_config(nic, false);

	/* disable mailbox interrupt */
	nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);

	nicvf_unregister_interrupts(nic);

	return 0;
}
int nicvf_open(struct net_device *netdev)
{
	int err, qidx;
	struct nicvf *nic = netdev_priv(netdev);
	struct queue_set *qs = nic->qs;
	struct nicvf_cq_poll *cq_poll = NULL;

	nic->mtu = netdev->mtu;

	netif_carrier_off(netdev);

	err = nicvf_register_misc_interrupt(nic);
	if (err)
		return err;

	/* Register NAPI handler for processing CQEs */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		cq_poll = kzalloc(sizeof(*cq_poll), GFP_KERNEL);
		if (!cq_poll) {
			err = -ENOMEM;
			goto napi_del;
		}
		cq_poll->cq_idx = qidx;
		netif_napi_add(netdev, &cq_poll->napi, nicvf_poll,
			       NAPI_POLL_WEIGHT);
		napi_enable(&cq_poll->napi);
		nic->napi[qidx] = cq_poll;
	}

	/* Check if we got MAC address from PF or else generate a random MAC */
	if (is_zero_ether_addr(netdev->dev_addr)) {
		eth_hw_addr_random(netdev);
		nicvf_hw_set_mac_addr(nic, netdev);
	}

	if (nic->set_mac_pending) {
		nic->set_mac_pending = false;
		nicvf_hw_set_mac_addr(nic, netdev);
	}

	/* Init tasklet for handling Qset err interrupt */
	tasklet_init(&nic->qs_err_task, nicvf_handle_qs_err,
		     (unsigned long)nic);

	/* Init RBDR tasklet which will refill RBDR */
	tasklet_init(&nic->rbdr_task, nicvf_rbdr_task,
		     (unsigned long)nic);
	INIT_DELAYED_WORK(&nic->rbdr_work, nicvf_rbdr_work);

	/* Configure CPI algorithm */
	nic->cpi_alg = cpi_alg;
	nicvf_config_cpi(nic);

	/* Configure receive side scaling */
	nicvf_rss_init(nic);

	err = nicvf_register_interrupts(nic);
	if (err)
		goto cleanup;

	/* Initialize the queues */
	err = nicvf_init_resources(nic);
	if (err)
		goto cleanup;

	/* Make sure queue initialization is written */
	wmb();

	nicvf_reg_write(nic, NIC_VF_INT, -1);
	/* Enable Qset err interrupt */
	nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);

	/* Enable completion queue interrupt */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++)
		nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);

	/* Enable RBDR threshold interrupt */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
		nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx);

	nic->drv_stats.txq_stop = 0;
	nic->drv_stats.txq_wake = 0;

	netif_carrier_on(netdev);
	netif_tx_start_all_queues(netdev);

	return 0;
cleanup:
	nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
	nicvf_unregister_interrupts(nic);
napi_del:
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		cq_poll = nic->napi[qidx];
		if (!cq_poll)
			continue;
		napi_disable(&cq_poll->napi);
		netif_napi_del(&cq_poll->napi);
		kfree(cq_poll);
		nic->napi[qidx] = NULL;
	}
	return err;
}
static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu)
{
	union nic_mbx mbx = {};

	mbx.frs.msg = NIC_MBOX_MSG_SET_MAX_FRS;
	mbx.frs.max_frs = mtu;
	mbx.frs.vf_id = nic->vf_id;

	return nicvf_send_msg_to_pf(nic, &mbx);
}
static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct nicvf *nic = netdev_priv(netdev);

	if (new_mtu > NIC_HW_MAX_FRS)
		return -EINVAL;

	if (new_mtu < NIC_HW_MIN_FRS)
		return -EINVAL;

	if (nicvf_update_hw_max_frs(nic, new_mtu))
		return -EINVAL;
	netdev->mtu = new_mtu;
	nic->mtu = new_mtu;

	return 0;
}
static int nicvf_set_mac_address(struct net_device *netdev, void *p)
{
	struct sockaddr *addr = p;
	struct nicvf *nic = netdev_priv(netdev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	if (nic->msix_enabled) {
		if (nicvf_hw_set_mac_addr(nic, netdev))
			return -EBUSY;
	} else {
		nic->set_mac_pending = true;
	}

	return 0;
}
void nicvf_update_lmac_stats(struct nicvf *nic)
{
	int stat = 0;
	union nic_mbx mbx = {};

	if (!netif_running(nic->netdev))
		return;

	mbx.bgx_stats.msg = NIC_MBOX_MSG_BGX_STATS;
	mbx.bgx_stats.vf_id = nic->vf_id;
	/* Rx stats */
	mbx.bgx_stats.rx = 1;
	while (stat < BGX_RX_STATS_COUNT) {
		mbx.bgx_stats.idx = stat;
		if (nicvf_send_msg_to_pf(nic, &mbx))
			return;
		stat++;
	}

	stat = 0;

	/* Tx stats */
	mbx.bgx_stats.rx = 0;
	while (stat < BGX_TX_STATS_COUNT) {
		mbx.bgx_stats.idx = stat;
		if (nicvf_send_msg_to_pf(nic, &mbx))
			return;
		stat++;
	}
}
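/* Snapshot the VNIC hardware counters. Each 64-bit statistic register
 * sits 8 bytes apart, so GET_RX_STATS()/GET_TX_STATS() fold the counter
 * index into the register offset with (reg << 3).
 */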
void nicvf_update_stats(struct nicvf *nic)
{
	int qidx;
	struct nicvf_hw_stats *stats = &nic->hw_stats;
	struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
	struct queue_set *qs = nic->qs;

#define GET_RX_STATS(reg) \
	nicvf_reg_read(nic, NIC_VNIC_RX_STAT_0_13 | (reg << 3))
#define GET_TX_STATS(reg) \
	nicvf_reg_read(nic, NIC_VNIC_TX_STAT_0_4 | (reg << 3))

	stats->rx_bytes = GET_RX_STATS(RX_OCTS);
	stats->rx_ucast_frames = GET_RX_STATS(RX_UCAST);
	stats->rx_bcast_frames = GET_RX_STATS(RX_BCAST);
	stats->rx_mcast_frames = GET_RX_STATS(RX_MCAST);
	stats->rx_fcs_errors = GET_RX_STATS(RX_FCS);
	stats->rx_l2_errors = GET_RX_STATS(RX_L2ERR);
	stats->rx_drop_red = GET_RX_STATS(RX_RED);
	stats->rx_drop_red_bytes = GET_RX_STATS(RX_RED_OCTS);
	stats->rx_drop_overrun = GET_RX_STATS(RX_ORUN);
	stats->rx_drop_overrun_bytes = GET_RX_STATS(RX_ORUN_OCTS);
	stats->rx_drop_bcast = GET_RX_STATS(RX_DRP_BCAST);
	stats->rx_drop_mcast = GET_RX_STATS(RX_DRP_MCAST);
	stats->rx_drop_l3_bcast = GET_RX_STATS(RX_DRP_L3BCAST);
	stats->rx_drop_l3_mcast = GET_RX_STATS(RX_DRP_L3MCAST);

	stats->tx_bytes_ok = GET_TX_STATS(TX_OCTS);
	stats->tx_ucast_frames_ok = GET_TX_STATS(TX_UCAST);
	stats->tx_bcast_frames_ok = GET_TX_STATS(TX_BCAST);
	stats->tx_mcast_frames_ok = GET_TX_STATS(TX_MCAST);
	stats->tx_drops = GET_TX_STATS(TX_DROP);

	drv_stats->tx_frames_ok = stats->tx_ucast_frames_ok +
				  stats->tx_bcast_frames_ok +
				  stats->tx_mcast_frames_ok;
	drv_stats->rx_drops = stats->rx_drop_red +
			      stats->rx_drop_overrun;
	drv_stats->tx_drops = stats->tx_drops;

	/* Update RQ and SQ stats */
	for (qidx = 0; qidx < qs->rq_cnt; qidx++)
		nicvf_update_rq_stats(nic, qidx);
	for (qidx = 0; qidx < qs->sq_cnt; qidx++)
		nicvf_update_sq_stats(nic, qidx);
}
static struct rtnl_link_stats64 *nicvf_get_stats64(struct net_device *netdev,
						   struct rtnl_link_stats64 *stats)
{
	struct nicvf *nic = netdev_priv(netdev);
	struct nicvf_hw_stats *hw_stats = &nic->hw_stats;
	struct nicvf_drv_stats *drv_stats = &nic->drv_stats;

	nicvf_update_stats(nic);

	stats->rx_bytes = hw_stats->rx_bytes;
	stats->rx_packets = drv_stats->rx_frames_ok;
	stats->rx_dropped = drv_stats->rx_drops;
	stats->multicast = hw_stats->rx_mcast_frames;

	stats->tx_bytes = hw_stats->tx_bytes_ok;
	stats->tx_packets = drv_stats->tx_frames_ok;
	stats->tx_dropped = drv_stats->tx_drops;

	return stats;
}
static void nicvf_tx_timeout(struct net_device *dev)
{
	struct nicvf *nic = netdev_priv(dev);

	if (netif_msg_tx_err(nic))
		netdev_warn(dev, "%s: Transmit timed out, resetting\n",
			    dev->name);

	schedule_work(&nic->reset_task);
}
static void nicvf_reset_task(struct work_struct *work)
{
	struct nicvf *nic;

	nic = container_of(work, struct nicvf, reset_task);

	if (!netif_running(nic->netdev))
		return;

	nicvf_stop(nic->netdev);
	nicvf_open(nic->netdev);
	nic->netdev->trans_start = jiffies;
}
static const struct net_device_ops nicvf_netdev_ops = {
	.ndo_open		= nicvf_open,
	.ndo_stop		= nicvf_stop,
	.ndo_start_xmit		= nicvf_xmit,
	.ndo_change_mtu		= nicvf_change_mtu,
	.ndo_set_mac_address	= nicvf_set_mac_address,
	.ndo_get_stats64	= nicvf_get_stats64,
	.ndo_tx_timeout		= nicvf_tx_timeout,
};
static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct net_device *netdev;
	struct nicvf *nic;
	struct queue_set *qs;
	int err;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto err_disable_device;
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to get usable DMA configuration\n");
		goto err_release_regions;
	}

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "unable to get 48-bit DMA for consistent allocations\n");
		goto err_release_regions;
	}

	netdev = alloc_etherdev_mqs(sizeof(struct nicvf),
				    MAX_RCV_QUEUES_PER_QS,
				    MAX_SND_QUEUES_PER_QS);
	if (!netdev) {
		err = -ENOMEM;
		goto err_release_regions;
	}

	pci_set_drvdata(pdev, netdev);

	SET_NETDEV_DEV(netdev, &pdev->dev);

	nic = netdev_priv(netdev);
	nic->netdev = netdev;
	nic->pdev = pdev;

	/* MAP VF's configuration registers */
	nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
	if (!nic->reg_base) {
		dev_err(dev, "Cannot map config register space, aborting\n");
		err = -ENOMEM;
		goto err_free_netdev;
	}

	err = nicvf_set_qset_resources(nic);
	if (err)
		goto err_free_netdev;

	qs = nic->qs;

	err = nicvf_set_real_num_queues(netdev, qs->sq_cnt, qs->rq_cnt);
	if (err)
		goto err_free_netdev;

	/* Check if PF is alive and get MAC address for this VF */
	err = nicvf_register_misc_interrupt(nic);
	if (err)
		goto err_free_netdev;

	netdev->features |= (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SG |
			     NETIF_F_TSO | NETIF_F_GRO | NETIF_F_RXHASH);

	netdev->hw_features = netdev->features;

	netdev->netdev_ops = &nicvf_netdev_ops;
	netdev->watchdog_timeo = NICVF_TX_TIMEOUT;

	INIT_WORK(&nic->reset_task, nicvf_reset_task);

	err = register_netdev(netdev);
	if (err) {
		dev_err(dev, "Failed to register netdevice\n");
		goto err_unregister_interrupts;
	}

	nic->msg_enable = debug;

	nicvf_set_ethtool_ops(netdev);

	return 0;

err_unregister_interrupts:
	nicvf_unregister_interrupts(nic);
err_free_netdev:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
	return err;
}
static void nicvf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nicvf *nic = netdev_priv(netdev);

	unregister_netdev(netdev);
	nicvf_unregister_interrupts(nic);
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
static void nicvf_shutdown(struct pci_dev *pdev)
{
	nicvf_remove(pdev);
}

static struct pci_driver nicvf_driver = {
	.name = DRV_NAME,
	.id_table = nicvf_id_table,
	.probe = nicvf_probe,
	.remove = nicvf_remove,
	.shutdown = nicvf_shutdown,
};
static int __init nicvf_init_module(void)
{
	pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);

	return pci_register_driver(&nicvf_driver);
}

static void __exit nicvf_cleanup_module(void)
{
	pci_unregister_driver(&nicvf_driver);
}

module_init(nicvf_init_module);
module_exit(nicvf_cleanup_module);