/*
 * Copyright (C) 2015 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/log2.h>
#include <linux/prefetch.h>
#include <linux/irq.h>

#include "nic_reg.h"
#include "nic.h"
#include "nicvf_queues.h"
#include "thunder_bgx.h"

#define DRV_NAME	"thunder-nicvf"
#define DRV_VERSION	"1.0"
/* Supported devices */
static const struct pci_device_id nicvf_id_table[] = {
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
			 PCI_DEVICE_ID_THUNDER_NIC_VF,
			 PCI_VENDOR_ID_CAVIUM, 0xA11E) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
			 PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF,
			 PCI_VENDOR_ID_CAVIUM, 0xA11E) },
	{ 0, }	/* end of table */
};
MODULE_AUTHOR("Sunil Goutham");
MODULE_DESCRIPTION("Cavium Thunder NIC Virtual Function Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, nicvf_id_table);
static int debug = 0x00;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Debug message level bitmap");

static int cpi_alg = CPI_ALG_NONE;
module_param(cpi_alg, int, S_IRUGO);
MODULE_PARM_DESC(cpi_alg,
		 "CPI algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)");
static inline void nicvf_set_rx_frame_cnt(struct nicvf *nic,
					  struct sk_buff *skb)
{
	if (skb->len <= 64)
		nic->drv_stats.rx_frames_64++;
	else if (skb->len <= 127)
		nic->drv_stats.rx_frames_127++;
	else if (skb->len <= 255)
		nic->drv_stats.rx_frames_255++;
	else if (skb->len <= 511)
		nic->drv_stats.rx_frames_511++;
	else if (skb->len <= 1023)
		nic->drv_stats.rx_frames_1023++;
	else if (skb->len <= 1518)
		nic->drv_stats.rx_frames_1518++;
	else
		nic->drv_stats.rx_frames_jumbo++;
}
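/* Note: the frame-size buckets above (64/127/255/511/1023/1518) follow the
 * standard RMON etherStats histogram bins, so these counters line up with
 * the per-size counters that switches and most other NICs report.
 */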
/* The Cavium ThunderX network controller can *only* be found in SoCs
 * containing the ThunderX ARM64 CPU implementation.  All accesses to the device
 * registers on this platform are implicitly strongly ordered with respect
 * to memory accesses. So writeq_relaxed() and readq_relaxed() are safe to use
 * with no memory barriers in this driver.  The readq()/writeq() functions add
 * an explicit ordering operation which in this case is redundant, and only
 * adds overhead.
 */
/* Register read/write APIs */
void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val)
{
	writeq_relaxed(val, nic->reg_base + offset);
}

u64 nicvf_reg_read(struct nicvf *nic, u64 offset)
{
	return readq_relaxed(nic->reg_base + offset);
}

void nicvf_queue_reg_write(struct nicvf *nic, u64 offset,
			   u64 qidx, u64 val)
{
	void __iomem *addr = nic->reg_base + offset;

	writeq_relaxed(val, addr + (qidx << NIC_Q_NUM_SHIFT));
}

u64 nicvf_queue_reg_read(struct nicvf *nic, u64 offset, u64 qidx)
{
	void __iomem *addr = nic->reg_base + offset;

	return readq_relaxed(addr + (qidx << NIC_Q_NUM_SHIFT));
}
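/* Per-queue registers are laid out as one copy per queue, so the effective
 * address is reg_base + offset + (qidx << NIC_Q_NUM_SHIFT).  For example,
 * assuming NIC_Q_NUM_SHIFT is 18 as defined in nic_reg.h, queue 3 of a
 * register at offset 0x010 lives at reg_base + 0x010 + (3 << 18).
 */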
/* VF -> PF mailbox communication */
static void nicvf_write_to_mbx(struct nicvf *nic, union nic_mbx *mbx)
{
	u64 *msg = (u64 *)mbx;

	nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 0, msg[0]);
	nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 8, msg[1]);
}
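/* The mailbox is a single 16-byte window accessed as two u64 words at
 * NIC_VF_PF_MAILBOX_0_1 + 0 and + 8.  On the PF->VF side the matching PF
 * code notes that writing the second word is what raises the interrupt;
 * the ordering here mirrors that assumption: payload word first, then the
 * word that completes the message.
 */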
int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
{
	int timeout = NIC_MBOX_MSG_TIMEOUT;
	int sleep = 10;

	nic->pf_acked = false;
	nic->pf_nacked = false;

	nicvf_write_to_mbx(nic, mbx);

	/* Wait for previous message to be acked, timeout 2sec */
	while (!nic->pf_acked) {
		if (nic->pf_nacked)
			return -EINVAL;
		msleep(sleep);
		if (nic->pf_acked)
			break;
		timeout -= sleep;
		if (!timeout) {
			netdev_err(nic->netdev,
				   "PF didn't ack to mbox msg %d from VF%d\n",
				   (mbx->msg.msg & 0xFF), nic->vf_id);
			return -EBUSY;
		}
	}
	return 0;
}
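/* The pf_acked/pf_nacked flags polled above are set asynchronously by
 * nicvf_handle_mbx_intr() when the PF's reply interrupt arrives.  Assuming
 * NIC_MBOX_MSG_TIMEOUT is 2000 (ms), as the "timeout 2sec" comment
 * suggests, a silent PF is reported after roughly 200 of the 10 ms sleeps.
 */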
/* Checks if VF is able to communicate with PF
 * and also gets the VNIC number this VF is associated to.
 */
static int nicvf_check_pf_ready(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_READY;
	if (nicvf_send_msg_to_pf(nic, &mbx)) {
		netdev_err(nic->netdev,
			   "PF didn't respond to READY msg\n");
		return 0;
	}

	return 1;
}
static void nicvf_read_bgx_stats(struct nicvf *nic, struct bgx_stats_msg *bgx)
{
	if (bgx->rx)
		nic->bgx_stats.rx_stats[bgx->idx] = bgx->stats;
	else
		nic->bgx_stats.tx_stats[bgx->idx] = bgx->stats;
}
static void nicvf_handle_mbx_intr(struct nicvf *nic)
{
	union nic_mbx mbx = {};
	u64 *mbx_data;
	u64 mbx_addr;
	int i;

	mbx_addr = NIC_VF_PF_MAILBOX_0_1;
	mbx_data = (u64 *)&mbx;

	for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
		*mbx_data = nicvf_reg_read(nic, mbx_addr);
		mbx_data++;
		mbx_addr += sizeof(u64);
	}

	netdev_dbg(nic->netdev, "Mbox message: msg: 0x%x\n", mbx.msg.msg);
	switch (mbx.msg.msg) {
	case NIC_MBOX_MSG_READY:
		nic->pf_acked = true;
		nic->vf_id = mbx.nic_cfg.vf_id & 0x7F;
		nic->tns_mode = mbx.nic_cfg.tns_mode & 0x7F;
		nic->node = mbx.nic_cfg.node_id;
		if (!nic->set_mac_pending)
			ether_addr_copy(nic->netdev->dev_addr,
					mbx.nic_cfg.mac_addr);
		nic->link_up = false;
		nic->duplex = 0;
		nic->speed = 0;
		break;
	case NIC_MBOX_MSG_ACK:
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_NACK:
		nic->pf_nacked = true;
		break;
	case NIC_MBOX_MSG_RSS_SIZE:
		nic->rss_info.rss_size = mbx.rss_size.ind_tbl_size;
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_BGX_STATS:
		nicvf_read_bgx_stats(nic, &mbx.bgx_stats);
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_BGX_LINK_CHANGE:
		nic->pf_acked = true;
		nic->link_up = mbx.link_status.link_up;
		nic->duplex = mbx.link_status.duplex;
		nic->speed = mbx.link_status.speed;
		if (nic->link_up) {
			netdev_info(nic->netdev, "%s: Link is Up %d Mbps %s\n",
				    nic->netdev->name, nic->speed,
				    nic->duplex == DUPLEX_FULL ?
				    "Full duplex" : "Half duplex");
			netif_carrier_on(nic->netdev);
			netif_tx_start_all_queues(nic->netdev);
		} else {
			netdev_info(nic->netdev, "%s: Link is Down\n",
				    nic->netdev->name);
			netif_carrier_off(nic->netdev);
			netif_tx_stop_all_queues(nic->netdev);
		}
		break;
	default:
		netdev_err(nic->netdev,
			   "Invalid message from PF, msg 0x%x\n", mbx.msg.msg);
		break;
	}
	nicvf_clear_intr(nic, NICVF_INTR_MBOX, 0);
}
static int nicvf_hw_set_mac_addr(struct nicvf *nic, struct net_device *netdev)
{
	union nic_mbx mbx = {};

	mbx.mac.msg = NIC_MBOX_MSG_SET_MAC;
	mbx.mac.vf_id = nic->vf_id;
	ether_addr_copy(mbx.mac.mac_addr, netdev->dev_addr);

	return nicvf_send_msg_to_pf(nic, &mbx);
}
static void nicvf_config_cpi(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.cpi_cfg.msg = NIC_MBOX_MSG_CPI_CFG;
	mbx.cpi_cfg.vf_id = nic->vf_id;
	mbx.cpi_cfg.cpi_alg = nic->cpi_alg;
	mbx.cpi_cfg.rq_cnt = nic->qs->rq_cnt;

	nicvf_send_msg_to_pf(nic, &mbx);
}
static void nicvf_get_rss_size(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE;
	mbx.rss_size.vf_id = nic->vf_id;
	nicvf_send_msg_to_pf(nic, &mbx);
}
void nicvf_config_rss(struct nicvf *nic)
{
	union nic_mbx mbx = {};
	struct nicvf_rss_info *rss = &nic->rss_info;
	int ind_tbl_len = rss->rss_size;
	int i, nextq = 0;

	mbx.rss_cfg.vf_id = nic->vf_id;
	mbx.rss_cfg.hash_bits = rss->hash_bits;
	while (ind_tbl_len) {
		mbx.rss_cfg.tbl_offset = nextq;
		mbx.rss_cfg.tbl_len = min(ind_tbl_len,
					  RSS_IND_TBL_LEN_PER_MBX_MSG);
		mbx.rss_cfg.msg = mbx.rss_cfg.tbl_offset ?
			  NIC_MBOX_MSG_RSS_CFG_CONT : NIC_MBOX_MSG_RSS_CFG;

		for (i = 0; i < mbx.rss_cfg.tbl_len; i++)
			mbx.rss_cfg.ind_tbl[i] = rss->ind_tbl[nextq++];

		nicvf_send_msg_to_pf(nic, &mbx);

		ind_tbl_len -= mbx.rss_cfg.tbl_len;
	}
}
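/* The indirection table can be larger than what fits in one mailbox
 * message, so it is streamed in chunks of RSS_IND_TBL_LEN_PER_MBX_MSG
 * entries: the first chunk goes out as NIC_MBOX_MSG_RSS_CFG and every
 * follow-on chunk (tbl_offset != 0) as NIC_MBOX_MSG_RSS_CFG_CONT.
 */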
void nicvf_set_rss_key(struct nicvf *nic)
{
	struct nicvf_rss_info *rss = &nic->rss_info;
	u64 key_addr = NIC_VNIC_RSS_KEY_0_4;
	int idx;

	for (idx = 0; idx < RSS_HASH_KEY_SIZE; idx++) {
		nicvf_reg_write(nic, key_addr, rss->key[idx]);
		key_addr += sizeof(u64);
	}
}
static int nicvf_rss_init(struct nicvf *nic)
{
	struct nicvf_rss_info *rss = &nic->rss_info;
	int idx;

	nicvf_get_rss_size(nic);

	if (cpi_alg != CPI_ALG_NONE) {
		rss->enable = false;
		rss->hash_bits = 0;
		return 0;
	}

	rss->enable = true;

	/* Using the HW reset value for now */
	rss->key[0] = 0xFEED0BADFEED0BADULL;
	rss->key[1] = 0xFEED0BADFEED0BADULL;
	rss->key[2] = 0xFEED0BADFEED0BADULL;
	rss->key[3] = 0xFEED0BADFEED0BADULL;
	rss->key[4] = 0xFEED0BADFEED0BADULL;

	nicvf_set_rss_key(nic);

	rss->cfg = RSS_IP_HASH_ENA | RSS_TCP_HASH_ENA | RSS_UDP_HASH_ENA;
	nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, rss->cfg);

	rss->hash_bits = ilog2(rounddown_pow_of_two(rss->rss_size));

	for (idx = 0; idx < rss->rss_size; idx++)
		rss->ind_tbl[idx] = ethtool_rxfh_indir_default(idx,
							       nic->qs->rq_cnt);
	nicvf_config_rss(nic);
	return 1;
}
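/* ethtool_rxfh_indir_default(idx, n) is simply idx % n, so with e.g. a
 * 64-entry indirection table and 8 receive queues the table becomes
 * 0,1,...,7,0,1,... spreading flows round-robin across the RQs until
 * userspace overrides it via ethtool -X.
 */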
int nicvf_set_real_num_queues(struct net_device *netdev,
			      int tx_queues, int rx_queues)
{
	int err = 0;

	err = netif_set_real_num_tx_queues(netdev, tx_queues);
	if (err) {
		netdev_err(netdev,
			   "Failed to set no of Tx queues: %d\n", tx_queues);
		return err;
	}

	err = netif_set_real_num_rx_queues(netdev, rx_queues);
	if (err)
		netdev_err(netdev,
			   "Failed to set no of Rx queues: %d\n", rx_queues);
	return err;
}
static int nicvf_init_resources(struct nicvf *nic)
{
	int err;
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;

	/* Enable Qset */
	nicvf_qset_config(nic, true);

	/* Initialize queues and HW for data transfer */
	err = nicvf_config_data_transfer(nic, true);
	if (err) {
		netdev_err(nic->netdev,
			   "Failed to alloc/config VF's QSet resources\n");
		return err;
	}

	/* Send VF config done msg to PF */
	nicvf_write_to_mbx(nic, &mbx);

	return 0;
}
static void nicvf_snd_pkt_handler(struct net_device *netdev,
				  struct cmp_queue *cq,
				  struct cqe_send_t *cqe_tx, int cqe_type)
{
	struct sk_buff *skb = NULL;
	struct nicvf *nic = netdev_priv(netdev);
	struct snd_queue *sq;
	struct sq_hdr_subdesc *hdr;

	sq = &nic->qs->sq[cqe_tx->sq_idx];

	hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, cqe_tx->sqe_ptr);
	if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER)
		return;

	netdev_dbg(nic->netdev,
		   "%s Qset #%d SQ #%d SQ ptr #%d subdesc count %d\n",
		   __func__, cqe_tx->sq_qs, cqe_tx->sq_idx,
		   cqe_tx->sqe_ptr, hdr->subdesc_cnt);

	nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
	nicvf_check_cqe_tx_errs(nic, cq, cqe_tx);
	skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr];
	/* For TSO offloaded packets only one head SKB needs to be freed */
	if (skb) {
		prefetch(skb);
		dev_consume_skb_any(skb);
		sq->skbuff[cqe_tx->sqe_ptr] = (u64)NULL;
	}
}
static inline void nicvf_set_rxhash(struct net_device *netdev,
				    struct cqe_rx_t *cqe_rx,
				    struct sk_buff *skb)
{
	u8 hash_type;
	u32 hash;

	if (!(netdev->features & NETIF_F_RXHASH))
		return;

	switch (cqe_rx->rss_alg) {
	case RSS_ALG_TCP_IP:
	case RSS_ALG_UDP_IP:
		hash_type = PKT_HASH_TYPE_L4;
		hash = cqe_rx->rss_tag;
		break;
	case RSS_ALG_IP:
		hash_type = PKT_HASH_TYPE_L3;
		hash = cqe_rx->rss_tag;
		break;
	default:
		hash_type = PKT_HASH_TYPE_NONE;
		hash = 0;
	}

	skb_set_hash(skb, hash, hash_type);
}
static void nicvf_rcv_pkt_handler(struct net_device *netdev,
				  struct napi_struct *napi,
				  struct cmp_queue *cq,
				  struct cqe_rx_t *cqe_rx, int cqe_type)
{
	struct sk_buff *skb;
	struct nicvf *nic = netdev_priv(netdev);
	int err = 0;

	/* Check for errors */
	err = nicvf_check_cqe_rx_errs(nic, cq, cqe_rx);
	if (err && !cqe_rx->rb_cnt)
		return;

	skb = nicvf_get_rcv_skb(nic, cqe_rx);
	if (!skb) {
		netdev_dbg(nic->netdev, "Packet not received\n");
		return;
	}

	if (netif_msg_pktdata(nic)) {
		netdev_info(nic->netdev, "%s: skb 0x%p, len=%d\n", netdev->name,
			    skb, skb->len);
		print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1,
			       skb->data, skb->len, true);
	}

	/* If error packet, drop it here */
	if (err) {
		dev_kfree_skb_any(skb);
		return;
	}

	nicvf_set_rx_frame_cnt(nic, skb);

	nicvf_set_rxhash(netdev, cqe_rx, skb);

	skb_record_rx_queue(skb, cqe_rx->rq_idx);
	if (netdev->hw_features & NETIF_F_RXCSUM) {
		/* HW by default verifies TCP/UDP/SCTP checksums */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else {
		skb_checksum_none_assert(skb);
	}

	skb->protocol = eth_type_trans(skb, netdev);

	/* Check for stripped VLAN */
	if (cqe_rx->vlan_found && cqe_rx->vlan_stripped)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       ntohs((__force __be16)cqe_rx->vlan_tci));

	if (napi && (netdev->features & NETIF_F_GRO))
		napi_gro_receive(napi, skb);
	else
		netif_receive_skb(skb);
}
static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx,
				 struct napi_struct *napi, int budget)
{
	int processed_cqe, work_done = 0, tx_done = 0;
	int cqe_count, cqe_head;
	struct nicvf *nic = netdev_priv(netdev);
	struct queue_set *qs = nic->qs;
	struct cmp_queue *cq = &qs->cq[cq_idx];
	struct cqe_rx_t *cq_desc;
	struct netdev_queue *txq;

	spin_lock_bh(&cq->lock);
loop:
	processed_cqe = 0;
	/* Get no of valid CQ entries to process */
	cqe_count = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, cq_idx);
	cqe_count &= CQ_CQE_COUNT;
	if (!cqe_count)
		goto done;

	/* Get head of the valid CQ entries */
	cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9;
	cqe_head &= 0xFFFF;

	netdev_dbg(nic->netdev, "%s CQ%d cqe_count %d cqe_head %d\n",
		   __func__, cq_idx, cqe_count, cqe_head);
	while (processed_cqe < cqe_count) {
		/* Get the CQ descriptor */
		cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head);
		cqe_head++;
		cqe_head &= (cq->dmem.q_len - 1);
		/* Initiate prefetch for next descriptor */
		prefetch((struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head));

		if ((work_done >= budget) && napi &&
		    (cq_desc->cqe_type != CQE_TYPE_SEND)) {
			break;
		}

		netdev_dbg(nic->netdev, "CQ%d cq_desc->cqe_type %d\n",
			   cq_idx, cq_desc->cqe_type);
		switch (cq_desc->cqe_type) {
		case CQE_TYPE_RX:
			nicvf_rcv_pkt_handler(netdev, napi, cq,
					      cq_desc, CQE_TYPE_RX);
			work_done++;
			break;
		case CQE_TYPE_SEND:
			nicvf_snd_pkt_handler(netdev, cq,
					      (void *)cq_desc, CQE_TYPE_SEND);
			tx_done++;
			break;
		case CQE_TYPE_INVALID:
		case CQE_TYPE_RX_SPLIT:
		case CQE_TYPE_RX_TCP:
		case CQE_TYPE_SEND_PTP:
			/* Ignore for now */
			break;
		}
		processed_cqe++;
	}
	netdev_dbg(nic->netdev,
		   "%s CQ%d processed_cqe %d work_done %d budget %d\n",
		   __func__, cq_idx, processed_cqe, work_done, budget);

	/* Ring doorbell to inform H/W to reuse processed CQEs */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR,
			      cq_idx, processed_cqe);

	if ((work_done < budget) && napi)
		goto loop;

done:
	/* Wakeup TXQ if its stopped earlier due to SQ full */
	if (tx_done) {
		txq = netdev_get_tx_queue(netdev, cq_idx);
		if (netif_tx_queue_stopped(txq)) {
			netif_tx_start_queue(txq);
			nic->drv_stats.txq_wake++;
			if (netif_msg_tx_err(nic))
				netdev_warn(netdev,
					    "%s: Transmit queue wakeup SQ%d\n",
					    netdev->name, cq_idx);
		}
	}

	spin_unlock_bh(&cq->lock);
	return work_done;
}
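/* A note on the shifts above: CQ_CQE_COUNT masks the valid-entry count out
 * of the status register, and the head register is converted to a CQE index
 * with ">> 9", which assumes the hardware's 512-byte completion queue entry
 * size.  The "& (cq->dmem.q_len - 1)" wrap requires q_len to be a power of
 * two.
 */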
static int nicvf_poll(struct napi_struct *napi, int budget)
{
	u64 cq_head;
	int work_done = 0;
	struct net_device *netdev = napi->dev;
	struct nicvf *nic = netdev_priv(netdev);
	struct nicvf_cq_poll *cq;

	cq = container_of(napi, struct nicvf_cq_poll, napi);
	work_done = nicvf_cq_intr_handler(netdev, cq->cq_idx, napi, budget);

	if (work_done < budget) {
		/* Slow packet rate, exit polling */
		napi_complete(napi);
		/* Re-enable interrupts */
		cq_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD,
					       cq->cq_idx);
		nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->cq_idx);
		nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_HEAD,
				      cq->cq_idx, cq_head);
		nicvf_enable_intr(nic, NICVF_INTR_CQ, cq->cq_idx);
	}
	return work_done;
}
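/* Re-arming sequence sketch (our reading of the code above): writing the
 * current head back to NIC_QSET_CQ_0_7_HEAD after clearing the interrupt
 * makes the hardware re-evaluate the queue state, so a CQE that landed
 * between the final poll and napi_complete() still raises a fresh interrupt
 * instead of being silently lost.
 */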
/* Qset error interrupt handler
 *
 * As of now only CQ errors are handled
 */
static void nicvf_handle_qs_err(unsigned long data)
{
	struct nicvf *nic = (struct nicvf *)data;
	struct queue_set *qs = nic->qs;
	int qidx;
	u64 status;

	netif_tx_disable(nic->netdev);

	/* Check if it is CQ err */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		status = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS,
					      qidx);
		if (!(status & CQ_ERR_MASK))
			continue;
		/* Process already queued CQEs and reconfig CQ */
		nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
		nicvf_sq_disable(nic, qidx);
		nicvf_cq_intr_handler(nic->netdev, qidx, NULL, 0);
		nicvf_cmp_queue_config(nic, qs, qidx, true);
		nicvf_sq_free_used_descs(nic->netdev, &qs->sq[qidx], qidx);
		nicvf_sq_enable(nic, &qs->sq[qidx], qidx);

		nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);
	}

	netif_tx_start_all_queues(nic->netdev);
	/* Re-enable Qset error interrupt */
	nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);
}
static void nicvf_dump_intr_status(struct nicvf *nic)
{
	if (netif_msg_intr(nic))
		netdev_info(nic->netdev, "%s: interrupt status 0x%llx\n",
			    nic->netdev->name, nicvf_reg_read(nic, NIC_VF_INT));
}
static irqreturn_t nicvf_misc_intr_handler(int irq, void *nicvf_irq)
{
	struct nicvf *nic = (struct nicvf *)nicvf_irq;
	u64 intr;

	nicvf_dump_intr_status(nic);

	intr = nicvf_reg_read(nic, NIC_VF_INT);
	/* Check for spurious interrupt */
	if (!(intr & NICVF_INTR_MBOX_MASK))
		return IRQ_HANDLED;

	nicvf_handle_mbx_intr(nic);

	return IRQ_HANDLED;
}
static irqreturn_t nicvf_intr_handler(int irq, void *cq_irq)
{
	struct nicvf_cq_poll *cq_poll = (struct nicvf_cq_poll *)cq_irq;
	struct nicvf *nic = cq_poll->nicvf;
	int qidx = cq_poll->cq_idx;

	nicvf_dump_intr_status(nic);

	/* Disable interrupts */
	nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);

	/* Schedule NAPI */
	napi_schedule(&cq_poll->napi);

	/* Clear interrupt */
	nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx);

	return IRQ_HANDLED;
}
nicvf_rbdr_intr_handler(int irq
, void *nicvf_irq
)
702 struct nicvf
*nic
= (struct nicvf
*)nicvf_irq
;
706 nicvf_dump_intr_status(nic
);
708 /* Disable RBDR interrupt and schedule softirq */
709 for (qidx
= 0; qidx
< nic
->qs
->rbdr_cnt
; qidx
++) {
710 if (!nicvf_is_intr_enabled(nic
, NICVF_INTR_RBDR
, qidx
))
712 nicvf_disable_intr(nic
, NICVF_INTR_RBDR
, qidx
);
713 tasklet_hi_schedule(&nic
->rbdr_task
);
714 /* Clear interrupt */
715 nicvf_clear_intr(nic
, NICVF_INTR_RBDR
, qidx
);
static irqreturn_t nicvf_qs_err_intr_handler(int irq, void *nicvf_irq)
{
	struct nicvf *nic = (struct nicvf *)nicvf_irq;

	nicvf_dump_intr_status(nic);

	/* Disable Qset err interrupt and schedule softirq */
	nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
	tasklet_hi_schedule(&nic->qs_err_task);
	nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0);

	return IRQ_HANDLED;
}
static int nicvf_enable_msix(struct nicvf *nic)
{
	int ret, vec;

	nic->num_vec = NIC_VF_MSIX_VECTORS;

	for (vec = 0; vec < nic->num_vec; vec++)
		nic->msix_entries[vec].entry = vec;

	ret = pci_enable_msix(nic->pdev, nic->msix_entries, nic->num_vec);
	if (ret) {
		netdev_err(nic->netdev,
			   "Req for #%d msix vectors failed\n", nic->num_vec);
		return 0;
	}
	nic->msix_enabled = 1;
	return 1;
}
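/* pci_enable_msix() here is all-or-nothing: a positive return value means
 * fewer than nic->num_vec vectors were available, and the driver treats any
 * nonzero result as failure rather than retrying with a smaller count
 * (pci_enable_msix_range() would be the usual way to negotiate that).
 */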
static void nicvf_disable_msix(struct nicvf *nic)
{
	if (nic->msix_enabled) {
		pci_disable_msix(nic->pdev);
		nic->msix_enabled = 0;
		nic->num_vec = 0;
	}
}
static int nicvf_register_interrupts(struct nicvf *nic)
{
	int irq, ret = 0;
	int vector;

	for_each_cq_irq(irq)
		sprintf(nic->irq_name[irq], "NICVF%d CQ%d",
			nic->vf_id, irq);

	for_each_sq_irq(irq)
		sprintf(nic->irq_name[irq], "NICVF%d SQ%d",
			nic->vf_id, irq - NICVF_INTR_ID_SQ);

	for_each_rbdr_irq(irq)
		sprintf(nic->irq_name[irq], "NICVF%d RBDR%d",
			nic->vf_id, irq - NICVF_INTR_ID_RBDR);

	/* Register CQ interrupts */
	for (irq = 0; irq < nic->qs->cq_cnt; irq++) {
		vector = nic->msix_entries[irq].vector;
		ret = request_irq(vector, nicvf_intr_handler,
				  0, nic->irq_name[irq], nic->napi[irq]);
		if (ret)
			goto err;
		nic->irq_allocated[irq] = true;
	}

	/* Register RBDR interrupt */
	for (irq = NICVF_INTR_ID_RBDR;
	     irq < (NICVF_INTR_ID_RBDR + nic->qs->rbdr_cnt); irq++) {
		vector = nic->msix_entries[irq].vector;
		ret = request_irq(vector, nicvf_rbdr_intr_handler,
				  0, nic->irq_name[irq], nic);
		if (ret)
			goto err;
		nic->irq_allocated[irq] = true;
	}

	/* Register QS error interrupt */
	sprintf(nic->irq_name[NICVF_INTR_ID_QS_ERR],
		"NICVF%d Qset error", nic->vf_id);
	irq = NICVF_INTR_ID_QS_ERR;
	ret = request_irq(nic->msix_entries[irq].vector,
			  nicvf_qs_err_intr_handler,
			  0, nic->irq_name[irq], nic);
	if (!ret)
		nic->irq_allocated[irq] = true;

err:
	if (ret)
		netdev_err(nic->netdev, "request_irq failed, vector %d\n", irq);

	return ret;
}
static void nicvf_unregister_interrupts(struct nicvf *nic)
{
	int irq;

	/* Free registered interrupts */
	for (irq = 0; irq < nic->num_vec; irq++) {
		if (!nic->irq_allocated[irq])
			continue;

		if (irq < NICVF_INTR_ID_SQ)
			free_irq(nic->msix_entries[irq].vector, nic->napi[irq]);
		else
			free_irq(nic->msix_entries[irq].vector, nic);

		nic->irq_allocated[irq] = false;
	}

	/* Disable MSI-X */
	nicvf_disable_msix(nic);
}
/* Initialize MSIX vectors and register MISC interrupt.
 * Send READY message to PF to check if it's alive
 */
static int nicvf_register_misc_interrupt(struct nicvf *nic)
{
	int ret = 0;
	int irq = NICVF_INTR_ID_MISC;

	/* Return if mailbox interrupt is already registered */
	if (nic->msix_enabled)
		return 0;

	/* Enable MSI-X */
	if (!nicvf_enable_msix(nic))
		return 1;

	sprintf(nic->irq_name[irq], "%s Mbox", "NICVF");
	/* Register Misc interrupt */
	ret = request_irq(nic->msix_entries[irq].vector,
			  nicvf_misc_intr_handler, 0, nic->irq_name[irq], nic);

	if (ret)
		return ret;
	nic->irq_allocated[irq] = true;

	/* Enable mailbox interrupt */
	nicvf_enable_intr(nic, NICVF_INTR_MBOX, 0);

	/* Check if VF is able to communicate with PF */
	if (!nicvf_check_pf_ready(nic)) {
		nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
		nicvf_unregister_interrupts(nic);
		return 1;
	}

	return 0;
}
static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct nicvf *nic = netdev_priv(netdev);
	int qid = skb_get_queue_mapping(skb);
	struct netdev_queue *txq = netdev_get_tx_queue(netdev, qid);

	/* Check for minimum packet length */
	if (skb->len <= ETH_HLEN) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (!netif_tx_queue_stopped(txq) && !nicvf_sq_append_skb(nic, skb)) {
		netif_tx_stop_queue(txq);
		nic->drv_stats.txq_stop++;
		if (netif_msg_tx_err(nic))
			netdev_warn(netdev,
				    "%s: Transmit ring full, stopping SQ%d\n",
				    netdev->name, qid);

		return NETDEV_TX_BUSY;
	}

	return NETDEV_TX_OK;
}
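/* Returning NETDEV_TX_BUSY makes the core requeue the skb, so it must not be
 * freed on that path; the queue is stopped first, and the matching
 * netif_tx_start_queue() happens in nicvf_cq_intr_handler() once transmit
 * completions have freed up SQ descriptors.
 */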
static inline void nicvf_free_cq_poll(struct nicvf *nic)
{
	struct nicvf_cq_poll *cq_poll;
	int qidx;

	for (qidx = 0; qidx < nic->qs->cq_cnt; qidx++) {
		cq_poll = nic->napi[qidx];
		if (!cq_poll)
			continue;
		nic->napi[qidx] = NULL;
		kfree(cq_poll);
	}
}
int nicvf_stop(struct net_device *netdev)
{
	int irq, qidx;
	struct nicvf *nic = netdev_priv(netdev);
	struct queue_set *qs = nic->qs;
	struct nicvf_cq_poll *cq_poll = NULL;
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN;
	nicvf_send_msg_to_pf(nic, &mbx);

	netif_carrier_off(netdev);

	/* Disable RBDR & QS error interrupts */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
		nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
		nicvf_clear_intr(nic, NICVF_INTR_RBDR, qidx);
	}
	nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
	nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0);

	/* Wait for pending IRQ handlers to finish */
	for (irq = 0; irq < nic->num_vec; irq++)
		synchronize_irq(nic->msix_entries[irq].vector);

	tasklet_kill(&nic->rbdr_task);
	tasklet_kill(&nic->qs_err_task);
	if (nic->rb_work_scheduled)
		cancel_delayed_work_sync(&nic->rbdr_work);

	for (qidx = 0; qidx < nic->qs->cq_cnt; qidx++) {
		cq_poll = nic->napi[qidx];
		if (!cq_poll)
			continue;
		napi_synchronize(&cq_poll->napi);
		/* CQ intr is enabled while napi_complete,
		 * so disable it now
		 */
		nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
		nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx);
		napi_disable(&cq_poll->napi);
		netif_napi_del(&cq_poll->napi);
	}

	netif_tx_disable(netdev);

	/* Free resources */
	nicvf_config_data_transfer(nic, false);

	/* Disable HW Qset */
	nicvf_qset_config(nic, false);

	/* disable mailbox interrupt */
	nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);

	nicvf_unregister_interrupts(nic);

	nicvf_free_cq_poll(nic);

	return 0;
}
int nicvf_open(struct net_device *netdev)
{
	int err, qidx;
	struct nicvf *nic = netdev_priv(netdev);
	struct queue_set *qs = nic->qs;
	struct nicvf_cq_poll *cq_poll = NULL;

	nic->mtu = netdev->mtu;

	netif_carrier_off(netdev);

	err = nicvf_register_misc_interrupt(nic);
	if (err)
		return err;

	/* Register NAPI handler for processing CQEs */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		cq_poll = kzalloc(sizeof(*cq_poll), GFP_KERNEL);
		if (!cq_poll) {
			err = -ENOMEM;
			goto napi_del;
		}
		cq_poll->cq_idx = qidx;
		cq_poll->nicvf = nic;
		netif_napi_add(netdev, &cq_poll->napi, nicvf_poll,
			       NAPI_POLL_WEIGHT);
		napi_enable(&cq_poll->napi);
		nic->napi[qidx] = cq_poll;
	}

	/* Check if we got MAC address from PF or else generate a random MAC */
	if (is_zero_ether_addr(netdev->dev_addr)) {
		eth_hw_addr_random(netdev);
		nicvf_hw_set_mac_addr(nic, netdev);
	}

	if (nic->set_mac_pending) {
		nic->set_mac_pending = false;
		nicvf_hw_set_mac_addr(nic, netdev);
	}

	/* Init tasklet for handling Qset err interrupt */
	tasklet_init(&nic->qs_err_task, nicvf_handle_qs_err,
		     (unsigned long)nic);

	/* Init RBDR tasklet which will refill RBDR */
	tasklet_init(&nic->rbdr_task, nicvf_rbdr_task,
		     (unsigned long)nic);
	INIT_DELAYED_WORK(&nic->rbdr_work, nicvf_rbdr_work);

	/* Configure CPI algorithm */
	nic->cpi_alg = cpi_alg;
	nicvf_config_cpi(nic);

	/* Configure receive side scaling */
	nicvf_rss_init(nic);

	err = nicvf_register_interrupts(nic);
	if (err)
		goto cleanup;

	/* Initialize the queues */
	err = nicvf_init_resources(nic);
	if (err)
		goto cleanup;

	/* Make sure queue initialization is written */
	wmb();

	nicvf_reg_write(nic, NIC_VF_INT, -1);
	/* Enable Qset err interrupt */
	nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);

	/* Enable completion queue interrupt */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++)
		nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);

	/* Enable RBDR threshold interrupt */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
		nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx);

	nic->drv_stats.txq_stop = 0;
	nic->drv_stats.txq_wake = 0;

	netif_carrier_on(netdev);
	netif_tx_start_all_queues(netdev);

	return 0;
cleanup:
	nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
	nicvf_unregister_interrupts(nic);
	tasklet_kill(&nic->qs_err_task);
	tasklet_kill(&nic->rbdr_task);
napi_del:
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		cq_poll = nic->napi[qidx];
		if (!cq_poll)
			continue;
		napi_disable(&cq_poll->napi);
		netif_napi_del(&cq_poll->napi);
	}
	nicvf_free_cq_poll(nic);
	return err;
}
static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu)
{
	union nic_mbx mbx = {};

	mbx.frs.msg = NIC_MBOX_MSG_SET_MAX_FRS;
	mbx.frs.max_frs = mtu;
	mbx.frs.vf_id = nic->vf_id;

	return nicvf_send_msg_to_pf(nic, &mbx);
}
static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct nicvf *nic = netdev_priv(netdev);

	if (new_mtu > NIC_HW_MAX_FRS)
		return -EINVAL;

	if (new_mtu < NIC_HW_MIN_FRS)
		return -EINVAL;

	if (nicvf_update_hw_max_frs(nic, new_mtu))
		return -EINVAL;
	netdev->mtu = new_mtu;
	nic->mtu = new_mtu;

	return 0;
}
static int nicvf_set_mac_address(struct net_device *netdev, void *p)
{
	struct sockaddr *addr = p;
	struct nicvf *nic = netdev_priv(netdev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	if (nic->msix_enabled) {
		if (nicvf_hw_set_mac_addr(nic, netdev))
			return -EBUSY;
	} else {
		nic->set_mac_pending = true;
	}

	return 0;
}
void nicvf_update_lmac_stats(struct nicvf *nic)
{
	int stat = 0;
	union nic_mbx mbx = {};

	if (!netif_running(nic->netdev))
		return;

	mbx.bgx_stats.msg = NIC_MBOX_MSG_BGX_STATS;
	mbx.bgx_stats.vf_id = nic->vf_id;
	/* Rx stats */
	mbx.bgx_stats.rx = 1;
	while (stat < BGX_RX_STATS_COUNT) {
		mbx.bgx_stats.idx = stat;
		if (nicvf_send_msg_to_pf(nic, &mbx))
			return;
		stat++;
	}

	stat = 0;

	/* Tx stats */
	mbx.bgx_stats.rx = 0;
	while (stat < BGX_TX_STATS_COUNT) {
		mbx.bgx_stats.idx = stat;
		if (nicvf_send_msg_to_pf(nic, &mbx))
			return;
		stat++;
	}
}
void nicvf_update_stats(struct nicvf *nic)
{
	int qidx;
	struct nicvf_hw_stats *stats = &nic->hw_stats;
	struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
	struct queue_set *qs = nic->qs;

#define GET_RX_STATS(reg) \
	nicvf_reg_read(nic, NIC_VNIC_RX_STAT_0_13 | (reg << 3))
#define GET_TX_STATS(reg) \
	nicvf_reg_read(nic, NIC_VNIC_TX_STAT_0_4 | (reg << 3))

	stats->rx_bytes = GET_RX_STATS(RX_OCTS);
	stats->rx_ucast_frames = GET_RX_STATS(RX_UCAST);
	stats->rx_bcast_frames = GET_RX_STATS(RX_BCAST);
	stats->rx_mcast_frames = GET_RX_STATS(RX_MCAST);
	stats->rx_fcs_errors = GET_RX_STATS(RX_FCS);
	stats->rx_l2_errors = GET_RX_STATS(RX_L2ERR);
	stats->rx_drop_red = GET_RX_STATS(RX_RED);
	stats->rx_drop_red_bytes = GET_RX_STATS(RX_RED_OCTS);
	stats->rx_drop_overrun = GET_RX_STATS(RX_ORUN);
	stats->rx_drop_overrun_bytes = GET_RX_STATS(RX_ORUN_OCTS);
	stats->rx_drop_bcast = GET_RX_STATS(RX_DRP_BCAST);
	stats->rx_drop_mcast = GET_RX_STATS(RX_DRP_MCAST);
	stats->rx_drop_l3_bcast = GET_RX_STATS(RX_DRP_L3BCAST);
	stats->rx_drop_l3_mcast = GET_RX_STATS(RX_DRP_L3MCAST);

	stats->tx_bytes_ok = GET_TX_STATS(TX_OCTS);
	stats->tx_ucast_frames_ok = GET_TX_STATS(TX_UCAST);
	stats->tx_bcast_frames_ok = GET_TX_STATS(TX_BCAST);
	stats->tx_mcast_frames_ok = GET_TX_STATS(TX_MCAST);
	stats->tx_drops = GET_TX_STATS(TX_DROP);

	drv_stats->tx_frames_ok = stats->tx_ucast_frames_ok +
				  stats->tx_bcast_frames_ok +
				  stats->tx_mcast_frames_ok;
	drv_stats->rx_drops = stats->rx_drop_red +
			      stats->rx_drop_overrun;
	drv_stats->tx_drops = stats->tx_drops;

	/* Update RQ and SQ stats */
	for (qidx = 0; qidx < qs->rq_cnt; qidx++)
		nicvf_update_rq_stats(nic, qidx);
	for (qidx = 0; qidx < qs->sq_cnt; qidx++)
		nicvf_update_sq_stats(nic, qidx);
}
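/* The "reg << 3" in the GET_RX_STATS()/GET_TX_STATS() macros above turns a
 * statistic index into a byte offset, i.e. the hardware exposes these
 * counters as an array of 8-byte registers starting at NIC_VNIC_RX_STAT_0_13
 * and NIC_VNIC_TX_STAT_0_4 respectively.
 */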
static struct rtnl_link_stats64 *nicvf_get_stats64(struct net_device *netdev,
					    struct rtnl_link_stats64 *stats)
{
	struct nicvf *nic = netdev_priv(netdev);
	struct nicvf_hw_stats *hw_stats = &nic->hw_stats;
	struct nicvf_drv_stats *drv_stats = &nic->drv_stats;

	nicvf_update_stats(nic);

	stats->rx_bytes = hw_stats->rx_bytes;
	stats->rx_packets = drv_stats->rx_frames_ok;
	stats->rx_dropped = drv_stats->rx_drops;
	stats->multicast = hw_stats->rx_mcast_frames;

	stats->tx_bytes = hw_stats->tx_bytes_ok;
	stats->tx_packets = drv_stats->tx_frames_ok;
	stats->tx_dropped = drv_stats->tx_drops;

	return stats;
}
static void nicvf_tx_timeout(struct net_device *dev)
{
	struct nicvf *nic = netdev_priv(dev);

	if (netif_msg_tx_err(nic))
		netdev_warn(dev, "%s: Transmit timed out, resetting\n",
			    dev->name);

	schedule_work(&nic->reset_task);
}
static void nicvf_reset_task(struct work_struct *work)
{
	struct nicvf *nic;

	nic = container_of(work, struct nicvf, reset_task);

	if (!netif_running(nic->netdev))
		return;

	nicvf_stop(nic->netdev);
	nicvf_open(nic->netdev);
	nic->netdev->trans_start = jiffies;
}
static int nicvf_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	struct nicvf *nic = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
		nicvf_config_vlan_stripping(nic, features);

	return 0;
}
static const struct net_device_ops nicvf_netdev_ops = {
	.ndo_open		= nicvf_open,
	.ndo_stop		= nicvf_stop,
	.ndo_start_xmit		= nicvf_xmit,
	.ndo_change_mtu		= nicvf_change_mtu,
	.ndo_set_mac_address	= nicvf_set_mac_address,
	.ndo_get_stats64	= nicvf_get_stats64,
	.ndo_tx_timeout		= nicvf_tx_timeout,
	.ndo_set_features	= nicvf_set_features,
};
static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct net_device *netdev;
	struct nicvf *nic;
	struct queue_set *qs;
	int err;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto err_disable_device;
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to get usable DMA configuration\n");
		goto err_release_regions;
	}

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "unable to get 48-bit DMA for consistent allocations\n");
		goto err_release_regions;
	}

	netdev = alloc_etherdev_mqs(sizeof(struct nicvf),
				    MAX_RCV_QUEUES_PER_QS,
				    MAX_SND_QUEUES_PER_QS);
	if (!netdev) {
		err = -ENOMEM;
		goto err_release_regions;
	}

	pci_set_drvdata(pdev, netdev);

	SET_NETDEV_DEV(netdev, &pdev->dev);

	nic = netdev_priv(netdev);
	nic->netdev = netdev;
	nic->pdev = pdev;

	/* MAP VF's configuration registers */
	nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
	if (!nic->reg_base) {
		dev_err(dev, "Cannot map config register space, aborting\n");
		err = -ENOMEM;
		goto err_free_netdev;
	}

	err = nicvf_set_qset_resources(nic);
	if (err)
		goto err_free_netdev;

	qs = nic->qs;

	err = nicvf_set_real_num_queues(netdev, qs->sq_cnt, qs->rq_cnt);
	if (err)
		goto err_free_netdev;

	/* Check if PF is alive and get MAC address for this VF */
	err = nicvf_register_misc_interrupt(nic);
	if (err)
		goto err_free_netdev;

	netdev->hw_features = (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SG |
			       NETIF_F_TSO | NETIF_F_GRO |
			       NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_RXHASH);

	netdev->features |= netdev->hw_features;

	netdev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;

	netdev->netdev_ops = &nicvf_netdev_ops;
	netdev->watchdog_timeo = NICVF_TX_TIMEOUT;

	INIT_WORK(&nic->reset_task, nicvf_reset_task);

	err = register_netdev(netdev);
	if (err) {
		dev_err(dev, "Failed to register netdevice\n");
		goto err_unregister_interrupts;
	}

	nic->msg_enable = debug;

	nicvf_set_ethtool_ops(netdev);

	return 0;

err_unregister_interrupts:
	nicvf_unregister_interrupts(nic);
err_free_netdev:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
	return err;
}
static void nicvf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nicvf *nic = netdev_priv(netdev);

	unregister_netdev(netdev);
	nicvf_unregister_interrupts(nic);
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
static void nicvf_shutdown(struct pci_dev *pdev)
{
	nicvf_remove(pdev);
}
static struct pci_driver nicvf_driver = {
	.name = DRV_NAME,
	.id_table = nicvf_id_table,
	.probe = nicvf_probe,
	.remove = nicvf_remove,
	.shutdown = nicvf_shutdown,
};
static int __init nicvf_init_module(void)
{
	pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);

	return pci_register_driver(&nicvf_driver);
}

static void __exit nicvf_cleanup_module(void)
{
	pci_unregister_driver(&nicvf_driver);
}

module_init(nicvf_init_module);
module_exit(nicvf_cleanup_module);