/*******************************************************************************

  Intel(R) 82576 Virtual Function Linux driver
  Copyright(c) 2009 - 2012 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, see <http://www.gnu.org/licenses/>.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>

#include "igbvf.h"

#define DRV_VERSION "2.0.2-k"
char igbvf_driver_name[] = "igbvf";
const char igbvf_driver_version[] = DRV_VERSION;
static const char igbvf_driver_string[] =
		  "Intel(R) Gigabit Virtual Function Network Driver";
static const char igbvf_copyright[] =
		  "Copyright (c) 2009 - 2012 Intel Corporation.";
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
static int igbvf_poll(struct napi_struct *napi, int budget);
static void igbvf_reset(struct igbvf_adapter *);
static void igbvf_set_interrupt_capability(struct igbvf_adapter *);
static void igbvf_reset_interrupt_capability(struct igbvf_adapter *);
static struct igbvf_info igbvf_vf_info = {
	.mac		= e1000_vfadapt,
	.flags		= 0,
	.pba		= 10,
	.init_ops	= e1000_init_function_pointers_vf,
};

static struct igbvf_info igbvf_i350_vf_info = {
	.mac		= e1000_vfadapt_i350,
	.flags		= 0,
	.pba		= 10,
	.init_ops	= e1000_init_function_pointers_vf,
};

static const struct igbvf_info *igbvf_info_tbl[] = {
	[board_vf]	= &igbvf_vf_info,
	[board_i350_vf]	= &igbvf_i350_vf_info,
};
/**
 * igbvf_desc_unused - calculate if we have unused descriptors
 * @ring: address of receive ring structure
 **/
static int igbvf_desc_unused(struct igbvf_ring *ring)
{
	if (ring->next_to_clean > ring->next_to_use)
		return ring->next_to_clean - ring->next_to_use - 1;

	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
}
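
/* Worked example (illustrative values, not taken from the driver): with
 * ring->count = 256, ring->next_to_clean = 10 and ring->next_to_use = 250,
 * the indices have wrapped, so unused = 256 + 10 - 250 - 1 = 15.  One slot
 * is always kept unused so that next_to_clean == next_to_use unambiguously
 * means "empty" rather than "full".
 */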
/**
 * igbvf_receive_skb - helper function to handle Rx indications
 * @adapter: board private structure
 * @netdev: pointer to netdev struct
 * @skb: pointer to sk_buff to be indicated to stack
 * @status: descriptor status field as written by hardware
 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
 **/
static void igbvf_receive_skb(struct igbvf_adapter *adapter,
			      struct net_device *netdev,
			      struct sk_buff *skb,
			      u32 status, u16 vlan)
{
	u16 vid;

	if (status & E1000_RXD_STAT_VP) {
		if ((adapter->flags & IGBVF_FLAG_RX_LB_VLAN_BSWAP) &&
		    (status & E1000_RXDEXT_STATERR_LB))
			vid = be16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
		else
			vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
		if (test_bit(vid, adapter->active_vlans))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
	}

	napi_gro_receive(&adapter->rx_ring->napi, skb);
}
static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter,
					 u32 status_err, struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Ignore Checksum bit is set or checksum is disabled through ethtool */
	if ((status_err & E1000_RXD_STAT_IXSM) ||
	    (adapter->flags & IGBVF_FLAG_RX_CSUM_DISABLED))
		return;

	/* TCP/UDP checksum error bit is set */
	if (status_err &
	    (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
		/* let the stack verify checksum errors */
		adapter->hw_csum_err++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	adapter->hw_csum_good++;
}
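
/* Example outcome of the checks above: a descriptor whose status sets
 * E1000_RXD_STAT_TCPCS (or UDPCS) with no TCPE/IPE error bits yields
 * skb->ip_summed = CHECKSUM_UNNECESSARY and bumps hw_csum_good; if an
 * L4 checksum error bit is set instead, hw_csum_err is bumped and the
 * skb goes up unmarked so the stack re-verifies the checksum itself.
 */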
/**
 * igbvf_alloc_rx_buffers - Replace used receive buffers; packet split
 * @rx_ring: address of ring structure to repopulate
 * @cleaned_count: number of buffers to repopulate
 **/
static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
				   int cleaned_count)
{
	struct igbvf_adapter *adapter = rx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_adv_rx_desc *rx_desc;
	struct igbvf_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	int bufsz;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	if (adapter->rx_ps_hdr_size)
		bufsz = adapter->rx_ps_hdr_size;
	else
		bufsz = adapter->rx_buffer_len;

	while (cleaned_count--) {
		rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i);

		if (adapter->rx_ps_hdr_size && !buffer_info->page_dma) {
			if (!buffer_info->page) {
				buffer_info->page = alloc_page(GFP_ATOMIC);
				if (!buffer_info->page) {
					adapter->alloc_rx_buff_failed++;
					goto no_buffers;
				}
				buffer_info->page_offset = 0;
			} else {
				buffer_info->page_offset ^= PAGE_SIZE / 2;
			}
			buffer_info->page_dma =
				dma_map_page(&pdev->dev, buffer_info->page,
					     buffer_info->page_offset,
					     PAGE_SIZE / 2,
					     DMA_FROM_DEVICE);
			if (dma_mapping_error(&pdev->dev,
					      buffer_info->page_dma)) {
				__free_page(buffer_info->page);
				buffer_info->page = NULL;
				dev_err(&pdev->dev, "RX DMA map failed\n");
				break;
			}
		}

		if (!buffer_info->skb) {
			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
			if (!skb) {
				adapter->alloc_rx_buff_failed++;
				goto no_buffers;
			}

			buffer_info->skb = skb;
			buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
							  bufsz,
							  DMA_FROM_DEVICE);
			if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
				dev_kfree_skb(buffer_info->skb);
				buffer_info->skb = NULL;
				dev_err(&pdev->dev, "RX DMA map failed\n");
				goto no_buffers;
			}
		}
		/* Refresh the desc even if buffer_addrs didn't change because
		 * each write-back erases this info.
		 */
		if (adapter->rx_ps_hdr_size) {
			rx_desc->read.pkt_addr =
			     cpu_to_le64(buffer_info->page_dma);
			rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
		} else {
			rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma);
			rx_desc->read.hdr_addr = 0;
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		if (i == 0)
			i = (rx_ring->count - 1);
		else
			i--;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, adapter->hw.hw_addr + rx_ring->tail);
	}
}
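
/* Note on the tail write above (a sketch of the contract implied by the
 * surrounding code, not an authoritative hardware description): the
 * descriptor at index i is the first one *not* yet initialized, so the
 * tail register is pointed at i - 1 (wrapping to count - 1 when i == 0),
 * i.e. at the last descriptor that was actually populated.
 */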
/**
 * igbvf_clean_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 * @work_done: incremented for every descriptor processed
 * @work_to_do: upper limit on descriptors to process this call
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter,
			       int *work_done, int work_to_do)
{
	struct igbvf_ring *rx_ring = adapter->rx_ring;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_adv_rx_desc *rx_desc, *next_rxd;
	struct igbvf_buffer *buffer_info, *next_buffer;
	struct sk_buff *skb;
	bool cleaned = false;
	int cleaned_count = 0;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int i;
	u32 length, hlen, staterr;

	i = rx_ring->next_to_clean;
	rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

	while (staterr & E1000_RXD_STAT_DD) {
		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		rmb(); /* read descriptor and rx_buffer_info after status DD */

		buffer_info = &rx_ring->buffer_info[i];

		/* HW will not DMA in data larger than the given buffer, even
		 * if it parses the (NFS, of course) header to be larger.  In
		 * that case, it fills the header buffer and spills the rest
		 * into the page.
		 */
		hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info)
		       & E1000_RXDADV_HDRBUFLEN_MASK) >>
		       E1000_RXDADV_HDRBUFLEN_SHIFT;
		if (hlen > adapter->rx_ps_hdr_size)
			hlen = adapter->rx_ps_hdr_size;

		length = le16_to_cpu(rx_desc->wb.upper.length);
		cleaned = true;
		cleaned_count++;

		skb = buffer_info->skb;
		prefetch(skb->data - NET_IP_ALIGN);
		buffer_info->skb = NULL;
		if (!adapter->rx_ps_hdr_size) {
			dma_unmap_single(&pdev->dev, buffer_info->dma,
					 adapter->rx_buffer_len,
					 DMA_FROM_DEVICE);
			buffer_info->dma = 0;
			skb_put(skb, length);
			goto send_up;
		}

		if (!skb_shinfo(skb)->nr_frags) {
			dma_unmap_single(&pdev->dev, buffer_info->dma,
					 adapter->rx_ps_hdr_size,
					 DMA_FROM_DEVICE);
			buffer_info->dma = 0;
			skb_put(skb, hlen);
		}

		if (length) {
			dma_unmap_page(&pdev->dev, buffer_info->page_dma,
				       PAGE_SIZE / 2,
				       DMA_FROM_DEVICE);
			buffer_info->page_dma = 0;

			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
					   buffer_info->page,
					   buffer_info->page_offset,
					   length);

			if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) ||
			    (page_count(buffer_info->page) != 1))
				buffer_info->page = NULL;
			else
				get_page(buffer_info->page);

			skb->len += length;
			skb->data_len += length;
			skb->truesize += PAGE_SIZE / 2;
		}
send_up:
		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = IGBVF_RX_DESC_ADV(*rx_ring, i);
		prefetch(next_rxd);
		next_buffer = &rx_ring->buffer_info[i];

		if (!(staterr & E1000_RXD_STAT_EOP)) {
			buffer_info->skb = next_buffer->skb;
			buffer_info->dma = next_buffer->dma;
			next_buffer->skb = skb;
			next_buffer->dma = 0;
			goto next_desc;
		}

		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		total_bytes += skb->len;
		total_packets++;

		igbvf_rx_checksum_adv(adapter, staterr, skb);

		skb->protocol = eth_type_trans(skb, netdev);

		igbvf_receive_skb(adapter, netdev, skb, staterr,
				  rx_desc->wb.upper.vlan);

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IGBVF_RX_BUFFER_WRITE) {
			igbvf_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = igbvf_desc_unused(rx_ring);

	if (cleaned_count)
		igbvf_alloc_rx_buffers(rx_ring, cleaned_count);

	adapter->total_rx_packets += total_packets;
	adapter->total_rx_bytes += total_bytes;
	adapter->net_stats.rx_bytes += total_bytes;
	adapter->net_stats.rx_packets += total_packets;
	return cleaned;
}
static void igbvf_put_txbuf(struct igbvf_adapter *adapter,
			    struct igbvf_buffer *buffer_info)
{
	if (buffer_info->dma) {
		if (buffer_info->mapped_as_page)
			dma_unmap_page(&adapter->pdev->dev,
				       buffer_info->dma,
				       buffer_info->length,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(&adapter->pdev->dev,
					 buffer_info->dma,
					 buffer_info->length,
					 DMA_TO_DEVICE);
		buffer_info->dma = 0;
	}
	if (buffer_info->skb) {
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
	buffer_info->time_stamp = 0;
}
/**
 * igbvf_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @tx_ring: ring being initialized
 *
 * Return 0 on success, negative on failure
 **/
int igbvf_setup_tx_resources(struct igbvf_adapter *adapter,
			     struct igbvf_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct igbvf_buffer) * tx_ring->count;
	tx_ring->buffer_info = vzalloc(size);
	if (!tx_ring->buffer_info)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc)
		goto err;

	tx_ring->adapter = adapter;
	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	return 0;
err:
	vfree(tx_ring->buffer_info);
	dev_err(&adapter->pdev->dev,
		"Unable to allocate memory for the transmit descriptor ring\n");
	return -ENOMEM;
}
/**
 * igbvf_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rx_ring: ring being initialized
 *
 * Returns 0 on success, negative on failure
 **/
int igbvf_setup_rx_resources(struct igbvf_adapter *adapter,
			     struct igbvf_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size, desc_len;

	size = sizeof(struct igbvf_buffer) * rx_ring->count;
	rx_ring->buffer_info = vzalloc(size);
	if (!rx_ring->buffer_info)
		goto err;

	desc_len = sizeof(union e1000_adv_rx_desc);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * desc_len;
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);
	if (!rx_ring->desc)
		goto err;

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	rx_ring->adapter = adapter;

	return 0;

err:
	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;
	dev_err(&adapter->pdev->dev,
		"Unable to allocate memory for the receive descriptor ring\n");
	return -ENOMEM;
}
/**
 * igbvf_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/
static void igbvf_clean_tx_ring(struct igbvf_ring *tx_ring)
{
	struct igbvf_adapter *adapter = tx_ring->adapter;
	struct igbvf_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	if (!tx_ring->buffer_info)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		igbvf_put_txbuf(adapter, buffer_info);
	}

	size = sizeof(struct igbvf_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	writel(0, adapter->hw.hw_addr + tx_ring->head);
	writel(0, adapter->hw.hw_addr + tx_ring->tail);
}
/**
 * igbvf_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: ring to free resources from
 *
 * Free all transmit software resources
 **/
void igbvf_free_tx_resources(struct igbvf_ring *tx_ring)
{
	struct pci_dev *pdev = tx_ring->adapter->pdev;

	igbvf_clean_tx_ring(tx_ring);

	vfree(tx_ring->buffer_info);
	tx_ring->buffer_info = NULL;

	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
			  tx_ring->dma);

	tx_ring->desc = NULL;
}
/**
 * igbvf_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring structure pointer to free buffers from
 **/
static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring)
{
	struct igbvf_adapter *adapter = rx_ring->adapter;
	struct igbvf_buffer *buffer_info;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	if (!rx_ring->buffer_info)
		return;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if (buffer_info->dma) {
			if (adapter->rx_ps_hdr_size)
				dma_unmap_single(&pdev->dev, buffer_info->dma,
						 adapter->rx_ps_hdr_size,
						 DMA_FROM_DEVICE);
			else
				dma_unmap_single(&pdev->dev, buffer_info->dma,
						 adapter->rx_buffer_len,
						 DMA_FROM_DEVICE);
			buffer_info->dma = 0;
		}

		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}

		if (buffer_info->page) {
			if (buffer_info->page_dma)
				dma_unmap_page(&pdev->dev,
					       buffer_info->page_dma,
					       PAGE_SIZE / 2,
					       DMA_FROM_DEVICE);
			put_page(buffer_info->page);
			buffer_info->page = NULL;
			buffer_info->page_dma = 0;
			buffer_info->page_offset = 0;
		}
	}

	size = sizeof(struct igbvf_buffer) * rx_ring->count;
	memset(rx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	writel(0, adapter->hw.hw_addr + rx_ring->head);
	writel(0, adapter->hw.hw_addr + rx_ring->tail);
}
/**
 * igbvf_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void igbvf_free_rx_resources(struct igbvf_ring *rx_ring)
{
	struct pci_dev *pdev = rx_ring->adapter->pdev;

	igbvf_clean_rx_ring(rx_ring);

	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;

	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
			  rx_ring->dma);
	rx_ring->desc = NULL;
}
/**
 * igbvf_update_itr - update the dynamic ITR value based on statistics
 * @adapter: pointer to adapter
 * @itr_setting: current adapter->itr
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 *
 * Stores a new ITR value based on packets and byte counts during the last
 * interrupt.  The advantage of per interrupt computation is faster updates
 * and more accurate ITR for the current traffic pattern.  Constants in this
 * function were computed based on theoretical maximum wire speed, and
 * thresholds were set based on testing data as well as attempting to
 * minimize response time while increasing bulk throughput.
 **/
static enum latency_range igbvf_update_itr(struct igbvf_adapter *adapter,
					   enum latency_range itr_setting,
					   int packets, int bytes)
{
	enum latency_range retval = itr_setting;

	if (packets == 0)
		goto update_itr_done;

	switch (itr_setting) {
	case lowest_latency:
		/* handle TSO and jumbo frames */
		if (bytes/packets > 8000)
			retval = bulk_latency;
		else if ((packets < 5) && (bytes > 512))
			retval = low_latency;
		break;
	case low_latency:  /* 50 usec aka 20000 ints/s */
		if (bytes > 10000) {
			/* this if handles the TSO accounting */
			if (bytes/packets > 8000)
				retval = bulk_latency;
			else if ((packets < 10) || ((bytes/packets) > 1200))
				retval = bulk_latency;
			else if ((packets > 35))
				retval = lowest_latency;
		} else if (bytes/packets > 2000) {
			retval = bulk_latency;
		} else if (packets <= 2 && bytes < 512) {
			retval = lowest_latency;
		}
		break;
	case bulk_latency: /* 250 usec aka 4000 ints/s */
		if (bytes > 25000) {
			if (packets > 35)
				retval = low_latency;
		} else if (bytes < 6000) {
			retval = low_latency;
		}
		break;
	default:
		break;
	}

update_itr_done:
	return retval;
}
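
/* Worked classification example (illustrative traffic numbers): in the
 * low_latency state, 4 packets totalling 40000 bytes gives bytes > 10000
 * and bytes/packets = 10000 > 8000, so the interval is reclassified as
 * bulk_latency (TSO/jumbo traffic); 40 packets of 300 bytes (12000 bytes)
 * instead reaches the packets > 35 branch and drops to lowest_latency.
 */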
static int igbvf_range_to_itr(enum latency_range current_range)
{
	int new_itr;

	switch (current_range) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = IGBVF_70K_ITR;
		break;
	case low_latency:
		new_itr = IGBVF_20K_ITR;
		break;
	case bulk_latency:
		new_itr = IGBVF_4K_ITR;
		break;
	default:
		new_itr = IGBVF_START_ITR;
		break;
	}
	return new_itr;
}
static void igbvf_set_itr(struct igbvf_adapter *adapter)
{
	u32 new_itr;

	adapter->tx_ring->itr_range =
			igbvf_update_itr(adapter,
					 adapter->tx_ring->itr_val,
					 adapter->total_tx_packets,
					 adapter->total_tx_bytes);

	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (adapter->requested_itr == 3 &&
	    adapter->tx_ring->itr_range == lowest_latency)
		adapter->tx_ring->itr_range = low_latency;

	new_itr = igbvf_range_to_itr(adapter->tx_ring->itr_range);

	if (new_itr != adapter->tx_ring->itr_val) {
		u32 current_itr = adapter->tx_ring->itr_val;
		/* this attempts to bias the interrupt rate towards Bulk
		 * by adding intermediate steps when interrupt rate is
		 * increasing
		 */
		new_itr = new_itr > current_itr ?
			  min(current_itr + (new_itr >> 2), new_itr) :
			  new_itr;
		adapter->tx_ring->itr_val = new_itr;

		adapter->tx_ring->set_itr = 1;
	}

	adapter->rx_ring->itr_range =
			igbvf_update_itr(adapter, adapter->rx_ring->itr_val,
					 adapter->total_rx_packets,
					 adapter->total_rx_bytes);
	if (adapter->requested_itr == 3 &&
	    adapter->rx_ring->itr_range == lowest_latency)
		adapter->rx_ring->itr_range = low_latency;

	new_itr = igbvf_range_to_itr(adapter->rx_ring->itr_range);

	if (new_itr != adapter->rx_ring->itr_val) {
		u32 current_itr = adapter->rx_ring->itr_val;

		new_itr = new_itr > current_itr ?
			  min(current_itr + (new_itr >> 2), new_itr) :
			  new_itr;
		adapter->rx_ring->itr_val = new_itr;

		adapter->rx_ring->set_itr = 1;
	}
}
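
/* Smoothing example (arbitrary illustrative ITR register values): moving
 * from current_itr = 4000 towards new_itr = 20000 applies
 * min(4000 + (20000 >> 2), 20000) = 9000, so increases ramp up over
 * several intervals while decreases take effect immediately.
 */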
/**
 * igbvf_clean_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: ring structure to clean descriptors from
 *
 * returns true if ring is completely cleaned
 **/
static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
{
	struct igbvf_adapter *adapter = tx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct igbvf_buffer *buffer_info;
	struct sk_buff *skb;
	union e1000_adv_tx_desc *tx_desc, *eop_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int i, count = 0;
	bool cleaned = false;

	i = tx_ring->next_to_clean;
	buffer_info = &tx_ring->buffer_info[i];
	eop_desc = buffer_info->next_to_watch;

	do {
		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		read_barrier_depends();

		/* if DD is not set pending work has not been completed */
		if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
			break;

		/* clear next_to_watch to prevent false hangs */
		buffer_info->next_to_watch = NULL;

		for (cleaned = false; !cleaned; count++) {
			tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
			cleaned = (tx_desc == eop_desc);
			skb = buffer_info->skb;

			if (skb) {
				unsigned int segs, bytecount;

				/* gso_segs is currently only valid for tcp */
				segs = skb_shinfo(skb)->gso_segs ?: 1;
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * skb_headlen(skb)) +
					    skb->len;
				total_packets += segs;
				total_bytes += bytecount;
			}

			igbvf_put_txbuf(adapter, buffer_info);
			tx_desc->wb.status = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;

			buffer_info = &tx_ring->buffer_info[i];
		}

		eop_desc = buffer_info->next_to_watch;
	} while (count < tx_ring->count);

	tx_ring->next_to_clean = i;

	if (unlikely(count && netif_carrier_ok(netdev) &&
	    igbvf_desc_unused(tx_ring) >= IGBVF_TX_QUEUE_WAKE)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (netif_queue_stopped(netdev) &&
		    !(test_bit(__IGBVF_DOWN, &adapter->state))) {
			netif_wake_queue(netdev);
			++adapter->restart_queue;
		}
	}

	adapter->net_stats.tx_bytes += total_bytes;
	adapter->net_stats.tx_packets += total_packets;
	return count < tx_ring->count;
}
static irqreturn_t igbvf_msix_other(int irq, void *data)
{
	struct net_device *netdev = data;
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	adapter->int_counter1++;

	netif_carrier_off(netdev);
	hw->mac.get_link_status = 1;
	if (!test_bit(__IGBVF_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer, jiffies + 1);

	ew32(EIMS, adapter->eims_other);

	return IRQ_HANDLED;
}
static irqreturn_t igbvf_intr_msix_tx(int irq, void *data)
{
	struct net_device *netdev = data;
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct igbvf_ring *tx_ring = adapter->tx_ring;

	if (tx_ring->set_itr) {
		writel(tx_ring->itr_val,
		       adapter->hw.hw_addr + tx_ring->itr_register);
		adapter->tx_ring->set_itr = 0;
	}

	adapter->total_tx_bytes = 0;
	adapter->total_tx_packets = 0;

	/* auto mask will automatically re-enable the interrupt when we write
	 * EICS
	 */
	if (!igbvf_clean_tx_irq(tx_ring))
		/* Ring was not completely cleaned, so fire another interrupt */
		ew32(EICS, tx_ring->eims_value);
	else
		ew32(EIMS, tx_ring->eims_value);

	return IRQ_HANDLED;
}
static irqreturn_t igbvf_intr_msix_rx(int irq, void *data)
{
	struct net_device *netdev = data;
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	adapter->int_counter0++;

	/* Write the ITR value calculated at the end of the
	 * previous interrupt.
	 */
	if (adapter->rx_ring->set_itr) {
		writel(adapter->rx_ring->itr_val,
		       adapter->hw.hw_addr + adapter->rx_ring->itr_register);
		adapter->rx_ring->set_itr = 0;
	}

	if (napi_schedule_prep(&adapter->rx_ring->napi)) {
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__napi_schedule(&adapter->rx_ring->napi);
	}

	return IRQ_HANDLED;
}
#define IGBVF_NO_QUEUE -1

static void igbvf_assign_vector(struct igbvf_adapter *adapter, int rx_queue,
				int tx_queue, int msix_vector)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ivar, index;

	/* 82576 uses a table-based method for assigning vectors.
	 * Each queue has a single entry in the table to which we write
	 * a vector number along with a "valid" bit.  Sadly, the layout
	 * of the table is somewhat counterintuitive.
	 */
	if (rx_queue > IGBVF_NO_QUEUE) {
		index = (rx_queue >> 1);
		ivar = array_er32(IVAR0, index);
		if (rx_queue & 0x1) {
			/* vector goes into third byte of register */
			ivar = ivar & 0xFF00FFFF;
			ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
		} else {
			/* vector goes into low byte of register */
			ivar = ivar & 0xFFFFFF00;
			ivar |= msix_vector | E1000_IVAR_VALID;
		}
		adapter->rx_ring[rx_queue].eims_value = 1 << msix_vector;
		array_ew32(IVAR0, index, ivar);
	}
	if (tx_queue > IGBVF_NO_QUEUE) {
		index = (tx_queue >> 1);
		ivar = array_er32(IVAR0, index);
		if (tx_queue & 0x1) {
			/* vector goes into high byte of register */
			ivar = ivar & 0x00FFFFFF;
			ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
		} else {
			/* vector goes into second byte of register */
			ivar = ivar & 0xFFFF00FF;
			ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
		}
		adapter->tx_ring[tx_queue].eims_value = 1 << msix_vector;
		array_ew32(IVAR0, index, ivar);
	}
}
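
/* IVAR0 byte-lane layout as implemented above (one 32-bit entry covers an
 * even/odd queue pair; each byte holds the MSI-X vector number OR'ed with
 * E1000_IVAR_VALID):
 *	byte 0: Rx queue (even)		byte 1: Tx queue (even)
 *	byte 2: Rx queue (odd)		byte 3: Tx queue (odd)
 */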
/**
 * igbvf_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * igbvf_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void igbvf_configure_msix(struct igbvf_adapter *adapter)
{
	u32 tmp;
	struct e1000_hw *hw = &adapter->hw;
	struct igbvf_ring *tx_ring = adapter->tx_ring;
	struct igbvf_ring *rx_ring = adapter->rx_ring;
	int vector = 0;

	adapter->eims_enable_mask = 0;

	igbvf_assign_vector(adapter, IGBVF_NO_QUEUE, 0, vector++);
	adapter->eims_enable_mask |= tx_ring->eims_value;
	writel(tx_ring->itr_val, hw->hw_addr + tx_ring->itr_register);
	igbvf_assign_vector(adapter, 0, IGBVF_NO_QUEUE, vector++);
	adapter->eims_enable_mask |= rx_ring->eims_value;
	writel(rx_ring->itr_val, hw->hw_addr + rx_ring->itr_register);

	/* set vector for other causes, i.e. link changes */

	tmp = (vector++ | E1000_IVAR_VALID);

	ew32(IVAR_MISC, tmp);

	adapter->eims_enable_mask = (1 << (vector)) - 1;
	adapter->eims_other = 1 << (vector - 1);
	e1e_flush();
}
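
/* With the fixed three-vector layout programmed above (vector 0 = Tx,
 * vector 1 = Rx, vector 2 = other/link causes), vector ends at 3, so
 * eims_enable_mask = (1 << 3) - 1 = 0x7 and eims_other = 1 << 2 = 0x4.
 */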
static void igbvf_reset_interrupt_capability(struct igbvf_adapter *adapter)
{
	if (adapter->msix_entries) {
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	}
}
/**
 * igbvf_set_interrupt_capability - set MSI or MSI-X if supported
 * @adapter: board private structure
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static void igbvf_set_interrupt_capability(struct igbvf_adapter *adapter)
{
	int err = -ENOMEM;
	int i;

	/* we allocate 3 vectors, 1 for Tx, 1 for Rx, one for PF messages */
	adapter->msix_entries = kcalloc(3, sizeof(struct msix_entry),
					GFP_KERNEL);
	if (adapter->msix_entries) {
		for (i = 0; i < 3; i++)
			adapter->msix_entries[i].entry = i;

		err = pci_enable_msix_range(adapter->pdev,
					    adapter->msix_entries, 3, 3);
	}

	if (err < 0) {
		/* MSI-X failed */
		dev_err(&adapter->pdev->dev,
			"Failed to initialize MSI-X interrupts.\n");
		igbvf_reset_interrupt_capability(adapter);
	}
}
/**
 * igbvf_request_msix - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * igbvf_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int igbvf_request_msix(struct igbvf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err = 0, vector = 0;

	if (strlen(netdev->name) < (IFNAMSIZ - 5)) {
		sprintf(adapter->tx_ring->name, "%s-tx-0", netdev->name);
		sprintf(adapter->rx_ring->name, "%s-rx-0", netdev->name);
	} else {
		memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
		memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
	}

	err = request_irq(adapter->msix_entries[vector].vector,
			  igbvf_intr_msix_tx, 0, adapter->tx_ring->name,
			  netdev);
	if (err)
		goto out;

	adapter->tx_ring->itr_register = E1000_EITR(vector);
	adapter->tx_ring->itr_val = adapter->current_itr;
	vector++;

	err = request_irq(adapter->msix_entries[vector].vector,
			  igbvf_intr_msix_rx, 0, adapter->rx_ring->name,
			  netdev);
	if (err)
		goto out;

	adapter->rx_ring->itr_register = E1000_EITR(vector);
	adapter->rx_ring->itr_val = adapter->current_itr;
	vector++;

	err = request_irq(adapter->msix_entries[vector].vector,
			  igbvf_msix_other, 0, netdev->name, netdev);
	if (err)
		goto out;

	igbvf_configure_msix(adapter);
	return 0;
out:
	return err;
}
/**
 * igbvf_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 **/
static int igbvf_alloc_queues(struct igbvf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	adapter->tx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		return -ENOMEM;

	adapter->rx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL);
	if (!adapter->rx_ring) {
		kfree(adapter->tx_ring);
		return -ENOMEM;
	}

	netif_napi_add(netdev, &adapter->rx_ring->napi, igbvf_poll, 64);

	return 0;
}
/**
 * igbvf_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igbvf_request_irq(struct igbvf_adapter *adapter)
{
	int err = -1;

	/* igbvf supports msi-x only */
	if (adapter->msix_entries)
		err = igbvf_request_msix(adapter);

	if (!err)
		return err;

	dev_err(&adapter->pdev->dev,
		"Unable to allocate interrupt, Error: %d\n", err);

	return err;
}
static void igbvf_free_irq(struct igbvf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int vector;

	if (adapter->msix_entries) {
		for (vector = 0; vector < 3; vector++)
			free_irq(adapter->msix_entries[vector].vector, netdev);
	}
}
/**
 * igbvf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void igbvf_irq_disable(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(EIMC, ~0);

	if (adapter->msix_entries)
		ew32(EIAC, 0);
}
/**
 * igbvf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void igbvf_irq_enable(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(EIAC, adapter->eims_enable_mask);
	ew32(EIAM, adapter->eims_enable_mask);
	ew32(EIMS, adapter->eims_enable_mask);
}
/**
 * igbvf_poll - NAPI Rx polling callback
 * @napi: struct associated with this polling callback
 * @budget: amount of packets driver is allowed to process this poll
 **/
static int igbvf_poll(struct napi_struct *napi, int budget)
{
	struct igbvf_ring *rx_ring = container_of(napi, struct igbvf_ring, napi);
	struct igbvf_adapter *adapter = rx_ring->adapter;
	struct e1000_hw *hw = &adapter->hw;
	int work_done = 0;

	igbvf_clean_rx_irq(adapter, &work_done, budget);

	/* If not enough Rx work done, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);

		if (adapter->requested_itr & 3)
			igbvf_set_itr(adapter);

		if (!test_bit(__IGBVF_DOWN, &adapter->state))
			ew32(EIMS, adapter->rx_ring->eims_value);
	}

	return work_done;
}
/**
 * igbvf_set_rlpml - set receive large packet maximum length
 * @adapter: board private structure
 *
 * Configure the maximum size of packets that will be received
 **/
static void igbvf_set_rlpml(struct igbvf_adapter *adapter)
{
	int max_frame_size;
	struct e1000_hw *hw = &adapter->hw;

	max_frame_size = adapter->max_frame_size + VLAN_TAG_SIZE;
	e1000_rlpml_set_vf(hw, max_frame_size);
}
static int igbvf_vlan_rx_add_vid(struct net_device *netdev,
				 __be16 proto, u16 vid)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	if (hw->mac.ops.set_vfta(hw, vid, true)) {
		dev_err(&adapter->pdev->dev, "Failed to add vlan id %d\n", vid);
		return -EINVAL;
	}
	set_bit(vid, adapter->active_vlans);
	return 0;
}

static int igbvf_vlan_rx_kill_vid(struct net_device *netdev,
				  __be16 proto, u16 vid)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	if (hw->mac.ops.set_vfta(hw, vid, false)) {
		dev_err(&adapter->pdev->dev,
			"Failed to remove vlan id %d\n", vid);
		return -EINVAL;
	}
	clear_bit(vid, adapter->active_vlans);
	return 0;
}

static void igbvf_restore_vlan(struct igbvf_adapter *adapter)
{
	u16 vid;

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		igbvf_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
}
/**
 * igbvf_configure_tx - Configure Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void igbvf_configure_tx(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct igbvf_ring *tx_ring = adapter->tx_ring;
	u64 tdba;
	u32 txdctl, dca_txctrl;

	/* disable transmits */
	txdctl = er32(TXDCTL(0));
	ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
	e1e_flush();
	msleep(10);

	/* Setup the HW Tx Head and Tail descriptor pointers */
	ew32(TDLEN(0), tx_ring->count * sizeof(union e1000_adv_tx_desc));
	tdba = tx_ring->dma;
	ew32(TDBAL(0), (tdba & DMA_BIT_MASK(32)));
	ew32(TDBAH(0), (tdba >> 32));
	ew32(TDH(0), 0);
	ew32(TDT(0), 0);
	tx_ring->head = E1000_TDH(0);
	tx_ring->tail = E1000_TDT(0);

	/* Turn off Relaxed Ordering on head write-backs.  The writebacks
	 * MUST be delivered in order or it will completely screw up
	 * our bookkeeping.
	 */
	dca_txctrl = er32(DCA_TXCTRL(0));
	dca_txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN;
	ew32(DCA_TXCTRL(0), dca_txctrl);

	/* enable transmits */
	txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
	ew32(TXDCTL(0), txdctl);

	/* Setup Transmit Descriptor Settings for eop descriptor */
	adapter->txd_cmd = E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_IFCS;

	/* enable Report Status bit */
	adapter->txd_cmd |= E1000_ADVTXD_DCMD_RS;
}
/**
 * igbvf_setup_srrctl - configure the receive control registers
 * @adapter: Board private structure
 **/
static void igbvf_setup_srrctl(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 srrctl = 0;

	srrctl &= ~(E1000_SRRCTL_DESCTYPE_MASK |
		    E1000_SRRCTL_BSIZEHDR_MASK |
		    E1000_SRRCTL_BSIZEPKT_MASK);

	/* Enable queue drop to avoid head of line blocking */
	srrctl |= E1000_SRRCTL_DROP_EN;

	/* Setup buffer sizes */
	srrctl |= ALIGN(adapter->rx_buffer_len, 1024) >>
		  E1000_SRRCTL_BSIZEPKT_SHIFT;

	if (adapter->rx_buffer_len < 2048) {
		adapter->rx_ps_hdr_size = 0;
		srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
	} else {
		adapter->rx_ps_hdr_size = 128;
		srrctl |= adapter->rx_ps_hdr_size <<
			  E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
		srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
	}

	ew32(SRRCTL(0), srrctl);
}
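
/* Buffer-size encoding example (assuming the usual 1-KB granularity of
 * the BSIZEPKT field): the default rx_buffer_len of 1522 bytes aligns up
 * to 2048, and 2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT programs a 2-KB packet
 * buffer; since 1522 < 2048 the one-buffer descriptor format is used and
 * header split stays disabled (rx_ps_hdr_size = 0).
 */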
/**
 * igbvf_configure_rx - Configure Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void igbvf_configure_rx(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct igbvf_ring *rx_ring = adapter->rx_ring;
	u64 rdba;
	u32 rdlen, rxdctl;

	/* disable receives */
	rxdctl = er32(RXDCTL(0));
	ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
	e1e_flush();
	msleep(10);

	rdlen = rx_ring->count * sizeof(union e1000_adv_rx_desc);

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */
	rdba = rx_ring->dma;
	ew32(RDBAL(0), (rdba & DMA_BIT_MASK(32)));
	ew32(RDBAH(0), (rdba >> 32));
	ew32(RDLEN(0), rx_ring->count * sizeof(union e1000_adv_rx_desc));
	rx_ring->head = E1000_RDH(0);
	rx_ring->tail = E1000_RDT(0);
	ew32(RDH(0), 0);
	ew32(RDT(0), 0);

	rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
	rxdctl &= 0xFFF00000;
	rxdctl |= IGBVF_RX_PTHRESH;
	rxdctl |= IGBVF_RX_HTHRESH << 8;
	rxdctl |= IGBVF_RX_WTHRESH << 16;

	igbvf_set_rlpml(adapter);

	/* enable receives */
	ew32(RXDCTL(0), rxdctl);
}
/**
 * igbvf_set_multi - Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_multi entry point is called whenever the multicast address
 * list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper multicast,
 * promiscuous mode, and all-multi behavior.
 **/
static void igbvf_set_multi(struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct netdev_hw_addr *ha;
	u8  *mta_list = NULL;
	int i = 0;

	if (!netdev_mc_empty(netdev)) {
		mta_list = kmalloc_array(netdev_mc_count(netdev), ETH_ALEN,
					 GFP_ATOMIC);
		if (!mta_list)
			return;
	}

	/* prepare a packed array of only addresses. */
	netdev_for_each_mc_addr(ha, netdev)
		memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);

	hw->mac.ops.update_mc_addr_list(hw, mta_list, i, 0, 0);
	kfree(mta_list);
}
/**
 * igbvf_configure - configure the hardware for Rx and Tx
 * @adapter: private board structure
 **/
static void igbvf_configure(struct igbvf_adapter *adapter)
{
	igbvf_set_multi(adapter->netdev);

	igbvf_restore_vlan(adapter);

	igbvf_configure_tx(adapter);
	igbvf_setup_srrctl(adapter);
	igbvf_configure_rx(adapter);
	igbvf_alloc_rx_buffers(adapter->rx_ring,
			       igbvf_desc_unused(adapter->rx_ring));
}
/* igbvf_reset - bring the hardware into a known good state
 * @adapter: private board structure
 *
 * This function boots the hardware and enables some settings that
 * require a configuration cycle of the hardware - those cannot be
 * set/changed during runtime.  After reset the device needs to be
 * properly configured for Rx, Tx etc.
 */
static void igbvf_reset(struct igbvf_adapter *adapter)
{
	struct e1000_mac_info *mac = &adapter->hw.mac;
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;

	/* Allow time for pending master requests to run */
	if (mac->ops.reset_hw(hw))
		dev_err(&adapter->pdev->dev, "PF still resetting\n");

	mac->ops.init_hw(hw);

	if (is_valid_ether_addr(adapter->hw.mac.addr)) {
		memcpy(netdev->dev_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
		memcpy(netdev->perm_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
	}

	adapter->last_reset = jiffies;
}
int igbvf_up(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* hardware has been reset, we need to reload some things */
	igbvf_configure(adapter);

	clear_bit(__IGBVF_DOWN, &adapter->state);

	napi_enable(&adapter->rx_ring->napi);
	if (adapter->msix_entries)
		igbvf_configure_msix(adapter);

	/* Clear any pending interrupts. */
	er32(EICR);
	igbvf_irq_enable(adapter);

	/* start the watchdog */
	hw->mac.get_link_status = 1;
	mod_timer(&adapter->watchdog_timer, jiffies + 1);

	return 0;
}
void igbvf_down(struct igbvf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	u32 rxdctl, txdctl;

	/* signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer
	 */
	set_bit(__IGBVF_DOWN, &adapter->state);

	/* disable receives in the hardware */
	rxdctl = er32(RXDCTL(0));
	ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);

	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	/* disable transmits in the hardware */
	txdctl = er32(TXDCTL(0));
	ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);

	/* flush both disables and wait for them to finish */
	e1e_flush();
	msleep(10);

	napi_disable(&adapter->rx_ring->napi);

	igbvf_irq_disable(adapter);

	del_timer_sync(&adapter->watchdog_timer);

	/* record the stats before reset*/
	igbvf_update_stats(adapter);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	igbvf_reset(adapter);
	igbvf_clean_tx_ring(adapter->tx_ring);
	igbvf_clean_rx_ring(adapter->rx_ring);
}
void igbvf_reinit_locked(struct igbvf_adapter *adapter)
{
	might_sleep();
	while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
		usleep_range(1000, 2000);
	igbvf_down(adapter);
	igbvf_up(adapter);
	clear_bit(__IGBVF_RESETTING, &adapter->state);
}
/**
 * igbvf_sw_init - Initialize general software structures (struct igbvf_adapter)
 * @adapter: board private structure to initialize
 *
 * igbvf_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int igbvf_sw_init(struct igbvf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	s32 rc;

	adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
	adapter->rx_ps_hdr_size = 0;
	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;

	adapter->tx_int_delay = 8;
	adapter->tx_abs_int_delay = 32;
	adapter->rx_int_delay = 0;
	adapter->rx_abs_int_delay = 8;
	adapter->requested_itr = 3;
	adapter->current_itr = IGBVF_START_ITR;

	/* Set various function pointers */
	adapter->ei->init_ops(&adapter->hw);

	rc = adapter->hw.mac.ops.init_params(&adapter->hw);
	if (rc)
		return rc;

	rc = adapter->hw.mbx.ops.init_params(&adapter->hw);
	if (rc)
		return rc;

	igbvf_set_interrupt_capability(adapter);

	if (igbvf_alloc_queues(adapter))
		return -ENOMEM;

	spin_lock_init(&adapter->tx_queue_lock);

	/* Explicitly disable IRQ since the NIC can be in any state. */
	igbvf_irq_disable(adapter);

	spin_lock_init(&adapter->stats_lock);

	set_bit(__IGBVF_DOWN, &adapter->state);
	return 0;
}
static void igbvf_initialize_last_counter_stats(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	adapter->stats.last_gprc = er32(VFGPRC);
	adapter->stats.last_gorc = er32(VFGORC);
	adapter->stats.last_gptc = er32(VFGPTC);
	adapter->stats.last_gotc = er32(VFGOTC);
	adapter->stats.last_mprc = er32(VFMPRC);
	adapter->stats.last_gotlbc = er32(VFGOTLBC);
	adapter->stats.last_gptlbc = er32(VFGPTLBC);
	adapter->stats.last_gorlbc = er32(VFGORLBC);
	adapter->stats.last_gprlbc = er32(VFGPRLBC);

	adapter->stats.base_gprc = er32(VFGPRC);
	adapter->stats.base_gorc = er32(VFGORC);
	adapter->stats.base_gptc = er32(VFGPTC);
	adapter->stats.base_gotc = er32(VFGOTC);
	adapter->stats.base_mprc = er32(VFMPRC);
	adapter->stats.base_gotlbc = er32(VFGOTLBC);
	adapter->stats.base_gptlbc = er32(VFGPTLBC);
	adapter->stats.base_gorlbc = er32(VFGORLBC);
	adapter->stats.base_gprlbc = er32(VFGPRLBC);
}
/**
 * igbvf_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int igbvf_open(struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	/* disallow open during test */
	if (test_bit(__IGBVF_TESTING, &adapter->state))
		return -EBUSY;

	/* allocate transmit descriptors */
	err = igbvf_setup_tx_resources(adapter, adapter->tx_ring);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = igbvf_setup_rx_resources(adapter, adapter->rx_ring);
	if (err)
		goto err_setup_rx;

	/* before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call pci_request_irq, so we have to setup our
	 * clean_rx handler before we do so.
	 */
	igbvf_configure(adapter);

	err = igbvf_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/* From here on the code is the same as igbvf_up() */
	clear_bit(__IGBVF_DOWN, &adapter->state);

	napi_enable(&adapter->rx_ring->napi);

	/* clear any pending interrupts */
	er32(EICR);

	igbvf_irq_enable(adapter);

	/* start the watchdog */
	hw->mac.get_link_status = 1;
	mod_timer(&adapter->watchdog_timer, jiffies + 1);

	return 0;

err_req_irq:
	igbvf_free_rx_resources(adapter->rx_ring);
err_setup_rx:
	igbvf_free_tx_resources(adapter->tx_ring);
err_setup_tx:
	igbvf_reset(adapter);

	return err;
}
/**
 * igbvf_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the drivers control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int igbvf_close(struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	WARN_ON(test_bit(__IGBVF_RESETTING, &adapter->state));
	igbvf_down(adapter);

	igbvf_free_irq(adapter);

	igbvf_free_tx_resources(adapter->tx_ring);
	igbvf_free_rx_resources(adapter->rx_ring);

	return 0;
}
/**
 * igbvf_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int igbvf_set_mac(struct net_device *netdev, void *p)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	hw->mac.ops.rar_set(hw, hw->mac.addr, 0);

	if (!ether_addr_equal(addr->sa_data, hw->mac.addr))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return 0;
}
#define UPDATE_VF_COUNTER(reg, name) \
{ \
	u32 current_counter = er32(reg); \
	if (current_counter < adapter->stats.last_##name) \
		adapter->stats.name += 0x100000000LL; \
	adapter->stats.last_##name = current_counter; \
	adapter->stats.name &= 0xFFFFFFFF00000000LL; \
	adapter->stats.name |= current_counter; \
}
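
/* Wrap-handling example for the macro above (illustrative readings): if
 * last_gprc was 0xFFFFFFF0 and the 32-bit VFGPRC register now reads
 * 0x00000010, current < last detects the wrap and adds 2^32 before the
 * low 32 bits are replaced, so the 64-bit total advances by 0x20.
 */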
/**
 * igbvf_update_stats - Update the board statistics counters
 * @adapter: board private structure
 **/
void igbvf_update_stats(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;

	/* Prevent stats update while adapter is being reset, link is down
	 * or if the pci connection is down.
	 */
	if (adapter->link_speed == 0)
		return;

	if (test_bit(__IGBVF_RESETTING, &adapter->state))
		return;

	if (pci_channel_offline(pdev))
		return;

	UPDATE_VF_COUNTER(VFGPRC, gprc);
	UPDATE_VF_COUNTER(VFGORC, gorc);
	UPDATE_VF_COUNTER(VFGPTC, gptc);
	UPDATE_VF_COUNTER(VFGOTC, gotc);
	UPDATE_VF_COUNTER(VFMPRC, mprc);
	UPDATE_VF_COUNTER(VFGOTLBC, gotlbc);
	UPDATE_VF_COUNTER(VFGPTLBC, gptlbc);
	UPDATE_VF_COUNTER(VFGORLBC, gorlbc);
	UPDATE_VF_COUNTER(VFGPRLBC, gprlbc);

	/* Fill out the OS statistics structure */
	adapter->net_stats.multicast = adapter->stats.mprc;
}
static void igbvf_print_link_info(struct igbvf_adapter *adapter)
{
	dev_info(&adapter->pdev->dev, "Link is Up %d Mbps %s Duplex\n",
		 adapter->link_speed,
		 adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half");
}
static bool igbvf_has_link(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	s32 ret_val = E1000_SUCCESS;
	bool link_active;

	/* If interface is down, stay link down */
	if (test_bit(__IGBVF_DOWN, &adapter->state))
		return false;

	ret_val = hw->mac.ops.check_for_link(hw);
	link_active = !hw->mac.get_link_status;

	/* if check for link returns error we will need to reset */
	if (ret_val && time_after(jiffies, adapter->last_reset + (10 * HZ)))
		schedule_work(&adapter->reset_task);

	return link_active;
}
/**
 * igbvf_watchdog - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void igbvf_watchdog(unsigned long data)
{
	struct igbvf_adapter *adapter = (struct igbvf_adapter *)data;

	/* Do the rest outside of interrupt context */
	schedule_work(&adapter->watchdog_task);
}
static void igbvf_watchdog_task(struct work_struct *work)
{
	struct igbvf_adapter *adapter = container_of(work,
						     struct igbvf_adapter,
						     watchdog_task);
	struct net_device *netdev = adapter->netdev;
	struct e1000_mac_info *mac = &adapter->hw.mac;
	struct igbvf_ring *tx_ring = adapter->tx_ring;
	struct e1000_hw *hw = &adapter->hw;
	u32 link;
	int tx_pending = 0;

	link = igbvf_has_link(adapter);

	if (link) {
		if (!netif_carrier_ok(netdev)) {
			mac->ops.get_link_up_info(&adapter->hw,
						  &adapter->link_speed,
						  &adapter->link_duplex);
			igbvf_print_link_info(adapter);

			netif_carrier_on(netdev);
			netif_wake_queue(netdev);
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			adapter->link_speed = 0;
			adapter->link_duplex = 0;
			dev_info(&adapter->pdev->dev, "Link is Down\n");
			netif_carrier_off(netdev);
			netif_stop_queue(netdev);
		}
	}

	if (netif_carrier_ok(netdev)) {
		igbvf_update_stats(adapter);
	} else {
		tx_pending = (igbvf_desc_unused(tx_ring) + 1 <
			      tx_ring->count);
		if (tx_pending) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context).
			 */
			adapter->tx_timeout_count++;
			schedule_work(&adapter->reset_task);
		}
	}

	/* Cause software interrupt to ensure Rx ring is cleaned */
	ew32(EICS, adapter->rx_ring->eims_value);

	/* Reset the timer */
	if (!test_bit(__IGBVF_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer,
			  round_jiffies(jiffies + (2 * HZ)));
}
#define IGBVF_TX_FLAGS_CSUM		0x00000001
#define IGBVF_TX_FLAGS_VLAN		0x00000002
#define IGBVF_TX_FLAGS_TSO		0x00000004
#define IGBVF_TX_FLAGS_IPV4		0x00000008
#define IGBVF_TX_FLAGS_VLAN_MASK	0xffff0000
#define IGBVF_TX_FLAGS_VLAN_SHIFT	16
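
/* Flag packing example: the low 16 bits of tx_flags carry the booleans
 * above and the high 16 bits carry the VLAN tag itself, so a frame on
 * VLAN id 5 yields tx_flags = (5 << IGBVF_TX_FLAGS_VLAN_SHIFT) |
 * IGBVF_TX_FLAGS_VLAN before it is copied into the context descriptor.
 */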
static int igbvf_tso(struct igbvf_adapter *adapter,
		     struct igbvf_ring *tx_ring,
		     struct sk_buff *skb, u32 tx_flags, u8 *hdr_len,
		     __be16 protocol)
{
	struct e1000_adv_tx_context_desc *context_desc;
	struct igbvf_buffer *buffer_info;
	u32 info = 0, tu_cmd = 0;
	u32 mss_l4len_idx, l4len;
	unsigned int i;
	int err;

	*hdr_len = 0;

	err = skb_cow_head(skb, 0);
	if (err < 0) {
		dev_err(&adapter->pdev->dev, "igbvf_tso returning an error\n");
		return err;
	}

	l4len = tcp_hdrlen(skb);
	*hdr_len += l4len;

	if (protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);

		iph->tot_len = 0;
		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
							 iph->daddr, 0,
							 IPPROTO_TCP, 0);
	} else if (skb_is_gso_v6(skb)) {
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						       &ipv6_hdr(skb)->daddr,
						       0, IPPROTO_TCP, 0);
	}

	i = tx_ring->next_to_use;

	buffer_info = &tx_ring->buffer_info[i];
	context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i);
	/* VLAN MACLEN IPLEN */
	if (tx_flags & IGBVF_TX_FLAGS_VLAN)
		info |= (tx_flags & IGBVF_TX_FLAGS_VLAN_MASK);
	info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
	*hdr_len += skb_network_offset(skb);
	info |= (skb_transport_header(skb) - skb_network_header(skb));
	*hdr_len += (skb_transport_header(skb) - skb_network_header(skb));
	context_desc->vlan_macip_lens = cpu_to_le32(info);

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);

	if (protocol == htons(ETH_P_IP))
		tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
	tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;

	context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);

	/* MSS L4LEN IDX */
	mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
	mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);

	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
	context_desc->seqnum_seed = 0;

	buffer_info->time_stamp = jiffies;
	buffer_info->dma = 0;
	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	return true;
}
static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter,
				 struct igbvf_ring *tx_ring,
				 struct sk_buff *skb, u32 tx_flags,
				 __be16 protocol)
{
	struct e1000_adv_tx_context_desc *context_desc;
	unsigned int i;
	struct igbvf_buffer *buffer_info;
	u32 info = 0, tu_cmd = 0;

	if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
	    (tx_flags & IGBVF_TX_FLAGS_VLAN)) {
		i = tx_ring->next_to_use;
		buffer_info = &tx_ring->buffer_info[i];
		context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i);

		if (tx_flags & IGBVF_TX_FLAGS_VLAN)
			info |= (tx_flags & IGBVF_TX_FLAGS_VLAN_MASK);

		info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			info |= (skb_transport_header(skb) -
				 skb_network_header(skb));

		context_desc->vlan_macip_lens = cpu_to_le32(info);

		tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			switch (protocol) {
			case htons(ETH_P_IP):
				tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
				break;
			case htons(ETH_P_IPV6):
				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
				break;
			default:
				break;
			}
		}

		context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
		context_desc->seqnum_seed = 0;
		context_desc->mss_l4len_idx = 0;

		buffer_info->time_stamp = jiffies;
		buffer_info->dma = 0;
		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}

	return false;
}
static int igbvf_maybe_stop_tx(struct net_device *netdev, int size)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	/* if there are enough descriptors then we don't need to worry */
	if (igbvf_desc_unused(adapter->tx_ring) >= size)
		return 0;

	netif_stop_queue(netdev);

	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it.
	 */
	smp_mb();

	/* We need to check again just in case room has been made available */
	if (igbvf_desc_unused(adapter->tx_ring) < size)
		return -EBUSY;

	netif_wake_queue(netdev);

	++adapter->restart_queue;
	return 0;
}
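
/* The stop/recheck/wake sequence above closes a race with the completion
 * path: if igbvf_clean_tx_irq() frees descriptors between the first space
 * check and netif_stop_queue(), the second check notices and immediately
 * wakes the queue instead of stalling until the next Tx interrupt.
 */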
#define IGBVF_MAX_TXD_PWR	16
#define IGBVF_MAX_DATA_PER_TXD	(1 << IGBVF_MAX_TXD_PWR)

static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
				   struct igbvf_ring *tx_ring,
				   struct sk_buff *skb)
{
	struct igbvf_buffer *buffer_info;
	struct pci_dev *pdev = adapter->pdev;
	unsigned int len = skb_headlen(skb);
	unsigned int count = 0, i;
	unsigned int f;

	i = tx_ring->next_to_use;

	buffer_info = &tx_ring->buffer_info[i];
	BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD);
	buffer_info->length = len;
	/* set time_stamp *before* dma to help avoid a possible race */
	buffer_info->time_stamp = jiffies;
	buffer_info->mapped_as_page = false;
	buffer_info->dma = dma_map_single(&pdev->dev, skb->data, len,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, buffer_info->dma))
		goto dma_error;

	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
		const struct skb_frag_struct *frag;

		count++;
		i++;
		if (i == tx_ring->count)
			i = 0;

		frag = &skb_shinfo(skb)->frags[f];
		len = skb_frag_size(frag);

		buffer_info = &tx_ring->buffer_info[i];
		BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD);
		buffer_info->length = len;
		buffer_info->time_stamp = jiffies;
		buffer_info->mapped_as_page = true;
		buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, 0, len,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma))
			goto dma_error;
	}

	tx_ring->buffer_info[i].skb = skb;

	return ++count;

dma_error:
	dev_err(&pdev->dev, "TX DMA map failed\n");

	/* clear timestamp and dma mappings for failed buffer_info mapping */
	buffer_info->dma = 0;
	buffer_info->time_stamp = 0;
	buffer_info->length = 0;
	buffer_info->mapped_as_page = false;
	if (count)
		count--;

	/* clear timestamp and dma mappings for remaining portion of packet */
	while (count--) {
		if (i == 0)
			i += tx_ring->count;
		i--;
		buffer_info = &tx_ring->buffer_info[i];
		igbvf_put_txbuf(adapter, buffer_info);
	}

	return 0;
}
*adapter
,
2152 struct igbvf_ring
*tx_ring
,
2153 int tx_flags
, int count
,
2154 unsigned int first
, u32 paylen
,
2157 union e1000_adv_tx_desc
*tx_desc
= NULL
;
2158 struct igbvf_buffer
*buffer_info
;
2159 u32 olinfo_status
= 0, cmd_type_len
;
2162 cmd_type_len
= (E1000_ADVTXD_DTYP_DATA
| E1000_ADVTXD_DCMD_IFCS
|
2163 E1000_ADVTXD_DCMD_DEXT
);
2165 if (tx_flags
& IGBVF_TX_FLAGS_VLAN
)
2166 cmd_type_len
|= E1000_ADVTXD_DCMD_VLE
;
2168 if (tx_flags
& IGBVF_TX_FLAGS_TSO
) {
2169 cmd_type_len
|= E1000_ADVTXD_DCMD_TSE
;
2171 /* insert tcp checksum */
2172 olinfo_status
|= E1000_TXD_POPTS_TXSM
<< 8;
2174 /* insert ip checksum */
2175 if (tx_flags
& IGBVF_TX_FLAGS_IPV4
)
2176 olinfo_status
|= E1000_TXD_POPTS_IXSM
<< 8;
2178 } else if (tx_flags
& IGBVF_TX_FLAGS_CSUM
) {
2179 olinfo_status
|= E1000_TXD_POPTS_TXSM
<< 8;
2182 olinfo_status
|= ((paylen
- hdr_len
) << E1000_ADVTXD_PAYLEN_SHIFT
);
2184 i
= tx_ring
->next_to_use
;
2186 buffer_info
= &tx_ring
->buffer_info
[i
];
2187 tx_desc
= IGBVF_TX_DESC_ADV(*tx_ring
, i
);
2188 tx_desc
->read
.buffer_addr
= cpu_to_le64(buffer_info
->dma
);
2189 tx_desc
->read
.cmd_type_len
=
2190 cpu_to_le32(cmd_type_len
| buffer_info
->length
);
2191 tx_desc
->read
.olinfo_status
= cpu_to_le32(olinfo_status
);
2193 if (i
== tx_ring
->count
)
2197 tx_desc
->read
.cmd_type_len
|= cpu_to_le32(adapter
->txd_cmd
);
2198 /* Force memory writes to complete before letting h/w
2199 * know there are new descriptors to fetch. (Only
2200 * applicable for weak-ordered memory model archs,
2205 tx_ring
->buffer_info
[first
].next_to_watch
= tx_desc
;
2206 tx_ring
->next_to_use
= i
;
2207 writel(i
, adapter
->hw
.hw_addr
+ tx_ring
->tail
);
2208 /* we need this if more than one processor can write to our tail
2209 * at a time, it synchronizes IO on IA64/Altix systems
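
/**
 * igbvf_xmit_frame_ring_adv - transmit a frame on a specific Tx ring
 * @skb: frame to transmit
 * @netdev: network interface device structure
 * @tx_ring: ring the frame should be placed on
 *
 * Performs the TSO/checksum offload setup, maps the buffers and hands the
 * descriptors to hardware; rewinds the ring if the DMA mapping fails.
 **/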
static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
					     struct net_device *netdev,
					     struct igbvf_ring *tx_ring)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	unsigned int first, tx_flags = 0;
	u8 hdr_len = 0;
	int count = 0;
	int tso = 0;
	__be16 protocol = vlan_get_protocol(skb);

	if (test_bit(__IGBVF_DOWN, &adapter->state)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* need: count + 4 desc gap to keep tail from touching head:
	 *       + 2 desc gap to keep tail from touching head,
	 *       + 1 desc for skb->data,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
	if (igbvf_maybe_stop_tx(netdev, skb_shinfo(skb)->nr_frags + 4)) {
		/* this is a hard error */
		return NETDEV_TX_BUSY;
	}

	if (skb_vlan_tag_present(skb)) {
		tx_flags |= IGBVF_TX_FLAGS_VLAN;
		tx_flags |= (skb_vlan_tag_get(skb) <<
			     IGBVF_TX_FLAGS_VLAN_SHIFT);
	}

	if (protocol == htons(ETH_P_IP))
		tx_flags |= IGBVF_TX_FLAGS_IPV4;

	first = tx_ring->next_to_use;

	tso = skb_is_gso(skb) ?
	      igbvf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len, protocol) : 0;
	if (unlikely(tso < 0)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (tso)
		tx_flags |= IGBVF_TX_FLAGS_TSO;
	else if (igbvf_tx_csum(adapter, tx_ring, skb, tx_flags, protocol) &&
		 (skb->ip_summed == CHECKSUM_PARTIAL))
		tx_flags |= IGBVF_TX_FLAGS_CSUM;

	/* count reflects descriptors mapped, if 0 then mapping error
	 * has occurred and we need to rewind the descriptor queue
	 */
	count = igbvf_tx_map_adv(adapter, tx_ring, skb);

	if (count) {
		igbvf_tx_queue_adv(adapter, tx_ring, tx_flags, count,
				   first, skb->len, hdr_len);
		/* Make sure there is space in the ring for the next send. */
		igbvf_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 4);
	} else {
		dev_kfree_skb_any(skb);
		tx_ring->buffer_info[first].time_stamp = 0;
		tx_ring->next_to_use = first;
	}

	return NETDEV_TX_OK;
}
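
/**
 * igbvf_xmit_frame - ndo_start_xmit entry point
 * @skb: frame to transmit
 * @netdev: network interface device structure
 *
 * The VF uses a single Tx queue, so simply hand the frame to ring 0.
 **/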
static netdev_tx_t igbvf_xmit_frame(struct sk_buff *skb,
				    struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct igbvf_ring *tx_ring;

	if (test_bit(__IGBVF_DOWN, &adapter->state)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	tx_ring = &adapter->tx_ring[0];

	return igbvf_xmit_frame_ring_adv(skb, netdev, tx_ring);
}
/**
 * igbvf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void igbvf_tx_timeout(struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	adapter->tx_timeout_count++;
	schedule_work(&adapter->reset_task);
}
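
/**
 * igbvf_reset_task - reset the adapter outside of interrupt context
 * @work: work_struct embedded in the adapter private structure
 **/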
static void igbvf_reset_task(struct work_struct *work)
{
	struct igbvf_adapter *adapter;

	adapter = container_of(work, struct igbvf_adapter, reset_task);

	igbvf_reinit_locked(adapter);
}
/**
 * igbvf_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the timer callback.
 **/
static struct net_device_stats *igbvf_get_stats(struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	/* only return the current stats */
	return &adapter->net_stats;
}
/**
 * igbvf_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int igbvf_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;

	if (new_mtu < 68 || new_mtu > INT_MAX - ETH_HLEN - ETH_FCS_LEN ||
	    max_frame > MAX_JUMBO_FRAME_SIZE)
		return -EINVAL;

#define MAX_STD_JUMBO_FRAME_SIZE 9234
	if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
		dev_err(&adapter->pdev->dev, "MTU > 9216 not supported.\n");
		return -EINVAL;
	}

	while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
		usleep_range(1000, 2000);
	/* igbvf_down has a dependency on max_frame_size */
	adapter->max_frame_size = max_frame;
	if (netif_running(netdev))
		igbvf_down(adapter);

	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
	 * means we reserve 2 more, this pushes us to allocate from the next
	 * larger slab size.
	 * i.e. RXBUFFER_2048 --> size-4096 slab
	 * However with the new *_jumbo_rx* routines, jumbo receives will use
	 * fragmented skbs
	 */

	if (max_frame <= 1024)
		adapter->rx_buffer_len = 1024;
	else if (max_frame <= 2048)
		adapter->rx_buffer_len = 2048;
	else
#if (PAGE_SIZE / 2) > 16384
		adapter->rx_buffer_len = 16384;
#else
		adapter->rx_buffer_len = PAGE_SIZE / 2;
#endif

	/* adjust allocation if LPE protects us, and we aren't using SBP */
	if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
	    (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
		adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN +
					 ETH_FCS_LEN;

	dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		igbvf_up(adapter);
	else
		igbvf_reset(adapter);

	clear_bit(__IGBVF_RESETTING, &adapter->state);

	return 0;
}
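
/**
 * igbvf_ioctl - handle SIOC* device ioctls
 * @netdev: network interface device structure
 * @ifr: interface request structure
 * @cmd: ioctl command
 *
 * No private ioctls are supported by the VF driver.
 **/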
static int igbvf_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	return -EOPNOTSUPP;
}
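
/**
 * igbvf_suspend - PCI power management suspend hook
 * @pdev: PCI device information struct
 * @state: power state being entered
 *
 * Detaches the interface, tears down interrupts if the device was running,
 * and saves PCI state before the device is powered down.
 **/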
static int igbvf_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	int retval;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		WARN_ON(test_bit(__IGBVF_RESETTING, &adapter->state));
		igbvf_down(adapter);
		igbvf_free_irq(adapter);
	}

	retval = pci_save_state(pdev);
	if (retval)
		return retval;

	pci_disable_device(pdev);

	return 0;
}
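
/**
 * igbvf_resume - PCI power management resume hook
 * @pdev: PCI device information struct
 *
 * Restores PCI state, re-enables the device and brings the interface back
 * up if it was running before suspend.
 **/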
static int igbvf_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	int err;

	pci_restore_state(pdev);
	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
		return err;
	}

	pci_set_master(pdev);

	if (netif_running(netdev)) {
		err = igbvf_request_irq(adapter);
		if (err)
			return err;
	}

	igbvf_reset(adapter);

	if (netif_running(netdev))
		igbvf_up(adapter);

	netif_device_attach(netdev);

	return 0;
}
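
/**
 * igbvf_shutdown - called on system shutdown or reboot
 * @pdev: PCI device information struct
 **/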
static void igbvf_shutdown(struct pci_dev *pdev)
{
	igbvf_suspend(pdev, PMSG_SUSPEND);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void igbvf_netpoll(struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	disable_irq(adapter->pdev->irq);

	igbvf_clean_tx_irq(adapter->tx_ring);

	enable_irq(adapter->pdev->irq);
}
#endif
/**
 * igbvf_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 **/
static pci_ers_result_t igbvf_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		igbvf_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * igbvf_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the igbvf_resume routine.
 **/
static pci_ers_result_t igbvf_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	igbvf_reset(adapter);

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * igbvf_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the igbvf_resume routine.
 **/
static void igbvf_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (igbvf_up(adapter)) {
			dev_err(&pdev->dev,
				"can't bring device back up after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);
}
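
/**
 * igbvf_print_device_info - log the VF type and MAC address at probe time
 * @adapter: board private structure
 **/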
static void igbvf_print_device_info(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	if (hw->mac.type == e1000_vfadapt_i350)
		dev_info(&pdev->dev, "Intel(R) I350 Virtual Function\n");
	else
		dev_info(&pdev->dev, "Intel(R) 82576 Virtual Function\n");
	dev_info(&pdev->dev, "Address: %pM\n", netdev->dev_addr);
}
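
/**
 * igbvf_set_features - update the Rx checksum flag from netdev features
 * @netdev: network interface device structure
 * @features: new feature set requested by the stack
 **/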
static int igbvf_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	if (features & NETIF_F_RXCSUM)
		adapter->flags &= ~IGBVF_FLAG_RX_CSUM_DISABLED;
	else
		adapter->flags |= IGBVF_FLAG_RX_CSUM_DISABLED;

	return 0;
}
static const struct net_device_ops igbvf_netdev_ops = {
	.ndo_open		= igbvf_open,
	.ndo_stop		= igbvf_close,
	.ndo_start_xmit		= igbvf_xmit_frame,
	.ndo_get_stats		= igbvf_get_stats,
	.ndo_set_rx_mode	= igbvf_set_multi,
	.ndo_set_mac_address	= igbvf_set_mac,
	.ndo_change_mtu		= igbvf_change_mtu,
	.ndo_do_ioctl		= igbvf_ioctl,
	.ndo_tx_timeout		= igbvf_tx_timeout,
	.ndo_vlan_rx_add_vid	= igbvf_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= igbvf_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= igbvf_netpoll,
#endif
	.ndo_set_features	= igbvf_set_features,
};
/**
 * igbvf_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in igbvf_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * igbvf_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct igbvf_adapter *adapter;
	struct e1000_hw *hw;
	const struct igbvf_info *ei = igbvf_info_tbl[ent->driver_data];

	static int cards_found;
	int err, pci_using_dac;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	pci_using_dac = 0;
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!err) {
		pci_using_dac = 1;
	} else {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_dma;
		}
	}

	err = pci_request_regions(pdev, igbvf_driver_name);
	if (err)
		goto err_pci_reg;

	pci_set_master(pdev);

	err = -ENOMEM;
	netdev = alloc_etherdev(sizeof(struct igbvf_adapter));
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	hw = &adapter->hw;
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->ei = ei;
	adapter->pba = ei->pba;
	adapter->flags = ei->flags;
	adapter->hw.back = adapter;
	adapter->hw.mac.type = ei->mac;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

	/* PCI config space info */

	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;
	hw->revision_id = pdev->revision;

	err = -EIO;
	adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0),
				      pci_resource_len(pdev, 0));

	if (!adapter->hw.hw_addr)
		goto err_ioremap;

	if (ei->get_variants) {
		err = ei->get_variants(adapter);
		if (err)
			goto err_get_variants;
	}

	/* setup adapter struct */
	err = igbvf_sw_init(adapter);
	if (err)
		goto err_sw_init;

	/* construct the net_device struct */
	netdev->netdev_ops = &igbvf_netdev_ops;

	igbvf_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	adapter->bd_number = cards_found++;

	netdev->hw_features = NETIF_F_SG |
			      NETIF_F_IP_CSUM |
			      NETIF_F_IPV6_CSUM |
			      NETIF_F_TSO |
			      NETIF_F_TSO6 |
			      NETIF_F_RXCSUM;

	netdev->features = netdev->hw_features |
			   NETIF_F_HW_VLAN_CTAG_TX |
			   NETIF_F_HW_VLAN_CTAG_RX |
			   NETIF_F_HW_VLAN_CTAG_FILTER;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	/* reset the controller to put the device in a known good state */
	err = hw->mac.ops.reset_hw(hw);
	if (err)
		dev_info(&pdev->dev,
			 "PF still in reset state. Is the PF interface up?\n");

	err = hw->mac.ops.read_mac_addr(hw);
	if (err)
		dev_info(&pdev->dev, "Error reading MAC address.\n");
	else if (is_zero_ether_addr(adapter->hw.mac.addr))
		dev_info(&pdev->dev,
			 "MAC address not assigned by administrator.\n");
	memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		dev_info(&pdev->dev, "Assigning random MAC address.\n");
		eth_hw_addr_random(netdev);
		memcpy(adapter->hw.mac.addr, netdev->dev_addr,
		       netdev->addr_len);
	}

	setup_timer(&adapter->watchdog_timer, &igbvf_watchdog,
		    (unsigned long)adapter);

	INIT_WORK(&adapter->reset_task, igbvf_reset_task);
	INIT_WORK(&adapter->watchdog_task, igbvf_watchdog_task);

	/* ring size defaults */
	adapter->rx_ring->count = 1024;
	adapter->tx_ring->count = 1024;

	/* reset the hardware with the new settings */
	igbvf_reset(adapter);

	/* set hardware-specific flags */
	if (adapter->hw.mac.type == e1000_vfadapt_i350)
		adapter->flags |= IGBVF_FLAG_RX_LB_VLAN_BSWAP;

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_hw_init;

	/* tell the stack to leave us alone until igbvf_open() is called */
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	igbvf_print_device_info(adapter);

	igbvf_initialize_last_counter_stats(adapter);

	return 0;

err_hw_init:
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
err_sw_init:
	igbvf_reset_interrupt_capability(adapter);
err_get_variants:
	iounmap(adapter->hw.hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
/**
 * igbvf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * igbvf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void igbvf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/* The watchdog timer may be rescheduled, so explicitly
	 * disable it from being rescheduled.
	 */
	set_bit(__IGBVF_DOWN, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);

	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);

	unregister_netdev(netdev);

	igbvf_reset_interrupt_capability(adapter);

	/* it is important to delete the NAPI struct prior to freeing the
	 * Rx ring so that you do not end up with null pointer refs
	 */
	netif_napi_del(&adapter->rx_ring->napi);
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	iounmap(hw->hw_addr);
	if (hw->flash_address)
		iounmap(hw->flash_address);
	pci_release_regions(pdev);

	free_netdev(netdev);

	pci_disable_device(pdev);
}
/* PCI Error Recovery (ERS) */
static const struct pci_error_handlers igbvf_err_handler = {
	.error_detected = igbvf_io_error_detected,
	.slot_reset = igbvf_io_slot_reset,
	.resume = igbvf_io_resume,
};

static const struct pci_device_id igbvf_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_VF), board_vf },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_VF), board_i350_vf },
	{ } /* terminate list */
};
MODULE_DEVICE_TABLE(pci, igbvf_pci_tbl);

/* PCI Device API Driver */
static struct pci_driver igbvf_driver = {
	.name		= igbvf_driver_name,
	.id_table	= igbvf_pci_tbl,
	.probe		= igbvf_probe,
	.remove		= igbvf_remove,
	/* Power Management Hooks */
	.suspend	= igbvf_suspend,
	.resume		= igbvf_resume,
	.shutdown	= igbvf_shutdown,
	.err_handler	= &igbvf_err_handler
};
/**
 * igbvf_init_module - Driver Registration Routine
 *
 * igbvf_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init igbvf_init_module(void)
{
	int ret;

	pr_info("%s - version %s\n", igbvf_driver_string, igbvf_driver_version);
	pr_info("%s\n", igbvf_copyright);

	ret = pci_register_driver(&igbvf_driver);

	return ret;
}
module_init(igbvf_init_module);
/**
 * igbvf_exit_module - Driver Exit Cleanup Routine
 *
 * igbvf_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit igbvf_exit_module(void)
{
	pci_unregister_driver(&igbvf_driver);
}
module_exit(igbvf_exit_module);

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Virtual Function Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);