/*******************************************************************************

  Intel(R) 82576 Virtual Function Linux driver
  Copyright(c) 2009 - 2010 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>

#include "igbvf.h"
#define DRV_VERSION "1.0.8-k0"
char igbvf_driver_name[] = "igbvf";
const char igbvf_driver_version[] = DRV_VERSION;
static const char igbvf_driver_string[] =
		"Intel(R) Virtual Function Network Driver";
static const char igbvf_copyright[] =
		"Copyright (c) 2009 - 2010 Intel Corporation.";
static int igbvf_poll(struct napi_struct *napi, int budget);
static void igbvf_reset(struct igbvf_adapter *);
static void igbvf_set_interrupt_capability(struct igbvf_adapter *);
static void igbvf_reset_interrupt_capability(struct igbvf_adapter *);
static struct igbvf_info igbvf_vf_info = {
	.init_ops	= e1000_init_function_pointers_vf,
};

static const struct igbvf_info *igbvf_info_tbl[] = {
	[board_vf]	= &igbvf_vf_info,
};
/**
 * igbvf_desc_unused - calculate if we have unused descriptors
 **/
static int igbvf_desc_unused(struct igbvf_ring *ring)
{
	if (ring->next_to_clean > ring->next_to_use)
		return ring->next_to_clean - ring->next_to_use - 1;

	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
}
/**
 * igbvf_receive_skb - helper function to handle Rx indications
 * @adapter: board private structure
 * @status: descriptor status field as written by hardware
 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
 * @skb: pointer to sk_buff to be indicated to stack
 **/
static void igbvf_receive_skb(struct igbvf_adapter *adapter,
                              struct net_device *netdev,
                              struct sk_buff *skb,
                              u32 status, u16 vlan)
{
	if (adapter->vlgrp && (status & E1000_RXD_STAT_VP))
		vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
		                         le16_to_cpu(vlan) &
		                         E1000_RXD_SPC_VLAN_MASK);
	else
		netif_receive_skb(skb);
}
static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter,
                                         u32 status_err, struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Ignore Checksum bit is set or checksum is disabled through ethtool */
	if ((status_err & E1000_RXD_STAT_IXSM) ||
	    (adapter->flags & IGBVF_FLAG_RX_CSUM_DISABLED))
		return;

	/* TCP/UDP checksum error bit is set */
	if (status_err &
	    (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
		/* let the stack verify checksum errors */
		adapter->hw_csum_err++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	adapter->hw_csum_good++;
}
/**
 * igbvf_alloc_rx_buffers - Replace used receive buffers; packet split
 * @rx_ring: address of ring structure to repopulate
 * @cleaned_count: number of buffers to repopulate
 **/
static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
                                   int cleaned_count)
{
	struct igbvf_adapter *adapter = rx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_adv_rx_desc *rx_desc;
	struct igbvf_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	int bufsz;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	if (adapter->rx_ps_hdr_size)
		bufsz = adapter->rx_ps_hdr_size;
	else
		bufsz = adapter->rx_buffer_len;

	while (cleaned_count--) {
		rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i);

		if (adapter->rx_ps_hdr_size && !buffer_info->page_dma) {
			if (!buffer_info->page) {
				buffer_info->page = alloc_page(GFP_ATOMIC);
				if (!buffer_info->page) {
					adapter->alloc_rx_buff_failed++;
					goto no_buffers;
				}
				buffer_info->page_offset = 0;
			} else {
				buffer_info->page_offset ^= PAGE_SIZE / 2;
			}
			buffer_info->page_dma =
				dma_map_page(&pdev->dev, buffer_info->page,
				             buffer_info->page_offset,
				             PAGE_SIZE / 2,
				             DMA_FROM_DEVICE);
		}

		if (!buffer_info->skb) {
			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
			if (!skb) {
				adapter->alloc_rx_buff_failed++;
				goto no_buffers;
			}

			buffer_info->skb = skb;
			buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
			                                  bufsz,
			                                  DMA_FROM_DEVICE);
		}
		/* Refresh the desc even if buffer_addrs didn't change because
		 * each write-back erases this info. */
		if (adapter->rx_ps_hdr_size) {
			rx_desc->read.pkt_addr =
			     cpu_to_le64(buffer_info->page_dma);
			rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
		} else {
			rx_desc->read.pkt_addr =
			     cpu_to_le64(buffer_info->dma);
			rx_desc->read.hdr_addr = 0;
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		if (i == 0)
			i = (rx_ring->count - 1);
		else
			i--;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64). */
		wmb();
		writel(i, adapter->hw.hw_addr + rx_ring->tail);
	}
}
/**
 * igbvf_clean_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter,
                               int *work_done, int work_to_do)
{
	struct igbvf_ring *rx_ring = adapter->rx_ring;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_adv_rx_desc *rx_desc, *next_rxd;
	struct igbvf_buffer *buffer_info, *next_buffer;
	struct sk_buff *skb;
	bool cleaned = false;
	int cleaned_count = 0;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int i;
	u32 length, hlen, staterr;

	i = rx_ring->next_to_clean;
	rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

	while (staterr & E1000_RXD_STAT_DD) {
		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		rmb(); /* read descriptor and rx_buffer_info after status DD */

		buffer_info = &rx_ring->buffer_info[i];

		/* HW will not DMA in data larger than the given buffer, even
		 * if it parses the (NFS, of course) header to be larger.  In
		 * that case, it fills the header buffer and spills the rest
		 * into the page.
		 */
		hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info) &
		  E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
		if (hlen > adapter->rx_ps_hdr_size)
			hlen = adapter->rx_ps_hdr_size;

		length = le16_to_cpu(rx_desc->wb.upper.length);
		cleaned = true;
		cleaned_count++;

		skb = buffer_info->skb;
		prefetch(skb->data - NET_IP_ALIGN);
		buffer_info->skb = NULL;
		if (!adapter->rx_ps_hdr_size) {
			dma_unmap_single(&pdev->dev, buffer_info->dma,
			                 adapter->rx_buffer_len,
			                 DMA_FROM_DEVICE);
			buffer_info->dma = 0;
			skb_put(skb, length);
			goto send_up;
		}

		if (!skb_shinfo(skb)->nr_frags) {
			dma_unmap_single(&pdev->dev, buffer_info->dma,
			                 adapter->rx_ps_hdr_size,
			                 DMA_FROM_DEVICE);
			skb_put(skb, hlen);
		}

		if (length) {
			dma_unmap_page(&pdev->dev, buffer_info->page_dma,
			               PAGE_SIZE / 2,
			               DMA_FROM_DEVICE);
			buffer_info->page_dma = 0;

			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
			                   buffer_info->page,
			                   buffer_info->page_offset,
			                   length);

			if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) ||
			    (page_count(buffer_info->page) != 1))
				buffer_info->page = NULL;
			else
				get_page(buffer_info->page);

			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
		}
send_up:
		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = IGBVF_RX_DESC_ADV(*rx_ring, i);
		prefetch(next_rxd);
		next_buffer = &rx_ring->buffer_info[i];

		if (!(staterr & E1000_RXD_STAT_EOP)) {
			buffer_info->skb = next_buffer->skb;
			buffer_info->dma = next_buffer->dma;
			next_buffer->skb = skb;
			next_buffer->dma = 0;
			goto next_desc;
		}

		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		total_bytes += skb->len;
		total_packets++;

		igbvf_rx_checksum_adv(adapter, staterr, skb);

		skb->protocol = eth_type_trans(skb, netdev);

		igbvf_receive_skb(adapter, netdev, skb, staterr,
		                  rx_desc->wb.upper.vlan);

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IGBVF_RX_BUFFER_WRITE) {
			igbvf_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = igbvf_desc_unused(rx_ring);

	if (cleaned_count)
		igbvf_alloc_rx_buffers(rx_ring, cleaned_count);

	adapter->total_rx_packets += total_packets;
	adapter->total_rx_bytes += total_bytes;
	adapter->net_stats.rx_bytes += total_bytes;
	adapter->net_stats.rx_packets += total_packets;

	return cleaned;
}
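
/**
 * igbvf_put_txbuf - release a transmit buffer
 *
 * Unmaps the DMA mapping (page or single mapping, depending on how the
 * buffer was mapped) and frees any sk_buff still attached to the
 * buffer_info entry.
 **/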
static void igbvf_put_txbuf(struct igbvf_adapter *adapter,
                            struct igbvf_buffer *buffer_info)
{
	if (buffer_info->dma) {
		if (buffer_info->mapped_as_page)
			dma_unmap_page(&adapter->pdev->dev,
			               buffer_info->dma,
			               buffer_info->length,
			               DMA_TO_DEVICE);
		else
			dma_unmap_single(&adapter->pdev->dev,
			                 buffer_info->dma,
			                 buffer_info->length,
			                 DMA_TO_DEVICE);
		buffer_info->dma = 0;
	}
	if (buffer_info->skb) {
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
	buffer_info->time_stamp = 0;
}
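
/**
 * igbvf_print_tx_hang - dump Tx ring state when a transmit hang is detected
 *
 * Logs the head/tail registers, next_to_use/next_to_clean indices and the
 * descriptor being watched so the hang can be diagnosed from the system log.
 **/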
static void igbvf_print_tx_hang(struct igbvf_adapter *adapter)
{
	struct igbvf_ring *tx_ring = adapter->tx_ring;
	unsigned int i = tx_ring->next_to_clean;
	unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
	union e1000_adv_tx_desc *eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop);

	/* detected Tx unit hang */
	dev_err(&adapter->pdev->dev,
	        "Detected Tx Unit Hang:\n"
	        "  TDH                  <%x>\n"
	        "  TDT                  <%x>\n"
	        "  next_to_use          <%x>\n"
	        "  next_to_clean        <%x>\n"
	        "buffer_info[next_to_clean]:\n"
	        "  time_stamp           <%lx>\n"
	        "  next_to_watch        <%x>\n"
	        "  jiffies              <%lx>\n"
	        "  next_to_watch.status <%x>\n",
	        readl(adapter->hw.hw_addr + tx_ring->head),
	        readl(adapter->hw.hw_addr + tx_ring->tail),
	        tx_ring->next_to_use,
	        tx_ring->next_to_clean,
	        tx_ring->buffer_info[eop].time_stamp,
	        eop,
	        jiffies,
	        eop_desc->wb.status);
}
/**
 * igbvf_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
int igbvf_setup_tx_resources(struct igbvf_adapter *adapter,
                             struct igbvf_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct igbvf_buffer) * tx_ring->count;
	tx_ring->buffer_info = vzalloc(size);
	if (!tx_ring->buffer_info)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
	                                   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc)
		goto err;

	tx_ring->adapter = adapter;
	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	return 0;
err:
	vfree(tx_ring->buffer_info);
	dev_err(&adapter->pdev->dev,
	        "Unable to allocate memory for the transmit descriptor ring\n");
	return -ENOMEM;
}
/**
 * igbvf_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
int igbvf_setup_rx_resources(struct igbvf_adapter *adapter,
                             struct igbvf_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size, desc_len;

	size = sizeof(struct igbvf_buffer) * rx_ring->count;
	rx_ring->buffer_info = vzalloc(size);
	if (!rx_ring->buffer_info)
		goto err;

	desc_len = sizeof(union e1000_adv_rx_desc);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * desc_len;
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
	                                   &rx_ring->dma, GFP_KERNEL);
	if (!rx_ring->desc)
		goto err;

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	rx_ring->adapter = adapter;

	return 0;

err:
	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;
	dev_err(&adapter->pdev->dev,
	        "Unable to allocate memory for the receive descriptor ring\n");
	return -ENOMEM;
}
/**
 * igbvf_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/
static void igbvf_clean_tx_ring(struct igbvf_ring *tx_ring)
{
	struct igbvf_adapter *adapter = tx_ring->adapter;
	struct igbvf_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	if (!tx_ring->buffer_info)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		igbvf_put_txbuf(adapter, buffer_info);
	}

	size = sizeof(struct igbvf_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	writel(0, adapter->hw.hw_addr + tx_ring->head);
	writel(0, adapter->hw.hw_addr + tx_ring->tail);
}
/**
 * igbvf_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: ring to free resources from
 *
 * Free all transmit software resources
 **/
void igbvf_free_tx_resources(struct igbvf_ring *tx_ring)
{
	struct pci_dev *pdev = tx_ring->adapter->pdev;

	igbvf_clean_tx_ring(tx_ring);

	vfree(tx_ring->buffer_info);
	tx_ring->buffer_info = NULL;

	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
	                  tx_ring->dma);

	tx_ring->desc = NULL;
}
/**
 * igbvf_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring structure pointer to free buffers from
 **/
static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring)
{
	struct igbvf_adapter *adapter = rx_ring->adapter;
	struct igbvf_buffer *buffer_info;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	if (!rx_ring->buffer_info)
		return;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if (buffer_info->dma) {
			if (adapter->rx_ps_hdr_size) {
				dma_unmap_single(&pdev->dev, buffer_info->dma,
				                 adapter->rx_ps_hdr_size,
				                 DMA_FROM_DEVICE);
			} else {
				dma_unmap_single(&pdev->dev, buffer_info->dma,
				                 adapter->rx_buffer_len,
				                 DMA_FROM_DEVICE);
			}
			buffer_info->dma = 0;
		}

		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}

		if (buffer_info->page) {
			if (buffer_info->page_dma)
				dma_unmap_page(&pdev->dev,
				               buffer_info->page_dma,
				               PAGE_SIZE / 2,
				               DMA_FROM_DEVICE);
			put_page(buffer_info->page);
			buffer_info->page = NULL;
			buffer_info->page_dma = 0;
			buffer_info->page_offset = 0;
		}
	}

	size = sizeof(struct igbvf_buffer) * rx_ring->count;
	memset(rx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	writel(0, adapter->hw.hw_addr + rx_ring->head);
	writel(0, adapter->hw.hw_addr + rx_ring->tail);
}
/**
 * igbvf_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void igbvf_free_rx_resources(struct igbvf_ring *rx_ring)
{
	struct pci_dev *pdev = rx_ring->adapter->pdev;

	igbvf_clean_rx_ring(rx_ring);

	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;

	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
	                  rx_ring->dma);
	rx_ring->desc = NULL;
}
/**
 * igbvf_update_itr - update the dynamic ITR value based on statistics
 * @adapter: pointer to adapter
 * @itr_setting: current adapter->itr
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 *
 *      Stores a new ITR value based on packets and byte
 *      counts during the last interrupt.  The advantage of per interrupt
 *      computation is faster updates and more accurate ITR for the current
 *      traffic pattern.  Constants in this function were computed
 *      based on theoretical maximum wire speed and thresholds were set based
 *      on testing data as well as attempting to minimize response time
 *      while increasing bulk throughput.  This functionality is controlled
 *      by the InterruptThrottleRate module parameter.
 **/
static unsigned int igbvf_update_itr(struct igbvf_adapter *adapter,
                                     u16 itr_setting, int packets,
                                     int bytes)
{
	unsigned int retval = itr_setting;

	if (packets == 0)
		goto update_itr_done;

	switch (itr_setting) {
	case lowest_latency:
		/* handle TSO and jumbo frames */
		if (bytes/packets > 8000)
			retval = bulk_latency;
		else if ((packets < 5) && (bytes > 512))
			retval = low_latency;
		break;
	case low_latency:  /* 50 usec aka 20000 ints/s */
		if (bytes > 10000) {
			/* this if handles the TSO accounting */
			if (bytes/packets > 8000)
				retval = bulk_latency;
			else if ((packets < 10) || ((bytes/packets) > 1200))
				retval = bulk_latency;
			else if ((packets > 35))
				retval = lowest_latency;
		} else if (bytes/packets > 2000) {
			retval = bulk_latency;
		} else if (packets <= 2 && bytes < 512) {
			retval = lowest_latency;
		}
		break;
	case bulk_latency: /* 250 usec aka 4000 ints/s */
		if (bytes > 25000) {
			if (packets > 35)
				retval = low_latency;
		} else if (bytes < 6000) {
			retval = low_latency;
		}
		break;
	}

update_itr_done:
	return retval;
}
static void igbvf_set_itr(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 current_itr;
	u32 new_itr = adapter->itr;

	adapter->tx_itr = igbvf_update_itr(adapter, adapter->tx_itr,
	                                   adapter->total_tx_packets,
	                                   adapter->total_tx_bytes);
	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
		adapter->tx_itr = low_latency;

	adapter->rx_itr = igbvf_update_itr(adapter, adapter->rx_itr,
	                                   adapter->total_rx_packets,
	                                   adapter->total_rx_bytes);
	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
		adapter->rx_itr = low_latency;

	current_itr = max(adapter->rx_itr, adapter->tx_itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = 70000;
		break;
	case low_latency:
		new_itr = 20000; /* aka hwitr = ~200 */
		break;
	case bulk_latency:
		new_itr = 4000;
		break;
	default:
		break;
	}

	if (new_itr != adapter->itr) {
		/*
		 * this attempts to bias the interrupt rate towards Bulk
		 * by adding intermediate steps when interrupt rate is
		 * increasing
		 */
		new_itr = new_itr > adapter->itr ?
		             min(adapter->itr + (new_itr >> 2), new_itr) :
		             new_itr;
		adapter->itr = new_itr;
		adapter->rx_ring->itr_val = 1952;

		if (adapter->msix_entries)
			adapter->rx_ring->set_itr = 1;
		else
			ew32(ITR, 1952);
	}
}
/**
 * igbvf_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 * returns true if ring is completely cleaned
 **/
static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
{
	struct igbvf_adapter *adapter = tx_ring->adapter;
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct igbvf_buffer *buffer_info;
	struct sk_buff *skb;
	union e1000_adv_tx_desc *tx_desc, *eop_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int i, eop, count = 0;
	bool cleaned = false;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop);

	while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
	       (count < tx_ring->count)) {
		rmb();	/* read buffer_info after eop_desc status */
		for (cleaned = false; !cleaned; count++) {
			tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			cleaned = (i == eop);
			skb = buffer_info->skb;

			if (skb) {
				unsigned int segs, bytecount;

				/* gso_segs is currently only valid for tcp */
				segs = skb_shinfo(skb)->gso_segs ?: 1;
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * skb_headlen(skb)) +
				            skb->len;
				total_packets += segs;
				total_bytes += bytecount;
			}

			igbvf_put_txbuf(adapter, buffer_info);
			tx_desc->wb.status = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}
		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

	if (unlikely(count &&
	             netif_carrier_ok(netdev) &&
	             igbvf_desc_unused(tx_ring) >= IGBVF_TX_QUEUE_WAKE)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (netif_queue_stopped(netdev) &&
		    !(test_bit(__IGBVF_DOWN, &adapter->state))) {
			netif_wake_queue(netdev);
			++adapter->restart_queue;
		}
	}

	if (adapter->detect_tx_hung) {
		/* Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i */
		adapter->detect_tx_hung = false;
		if (tx_ring->buffer_info[i].time_stamp &&
		    time_after(jiffies, tx_ring->buffer_info[i].time_stamp +
		               (adapter->tx_timeout_factor * HZ)) &&
		    !(er32(STATUS) & E1000_STATUS_TXOFF)) {

			tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
			/* detected Tx unit hang */
			igbvf_print_tx_hang(adapter);

			netif_stop_queue(netdev);
		}
	}
	adapter->net_stats.tx_bytes += total_bytes;
	adapter->net_stats.tx_packets += total_packets;
	return count < tx_ring->count;
}
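
/*
 * MSI-X "other" cause vector handler.  The PF raises this vector for events
 * such as link changes; force a link re-check through the watchdog and
 * re-enable the other-cause interrupt.
 */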
static irqreturn_t igbvf_msix_other(int irq, void *data)
{
	struct net_device *netdev = data;
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	adapter->int_counter1++;

	netif_carrier_off(netdev);
	hw->mac.get_link_status = 1;
	if (!test_bit(__IGBVF_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer, jiffies + 1);

	ew32(EIMS, adapter->eims_other);

	return IRQ_HANDLED;
}
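
/* MSI-X Tx vector handler: reclaim completed transmit descriptors */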
static irqreturn_t igbvf_intr_msix_tx(int irq, void *data)
{
	struct net_device *netdev = data;
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct igbvf_ring *tx_ring = adapter->tx_ring;

	adapter->total_tx_bytes = 0;
	adapter->total_tx_packets = 0;

	/* auto mask will automatically reenable the interrupt when we write
	 * EICS */
	if (!igbvf_clean_tx_irq(tx_ring))
		/* Ring was not completely cleaned, so fire another interrupt */
		ew32(EICS, tx_ring->eims_value);
	else
		ew32(EIMS, tx_ring->eims_value);

	return IRQ_HANDLED;
}
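
/* MSI-X Rx vector handler: write back the new ITR value and schedule NAPI */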
static irqreturn_t igbvf_intr_msix_rx(int irq, void *data)
{
	struct net_device *netdev = data;
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	adapter->int_counter0++;

	/* Write the ITR value calculated at the end of the
	 * previous interrupt.
	 */
	if (adapter->rx_ring->set_itr) {
		writel(adapter->rx_ring->itr_val,
		       adapter->hw.hw_addr + adapter->rx_ring->itr_register);
		adapter->rx_ring->set_itr = 0;
	}

	if (napi_schedule_prep(&adapter->rx_ring->napi)) {
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__napi_schedule(&adapter->rx_ring->napi);
	}

	return IRQ_HANDLED;
}
#define IGBVF_NO_QUEUE -1
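
/*
 * igbvf_assign_vector - map an Rx and/or Tx queue to an MSI-X vector by
 * programming the corresponding IVAR table entry.
 */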
static void igbvf_assign_vector(struct igbvf_adapter *adapter, int rx_queue,
                                int tx_queue, int msix_vector)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ivar, index;

	/* 82576 uses a table-based method for assigning vectors.
	   Each queue has a single entry in the table to which we write
	   a vector number along with a "valid" bit.  Sadly, the layout
	   of the table is somewhat counterintuitive. */
	if (rx_queue > IGBVF_NO_QUEUE) {
		index = (rx_queue >> 1);
		ivar = array_er32(IVAR0, index);
		if (rx_queue & 0x1) {
			/* vector goes into third byte of register */
			ivar = ivar & 0xFF00FFFF;
			ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
		} else {
			/* vector goes into low byte of register */
			ivar = ivar & 0xFFFFFF00;
			ivar |= msix_vector | E1000_IVAR_VALID;
		}
		adapter->rx_ring[rx_queue].eims_value = 1 << msix_vector;
		array_ew32(IVAR0, index, ivar);
	}
	if (tx_queue > IGBVF_NO_QUEUE) {
		index = (tx_queue >> 1);
		ivar = array_er32(IVAR0, index);
		if (tx_queue & 0x1) {
			/* vector goes into high byte of register */
			ivar = ivar & 0x00FFFFFF;
			ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
		} else {
			/* vector goes into second byte of register */
			ivar = ivar & 0xFFFF00FF;
			ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
		}
		adapter->tx_ring[tx_queue].eims_value = 1 << msix_vector;
		array_ew32(IVAR0, index, ivar);
	}
}
/**
 * igbvf_configure_msix - Configure MSI-X hardware
 *
 * igbvf_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void igbvf_configure_msix(struct igbvf_adapter *adapter)
{
	u32 tmp;
	struct e1000_hw *hw = &adapter->hw;
	struct igbvf_ring *tx_ring = adapter->tx_ring;
	struct igbvf_ring *rx_ring = adapter->rx_ring;
	int vector = 0;

	adapter->eims_enable_mask = 0;

	igbvf_assign_vector(adapter, IGBVF_NO_QUEUE, 0, vector++);
	adapter->eims_enable_mask |= tx_ring->eims_value;
	if (tx_ring->itr_val)
		writel(tx_ring->itr_val,
		       hw->hw_addr + tx_ring->itr_register);
	else
		writel(1952, hw->hw_addr + tx_ring->itr_register);

	igbvf_assign_vector(adapter, 0, IGBVF_NO_QUEUE, vector++);
	adapter->eims_enable_mask |= rx_ring->eims_value;
	if (rx_ring->itr_val)
		writel(rx_ring->itr_val,
		       hw->hw_addr + rx_ring->itr_register);
	else
		writel(1952, hw->hw_addr + rx_ring->itr_register);

	/* set vector for other causes, i.e. link changes */

	tmp = (vector++ | E1000_IVAR_VALID);

	ew32(IVAR_MISC, tmp);

	adapter->eims_enable_mask = (1 << (vector)) - 1;
	adapter->eims_other = 1 << (vector - 1);
}
static void igbvf_reset_interrupt_capability(struct igbvf_adapter *adapter)
{
	if (adapter->msix_entries) {
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	}
}
/**
 * igbvf_set_interrupt_capability - set MSI or MSI-X if supported
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static void igbvf_set_interrupt_capability(struct igbvf_adapter *adapter)
{
	int err = -ENOMEM;
	int i;

	/* we allocate 3 vectors, 1 for tx, 1 for rx, one for pf messages */
	adapter->msix_entries = kcalloc(3, sizeof(struct msix_entry),
	                                GFP_KERNEL);
	if (adapter->msix_entries) {
		for (i = 0; i < 3; i++)
			adapter->msix_entries[i].entry = i;

		err = pci_enable_msix(adapter->pdev,
		                      adapter->msix_entries, 3);
	}

	if (err) {
		/* MSI-X failed */
		dev_err(&adapter->pdev->dev,
		        "Failed to initialize MSI-X interrupts.\n");
		igbvf_reset_interrupt_capability(adapter);
	}
}
/**
 * igbvf_request_msix - Initialize MSI-X interrupts
 *
 * igbvf_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int igbvf_request_msix(struct igbvf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err = 0, vector = 0;

	if (strlen(netdev->name) < (IFNAMSIZ - 5)) {
		sprintf(adapter->tx_ring->name, "%s-tx-0", netdev->name);
		sprintf(adapter->rx_ring->name, "%s-rx-0", netdev->name);
	} else {
		memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
		memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
	}

	err = request_irq(adapter->msix_entries[vector].vector,
	                  igbvf_intr_msix_tx, 0, adapter->tx_ring->name,
	                  netdev);
	if (err)
		goto out;

	adapter->tx_ring->itr_register = E1000_EITR(vector);
	adapter->tx_ring->itr_val = 1952;
	vector++;

	err = request_irq(adapter->msix_entries[vector].vector,
	                  igbvf_intr_msix_rx, 0, adapter->rx_ring->name,
	                  netdev);
	if (err)
		goto out;

	adapter->rx_ring->itr_register = E1000_EITR(vector);
	adapter->rx_ring->itr_val = 1952;
	vector++;

	err = request_irq(adapter->msix_entries[vector].vector,
	                  igbvf_msix_other, 0, netdev->name, netdev);
	if (err)
		goto out;

	igbvf_configure_msix(adapter);
	return 0;
out:
	return err;
}
/**
 * igbvf_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 **/
static int __devinit igbvf_alloc_queues(struct igbvf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	adapter->tx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		return -ENOMEM;

	adapter->rx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL);
	if (!adapter->rx_ring) {
		kfree(adapter->tx_ring);
		return -ENOMEM;
	}

	netif_napi_add(netdev, &adapter->rx_ring->napi, igbvf_poll, 64);

	return 0;
}
/**
 * igbvf_request_irq - initialize interrupts
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igbvf_request_irq(struct igbvf_adapter *adapter)
{
	int err = -1;

	/* igbvf supports msi-x only */
	if (adapter->msix_entries)
		err = igbvf_request_msix(adapter);

	if (!err)
		return err;

	dev_err(&adapter->pdev->dev,
	        "Unable to allocate interrupt, Error: %d\n", err);

	return err;
}
static void igbvf_free_irq(struct igbvf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int vector;

	if (adapter->msix_entries) {
		for (vector = 0; vector < 3; vector++)
			free_irq(adapter->msix_entries[vector].vector, netdev);
	}
}
/**
 * igbvf_irq_disable - Mask off interrupt generation on the NIC
 **/
static void igbvf_irq_disable(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(EIMC, ~0);

	if (adapter->msix_entries)
		ew32(EIAC, 0);
}
/**
 * igbvf_irq_enable - Enable default interrupt generation settings
 **/
static void igbvf_irq_enable(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(EIAC, adapter->eims_enable_mask);
	ew32(EIAM, adapter->eims_enable_mask);
	ew32(EIMS, adapter->eims_enable_mask);
}
/**
 * igbvf_poll - NAPI Rx polling callback
 * @napi: struct associated with this polling callback
 * @budget: amount of packets driver is allowed to process this poll
 **/
static int igbvf_poll(struct napi_struct *napi, int budget)
{
	struct igbvf_ring *rx_ring = container_of(napi, struct igbvf_ring, napi);
	struct igbvf_adapter *adapter = rx_ring->adapter;
	struct e1000_hw *hw = &adapter->hw;
	int work_done = 0;

	igbvf_clean_rx_irq(adapter, &work_done, budget);

	/* If not enough Rx work done, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);

		if (adapter->itr_setting & 3)
			igbvf_set_itr(adapter);

		if (!test_bit(__IGBVF_DOWN, &adapter->state))
			ew32(EIMS, adapter->rx_ring->eims_value);
	}

	return work_done;
}
/**
 * igbvf_set_rlpml - set receive large packet maximum length
 * @adapter: board private structure
 *
 * Configure the maximum size of packets that will be received
 **/
static void igbvf_set_rlpml(struct igbvf_adapter *adapter)
{
	int max_frame_size = adapter->max_frame_size;
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->vlgrp)
		max_frame_size += VLAN_TAG_SIZE;

	e1000_rlpml_set_vf(hw, max_frame_size);
}
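
/* Add the given VLAN ID to the hardware VLAN filter via the set_vfta MAC op */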
static void igbvf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	if (hw->mac.ops.set_vfta(hw, vid, true))
		dev_err(&adapter->pdev->dev, "Failed to add vlan id %d\n", vid);
}
static void igbvf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	igbvf_irq_disable(adapter);
	vlan_group_set_device(adapter->vlgrp, vid, NULL);

	if (!test_bit(__IGBVF_DOWN, &adapter->state))
		igbvf_irq_enable(adapter);

	if (hw->mac.ops.set_vfta(hw, vid, false))
		dev_err(&adapter->pdev->dev,
		        "Failed to remove vlan id %d\n", vid);
}
static void igbvf_vlan_rx_register(struct net_device *netdev,
                                   struct vlan_group *grp)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	adapter->vlgrp = grp;
}
static void igbvf_restore_vlan(struct igbvf_adapter *adapter)
{
	u16 vid;

	if (!adapter->vlgrp)
		return;

	for (vid = 0; vid < VLAN_N_VID; vid++) {
		if (!vlan_group_get_device(adapter->vlgrp, vid))
			continue;
		igbvf_vlan_rx_add_vid(adapter->netdev, vid);
	}

	igbvf_set_rlpml(adapter);
}
/**
 * igbvf_configure_tx - Configure Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void igbvf_configure_tx(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct igbvf_ring *tx_ring = adapter->tx_ring;
	u64 tdba;
	u32 txdctl, dca_txctrl;

	/* disable transmits */
	txdctl = er32(TXDCTL(0));
	ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);

	/* Setup the HW Tx Head and Tail descriptor pointers */
	ew32(TDLEN(0), tx_ring->count * sizeof(union e1000_adv_tx_desc));
	tdba = tx_ring->dma;
	ew32(TDBAL(0), (tdba & DMA_BIT_MASK(32)));
	ew32(TDBAH(0), (tdba >> 32));
	tx_ring->head = E1000_TDH(0);
	tx_ring->tail = E1000_TDT(0);

	/* Turn off Relaxed Ordering on head write-backs.  The writebacks
	 * MUST be delivered in order or it will completely screw up
	 * our bookkeeping.
	 */
	dca_txctrl = er32(DCA_TXCTRL(0));
	dca_txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN;
	ew32(DCA_TXCTRL(0), dca_txctrl);

	/* enable transmits */
	txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
	ew32(TXDCTL(0), txdctl);

	/* Setup Transmit Descriptor Settings for eop descriptor */
	adapter->txd_cmd = E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_IFCS;

	/* enable Report Status bit */
	adapter->txd_cmd |= E1000_ADVTXD_DCMD_RS;
}
/**
 * igbvf_setup_srrctl - configure the receive control registers
 * @adapter: Board private structure
 **/
static void igbvf_setup_srrctl(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 srrctl = 0;

	srrctl &= ~(E1000_SRRCTL_DESCTYPE_MASK |
	            E1000_SRRCTL_BSIZEHDR_MASK |
	            E1000_SRRCTL_BSIZEPKT_MASK);

	/* Enable queue drop to avoid head of line blocking */
	srrctl |= E1000_SRRCTL_DROP_EN;

	/* Setup buffer sizes */
	srrctl |= ALIGN(adapter->rx_buffer_len, 1024) >>
	          E1000_SRRCTL_BSIZEPKT_SHIFT;

	if (adapter->rx_buffer_len < 2048) {
		adapter->rx_ps_hdr_size = 0;
		srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
	} else {
		adapter->rx_ps_hdr_size = 128;
		srrctl |= adapter->rx_ps_hdr_size <<
		          E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
		srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
	}

	ew32(SRRCTL(0), srrctl);
}
/**
 * igbvf_configure_rx - Configure Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void igbvf_configure_rx(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct igbvf_ring *rx_ring = adapter->rx_ring;
	u64 rdba;
	u32 rdlen, rxdctl;

	/* disable receives */
	rxdctl = er32(RXDCTL(0));
	ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);

	rdlen = rx_ring->count * sizeof(union e1000_adv_rx_desc);

	/*
	 * Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */
	rdba = rx_ring->dma;
	ew32(RDBAL(0), (rdba & DMA_BIT_MASK(32)));
	ew32(RDBAH(0), (rdba >> 32));
	ew32(RDLEN(0), rx_ring->count * sizeof(union e1000_adv_rx_desc));
	rx_ring->head = E1000_RDH(0);
	rx_ring->tail = E1000_RDT(0);

	rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
	rxdctl &= 0xFFF00000;
	rxdctl |= IGBVF_RX_PTHRESH;
	rxdctl |= IGBVF_RX_HTHRESH << 8;
	rxdctl |= IGBVF_RX_WTHRESH << 16;

	igbvf_set_rlpml(adapter);

	/* enable receives */
	ew32(RXDCTL(0), rxdctl);
}
/**
 * igbvf_set_multi - Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_multi entry point is called whenever the multicast address
 * list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper multicast,
 * promiscuous mode, and all-multi behavior.
 **/
static void igbvf_set_multi(struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct netdev_hw_addr *ha;
	u8  *mta_list = NULL;
	int i;

	if (!netdev_mc_empty(netdev)) {
		mta_list = kmalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
		if (!mta_list) {
			dev_err(&adapter->pdev->dev,
			        "failed to allocate multicast filter list\n");
			return;
		}
	}

	/* prepare a packed array of only addresses. */
	i = 0;
	netdev_for_each_mc_addr(ha, netdev)
		memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);

	hw->mac.ops.update_mc_addr_list(hw, mta_list, i, 0, 0);
	kfree(mta_list);
}
/**
 * igbvf_configure - configure the hardware for Rx and Tx
 * @adapter: private board structure
 **/
static void igbvf_configure(struct igbvf_adapter *adapter)
{
	igbvf_set_multi(adapter->netdev);

	igbvf_restore_vlan(adapter);

	igbvf_configure_tx(adapter);
	igbvf_setup_srrctl(adapter);
	igbvf_configure_rx(adapter);
	igbvf_alloc_rx_buffers(adapter->rx_ring,
	                       igbvf_desc_unused(adapter->rx_ring));
}
/* igbvf_reset - bring the hardware into a known good state
 *
 * This function boots the hardware and enables some settings that
 * require a configuration cycle of the hardware - those cannot be
 * set/changed during runtime. After reset the device needs to be
 * properly configured for Rx, Tx etc.
 */
static void igbvf_reset(struct igbvf_adapter *adapter)
{
	struct e1000_mac_info *mac = &adapter->hw.mac;
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;

	/* Allow time for pending master requests to run */
	if (mac->ops.reset_hw(hw))
		dev_err(&adapter->pdev->dev, "PF still resetting\n");

	mac->ops.init_hw(hw);

	if (is_valid_ether_addr(adapter->hw.mac.addr)) {
		memcpy(netdev->dev_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
		memcpy(netdev->perm_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
	}

	adapter->last_reset = jiffies;
}
int igbvf_up(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* hardware has been reset, we need to reload some things */
	igbvf_configure(adapter);

	clear_bit(__IGBVF_DOWN, &adapter->state);

	napi_enable(&adapter->rx_ring->napi);
	if (adapter->msix_entries)
		igbvf_configure_msix(adapter);

	/* Clear any pending interrupts. */
	er32(EICR);
	igbvf_irq_enable(adapter);

	/* start the watchdog */
	hw->mac.get_link_status = 1;
	mod_timer(&adapter->watchdog_timer, jiffies + 1);

	return 0;
}
void igbvf_down(struct igbvf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	u32 rxdctl, txdctl;

	/*
	 * signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer
	 */
	set_bit(__IGBVF_DOWN, &adapter->state);

	/* disable receives in the hardware */
	rxdctl = er32(RXDCTL(0));
	ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);

	netif_stop_queue(netdev);

	/* disable transmits in the hardware */
	txdctl = er32(TXDCTL(0));
	ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);

	/* flush both disables and wait for them to finish */
	e1e_flush();
	msleep(10);

	napi_disable(&adapter->rx_ring->napi);

	igbvf_irq_disable(adapter);

	del_timer_sync(&adapter->watchdog_timer);

	netif_carrier_off(netdev);

	/* record the stats before reset*/
	igbvf_update_stats(adapter);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	igbvf_reset(adapter);
	igbvf_clean_tx_ring(adapter->tx_ring);
	igbvf_clean_rx_ring(adapter->rx_ring);
}
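
/* Bring the interface down and back up, serialized against other resets
 * via the __IGBVF_RESETTING bit. */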
void igbvf_reinit_locked(struct igbvf_adapter *adapter)
{
	while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
		msleep(1);
	igbvf_down(adapter);
	igbvf_up(adapter);
	clear_bit(__IGBVF_RESETTING, &adapter->state);
}
/**
 * igbvf_sw_init - Initialize general software structures (struct igbvf_adapter)
 * @adapter: board private structure to initialize
 *
 * igbvf_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int __devinit igbvf_sw_init(struct igbvf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	s32 rc;

	adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
	adapter->rx_ps_hdr_size = 0;
	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;

	adapter->tx_int_delay = 8;
	adapter->tx_abs_int_delay = 32;
	adapter->rx_int_delay = 0;
	adapter->rx_abs_int_delay = 8;
	adapter->itr_setting = 3;
	adapter->itr = 20000;

	/* Set various function pointers */
	adapter->ei->init_ops(&adapter->hw);

	rc = adapter->hw.mac.ops.init_params(&adapter->hw);
	if (rc)
		return rc;

	rc = adapter->hw.mbx.ops.init_params(&adapter->hw);
	if (rc)
		return rc;

	igbvf_set_interrupt_capability(adapter);

	if (igbvf_alloc_queues(adapter))
		return -ENOMEM;

	spin_lock_init(&adapter->tx_queue_lock);

	/* Explicitly disable IRQ since the NIC can be in any state. */
	igbvf_irq_disable(adapter);

	spin_lock_init(&adapter->stats_lock);

	set_bit(__IGBVF_DOWN, &adapter->state);
	return 0;
}
static void igbvf_initialize_last_counter_stats(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	adapter->stats.last_gprc = er32(VFGPRC);
	adapter->stats.last_gorc = er32(VFGORC);
	adapter->stats.last_gptc = er32(VFGPTC);
	adapter->stats.last_gotc = er32(VFGOTC);
	adapter->stats.last_mprc = er32(VFMPRC);
	adapter->stats.last_gotlbc = er32(VFGOTLBC);
	adapter->stats.last_gptlbc = er32(VFGPTLBC);
	adapter->stats.last_gorlbc = er32(VFGORLBC);
	adapter->stats.last_gprlbc = er32(VFGPRLBC);

	adapter->stats.base_gprc = er32(VFGPRC);
	adapter->stats.base_gorc = er32(VFGORC);
	adapter->stats.base_gptc = er32(VFGPTC);
	adapter->stats.base_gotc = er32(VFGOTC);
	adapter->stats.base_mprc = er32(VFMPRC);
	adapter->stats.base_gotlbc = er32(VFGOTLBC);
	adapter->stats.base_gptlbc = er32(VFGPTLBC);
	adapter->stats.base_gorlbc = er32(VFGORLBC);
	adapter->stats.base_gprlbc = er32(VFGPRLBC);
}
/**
 * igbvf_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int igbvf_open(struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	/* disallow open during test */
	if (test_bit(__IGBVF_TESTING, &adapter->state))
		return -EBUSY;

	/* allocate transmit descriptors */
	err = igbvf_setup_tx_resources(adapter, adapter->tx_ring);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = igbvf_setup_rx_resources(adapter, adapter->rx_ring);
	if (err)
		goto err_setup_rx;

	/*
	 * before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call pci_request_irq, so we have to setup our
	 * clean_rx handler before we do so.
	 */
	igbvf_configure(adapter);

	err = igbvf_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/* From here on the code is the same as igbvf_up() */
	clear_bit(__IGBVF_DOWN, &adapter->state);

	napi_enable(&adapter->rx_ring->napi);

	/* clear any pending interrupts */
	er32(EICR);

	igbvf_irq_enable(adapter);

	/* start the watchdog */
	hw->mac.get_link_status = 1;
	mod_timer(&adapter->watchdog_timer, jiffies + 1);

	return 0;

err_req_irq:
	igbvf_free_rx_resources(adapter->rx_ring);
err_setup_rx:
	igbvf_free_tx_resources(adapter->tx_ring);
err_setup_tx:
	igbvf_reset(adapter);

	return err;
}
/**
 * igbvf_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the drivers control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int igbvf_close(struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	WARN_ON(test_bit(__IGBVF_RESETTING, &adapter->state));
	igbvf_down(adapter);

	igbvf_free_irq(adapter);

	igbvf_free_tx_resources(adapter->tx_ring);
	igbvf_free_rx_resources(adapter->rx_ring);

	return 0;
}
/**
 * igbvf_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int igbvf_set_mac(struct net_device *netdev, void *p)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	hw->mac.ops.rar_set(hw, hw->mac.addr, 0);

	if (memcmp(addr->sa_data, hw->mac.addr, 6))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return 0;
}
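
/*
 * The VF statistics registers are 32 bits wide and roll over.  This macro
 * folds the current hardware reading into the 64-bit software counter,
 * adding 2^32 whenever the register is seen to have wrapped since the
 * last sample.
 */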
#define UPDATE_VF_COUNTER(reg, name)                                    \
{                                                                       \
	u32 current_counter = er32(reg);                                \
	if (current_counter < adapter->stats.last_##name)               \
		adapter->stats.name += 0x100000000LL;                   \
	adapter->stats.last_##name = current_counter;                   \
	adapter->stats.name &= 0xFFFFFFFF00000000LL;                    \
	adapter->stats.name |= current_counter;                         \
}
/**
 * igbvf_update_stats - Update the board statistics counters
 * @adapter: board private structure
 **/
void igbvf_update_stats(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;

	/*
	 * Prevent stats update while adapter is being reset, link is down
	 * or if the pci connection is down.
	 */
	if (adapter->link_speed == 0)
		return;

	if (test_bit(__IGBVF_RESETTING, &adapter->state))
		return;

	if (pci_channel_offline(pdev))
		return;

	UPDATE_VF_COUNTER(VFGPRC, gprc);
	UPDATE_VF_COUNTER(VFGORC, gorc);
	UPDATE_VF_COUNTER(VFGPTC, gptc);
	UPDATE_VF_COUNTER(VFGOTC, gotc);
	UPDATE_VF_COUNTER(VFMPRC, mprc);
	UPDATE_VF_COUNTER(VFGOTLBC, gotlbc);
	UPDATE_VF_COUNTER(VFGPTLBC, gptlbc);
	UPDATE_VF_COUNTER(VFGORLBC, gorlbc);
	UPDATE_VF_COUNTER(VFGPRLBC, gprlbc);

	/* Fill out the OS statistics structure */
	adapter->net_stats.multicast = adapter->stats.mprc;
}
static void igbvf_print_link_info(struct igbvf_adapter *adapter)
{
	dev_info(&adapter->pdev->dev, "Link is Up %d Mbps %s\n",
	         adapter->link_speed,
	         ((adapter->link_duplex == FULL_DUPLEX) ?
	          "Full Duplex" : "Half Duplex"));
}
static bool igbvf_has_link(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	s32 ret_val = E1000_SUCCESS;
	bool link_active;

	/* If interface is down, stay link down */
	if (test_bit(__IGBVF_DOWN, &adapter->state))
		return false;

	ret_val = hw->mac.ops.check_for_link(hw);
	link_active = !hw->mac.get_link_status;

	/* if check for link returns error we will need to reset */
	if (ret_val && time_after(jiffies, adapter->last_reset + (10 * HZ)))
		schedule_work(&adapter->reset_task);

	return link_active;
}
/**
 * igbvf_watchdog - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void igbvf_watchdog(unsigned long data)
{
	struct igbvf_adapter *adapter = (struct igbvf_adapter *) data;

	/* Do the rest outside of interrupt context */
	schedule_work(&adapter->watchdog_task);
}
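
/*
 * igbvf_watchdog_task - periodic link and Tx-hang supervision
 *
 * Runs from the watchdog work item: updates link state and speed/duplex,
 * refreshes statistics, schedules a reset if Tx work is queued while the
 * link is down, and rearms the watchdog timer.
 */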
static void igbvf_watchdog_task(struct work_struct *work)
{
	struct igbvf_adapter *adapter = container_of(work,
	                                             struct igbvf_adapter,
	                                             watchdog_task);
	struct net_device *netdev = adapter->netdev;
	struct e1000_mac_info *mac = &adapter->hw.mac;
	struct igbvf_ring *tx_ring = adapter->tx_ring;
	struct e1000_hw *hw = &adapter->hw;
	u32 link;
	int tx_pending = 0;

	link = igbvf_has_link(adapter);

	if (link) {
		if (!netif_carrier_ok(netdev)) {
			mac->ops.get_link_up_info(&adapter->hw,
			                          &adapter->link_speed,
			                          &adapter->link_duplex);
			igbvf_print_link_info(adapter);

			/* adjust timeout factor according to speed/duplex */
			adapter->tx_timeout_factor = 1;
			switch (adapter->link_speed) {
			case SPEED_10:
				adapter->tx_timeout_factor = 16;
				break;
			case SPEED_100:
				/* maybe add some timeout factor ? */
				break;
			}

			netif_carrier_on(netdev);
			netif_wake_queue(netdev);
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			adapter->link_speed = 0;
			adapter->link_duplex = 0;
			dev_info(&adapter->pdev->dev, "Link is Down\n");
			netif_carrier_off(netdev);
			netif_stop_queue(netdev);
		}
	}

	if (netif_carrier_ok(netdev)) {
		igbvf_update_stats(adapter);
	} else {
		tx_pending = (igbvf_desc_unused(tx_ring) + 1 <
		              tx_ring->count);
		if (tx_pending) {
			/*
			 * We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context).
			 */
			adapter->tx_timeout_count++;
			schedule_work(&adapter->reset_task);
		}
	}

	/* Cause software interrupt to ensure Rx ring is cleaned */
	ew32(EICS, adapter->rx_ring->eims_value);

	/* Force detection of hung controller every watchdog period */
	adapter->detect_tx_hung = 1;

	/* Reset the timer */
	if (!test_bit(__IGBVF_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer,
		          round_jiffies(jiffies + (2 * HZ)));
}
#define IGBVF_TX_FLAGS_CSUM             0x00000001
#define IGBVF_TX_FLAGS_VLAN             0x00000002
#define IGBVF_TX_FLAGS_TSO              0x00000004
#define IGBVF_TX_FLAGS_IPV4             0x00000008
#define IGBVF_TX_FLAGS_VLAN_MASK        0xffff0000
#define IGBVF_TX_FLAGS_VLAN_SHIFT       16
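
/*
 * igbvf_tso - build the TSO context descriptor for a GSO packet
 *
 * Precomputes the pseudo-header checksum, fills in an advanced context
 * descriptor (MAC/IP/L4 lengths and MSS) and returns 1 when a TSO context
 * was set up, or a negative error if the header could not be prepared.
 */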
static int igbvf_tso(struct igbvf_adapter *adapter,
                     struct igbvf_ring *tx_ring,
                     struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
{
	struct e1000_adv_tx_context_desc *context_desc;
	unsigned int i;
	int err;
	struct igbvf_buffer *buffer_info;
	u32 info = 0, tu_cmd = 0;
	u32 mss_l4len_idx, l4len;
	*hdr_len = 0;

	if (skb_header_cloned(skb)) {
		err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (err) {
			dev_err(&adapter->pdev->dev,
			        "igbvf_tso returning an error\n");
			return err;
		}
	}

	l4len = tcp_hdrlen(skb);
	*hdr_len += l4len;

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);
		iph->tot_len = 0;
		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
		                                         iph->daddr, 0,
		                                         IPPROTO_TCP,
		                                         0);
	} else if (skb_is_gso_v6(skb)) {
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
		                                       &ipv6_hdr(skb)->daddr,
		                                       0, IPPROTO_TCP, 0);
	}

	i = tx_ring->next_to_use;

	buffer_info = &tx_ring->buffer_info[i];
	context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i);
	/* VLAN MACLEN IPLEN */
	if (tx_flags & IGBVF_TX_FLAGS_VLAN)
		info |= (tx_flags & IGBVF_TX_FLAGS_VLAN_MASK);
	info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
	*hdr_len += skb_network_offset(skb);
	info |= (skb_transport_header(skb) - skb_network_header(skb));
	*hdr_len += (skb_transport_header(skb) - skb_network_header(skb));
	context_desc->vlan_macip_lens = cpu_to_le32(info);

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);

	if (skb->protocol == htons(ETH_P_IP))
		tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
	tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;

	context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);

	/* MSS L4LEN IDX */
	mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
	mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);

	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
	context_desc->seqnum_seed = 0;

	buffer_info->time_stamp = jiffies;
	buffer_info->next_to_watch = i;
	buffer_info->dma = 0;
	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	return true;
}
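
/*
 * igbvf_tx_csum - set up a checksum-offload context descriptor
 *
 * Returns true if a context descriptor was written (checksum offload or
 * VLAN insertion requested), false otherwise.
 */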
static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter,
                                 struct igbvf_ring *tx_ring,
                                 struct sk_buff *skb, u32 tx_flags)
{
	struct e1000_adv_tx_context_desc *context_desc;
	unsigned int i;
	struct igbvf_buffer *buffer_info;
	u32 info = 0, tu_cmd = 0;

	if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
	    (tx_flags & IGBVF_TX_FLAGS_VLAN)) {
		i = tx_ring->next_to_use;
		buffer_info = &tx_ring->buffer_info[i];
		context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i);

		if (tx_flags & IGBVF_TX_FLAGS_VLAN)
			info |= (tx_flags & IGBVF_TX_FLAGS_VLAN_MASK);

		info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			info |= (skb_transport_header(skb) -
			         skb_network_header(skb));

		context_desc->vlan_macip_lens = cpu_to_le32(info);

		tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			switch (skb->protocol) {
			case __constant_htons(ETH_P_IP):
				tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
				break;
			case __constant_htons(ETH_P_IPV6):
				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
				break;
			default:
				break;
			}
		}

		context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
		context_desc->seqnum_seed = 0;
		context_desc->mss_l4len_idx = 0;

		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = i;
		buffer_info->dma = 0;
		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}

	return false;
}
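
/*
 * igbvf_maybe_stop_tx - stop the Tx queue when fewer than 'size' descriptors
 * are free; returns -EBUSY if no room could be made, 0 otherwise.
 */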
static int igbvf_maybe_stop_tx(struct net_device *netdev, int size)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	/* if there are enough descriptors then we don't need to worry */
	if (igbvf_desc_unused(adapter->tx_ring) >= size)
		return 0;

	netif_stop_queue(netdev);

	smp_mb();

	/* We need to check again just in case room has been made available */
	if (igbvf_desc_unused(adapter->tx_ring) < size)
		return -EBUSY;

	netif_wake_queue(netdev);

	++adapter->restart_queue;
	return 0;
}
#define IGBVF_MAX_TXD_PWR       16
#define IGBVF_MAX_DATA_PER_TXD  (1 << IGBVF_MAX_TXD_PWR)
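
/*
 * igbvf_tx_map_adv - DMA-map the skb head and all fragments into the Tx ring
 *
 * Returns the number of descriptors used, or 0 on a DMA mapping error, in
 * which case any mappings already created are unwound.
 */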
static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
                                   struct igbvf_ring *tx_ring,
                                   struct sk_buff *skb,
                                   unsigned int first)
{
        struct igbvf_buffer *buffer_info;
        struct pci_dev *pdev = adapter->pdev;
        unsigned int len = skb_headlen(skb);
        unsigned int count = 0, i;
        unsigned int f;

        i = tx_ring->next_to_use;

        buffer_info = &tx_ring->buffer_info[i];
        BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD);
        buffer_info->length = len;
        /* set time_stamp *before* dma to help avoid a possible race */
        buffer_info->time_stamp = jiffies;
        buffer_info->next_to_watch = i;
        buffer_info->mapped_as_page = false;
        buffer_info->dma = dma_map_single(&pdev->dev, skb->data, len,
                                          DMA_TO_DEVICE);
        if (dma_mapping_error(&pdev->dev, buffer_info->dma))
                goto dma_error;

        for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
                struct skb_frag_struct *frag;

                count++;
                i++;
                if (i == tx_ring->count)
                        i = 0;

                frag = &skb_shinfo(skb)->frags[f];
                len = frag->size;

                buffer_info = &tx_ring->buffer_info[i];
                BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD);
                buffer_info->length = len;
                buffer_info->time_stamp = jiffies;
                buffer_info->next_to_watch = i;
                buffer_info->mapped_as_page = true;
                buffer_info->dma = dma_map_page(&pdev->dev,
                                                frag->page,
                                                frag->page_offset,
                                                len, DMA_TO_DEVICE);
                if (dma_mapping_error(&pdev->dev, buffer_info->dma))
                        goto dma_error;
        }

        tx_ring->buffer_info[i].skb = skb;
        tx_ring->buffer_info[first].next_to_watch = i;

        return ++count;

dma_error:
        dev_err(&pdev->dev, "TX DMA map failed\n");

        /* clear timestamp and dma mappings for failed buffer_info mapping */
        buffer_info->dma = 0;
        buffer_info->time_stamp = 0;
        buffer_info->length = 0;
        buffer_info->next_to_watch = 0;
        buffer_info->mapped_as_page = false;
        if (count)
                count--;

        /* clear timestamp and dma mappings for remaining portion of packet */
        while (count--) {
                if (i == 0)
                        i += tx_ring->count;
                i--;
                buffer_info = &tx_ring->buffer_info[i];
                igbvf_put_txbuf(adapter, buffer_info);
        }

        return 0;
}
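/**
 * igbvf_tx_queue_adv - write mapped descriptors to the ring and notify hw
 * @adapter: board private structure
 * @tx_ring: ring to place the descriptors on
 * @tx_flags: IGBVF_TX_FLAGS_* offload/VLAN flags for this packet
 * @count: number of buffers mapped by igbvf_tx_map_adv
 * @paylen: total length of the packet
 * @hdr_len: length of the protocol headers (used for the TSO payload size)
 *
 * Builds the advanced data descriptors for an already-mapped packet and
 * bumps the tail register so the hardware starts fetching them.
 **/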
static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter,
                                      struct igbvf_ring *tx_ring,
                                      int tx_flags, int count, u32 paylen,
                                      u8 hdr_len)
{
        union e1000_adv_tx_desc *tx_desc = NULL;
        struct igbvf_buffer *buffer_info;
        u32 olinfo_status = 0, cmd_type_len;
        unsigned int i;

        cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
                        E1000_ADVTXD_DCMD_DEXT);

        if (tx_flags & IGBVF_TX_FLAGS_VLAN)
                cmd_type_len |= E1000_ADVTXD_DCMD_VLE;

        if (tx_flags & IGBVF_TX_FLAGS_TSO) {
                cmd_type_len |= E1000_ADVTXD_DCMD_TSE;

                /* insert tcp checksum */
                olinfo_status |= E1000_TXD_POPTS_TXSM << 8;

                /* insert ip checksum */
                if (tx_flags & IGBVF_TX_FLAGS_IPV4)
                        olinfo_status |= E1000_TXD_POPTS_IXSM << 8;

        } else if (tx_flags & IGBVF_TX_FLAGS_CSUM) {
                olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
        }

        olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);

        i = tx_ring->next_to_use;
        while (count--) {
                buffer_info = &tx_ring->buffer_info[i];
                tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
                tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
                tx_desc->read.cmd_type_len =
                        cpu_to_le32(cmd_type_len | buffer_info->length);
                tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
                i++;
                if (i == tx_ring->count)
                        i = 0;
        }

        tx_desc->read.cmd_type_len |= cpu_to_le32(adapter->txd_cmd);
        /* Force memory writes to complete before letting h/w
         * know there are new descriptors to fetch.  (Only
         * applicable for weak-ordered memory model archs,
         * such as IA-64). */
        wmb();

        tx_ring->next_to_use = i;
        writel(i, adapter->hw.hw_addr + tx_ring->tail);
        /* we need this if more than one processor can write to our tail
         * at a time, it synchronizes IO on IA64/Altix systems */
        mmiowb();
}
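/**
 * igbvf_xmit_frame_ring_adv - main transmit path for a given Tx ring
 * @skb: buffer handed down by the stack
 * @netdev: network interface device structure
 * @tx_ring: ring to place the packet on
 *
 * Checks ring space, sets up TSO/checksum context descriptors as needed,
 * maps the buffers and queues the descriptors.  On a mapping failure the
 * ring is rewound and the skb is dropped.
 **/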
static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
                                             struct net_device *netdev,
                                             struct igbvf_ring *tx_ring)
{
        struct igbvf_adapter *adapter = netdev_priv(netdev);
        unsigned int first, tx_flags = 0;
        u8 hdr_len = 0;
        int count = 0;
        int tso = 0;

        if (test_bit(__IGBVF_DOWN, &adapter->state)) {
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        if (skb->len <= 0) {
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        /*
         * need: count + 4 descriptor gap to keep tail from touching head:
         *       + 2 desc gap to keep tail from touching head,
         *       + 1 desc for skb->data,
         *       + 1 desc for context descriptor,
         * otherwise try again next time
         */
        if (igbvf_maybe_stop_tx(netdev, skb_shinfo(skb)->nr_frags + 4)) {
                /* this is a hard error */
                return NETDEV_TX_BUSY;
        }

        if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
                tx_flags |= IGBVF_TX_FLAGS_VLAN;
                tx_flags |= (vlan_tx_tag_get(skb) << IGBVF_TX_FLAGS_VLAN_SHIFT);
        }

        if (skb->protocol == htons(ETH_P_IP))
                tx_flags |= IGBVF_TX_FLAGS_IPV4;

        first = tx_ring->next_to_use;

        tso = skb_is_gso(skb) ?
                igbvf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len) : 0;
        if (unlikely(tso < 0)) {
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        if (tso)
                tx_flags |= IGBVF_TX_FLAGS_TSO;
        else if (igbvf_tx_csum(adapter, tx_ring, skb, tx_flags) &&
                 (skb->ip_summed == CHECKSUM_PARTIAL))
                tx_flags |= IGBVF_TX_FLAGS_CSUM;

        /*
         * count reflects descriptors mapped, if 0 then mapping error
         * has occurred and we need to rewind the descriptor queue
         */
        count = igbvf_tx_map_adv(adapter, tx_ring, skb, first);

        if (count) {
                igbvf_tx_queue_adv(adapter, tx_ring, tx_flags, count,
                                   skb->len, hdr_len);
                /* Make sure there is space in the ring for the next send. */
                igbvf_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 4);
        } else {
                dev_kfree_skb_any(skb);
                tx_ring->buffer_info[first].time_stamp = 0;
                tx_ring->next_to_use = first;
        }

        return NETDEV_TX_OK;
}
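/**
 * igbvf_xmit_frame - ndo_start_xmit entry point
 * @skb: buffer to transmit
 * @netdev: network interface device structure
 *
 * Drops the skb if the adapter is down, otherwise hands it to
 * igbvf_xmit_frame_ring_adv() on the single Tx ring.
 **/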
static netdev_tx_t igbvf_xmit_frame(struct sk_buff *skb,
                                    struct net_device *netdev)
{
        struct igbvf_adapter *adapter = netdev_priv(netdev);
        struct igbvf_ring *tx_ring;

        if (test_bit(__IGBVF_DOWN, &adapter->state)) {
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        tx_ring = &adapter->tx_ring[0];

        return igbvf_xmit_frame_ring_adv(skb, netdev, tx_ring);
}
/**
 * igbvf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void igbvf_tx_timeout(struct net_device *netdev)
{
        struct igbvf_adapter *adapter = netdev_priv(netdev);

        /* Do the reset outside of interrupt context */
        adapter->tx_timeout_count++;
        schedule_work(&adapter->reset_task);
}
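/**
 * igbvf_reset_task - work handler that reinitializes the adapter
 * @work: work_struct embedded in the adapter structure
 *
 * Scheduled from igbvf_tx_timeout so the reset runs in process context
 * rather than in the timeout handler itself.
 **/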
static void igbvf_reset_task(struct work_struct *work)
{
        struct igbvf_adapter *adapter;
        adapter = container_of(work, struct igbvf_adapter, reset_task);

        igbvf_reinit_locked(adapter);
}
/**
 * igbvf_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the timer callback.
 **/
static struct net_device_stats *igbvf_get_stats(struct net_device *netdev)
{
        struct igbvf_adapter *adapter = netdev_priv(netdev);

        /* only return the current stats */
        return &adapter->net_stats;
}
/**
 * igbvf_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int igbvf_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct igbvf_adapter *adapter = netdev_priv(netdev);
        int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;

        if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
                dev_err(&adapter->pdev->dev, "Invalid MTU setting\n");
                return -EINVAL;
        }

#define MAX_STD_JUMBO_FRAME_SIZE 9234
        if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
                dev_err(&adapter->pdev->dev, "MTU > 9216 not supported.\n");
                return -EINVAL;
        }

        while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
                msleep(1);
        /* igbvf_down has a dependency on max_frame_size */
        adapter->max_frame_size = max_frame;
        if (netif_running(netdev))
                igbvf_down(adapter);

        /*
         * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
         * means we reserve 2 more, this pushes us to allocate from the next
         * larger slab size.
         * i.e. RXBUFFER_2048 --> size-4096 slab
         * However with the new *_jumbo_rx* routines, jumbo receives will use
         * fragmented skbs
         */

        if (max_frame <= 1024)
                adapter->rx_buffer_len = 1024;
        else if (max_frame <= 2048)
                adapter->rx_buffer_len = 2048;
        else
#if (PAGE_SIZE / 2) > 16384
                adapter->rx_buffer_len = 16384;
#else
                adapter->rx_buffer_len = PAGE_SIZE / 2;
#endif

        /* adjust allocation if LPE protects us, and we aren't using SBP */
        if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
            (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
                adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN +
                                         ETH_FCS_LEN;

        dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n",
                 netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;

        if (netif_running(netdev))
                igbvf_up(adapter);
        else
                igbvf_reset(adapter);

        clear_bit(__IGBVF_RESETTING, &adapter->state);

        return 0;
}
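/**
 * igbvf_ioctl - handle SIOC* device ioctls
 * @netdev: network interface device structure
 * @ifr: interface request structure
 * @cmd: ioctl command
 *
 * No device-specific ioctls are supported, so every command returns
 * -EOPNOTSUPP.
 **/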
static int igbvf_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
        switch (cmd) {
        default:
                return -EOPNOTSUPP;
        }
}
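/**
 * igbvf_suspend - PCI suspend callback
 * @pdev: PCI device information struct
 * @state: target power state
 *
 * Detaches the interface, brings the adapter down, frees its interrupt
 * and (when power management is enabled) saves PCI state before the
 * device is disabled.
 **/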
static int igbvf_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct igbvf_adapter *adapter = netdev_priv(netdev);
#ifdef CONFIG_PM
        int retval = 0;
#endif

        netif_device_detach(netdev);

        if (netif_running(netdev)) {
                WARN_ON(test_bit(__IGBVF_RESETTING, &adapter->state));
                igbvf_down(adapter);
                igbvf_free_irq(adapter);
        }

#ifdef CONFIG_PM
        retval = pci_save_state(pdev);
        if (retval)
                return retval;
#endif

        pci_disable_device(pdev);

        return 0;
}
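/**
 * igbvf_resume - PCI resume callback (built only with CONFIG_PM)
 * @pdev: PCI device information struct
 *
 * Re-enables the device, restores PCI state, reacquires the interrupt if
 * the interface was running, resets the hardware and reattaches the
 * interface.
 **/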
#ifdef CONFIG_PM
static int igbvf_resume(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct igbvf_adapter *adapter = netdev_priv(netdev);
        u32 err;

        pci_restore_state(pdev);
        err = pci_enable_device_mem(pdev);
        if (err) {
                dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
                return err;
        }

        pci_set_master(pdev);

        if (netif_running(netdev)) {
                err = igbvf_request_irq(adapter);
                if (err)
                        return err;
        }

        igbvf_reset(adapter);

        if (netif_running(netdev))
                igbvf_up(adapter);

        netif_device_attach(netdev);

        return 0;
}
#endif
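/**
 * igbvf_shutdown - PCI shutdown callback, reuses the suspend path
 * @pdev: PCI device information struct
 **/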
static void igbvf_shutdown(struct pci_dev *pdev)
{
        igbvf_suspend(pdev, PMSG_SUSPEND);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void igbvf_netpoll(struct net_device *netdev)
{
        struct igbvf_adapter *adapter = netdev_priv(netdev);

        disable_irq(adapter->pdev->irq);

        igbvf_clean_tx_irq(adapter->tx_ring);

        enable_irq(adapter->pdev->irq);
}
#endif
/**
 * igbvf_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t igbvf_io_error_detected(struct pci_dev *pdev,
                                                pci_channel_state_t state)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct igbvf_adapter *adapter = netdev_priv(netdev);

        netif_device_detach(netdev);

        if (state == pci_channel_io_perm_failure)
                return PCI_ERS_RESULT_DISCONNECT;

        if (netif_running(netdev))
                igbvf_down(adapter);
        pci_disable_device(pdev);

        /* Request a slot reset. */
        return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * igbvf_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the igbvf_resume routine.
 */
static pci_ers_result_t igbvf_io_slot_reset(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct igbvf_adapter *adapter = netdev_priv(netdev);

        if (pci_enable_device_mem(pdev)) {
                dev_err(&pdev->dev,
                        "Cannot re-enable PCI device after reset.\n");
                return PCI_ERS_RESULT_DISCONNECT;
        }
        pci_set_master(pdev);

        igbvf_reset(adapter);

        return PCI_ERS_RESULT_RECOVERED;
}
/**
 * igbvf_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it is OK to resume normal operation. Implementation resembles the
 * second-half of the igbvf_resume routine.
 */
static void igbvf_io_resume(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct igbvf_adapter *adapter = netdev_priv(netdev);

        if (netif_running(netdev)) {
                if (igbvf_up(adapter)) {
                        dev_err(&pdev->dev,
                                "can't bring device back up after reset\n");
                        return;
                }
        }

        netif_device_attach(netdev);
}
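/**
 * igbvf_print_device_info - print adapter identification to the kernel log
 * @adapter: board private structure
 **/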
static void igbvf_print_device_info(struct igbvf_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;

        dev_info(&pdev->dev, "Intel(R) 82576 Virtual Function\n");
        dev_info(&pdev->dev, "Address: %pM\n", netdev->dev_addr);
        dev_info(&pdev->dev, "MAC: %d\n", hw->mac.type);
}
static const struct net_device_ops igbvf_netdev_ops = {
        .ndo_open                       = igbvf_open,
        .ndo_stop                       = igbvf_close,
        .ndo_start_xmit                 = igbvf_xmit_frame,
        .ndo_get_stats                  = igbvf_get_stats,
        .ndo_set_multicast_list         = igbvf_set_multi,
        .ndo_set_mac_address            = igbvf_set_mac,
        .ndo_change_mtu                 = igbvf_change_mtu,
        .ndo_do_ioctl                   = igbvf_ioctl,
        .ndo_tx_timeout                 = igbvf_tx_timeout,
        .ndo_vlan_rx_register           = igbvf_vlan_rx_register,
        .ndo_vlan_rx_add_vid            = igbvf_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid           = igbvf_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller            = igbvf_netpoll,
#endif
};
/**
 * igbvf_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in igbvf_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * igbvf_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit igbvf_probe(struct pci_dev *pdev,
                                 const struct pci_device_id *ent)
{
        struct net_device *netdev;
        struct igbvf_adapter *adapter;
        struct e1000_hw *hw;
        const struct igbvf_info *ei = igbvf_info_tbl[ent->driver_data];

        static int cards_found;
        int err, pci_using_dac;

        err = pci_enable_device_mem(pdev);
        if (err)
                return err;

        pci_using_dac = 0;
        err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
        if (!err) {
                err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
                if (!err)
                        pci_using_dac = 1;
        } else {
                err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
                if (err) {
                        err = dma_set_coherent_mask(&pdev->dev,
                                                    DMA_BIT_MASK(32));
                        if (err) {
                                dev_err(&pdev->dev, "No usable DMA "
                                        "configuration, aborting\n");
                                goto err_dma;
                        }
                }
        }

        err = pci_request_regions(pdev, igbvf_driver_name);
        if (err)
                goto err_pci_reg;

        pci_set_master(pdev);

        err = -ENOMEM;
        netdev = alloc_etherdev(sizeof(struct igbvf_adapter));
        if (!netdev)
                goto err_alloc_etherdev;

        SET_NETDEV_DEV(netdev, &pdev->dev);

        pci_set_drvdata(pdev, netdev);
        adapter = netdev_priv(netdev);
        hw = &adapter->hw;
        adapter->netdev = netdev;
        adapter->pdev = pdev;
        adapter->ei = ei;
        adapter->pba = ei->pba;
        adapter->flags = ei->flags;
        adapter->hw.back = adapter;
        adapter->hw.mac.type = ei->mac;
        adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1;

        /* PCI config space info */

        hw->vendor_id = pdev->vendor;
        hw->device_id = pdev->device;
        hw->subsystem_vendor_id = pdev->subsystem_vendor;
        hw->subsystem_device_id = pdev->subsystem_device;

        pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);

        err = -EIO;
        adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0),
                                      pci_resource_len(pdev, 0));

        if (!adapter->hw.hw_addr)
                goto err_ioremap;

        if (ei->get_variants) {
                err = ei->get_variants(adapter);
                if (err)
                        goto err_ioremap;
        }

        /* setup adapter struct */
        err = igbvf_sw_init(adapter);
        if (err)
                goto err_sw_init;

        /* construct the net_device struct */
        netdev->netdev_ops = &igbvf_netdev_ops;

        igbvf_set_ethtool_ops(netdev);
        netdev->watchdog_timeo = 5 * HZ;
        strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

        adapter->bd_number = cards_found++;

        netdev->features = NETIF_F_SG |
                           NETIF_F_IP_CSUM |
                           NETIF_F_HW_VLAN_TX |
                           NETIF_F_HW_VLAN_RX |
                           NETIF_F_HW_VLAN_FILTER;

        netdev->features |= NETIF_F_IPV6_CSUM;
        netdev->features |= NETIF_F_TSO;
        netdev->features |= NETIF_F_TSO6;

        if (pci_using_dac)
                netdev->features |= NETIF_F_HIGHDMA;

        netdev->vlan_features |= NETIF_F_TSO;
        netdev->vlan_features |= NETIF_F_TSO6;
        netdev->vlan_features |= NETIF_F_IP_CSUM;
        netdev->vlan_features |= NETIF_F_IPV6_CSUM;
        netdev->vlan_features |= NETIF_F_SG;

        /* reset the controller to put the device in a known good state */
        err = hw->mac.ops.reset_hw(hw);
        if (err) {
                dev_info(&pdev->dev,
                         "PF still in reset state, assigning new address."
                         " Is the PF interface up?\n");
                dev_hw_addr_random(adapter->netdev, hw->mac.addr);
        } else {
                err = hw->mac.ops.read_mac_addr(hw);
                if (err) {
                        dev_err(&pdev->dev, "Error reading MAC address\n");
                        goto err_hw_init;
                }
        }

        memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
        memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);

        if (!is_valid_ether_addr(netdev->perm_addr)) {
                dev_err(&pdev->dev, "Invalid MAC Address: %pM\n",
                        netdev->dev_addr);
                err = -EIO;
                goto err_hw_init;
        }

        setup_timer(&adapter->watchdog_timer, &igbvf_watchdog,
                    (unsigned long) adapter);

        INIT_WORK(&adapter->reset_task, igbvf_reset_task);
        INIT_WORK(&adapter->watchdog_task, igbvf_watchdog_task);

        /* ring size defaults */
        adapter->rx_ring->count = 1024;
        adapter->tx_ring->count = 1024;

        /* reset the hardware with the new settings */
        igbvf_reset(adapter);

        strcpy(netdev->name, "eth%d");
        err = register_netdev(netdev);
        if (err)
                goto err_hw_init;

        /* tell the stack to leave us alone until igbvf_open() is called */
        netif_carrier_off(netdev);
        netif_stop_queue(netdev);

        igbvf_print_device_info(adapter);

        igbvf_initialize_last_counter_stats(adapter);

        return 0;

err_hw_init:
        kfree(adapter->tx_ring);
        kfree(adapter->rx_ring);
err_sw_init:
        igbvf_reset_interrupt_capability(adapter);
        iounmap(adapter->hw.hw_addr);
err_ioremap:
        free_netdev(netdev);
err_alloc_etherdev:
        pci_release_regions(pdev);
err_pci_reg:
err_dma:
        pci_disable_device(pdev);
        return err;
}
/**
 * igbvf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * igbvf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit igbvf_remove(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct igbvf_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;

        /*
         * flush_scheduled_work() may reschedule our watchdog task, so
         * explicitly disable the watchdog tasks from being rescheduled
         */
        set_bit(__IGBVF_DOWN, &adapter->state);
        del_timer_sync(&adapter->watchdog_timer);

        flush_scheduled_work();

        unregister_netdev(netdev);

        igbvf_reset_interrupt_capability(adapter);

        /*
         * it is important to delete the napi struct prior to freeing the
         * rx ring so that you do not end up with null pointer refs
         */
        netif_napi_del(&adapter->rx_ring->napi);
        kfree(adapter->tx_ring);
        kfree(adapter->rx_ring);

        iounmap(hw->hw_addr);
        if (hw->flash_address)
                iounmap(hw->flash_address);
        pci_release_regions(pdev);

        free_netdev(netdev);

        pci_disable_device(pdev);
}
/* PCI Error Recovery (ERS) */
static struct pci_error_handlers igbvf_err_handler = {
        .error_detected = igbvf_io_error_detected,
        .slot_reset = igbvf_io_slot_reset,
        .resume = igbvf_io_resume,
};
static DEFINE_PCI_DEVICE_TABLE(igbvf_pci_tbl) = {
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_VF), board_vf },
        { } /* terminate list */
};
MODULE_DEVICE_TABLE(pci, igbvf_pci_tbl);
/* PCI Device API Driver */
static struct pci_driver igbvf_driver = {
        .name     = igbvf_driver_name,
        .id_table = igbvf_pci_tbl,
        .probe    = igbvf_probe,
        .remove   = __devexit_p(igbvf_remove),
#ifdef CONFIG_PM
        /* Power Management Hooks */
        .suspend  = igbvf_suspend,
        .resume   = igbvf_resume,
#endif
        .shutdown = igbvf_shutdown,
        .err_handler = &igbvf_err_handler
};
/**
 * igbvf_init_module - Driver Registration Routine
 *
 * igbvf_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init igbvf_init_module(void)
{
        int ret;
        printk(KERN_INFO "%s - version %s\n",
               igbvf_driver_string, igbvf_driver_version);
        printk(KERN_INFO "%s\n", igbvf_copyright);

        ret = pci_register_driver(&igbvf_driver);

        return ret;
}
module_init(igbvf_init_module);
/**
 * igbvf_exit_module - Driver Exit Cleanup Routine
 *
 * igbvf_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit igbvf_exit_module(void)
{
        pci_unregister_driver(&igbvf_driver);
}
module_exit(igbvf_exit_module);
MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) 82576 Virtual Function Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);