/*******************************************************************************

  Intel 82599 Virtual Function driver
  Copyright(c) 1999 - 2012 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

/******************************************************************************
 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
******************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>

#include "ixgbevf.h"

const char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
	"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";

#define DRV_VERSION "2.6.0-k"
const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] =
	"Copyright (c) 2009 - 2012 Intel Corporation.";

static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
	[board_82599_vf] = &ixgbevf_82599_vf_info,
	[board_X540_vf]  = &ixgbevf_X540_vf_info,
};

/* ixgbevf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static struct pci_device_id ixgbevf_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF),
	board_82599_vf},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF),
	board_X540_vf},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

/* forward decls */
static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector);
static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx,
			       u32 itr_reg);

static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
					   struct ixgbevf_ring *rx_ring,
					   u32 val)
{
	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rx_ring->reg_idx), val);
}

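/*
 * Illustrative usage note: callers advance the tail to the index of the
 * last descriptor handed to hardware, e.g. the refill path below ends up
 * calling ixgbevf_release_rx_desc(hw, rx_ring, i) with i one entry behind
 * the new next_to_use.
 */
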
/**
 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 */
static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
			     u8 queue, u8 msix_vector)
{
	u32 ivar, index;
	struct ixgbe_hw *hw = &adapter->hw;
	if (direction == -1) {
		/* other causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
		ivar &= ~0xFF;
		ivar |= msix_vector;
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
	} else {
		/* tx or rx causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		index = ((16 * (queue & 1)) + (8 * direction));
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
		ivar &= ~(0xFF << index);
		ivar |= (msix_vector << index);
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
	}
}

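/*
 * Illustrative layout implied by the index math above: each 32-bit VTIVAR
 * register carries four 8-bit entries covering a pair of queues -- Rx of
 * queue 2n at bits 7:0, Tx of queue 2n at bits 15:8, Rx of queue 2n+1 at
 * bits 23:16 and Tx of queue 2n+1 at bits 31:24 -- which is exactly what
 * index = 16 * (queue & 1) + 8 * direction selects.
 */
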
static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_adapter *adapter,
					       struct ixgbevf_tx_buffer
					       *tx_buffer_info)
{
	if (tx_buffer_info->dma) {
		if (tx_buffer_info->mapped_as_page)
			dma_unmap_page(&adapter->pdev->dev,
				       tx_buffer_info->dma,
				       tx_buffer_info->length,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(&adapter->pdev->dev,
					 tx_buffer_info->dma,
					 tx_buffer_info->length,
					 DMA_TO_DEVICE);
		tx_buffer_info->dma = 0;
	}
	if (tx_buffer_info->skb) {
		dev_kfree_skb_any(tx_buffer_info->skb);
		tx_buffer_info->skb = NULL;
	}
	tx_buffer_info->time_stamp = 0;
	/* tx_buffer_info must be completely set up in the transmit path */
}

#define IXGBE_MAX_TXD_PWR	14
#define IXGBE_MAX_DATA_PER_TXD	(1 << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
			 (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
#ifdef MAX_SKB_FRAGS
#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
	MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1)	/* for context */
#else
#define DESC_NEEDED TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD)
#endif

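/*
 * Worked example (illustrative): with IXGBE_MAX_TXD_PWR = 14 one descriptor
 * maps at most 16384 bytes, so TXD_USE_COUNT(60000) =
 * (60000 >> 14) + 1 = 3 + 1 = 4 descriptors.
 */
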
static void ixgbevf_tx_timeout(struct net_device *netdev);

/**
 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 * @tx_ring: tx ring to clean
 **/
static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
				 struct ixgbevf_ring *tx_ring)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
	struct ixgbevf_tx_buffer *tx_buffer_info;
	unsigned int i, eop, count = 0;
	unsigned int total_bytes = 0, total_packets = 0;

	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		return true;

	i = tx_ring->next_to_clean;
	eop = tx_ring->tx_buffer_info[i].next_to_watch;
	eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);

	while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
	       (count < tx_ring->work_limit)) {
		bool cleaned = false;
		rmb(); /* read buffer_info after eop_desc */
		/* eop could change between read and DD-check */
		if (unlikely(eop != tx_ring->tx_buffer_info[i].next_to_watch))
			goto cont_loop;
		for ( ; !cleaned; count++) {
			struct sk_buff *skb;
			tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			cleaned = (i == eop);
			skb = tx_buffer_info->skb;

			if (cleaned && skb) {
				unsigned int segs, bytecount;

				/* gso_segs is currently only valid for tcp */
				segs = skb_shinfo(skb)->gso_segs ?: 1;
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * skb_headlen(skb)) +
					    skb->len;
				total_packets += segs;
				total_bytes += bytecount;
			}

			ixgbevf_unmap_and_free_tx_resource(adapter,
							   tx_buffer_info);

			tx_desc->wb.status = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}

cont_loop:
		eop = tx_ring->tx_buffer_info[i].next_to_watch;
		eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(count && netif_carrier_ok(netdev) &&
		     (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
#ifdef HAVE_TX_MQ
		if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
		    !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
			netif_wake_subqueue(netdev, tx_ring->queue_index);
			++adapter->restart_queue;
		}
#else
		if (netif_queue_stopped(netdev) &&
		    !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
			netif_wake_queue(netdev);
			++adapter->restart_queue;
		}
#endif
	}

	/* re-arm the interrupt */
	if ((count >= tx_ring->work_limit) &&
	    (!test_bit(__IXGBEVF_DOWN, &adapter->state))) {
		IXGBE_WRITE_REG(hw, IXGBE_VTEICS, tx_ring->v_idx);
	}

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->total_bytes += total_bytes;
	tx_ring->total_packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);

	return count < tx_ring->work_limit;
}

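/*
 * The DESC_NEEDED * 2 TX_WAKE_THRESHOLD used above adds hysteresis: a queue
 * stopped for lack of descriptors is only restarted once at least two
 * worst-case frames worth of descriptors are free, which avoids rapid
 * stop/wake cycling under load.
 */
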
/**
 * ixgbevf_receive_skb - Send a completed packet up the stack
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 * @status: hardware indication of status of receive
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 * @rx_desc: rx descriptor
 **/
static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
				struct sk_buff *skb, u8 status,
				struct ixgbevf_ring *ring,
				union ixgbe_adv_rx_desc *rx_desc)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	bool is_vlan = (status & IXGBE_RXD_STAT_VP);
	u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);

	if (is_vlan && test_bit(tag, adapter->active_vlans))
		__vlan_hwaccel_put_tag(skb, tag);

	if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
		napi_gro_receive(&q_vector->napi, skb);
	else
		netif_rx(skb);
}

/**
 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
 * @adapter: address of board private structure
 * @status_err: hardware indication of status of receive
 * @skb: skb currently being received and modified
 **/
static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter,
				       u32 status_err, struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Rx csum disabled */
	if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
		return;

	/* if IP and error */
	if ((status_err & IXGBE_RXD_STAT_IPCS) &&
	    (status_err & IXGBE_RXDADV_ERR_IPE)) {
		adapter->hw_csum_rx_error++;
		return;
	}

	if (!(status_err & IXGBE_RXD_STAT_L4CS))
		return;

	if (status_err & IXGBE_RXDADV_ERR_TCPE) {
		adapter->hw_csum_rx_error++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	adapter->hw_csum_rx_good++;
}

/**
 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 **/
static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
				     struct ixgbevf_ring *rx_ring,
				     int cleaned_count)
{
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbevf_rx_buffer *bi;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = rx_ring->rx_buf_len + NET_IP_ALIGN;

	i = rx_ring->next_to_use;
	bi = &rx_ring->rx_buffer_info[i];

	while (cleaned_count--) {
		rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);

		if (!bi->page_dma &&
		    (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
			if (!bi->page) {
				bi->page = alloc_page(GFP_ATOMIC | __GFP_COLD);
				if (!bi->page) {
					adapter->alloc_rx_page_failed++;
					goto no_buffers;
				}
				bi->page_offset = 0;
			} else {
				/* use a half page if we're re-using */
				bi->page_offset ^= (PAGE_SIZE / 2);
			}

			bi->page_dma = dma_map_page(&pdev->dev, bi->page,
						    bi->page_offset,
						    (PAGE_SIZE / 2),
						    DMA_FROM_DEVICE);
		}

		skb = bi->skb;
		if (!skb) {
			skb = netdev_alloc_skb(adapter->netdev,
					       bufsz);

			if (!skb) {
				adapter->alloc_rx_buff_failed++;
				goto no_buffers;
			}

			/*
			 * Make buffer alignment 2 beyond a 16 byte boundary
			 * this will result in a 16 byte aligned IP header after
			 * the 14 byte MAC header is removed
			 */
			skb_reserve(skb, NET_IP_ALIGN);

			bi->skb = skb;
		}
		if (!bi->dma) {
			bi->dma = dma_map_single(&pdev->dev, skb->data,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);
		}
		/* Refresh the desc even if buffer_addrs didn't change because
		 * each write-back erases this info. */
		if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
			rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
			rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
		} else {
			rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		bi = &rx_ring->rx_buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		if (i-- == 0)
			i = (rx_ring->count - 1);

		ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i);
	}
}

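/*
 * Illustrative example of the half-page flip above (assuming a 4 KB
 * PAGE_SIZE): page_offset alternates between 0 and 2048, so one allocated
 * page backs two successive 2 KB receive buffers before a fresh page is
 * needed.
 */
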
static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
					     u64 qmask)
{
	u32 mask;
	struct ixgbe_hw *hw = &adapter->hw;

	mask = (qmask & 0xFFFFFFFF);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
}

static inline u16 ixgbevf_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
{
	return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
}

static inline u16 ixgbevf_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
{
	return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
}

static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
				 struct ixgbevf_ring *rx_ring,
				 int *work_done, int work_to_do)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
	struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer;
	struct sk_buff *skb;
	unsigned int i;
	u32 len, staterr;
	u16 hdr_info;
	bool cleaned = false;
	int cleaned_count = 0;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	rx_buffer_info = &rx_ring->rx_buffer_info[i];

	while (staterr & IXGBE_RXD_STAT_DD) {
		u32 upper_len = 0;
		if (*work_done >= work_to_do)
			break;
		(*work_done)++;

		rmb(); /* read descriptor and rx_buffer_info after status DD */
		if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
			hdr_info = le16_to_cpu(ixgbevf_get_hdr_info(rx_desc));
			len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
			       IXGBE_RXDADV_HDRBUFLEN_SHIFT;
			if (hdr_info & IXGBE_RXDADV_SPH)
				adapter->rx_hdr_split++;
			if (len > IXGBEVF_RX_HDR_SIZE)
				len = IXGBEVF_RX_HDR_SIZE;
			upper_len = le16_to_cpu(rx_desc->wb.upper.length);
		} else {
			len = le16_to_cpu(rx_desc->wb.upper.length);
		}
		cleaned = true;
		skb = rx_buffer_info->skb;
		prefetch(skb->data - NET_IP_ALIGN);
		rx_buffer_info->skb = NULL;

		if (rx_buffer_info->dma) {
			dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
					 rx_ring->rx_buf_len,
					 DMA_FROM_DEVICE);
			rx_buffer_info->dma = 0;
			skb_put(skb, len);
		}

		if (upper_len) {
			dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
				       PAGE_SIZE / 2, DMA_FROM_DEVICE);
			rx_buffer_info->page_dma = 0;
			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
					   rx_buffer_info->page,
					   rx_buffer_info->page_offset,
					   upper_len);

			if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
			    (page_count(rx_buffer_info->page) != 1))
				rx_buffer_info->page = NULL;
			else
				get_page(rx_buffer_info->page);

			skb->len += upper_len;
			skb->data_len += upper_len;
			skb->truesize += upper_len;
		}

		i++;
		if (i == rx_ring->count)
			i = 0;

		next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i);
		prefetch(next_rxd);
		cleaned_count++;

		next_buffer = &rx_ring->rx_buffer_info[i];

		if (!(staterr & IXGBE_RXD_STAT_EOP)) {
			if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
				rx_buffer_info->skb = next_buffer->skb;
				rx_buffer_info->dma = next_buffer->dma;
				next_buffer->skb = skb;
				next_buffer->dma = 0;
			} else {
				skb->next = next_buffer->skb;
				skb->next->prev = skb;
			}
			adapter->non_eop_descs++;
			goto next_desc;
		}

		/* ERR_MASK will only have valid bits if EOP set */
		if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		ixgbevf_rx_checksum(adapter, staterr, skb);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		/*
		 * Work around issue of some types of VM to VM loop back
		 * packets not getting split correctly
		 */
		if (staterr & IXGBE_RXD_STAT_LB) {
			u32 header_fixup_len = skb_headlen(skb);
			if (header_fixup_len < 14)
				skb_push(skb, header_fixup_len);
		}
		skb->protocol = eth_type_trans(skb, adapter->netdev);

		ixgbevf_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
			ixgbevf_alloc_rx_buffers(adapter, rx_ring,
						 cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		rx_buffer_info = &rx_ring->rx_buffer_info[i];

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = IXGBE_DESC_UNUSED(rx_ring);

	if (cleaned_count)
		ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count);

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->total_packets += total_rx_packets;
	rx_ring->total_bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);

	return cleaned;
}

/**
 * ixgbevf_clean_rxonly - msix (aka one shot) rx clean routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function is optimized for cleaning one queue only on a single
 * q_vector!!!
 **/
static int ixgbevf_clean_rxonly(struct napi_struct *napi, int budget)
{
	struct ixgbevf_q_vector *q_vector =
		container_of(napi, struct ixgbevf_q_vector, napi);
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_ring *rx_ring = NULL;
	int work_done = 0;
	long r_idx;

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	rx_ring = &(adapter->rx_ring[r_idx]);

	ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget);

	/* If all Rx work done, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		if (adapter->itr_setting & 1)
			ixgbevf_set_itr_msix(q_vector);
		if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
			ixgbevf_irq_enable_queues(adapter, rx_ring->v_idx);
	}

	return work_done;
}

/**
 * ixgbevf_clean_rxonly_many - msix (aka one shot) rx clean routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean more than one rx queue associated with a
 * q_vector.
 **/
static int ixgbevf_clean_rxonly_many(struct napi_struct *napi, int budget)
{
	struct ixgbevf_q_vector *q_vector =
		container_of(napi, struct ixgbevf_q_vector, napi);
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_ring *rx_ring = NULL;
	int work_done = 0, i;
	long r_idx;
	u64 enable_mask = 0;

	/* attempt to distribute budget to each queue fairly, but don't allow
	 * the budget to go below 1 because we'll exit polling */
	budget /= (q_vector->rxr_count ?: 1);
	budget = max(budget, 1);
	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	for (i = 0; i < q_vector->rxr_count; i++) {
		rx_ring = &(adapter->rx_ring[r_idx]);
		ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
		enable_mask |= rx_ring->v_idx;
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
				      r_idx + 1);
	}

#ifndef HAVE_NETDEV_NAPI_LIST
	if (!netif_running(adapter->netdev))
		work_done = 0;

#endif
	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	rx_ring = &(adapter->rx_ring[r_idx]);

	/* If all Rx work done, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		if (adapter->itr_setting & 1)
			ixgbevf_set_itr_msix(q_vector);
		if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
			ixgbevf_irq_enable_queues(adapter, enable_mask);
	}

	return work_done;
}

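/*
 * Illustrative budget split: with a NAPI budget of 64 and rxr_count = 4,
 * budget /= 4 gives each ring a 16-packet allowance; the max(budget, 1)
 * clamp keeps a ring from ever being polled with a zero budget.
 */
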
/**
 * ixgbevf_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
{
	struct ixgbevf_q_vector *q_vector;
	struct ixgbe_hw *hw = &adapter->hw;
	int i, j, q_vectors, v_idx, r_idx;
	u32 mask;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/*
	 * Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		q_vector = adapter->q_vector[v_idx];
		/* XXX for_each_set_bit(...) */
		r_idx = find_first_bit(q_vector->rxr_idx,
				       adapter->num_rx_queues);

		for (i = 0; i < q_vector->rxr_count; i++) {
			j = adapter->rx_ring[r_idx].reg_idx;
			ixgbevf_set_ivar(adapter, 0, j, v_idx);
			r_idx = find_next_bit(q_vector->rxr_idx,
					      adapter->num_rx_queues,
					      r_idx + 1);
		}
		r_idx = find_first_bit(q_vector->txr_idx,
				       adapter->num_tx_queues);

		for (i = 0; i < q_vector->txr_count; i++) {
			j = adapter->tx_ring[r_idx].reg_idx;
			ixgbevf_set_ivar(adapter, 1, j, v_idx);
			r_idx = find_next_bit(q_vector->txr_idx,
					      adapter->num_tx_queues,
					      r_idx + 1);
		}

		/* if this is a tx only vector halve the interrupt rate */
		if (q_vector->txr_count && !q_vector->rxr_count)
			q_vector->eitr = (adapter->eitr_param >> 1);
		else if (q_vector->rxr_count)
			/* rx only */
			q_vector->eitr = adapter->eitr_param;

		ixgbevf_write_eitr(adapter, v_idx, q_vector->eitr);
	}

	ixgbevf_set_ivar(adapter, -1, 1, v_idx);

	/* set up to autoclear timer, and the vectors */
	mask = IXGBE_EIMS_ENABLE_MASK;
	mask &= ~IXGBE_EIMS_OTHER;
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
}

enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};

/**
 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
 * @adapter: pointer to adapter
 * @eitr: eitr setting (ints per sec) to give last timeslice
 * @itr_setting: current throttle rate in ints/second
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 *
 *      Stores a new ITR value based on packets and byte
 *      counts during the last interrupt.  The advantage of per interrupt
 *      computation is faster updates and more accurate ITR for the current
 *      traffic pattern.  Constants in this function were computed
 *      based on theoretical maximum wire speed and thresholds were set based
 *      on testing data as well as attempting to minimize response time
 *      while increasing bulk throughput.
 **/
static u8 ixgbevf_update_itr(struct ixgbevf_adapter *adapter,
			     u32 eitr, u8 itr_setting,
			     int packets, int bytes)
{
	unsigned int retval = itr_setting;
	u32 timepassed_us;
	u64 bytes_perint;

	if (packets == 0)
		goto update_itr_done;

	/* simple throttlerate management
	 *    0-20MB/s lowest (100000 ints/s)
	 *   20-100MB/s low   (20000 ints/s)
	 *  100-1249MB/s bulk (8000 ints/s)
	 */
	/* what was last interrupt timeslice? */
	timepassed_us = 1000000/eitr;
	bytes_perint = bytes / timepassed_us; /* bytes/usec */

	switch (itr_setting) {
	case lowest_latency:
		if (bytes_perint > adapter->eitr_low)
			retval = low_latency;
		break;
	case low_latency:
		if (bytes_perint > adapter->eitr_high)
			retval = bulk_latency;
		else if (bytes_perint <= adapter->eitr_low)
			retval = lowest_latency;
		break;
	case bulk_latency:
		if (bytes_perint <= adapter->eitr_high)
			retval = low_latency;
		break;
	}

update_itr_done:
	return retval;
}

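/*
 * Worked example (illustrative): at eitr = 20000 ints/s the timeslice is
 * 1000000/20000 = 50 us; 50000 bytes in that slice gives bytes_perint =
 * 1000 bytes/usec (~8 Gbit/s), which, assuming eitr_high is well below
 * 1000, the thresholds above classify as bulk-rate traffic.
 */
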
/**
 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
 * @adapter: pointer to adapter struct
 * @v_idx: vector index into q_vector array
 * @itr_reg: new value to be written in *register* format, not ints/s
 *
 * This function is made to be called by ethtool and by the driver
 * when it needs to update VTEITR registers at runtime.  Hardware
 * specific quirks/differences are taken care of here.
 */
static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx,
			       u32 itr_reg)
{
	struct ixgbe_hw *hw = &adapter->hw;

	itr_reg = EITR_INTS_PER_SEC_TO_REG(itr_reg);

	/*
	 * set the WDIS bit to not clear the timer bits and cause an
	 * immediate assertion of the interrupt
	 */
	itr_reg |= IXGBE_EITR_CNT_WDIS;

	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
}

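/*
 * Conversion note (an assumption about the EITR_INTS_PER_SEC_TO_REG helper,
 * which lives in the driver header, and about the interval granularity):
 * if the VTEITR interval field counts in 256 ns units, 8000 ints/s maps to
 * roughly 1000000000 / (8000 * 256) = 488 as the register interval value.
 */
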
static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	u32 new_itr;
	u8 current_itr, ret_itr;
	int i, r_idx, v_idx = q_vector->v_idx;
	struct ixgbevf_ring *rx_ring, *tx_ring;

	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
	for (i = 0; i < q_vector->txr_count; i++) {
		tx_ring = &(adapter->tx_ring[r_idx]);
		ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr,
					     q_vector->tx_itr,
					     tx_ring->total_packets,
					     tx_ring->total_bytes);
		/* if the result for this queue would decrease interrupt
		 * rate for this vector then use that result */
		q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
				    q_vector->tx_itr - 1 : ret_itr);
		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
				      r_idx + 1);
	}

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	for (i = 0; i < q_vector->rxr_count; i++) {
		rx_ring = &(adapter->rx_ring[r_idx]);
		ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr,
					     q_vector->rx_itr,
					     rx_ring->total_packets,
					     rx_ring->total_bytes);
		/* if the result for this queue would decrease interrupt
		 * rate for this vector then use that result */
		q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
				    q_vector->rx_itr - 1 : ret_itr);
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
				      r_idx + 1);
	}

	current_itr = max(q_vector->rx_itr, q_vector->tx_itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = 100000;
		break;
	case low_latency:
		new_itr = 20000; /* aka hwitr = ~200 */
		break;
	case bulk_latency:
	default:
		new_itr = 8000;
		break;
	}

	if (new_itr != q_vector->eitr) {
		u32 itr_reg;

		/* save the algorithm value here, not the smoothed one */
		q_vector->eitr = new_itr;
		/* do an exponential smoothing */
		new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
		itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
		ixgbevf_write_eitr(adapter, v_idx, itr_reg);
	}
}

static irqreturn_t ixgbevf_msix_mbx(int irq, void *data)
{
	struct net_device *netdev = data;
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eicr;
	u32 msg;
	bool got_ack = false;

	eicr = IXGBE_READ_REG(hw, IXGBE_VTEICS);
	IXGBE_WRITE_REG(hw, IXGBE_VTEICR, eicr);

	if (!hw->mbx.ops.check_for_ack(hw))
		got_ack = true;

	if (!hw->mbx.ops.check_for_msg(hw)) {
		hw->mbx.ops.read(hw, &msg, 1);

		if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG)
			mod_timer(&adapter->watchdog_timer,
				  round_jiffies(jiffies + 1));

		if (msg & IXGBE_VT_MSGTYPE_NACK)
			pr_warn("Last Request of type %2.2x to PF Nacked\n",
				msg & 0xFF);
		/*
		 * Restore the PFSTS bit in case someone is polling for a
		 * return message from the PF
		 */
		hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFSTS;
	}

	/*
	 * checking for the ack clears the PFACK bit.  Place
	 * it back in the v2p_mailbox cache so that anyone
	 * polling for an ack will not miss it
	 */
	if (got_ack)
		hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK;

	return IRQ_HANDLED;
}

static irqreturn_t ixgbevf_msix_clean_tx(int irq, void *data)
{
	struct ixgbevf_q_vector *q_vector = data;
	struct ixgbevf_adapter  *adapter = q_vector->adapter;
	struct ixgbevf_ring     *tx_ring;
	int i, r_idx;

	if (!q_vector->txr_count)
		return IRQ_HANDLED;

	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
	for (i = 0; i < q_vector->txr_count; i++) {
		tx_ring = &(adapter->tx_ring[r_idx]);
		ixgbevf_clean_tx_irq(adapter, tx_ring);
		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
				      r_idx + 1);
	}

	if (adapter->itr_setting & 1)
		ixgbevf_set_itr_msix(q_vector);

	return IRQ_HANDLED;
}

/**
 * ixgbevf_msix_clean_rx - single unshared vector rx clean (all queues)
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 **/
static irqreturn_t ixgbevf_msix_clean_rx(int irq, void *data)
{
	struct ixgbevf_q_vector *q_vector = data;
	struct ixgbevf_adapter  *adapter = q_vector->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbevf_ring  *rx_ring;
	int r_idx;

	if (!q_vector->rxr_count)
		return IRQ_HANDLED;

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	rx_ring = &(adapter->rx_ring[r_idx]);
	/* disable interrupts on this vector only */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, rx_ring->v_idx);
	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

static irqreturn_t ixgbevf_msix_clean_many(int irq, void *data)
{
	ixgbevf_msix_clean_rx(irq, data);
	ixgbevf_msix_clean_tx(irq, data);

	return IRQ_HANDLED;
}

static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
				     int r_idx)
{
	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

	set_bit(r_idx, q_vector->rxr_idx);
	q_vector->rxr_count++;
	a->rx_ring[r_idx].v_idx = 1 << v_idx;
}

static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
				     int t_idx)
{
	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

	set_bit(t_idx, q_vector->txr_idx);
	q_vector->txr_count++;
	a->tx_ring[t_idx].v_idx = 1 << v_idx;
}

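/*
 * Illustrative example: a ring mapped to MSI-X vector 2 stores v_idx =
 * 1 << 2 = 0x4; these per-ring bitmasks are what later get written to
 * VTEICS/VTEIMC/VTEIMS to target just that vector.
 */
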
/**
 * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible.  You would add new
 * mapping configurations in here.
 **/
static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
{
	int q_vectors;
	int v_start = 0;
	int rxr_idx = 0, txr_idx = 0;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int i, j;
	int rqpv, tqpv;
	int err = 0;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/*
	 * The ideal configuration...
	 * We have enough vectors to map one per queue.
	 */
	if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
		for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
			map_vector_to_rxq(adapter, v_start, rxr_idx);

		for (; txr_idx < txr_remaining; v_start++, txr_idx++)
			map_vector_to_txq(adapter, v_start, txr_idx);
		goto out;
	}

	/*
	 * If we don't have enough vectors for a 1-to-1
	 * mapping, we'll have to group them so there are
	 * multiple queues per vector.
	 */
	/* Re-adjusting *qpv takes care of the remainder. */
	for (i = v_start; i < q_vectors; i++) {
		rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
		for (j = 0; j < rqpv; j++) {
			map_vector_to_rxq(adapter, i, rxr_idx);
			rxr_idx++;
			rxr_remaining--;
		}
	}
	for (i = v_start; i < q_vectors; i++) {
		tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
		for (j = 0; j < tqpv; j++) {
			map_vector_to_txq(adapter, i, txr_idx);
			txr_idx++;
			txr_remaining--;
		}
	}

out:
	return err;
}

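/*
 * Worked example of the grouped mapping (illustrative): 4 Rx queues over
 * 3 vectors yields rqpv = DIV_ROUND_UP(4, 3) = 2 queues on vector 0, then
 * DIV_ROUND_UP(2, 2) = 1 on vector 1 and DIV_ROUND_UP(1, 1) = 1 on
 * vector 2.
 */
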
/**
 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	irqreturn_t (*handler)(int, void *);
	int i, vector, q_vectors, err;
	int ri = 0, ti = 0;

	/* Decrement for Other and TCP Timer vectors */
	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

#define SET_HANDLER(_v) (((_v)->rxr_count && (_v)->txr_count)          \
					  ? &ixgbevf_msix_clean_many : \
			  (_v)->rxr_count ? &ixgbevf_msix_clean_rx   : \
			  (_v)->txr_count ? &ixgbevf_msix_clean_tx   : \
			  NULL)
	for (vector = 0; vector < q_vectors; vector++) {
		handler = SET_HANDLER(adapter->q_vector[vector]);

		if (handler == &ixgbevf_msix_clean_rx) {
			sprintf(adapter->name[vector], "%s-%s-%d",
				netdev->name, "rx", ri++);
		} else if (handler == &ixgbevf_msix_clean_tx) {
			sprintf(adapter->name[vector], "%s-%s-%d",
				netdev->name, "tx", ti++);
		} else if (handler == &ixgbevf_msix_clean_many) {
			sprintf(adapter->name[vector], "%s-%s-%d",
				netdev->name, "TxRx", vector);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(adapter->msix_entries[vector].vector,
				  handler, 0, adapter->name[vector],
				  adapter->q_vector[vector]);
		if (err) {
			hw_dbg(&adapter->hw,
			       "request_irq failed for MSIX interrupt "
			       "Error: %d\n", err);
			goto free_queue_irqs;
		}
	}

	sprintf(adapter->name[vector], "%s:mbx", netdev->name);
	err = request_irq(adapter->msix_entries[vector].vector,
			  &ixgbevf_msix_mbx, 0, adapter->name[vector], netdev);
	if (err) {
		hw_dbg(&adapter->hw,
		       "request_irq for msix_mbx failed: %d\n", err);
		goto free_queue_irqs;
	}

	return 0;

free_queue_irqs:
	for (i = vector - 1; i >= 0; i--)
		free_irq(adapter->msix_entries[--vector].vector,
			 &(adapter->q_vector[i]));
	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
	return err;
}

static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
{
	int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (i = 0; i < q_vectors; i++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
		bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
		bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
		q_vector->rxr_count = 0;
		q_vector->txr_count = 0;
		q_vector->eitr = adapter->eitr_param;
	}
}

/**
 * ixgbevf_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
{
	int err = 0;

	err = ixgbevf_request_msix_irqs(adapter);

	if (err)
		hw_dbg(&adapter->hw,
		       "request_irq failed, Error %d\n", err);

	return err;
}

static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i, q_vectors;

	q_vectors = adapter->num_msix_vectors;

	i = q_vectors - 1;

	free_irq(adapter->msix_entries[i].vector, netdev);
	i--;

	for (; i >= 0; i--) {
		free_irq(adapter->msix_entries[i].vector,
			 adapter->q_vector[i]);
	}

	ixgbevf_reset_q_vectors(adapter);
}

/**
 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
{
	int i;
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);

	IXGBE_WRITE_FLUSH(hw);

	for (i = 0; i < adapter->num_msix_vectors; i++)
		synchronize_irq(adapter->msix_entries[i].vector);
}

/**
 * ixgbevf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter,
				      bool queues, bool flush)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 mask;
	u64 qmask;

	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
	qmask = ~0;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);

	if (queues)
		ixgbevf_irq_enable_queues(adapter, qmask);

	if (flush)
		IXGBE_WRITE_FLUSH(hw);
}

/**
 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
{
	u64 tdba;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 i, j, tdlen, txctrl;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct ixgbevf_ring *ring = &adapter->tx_ring[i];
		j = ring->reg_idx;
		tdba = ring->dma;
		tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
				(tdba & DMA_BIT_MASK(32)));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j), tdlen);
		IXGBE_WRITE_REG(hw, IXGBE_VFTDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFTDT(j), 0);
		adapter->tx_ring[i].head = IXGBE_VFTDH(j);
		adapter->tx_ring[i].tail = IXGBE_VFTDT(j);
		/* Disable Tx Head Writeback RO bit, since this hoses
		 * bookkeeping if things aren't delivered in order.
		 */
		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
		txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
	}
}

#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT	2

static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
{
	struct ixgbevf_ring *rx_ring;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 srrctl;

	rx_ring = &adapter->rx_ring[index];

	srrctl = IXGBE_SRRCTL_DROP_EN;

	if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
		u16 bufsz = IXGBEVF_RXBUFFER_2048;
		/* grow the amount we can receive on large page machines */
		if (bufsz < (PAGE_SIZE / 2))
			bufsz = (PAGE_SIZE / 2);
		/* cap the bufsz at our largest descriptor size */
		bufsz = min((u16)IXGBEVF_MAX_RXBUFFER, bufsz);

		srrctl |= bufsz >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
		srrctl |= ((IXGBEVF_RX_HDR_SIZE <<
			   IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
			   IXGBE_SRRCTL_BSIZEHDR_MASK);
	} else {
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

		if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
			srrctl |= IXGBEVF_RXBUFFER_2048 >>
				IXGBE_SRRCTL_BSIZEPKT_SHIFT;
		else
			srrctl |= rx_ring->rx_buf_len >>
				IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	}
	IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
}

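/*
 * Sizing note (assumes the usual 1 KB BSIZEPKT granularity, i.e.
 * IXGBE_SRRCTL_BSIZEPKT_SHIFT = 10): a 2048-byte buffer programs a value
 * of 2 in the BSIZEPKT field, since that field counts packet-buffer space
 * in 1 KB units.
 */
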
/**
 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
{
	u64 rdba;
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	int i, j;
	u32 rdlen;
	int rx_buf_len;

	/* Decide whether to use packet split mode or not */
	if (netdev->mtu > ETH_DATA_LEN) {
		if (adapter->flags & IXGBE_FLAG_RX_PS_CAPABLE)
			adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
		else
			adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
	} else {
		if (adapter->flags & IXGBE_FLAG_RX_1BUF_CAPABLE)
			adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
		else
			adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
	}

	/* Set the RX buffer length according to the mode */
	if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
		/* PSRTYPE must be initialized in 82599 */
		u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
			IXGBE_PSRTYPE_UDPHDR |
			IXGBE_PSRTYPE_IPV4HDR |
			IXGBE_PSRTYPE_IPV6HDR |
			IXGBE_PSRTYPE_L2HDR;
		IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
		rx_buf_len = IXGBEVF_RX_HDR_SIZE;
	} else {
		IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
		if (netdev->mtu <= ETH_DATA_LEN)
			rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
		else
			rx_buf_len = ALIGN(max_frame, 1024);
	}

	rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		rdba = adapter->rx_ring[i].dma;
		j = adapter->rx_ring[i].reg_idx;
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
				(rdba & DMA_BIT_MASK(32)));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), rdlen);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0);
		adapter->rx_ring[i].head = IXGBE_VFRDH(j);
		adapter->rx_ring[i].tail = IXGBE_VFRDT(j);
		adapter->rx_ring[i].rx_buf_len = rx_buf_len;

		ixgbevf_configure_srrctl(adapter, j);
	}
}

static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	/* add VID to filter table */
	if (hw->mac.ops.set_vfta)
		hw->mac.ops.set_vfta(hw, vid, 0, true);
	set_bit(vid, adapter->active_vlans);

	return 0;
}

static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	/* remove VID from filter table */
	if (hw->mac.ops.set_vfta)
		hw->mac.ops.set_vfta(hw, vid, 0, false);
	clear_bit(vid, adapter->active_vlans);

	return 0;
}

static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
{
	u16 vid;

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		ixgbevf_vlan_rx_add_vid(adapter->netdev, vid);
}

static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int count = 0;

	if ((netdev_uc_count(netdev)) > 10) {
		pr_err("Too many unicast filters - No Space\n");
		return -ENOSPC;
	}

	if (!netdev_uc_empty(netdev)) {
		struct netdev_hw_addr *ha;
		netdev_for_each_uc_addr(ha, netdev) {
			hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
			udelay(200);
		}
	} else {
		/*
		 * If the list is empty then send message to PF driver to
		 * clear all macvlans on this VF.
		 */
		hw->mac.ops.set_uc_addr(hw, 0, NULL);
	}

	return count;
}

/**
 * ixgbevf_set_rx_mode - Multicast set
 * @netdev: network interface device structure
 *
 * The set_rx_method entry point is called whenever the multicast address
 * list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper multicast mode.
 **/
static void ixgbevf_set_rx_mode(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	/* reprogram multicast list */
	if (hw->mac.ops.update_mc_addr_list)
		hw->mac.ops.update_mc_addr_list(hw, netdev);

	ixgbevf_write_uc_addr_list(netdev);
}

static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
{
	int q_idx;
	struct ixgbevf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		struct napi_struct *napi;
		q_vector = adapter->q_vector[q_idx];
		if (!q_vector->rxr_count)
			continue;
		napi = &q_vector->napi;
		if (q_vector->rxr_count > 1)
			napi->poll = &ixgbevf_clean_rxonly_many;

		napi_enable(napi);
	}
}

static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
{
	int q_idx;
	struct ixgbevf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = adapter->q_vector[q_idx];
		if (!q_vector->rxr_count)
			continue;
		napi_disable(&q_vector->napi);
	}
}

static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	ixgbevf_set_rx_mode(netdev);

	ixgbevf_restore_vlan(adapter);

	ixgbevf_configure_tx(adapter);
	ixgbevf_configure_rx(adapter);
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct ixgbevf_ring *ring = &adapter->rx_ring[i];
		ixgbevf_alloc_rx_buffers(adapter, ring, ring->count);
		ring->next_to_use = ring->count - 1;
		writel(ring->next_to_use, adapter->hw.hw_addr + ring->tail);
	}
}

#define IXGBE_MAX_RX_DESC_POLL 10
static inline void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
						int rxr)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int j = adapter->rx_ring[rxr].reg_idx;
	int k;

	for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
		if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
			break;
		else
			msleep(1);
	}
	if (k >= IXGBE_MAX_RX_DESC_POLL) {
		hw_dbg(hw, "RXDCTL.ENABLE on Rx queue %d "
		       "not set within the polling period\n", rxr);
	}

	ixgbevf_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
				(adapter->rx_ring[rxr].count - 1));
}

static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
{
	/* Only save pre-reset stats if there are some */
	if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
		adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
			adapter->stats.base_vfgprc;
		adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
			adapter->stats.base_vfgptc;
		adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
			adapter->stats.base_vfgorc;
		adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
			adapter->stats.base_vfgotc;
		adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
			adapter->stats.base_vfmprc;
	}
}

static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
	adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
	adapter->stats.last_vfgorc |=
		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
	adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
	adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
	adapter->stats.last_vfgotc |=
		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
	adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);

	adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
	adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
	adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
	adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
	adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
}

static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	int i, j = 0;
	int num_rx_rings = adapter->num_rx_queues;
	u32 txdctl, rxdctl;
	u32 msg[2];

	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		/* enable WTHRESH=8 descriptors, to encourage burst writeback */
		txdctl |= (8 << 16);
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
	}

	for (i = 0; i < num_rx_rings; i++) {
		j = adapter->rx_ring[i].reg_idx;
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
		if (hw->mac.type == ixgbe_mac_X540_vf) {
			rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
			rxdctl |= ((netdev->mtu + ETH_HLEN + ETH_FCS_LEN) |
				   IXGBE_RXDCTL_RLPML_EN);
		}
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
		ixgbevf_rx_desc_queue_enable(adapter, i);
	}

	ixgbevf_configure_msix(adapter);

	if (hw->mac.ops.set_rar) {
		if (is_valid_ether_addr(hw->mac.addr))
			hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
		else
			hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
	}

	msg[0] = IXGBE_VF_SET_LPE;
	msg[1] = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	hw->mbx.ops.write_posted(hw, msg, 2);

	clear_bit(__IXGBEVF_DOWN, &adapter->state);
	ixgbevf_napi_enable_all(adapter);

	/* enable transmits */
	netif_tx_start_all_queues(netdev);

	ixgbevf_save_reset_stats(adapter);
	ixgbevf_init_last_counter_stats(adapter);

	/* bring the link up in the watchdog, this could race with our first
	 * link up interrupt but shouldn't be a problem */
	adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
	adapter->link_check_timeout = jiffies;
	mod_timer(&adapter->watchdog_timer, jiffies);
}

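/*
 * Field note on the first TXDCTL write in ixgbevf_up_complete() (assumes
 * the 82599 TXDCTL layout, where WTHRESH occupies bits 22:16): (8 << 16)
 * asks the hardware to accumulate eight completed descriptors before
 * writing status back, trading a little completion latency for fewer
 * PCIe writes.
 */
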
void ixgbevf_up(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	ixgbevf_configure(adapter);

	ixgbevf_up_complete(adapter);

	/* clear any pending interrupts, may auto mask */
	IXGBE_READ_REG(hw, IXGBE_VTEICR);

	ixgbevf_irq_enable(adapter, true, true);
}

/**
 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
 * @adapter: board private structure
 * @rx_ring: ring to free buffers from
 **/
static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
				  struct ixgbevf_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	if (!rx_ring->rx_buffer_info)
		return;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct ixgbevf_rx_buffer *rx_buffer_info;

		rx_buffer_info = &rx_ring->rx_buffer_info[i];
		if (rx_buffer_info->dma) {
			dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
					 rx_ring->rx_buf_len,
					 DMA_FROM_DEVICE);
			rx_buffer_info->dma = 0;
		}
		if (rx_buffer_info->skb) {
			struct sk_buff *skb = rx_buffer_info->skb;
			rx_buffer_info->skb = NULL;
			do {
				struct sk_buff *this = skb;
				skb = skb->prev;
				dev_kfree_skb(this);
			} while (skb);
		}
		if (!rx_buffer_info->page)
			continue;
		dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
			       PAGE_SIZE / 2, DMA_FROM_DEVICE);
		rx_buffer_info->page_dma = 0;
		put_page(rx_buffer_info->page);
		rx_buffer_info->page = NULL;
		rx_buffer_info->page_offset = 0;
	}

	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	if (rx_ring->head)
		writel(0, adapter->hw.hw_addr + rx_ring->head);
	if (rx_ring->tail)
		writel(0, adapter->hw.hw_addr + rx_ring->tail);
}

/**
 * ixgbevf_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 * @tx_ring: ring to be cleaned
 **/
static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter,
				  struct ixgbevf_ring *tx_ring)
{
	struct ixgbevf_tx_buffer *tx_buffer_info;
	unsigned long size;
	unsigned int i;

	if (!tx_ring->tx_buffer_info)
		return;

	/* Free all the Tx ring sk_buffs */

	for (i = 0; i < tx_ring->count; i++) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		ixgbevf_unmap_and_free_tx_resource(adapter, tx_buffer_info);
	}

	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_buffer_info, 0, size);

	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (tx_ring->head)
		writel(0, adapter->hw.hw_addr + tx_ring->head);
	if (tx_ring->tail)
		writel(0, adapter->hw.hw_addr + tx_ring->tail);
}

/**
 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbevf_clean_rx_ring(adapter, &adapter->rx_ring[i]);
}

/**
 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbevf_clean_tx_ring(adapter, &adapter->tx_ring[i]);
}

void ixgbevf_down(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 txdctl;
	int i, j;

	/* signal that we are down to the interrupt handler */
	set_bit(__IXGBEVF_DOWN, &adapter->state);
	/* disable receives */

	netif_tx_disable(netdev);

	msleep(10);

	netif_tx_stop_all_queues(netdev);

	ixgbevf_irq_disable(adapter);

	ixgbevf_napi_disable_all(adapter);

	del_timer_sync(&adapter->watchdog_timer);
	/* can't call flush scheduled work here because it can deadlock
	 * if linkwatch_event tries to acquire the rtnl_lock which we are
	 * holding */
	while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK)
		msleep(1);

	/* disable transmits in the hardware now that interrupts are off */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j),
				(txdctl & ~IXGBE_TXDCTL_ENABLE));
	}

	netif_carrier_off(netdev);

	if (!pci_channel_offline(adapter->pdev))
		ixgbevf_reset(adapter);

	ixgbevf_clean_all_tx_rings(adapter);
	ixgbevf_clean_all_rx_rings(adapter);
}

void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	WARN_ON(in_interrupt());

	while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
		msleep(1);

	/*
	 * Check if PF is up before re-init.  If not then skip until
	 * later when the PF is up and ready to service requests from
	 * the VF via mailbox.  If the VF is up and running then the
	 * watchdog task will continue to schedule reset tasks until
	 * the PF is up and running.
	 */
	if (!hw->mac.ops.reset_hw(hw)) {
		ixgbevf_down(adapter);
		ixgbevf_up(adapter);
	}

	clear_bit(__IXGBEVF_RESETTING, &adapter->state);
}

void ixgbevf_reset(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;

	if (hw->mac.ops.reset_hw(hw))
		hw_dbg(hw, "PF still resetting\n");
	else
		hw->mac.ops.init_hw(hw);

	if (is_valid_ether_addr(adapter->hw.mac.addr)) {
		memcpy(netdev->dev_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
		memcpy(netdev->perm_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
	}
}

static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
					 int vectors)
{
	int err, vector_threshold;

	/* We'll want at least 3 (vector_threshold):
	 * 1) TxQ[0] Cleanup
	 * 2) RxQ[0] Cleanup
	 * 3) Other (Link Status Change, etc.)
	 */
	vector_threshold = MIN_MSIX_COUNT;

	/* The more we get, the more we will assign to Tx/Rx Cleanup
	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting irq's.
	 */
	while (vectors >= vector_threshold) {
		err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
				      vectors);
		if (!err) /* Success in acquiring all requested vectors. */
			break;
		else if (err < 0)
			vectors = 0; /* Nasty failure, quit now */
		else /* err == number of vectors we should try again with */
			vectors = err;
	}

	if (vectors < vector_threshold) {
		/* Can't allocate enough MSI-X interrupts?  Oh well.
		 * This just means we'll go with either a single MSI
		 * vector or fall back to legacy interrupts.
		 */
		hw_dbg(&adapter->hw,
		       "Unable to allocate MSI-X interrupts\n");
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else {
		/*
		 * Adjust for only the vectors we'll use, which is minimum
		 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
		 * vectors we were allocated.
		 */
		adapter->num_msix_vectors = vectors;
	}
}

/**
 * ixgbevf_set_num_queues: Allocate queues for device, feature dependent
 * @adapter: board private structure to initialize
 *
 * This is the top level queue allocation routine.  The order here is very
 * important, starting with the "most" number of features turned on at once,
 * and ending with the smallest set of features.  This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fallthrough conditions.
 *
 **/
static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
{
	/* Start with base case */
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_rx_pools = adapter->num_rx_queues;
	adapter->num_rx_queues_per_pool = 1;
}

/**
 * ixgbevf_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.  The polling_netdev array is
 * intended for Multiqueue, but should work fine with a single queue.
 **/
static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
{
	int i;

	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
				   sizeof(struct ixgbevf_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		goto err_tx_ring_allocation;

	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
				   sizeof(struct ixgbevf_ring), GFP_KERNEL);
	if (!adapter->rx_ring)
		goto err_rx_ring_allocation;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		adapter->tx_ring[i].count = adapter->tx_ring_count;
		adapter->tx_ring[i].queue_index = i;
		adapter->tx_ring[i].reg_idx = i;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->rx_ring[i].count = adapter->rx_ring_count;
		adapter->rx_ring[i].queue_index = i;
		adapter->rx_ring[i].reg_idx = i;
	}

	return 0;

err_rx_ring_allocation:
	kfree(adapter->tx_ring);
err_tx_ring_allocation:
	return -ENOMEM;
}

/**
 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
{
	int err = 0;
	int vector, v_budget;

	/*
	 * It's easy to be greedy for MSI-X vectors, but it really
	 * doesn't do us much good if we have a lot more vectors
	 * than CPU's.  So let's be conservative and only ask for
	 * (roughly) twice the number of vectors as there are CPU's.
	 */
	v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
		       (int)(num_online_cpus() * 2)) + NON_Q_VECTORS;

	/* A failure in MSI-X entry allocation isn't fatal, but it does
	 * mean we disable MSI-X capabilities of the adapter. */
	adapter->msix_entries = kcalloc(v_budget,
					sizeof(struct msix_entry), GFP_KERNEL);
	if (!adapter->msix_entries) {
		err = -ENOMEM;
		goto out;
	}

	for (vector = 0; vector < v_budget; vector++)
		adapter->msix_entries[vector].entry = vector;

	ixgbevf_acquire_msix_vectors(adapter, v_budget);

out:
	return err;
}
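
/*
 * Note: although the comment in ixgbevf_acquire_msix_vectors() mentions
 * falling back to a single MSI vector or legacy interrupts, no such fallback
 * path exists in this VF driver; if MSI-X acquisition fails, msix_entries is
 * left NULL and bringing the interface up will fail later when the MSI-X
 * IRQs are requested.
 */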
/**
 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
{
	int q_idx, num_q_vectors;
	struct ixgbevf_q_vector *q_vector;
	int napi_vectors;
	int (*poll)(struct napi_struct *, int);

	num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	napi_vectors = adapter->num_rx_queues;
	poll = &ixgbevf_clean_rxonly;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
		if (!q_vector)
			goto err_out;
		q_vector->adapter = adapter;
		q_vector->v_idx = q_idx;
		q_vector->eitr = adapter->eitr_param;
		if (q_idx < napi_vectors)
			netif_napi_add(adapter->netdev, &q_vector->napi,
				       (*poll), 64);
		adapter->q_vector[q_idx] = q_vector;
	}

	return 0;

err_out:
	while (q_idx) {
		q_idx--;
		q_vector = adapter->q_vector[q_idx];
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
		adapter->q_vector[q_idx] = NULL;
	}
	return -ENOMEM;
}
/**
 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
{
	int q_idx, num_q_vectors;
	int napi_vectors;

	num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	napi_vectors = adapter->num_rx_queues;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];

		adapter->q_vector[q_idx] = NULL;
		if (q_idx < napi_vectors)
			netif_napi_del(&q_vector->napi);
		kfree(q_vector);
	}
}
/**
 * ixgbevf_reset_interrupt_capability - Reset MSIX setup
 * @adapter: board private structure
 *
 **/
static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
{
	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
}
/**
 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
 * @adapter: board private structure to initialize
 *
 **/
static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
{
	int err;

	/* Number of supported queues */
	ixgbevf_set_num_queues(adapter);

	err = ixgbevf_set_interrupt_capability(adapter);
	if (err) {
		hw_dbg(&adapter->hw,
		       "Unable to setup interrupt capabilities\n");
		goto err_set_interrupt;
	}

	err = ixgbevf_alloc_q_vectors(adapter);
	if (err) {
		hw_dbg(&adapter->hw, "Unable to allocate memory for queue "
		       "vectors\n");
		goto err_alloc_q_vectors;
	}

	err = ixgbevf_alloc_queues(adapter);
	if (err) {
		pr_err("Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, "
	       "Tx Queue count = %u\n",
	       (adapter->num_rx_queues > 1) ? "Enabled" :
	       "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);

	set_bit(__IXGBEVF_DOWN, &adapter->state);

	return 0;
err_alloc_queues:
	ixgbevf_free_q_vectors(adapter);
err_alloc_q_vectors:
	ixgbevf_reset_interrupt_capability(adapter);
err_set_interrupt:
	return err;
}
/**
 * ixgbevf_sw_init - Initialize general software structures
 * (struct ixgbevf_adapter)
 * @adapter: board private structure to initialize
 *
 * ixgbevf_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	int err;

	/* PCI config space info */

	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	hw->mbx.ops.init_params(hw);
	hw->mac.max_tx_queues = MAX_TX_QUEUES;
	hw->mac.max_rx_queues = MAX_RX_QUEUES;
	err = hw->mac.ops.reset_hw(hw);
	if (err) {
		dev_info(&pdev->dev,
			 "PF still in reset state, assigning new address\n");
		eth_hw_addr_random(adapter->netdev);
		memcpy(adapter->hw.mac.addr, adapter->netdev->dev_addr,
		       adapter->netdev->addr_len);
	} else {
		err = hw->mac.ops.init_hw(hw);
		if (err) {
			pr_err("init_shared_code failed: %d\n", err);
			goto out;
		}
		memcpy(adapter->netdev->dev_addr, adapter->hw.mac.addr,
		       adapter->netdev->addr_len);
	}

	/* Enable dynamic interrupt throttling rates */
	adapter->eitr_param = 20000;
	adapter->itr_setting = 1;

	/* set defaults for eitr in MegaBytes */
	adapter->eitr_low = 10;
	adapter->eitr_high = 20;

	/* set default ring sizes */
	adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
	adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;

	/* enable rx csum by default */
	adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;

	set_bit(__IXGBEVF_DOWN, &adapter->state);

out:
	return err;
}
#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter)	\
	{							\
		u32 current_counter = IXGBE_READ_REG(hw, reg);	\
		if (current_counter < last_counter)		\
			counter += 0x100000000LL;		\
		last_counter = current_counter;			\
		counter &= 0xFFFFFFFF00000000LL;		\
		counter |= current_counter;			\
	}

#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
	{								 \
		u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb);	 \
		u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb);	 \
		u64 current_counter = (current_counter_msb << 32) |	 \
			current_counter_lsb;				 \
		if (current_counter < last_counter)			 \
			counter += 0x1000000000LL;			 \
		last_counter = current_counter;				 \
		counter &= 0xFFFFFFF000000000LL;			 \
		counter |= current_counter;				 \
	}
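
/*
 * Example of the splice the macros above perform: the VF's hardware counters
 * are rolling 32-bit (or 36-bit) registers, while the driver keeps 64-bit
 * totals.  If the last snapshot was 0xFFFFFFF0 and the register now reads
 * 0x00000010, current < last signals a wrap, so 2^32 is added to the running
 * total before the low 32 bits are masked out and replaced with the new
 * register value.
 */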
/**
 * ixgbevf_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 **/
void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
				adapter->stats.vfgprc);
	UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
				adapter->stats.vfgptc);
	UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
				adapter->stats.last_vfgorc,
				adapter->stats.vfgorc);
	UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
				adapter->stats.last_vfgotc,
				adapter->stats.vfgotc);
	UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
				adapter->stats.vfmprc);
}
/**
 * ixgbevf_watchdog - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void ixgbevf_watchdog(unsigned long data)
{
	struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
	struct ixgbe_hw *hw = &adapter->hw;
	u64 eics = 0;
	int i;

	/*
	 * Do the watchdog outside of interrupt context due to the lovely
	 * delays that some of the newer hardware requires
	 */

	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		goto watchdog_short_circuit;

	/* get one bit for every active tx/rx interrupt vector */
	for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
		struct ixgbevf_q_vector *qv = adapter->q_vector[i];
		if (qv->rxr_count || qv->txr_count)
			eics |= (1 << i);
	}

	IXGBE_WRITE_REG(hw, IXGBE_VTEICS, (u32)eics);

watchdog_short_circuit:
	schedule_work(&adapter->watchdog_task);
}
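
/*
 * Note: writing a vector's bit to VTEICS sets its interrupt cause, so the
 * register write above forces an interrupt on every vector that has active
 * rings; the resulting cleanup pass keeps the rings from stalling if an
 * interrupt was ever missed.
 */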
/**
 * ixgbevf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void ixgbevf_tx_timeout(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	schedule_work(&adapter->reset_task);
}
static void ixgbevf_reset_task(struct work_struct *work)
{
	struct ixgbevf_adapter *adapter;
	adapter = container_of(work, struct ixgbevf_adapter, reset_task);

	/* If we're already down or resetting, just bail */
	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
	    test_bit(__IXGBEVF_RESETTING, &adapter->state))
		return;

	adapter->tx_timeout_count++;

	ixgbevf_reinit_locked(adapter);
}
/**
 * ixgbevf_watchdog_task - worker thread to bring link up
 * @work: pointer to work_struct containing our data
 **/
static void ixgbevf_watchdog_task(struct work_struct *work)
{
	struct ixgbevf_adapter *adapter = container_of(work,
						       struct ixgbevf_adapter,
						       watchdog_task);
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 link_speed = adapter->link_speed;
	bool link_up = adapter->link_up;

	adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;

	/*
	 * Always check the link on the watchdog because we have
	 * no LSC interrupt
	 */
	if (hw->mac.ops.check_link) {
		if ((hw->mac.ops.check_link(hw, &link_speed,
					    &link_up, false)) != 0) {
			adapter->link_up = link_up;
			adapter->link_speed = link_speed;
			netif_carrier_off(netdev);
			netif_tx_stop_all_queues(netdev);
			schedule_work(&adapter->reset_task);
			goto pf_has_reset;
		}
	} else {
		/* always assume link is up, if no check link
		 * function */
		link_speed = IXGBE_LINK_SPEED_10GB_FULL;
		link_up = true;
	}
	adapter->link_up = link_up;
	adapter->link_speed = link_speed;

	if (link_up) {
		if (!netif_carrier_ok(netdev)) {
			hw_dbg(&adapter->hw, "NIC Link is Up, %u Gbps\n",
			       (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
			       10 : 1);
			netif_carrier_on(netdev);
			netif_tx_wake_all_queues(netdev);
		}
	} else {
		adapter->link_up = false;
		adapter->link_speed = 0;
		if (netif_carrier_ok(netdev)) {
			hw_dbg(&adapter->hw, "NIC Link is Down\n");
			netif_carrier_off(netdev);
			netif_tx_stop_all_queues(netdev);
		}
	}

	ixgbevf_update_stats(adapter);

pf_has_reset:
	/* Reset the timer */
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer,
			  round_jiffies(jiffies + (2 * HZ)));

	adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
}
/**
 * ixgbevf_free_tx_resources - Free Tx Resources per Queue
 * @adapter: board private structure
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter,
			       struct ixgbevf_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	ixgbevf_clean_tx_ring(adapter, tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
			  tx_ring->dma);

	tx_ring->desc = NULL;
}
/**
 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		if (adapter->tx_ring[i].desc)
			ixgbevf_free_tx_resources(adapter,
						  &adapter->tx_ring[i]);
}
/**
 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
			       struct ixgbevf_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
	tx_ring->tx_buffer_info = vzalloc(size);
	if (!tx_ring->tx_buffer_info)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->work_limit = tx_ring->count;
	return 0;

err:
	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;
	hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit "
	       "descriptor ring\n");
	return -ENOMEM;
}
/**
 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * callers duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = ixgbevf_setup_tx_resources(adapter, &adapter->tx_ring[i]);
		if (!err)
			continue;
		hw_dbg(&adapter->hw,
		       "Allocation for Tx Queue %u failed\n", i);
		break;
	}

	return err;
}
/**
 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
			       struct ixgbevf_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
	rx_ring->rx_buffer_info = vzalloc(size);
	if (!rx_ring->rx_buffer_info)
		goto alloc_failed;

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);

	if (!rx_ring->desc) {
		hw_dbg(&adapter->hw,
		       "Unable to allocate memory for "
		       "the receive descriptor ring\n");
		vfree(rx_ring->rx_buffer_info);
		rx_ring->rx_buffer_info = NULL;
		goto alloc_failed;
	}

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;
alloc_failed:
	return -ENOMEM;
}
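
/*
 * Note: both the Tx and Rx setup paths above round the descriptor area up to
 * a 4 KiB boundary before calling dma_alloc_coherent(), which hands back
 * suitably aligned memory, satisfying the hardware's descriptor-ring
 * alignment requirement.
 */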
/**
 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * callers duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = ixgbevf_setup_rx_resources(adapter, &adapter->rx_ring[i]);
		if (!err)
			continue;
		hw_dbg(&adapter->hw,
		       "Allocation for Rx Queue %u failed\n", i);
		break;
	}

	return err;
}
/**
 * ixgbevf_free_rx_resources - Free Rx Resources
 * @adapter: board private structure
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter,
			       struct ixgbevf_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	ixgbevf_clean_rx_ring(adapter, rx_ring);

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
			  rx_ring->dma);

	rx_ring->desc = NULL;
}
/**
 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		if (adapter->rx_ring[i].desc)
			ixgbevf_free_rx_resources(adapter,
						  &adapter->rx_ring[i]);
}
/**
 * ixgbevf_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int ixgbevf_open(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int err;

	/* disallow open during test */
	if (test_bit(__IXGBEVF_TESTING, &adapter->state))
		return -EBUSY;

	if (hw->adapter_stopped) {
		ixgbevf_reset(adapter);
		/* if adapter is still stopped then PF isn't up and
		 * the vf can't start. */
		if (hw->adapter_stopped) {
			err = IXGBE_ERR_MBX;
			pr_err("Unable to start - perhaps the PF Driver isn't "
			       "up yet\n");
			goto err_setup_reset;
		}
	}

	/* allocate transmit descriptors */
	err = ixgbevf_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = ixgbevf_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	ixgbevf_configure(adapter);

	/*
	 * Map the Tx/Rx rings to the vectors we were allotted.
	 * if request_irq will be called in this function map_rings
	 * must be called *before* up_complete
	 */
	ixgbevf_map_rings_to_vectors(adapter);

	ixgbevf_up_complete(adapter);

	/* clear any pending interrupts, may auto mask */
	IXGBE_READ_REG(hw, IXGBE_VTEICR);
	err = ixgbevf_request_irq(adapter);
	if (err)
		goto err_req_irq;

	ixgbevf_irq_enable(adapter, true, true);

	return 0;

err_req_irq:
	ixgbevf_down(adapter);
	ixgbevf_free_irq(adapter);
err_setup_rx:
	ixgbevf_free_all_rx_resources(adapter);
err_setup_tx:
	ixgbevf_free_all_tx_resources(adapter);
	ixgbevf_reset(adapter);

err_setup_reset:

	return err;
}
/**
 * ixgbevf_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the drivers control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int ixgbevf_close(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	ixgbevf_down(adapter);
	ixgbevf_free_irq(adapter);

	ixgbevf_free_all_tx_resources(adapter);
	ixgbevf_free_all_rx_resources(adapter);

	return 0;
}
static int ixgbevf_tso(struct ixgbevf_adapter *adapter,
		       struct ixgbevf_ring *tx_ring,
		       struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	unsigned int i;
	int err;
	struct ixgbevf_tx_buffer *tx_buffer_info;
	u32 vlan_macip_lens = 0, type_tucmd_mlhl;
	u32 mss_l4len_idx, l4len;

	if (skb_is_gso(skb)) {
		if (skb_header_cloned(skb)) {
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (err)
				return err;
		}
		l4len = tcp_hdrlen(skb);
		*hdr_len += l4len;

		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *iph = ip_hdr(skb);
			iph->tot_len = 0;
			iph->check = 0;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
			adapter->hw_tso_ctxt++;
		} else if (skb_is_gso_v6(skb)) {
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check =
			    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					     &ipv6_hdr(skb)->daddr,
					     0, IPPROTO_TCP, 0);
			adapter->hw_tso6_ctxt++;
		}

		i = tx_ring->next_to_use;

		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);

		/* VLAN MACLEN IPLEN */
		if (tx_flags & IXGBE_TX_FLAGS_VLAN)
			vlan_macip_lens |=
				(tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
		vlan_macip_lens |= ((skb_network_offset(skb)) <<
				    IXGBE_ADVTXD_MACLEN_SHIFT);
		*hdr_len += skb_network_offset(skb);
		vlan_macip_lens |=
			(skb_transport_header(skb) - skb_network_header(skb));
		*hdr_len +=
			(skb_transport_header(skb) - skb_network_header(skb));
		context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
		context_desc->seqnum_seed = 0;

		/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
		type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
				   IXGBE_ADVTXD_DTYP_CTXT);

		if (skb->protocol == htons(ETH_P_IP))
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);

		/* MSS L4LEN IDX */
		mss_l4len_idx =
			(skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
		mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
		/* use index 1 for TSO */
		mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
		context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);

		tx_buffer_info->time_stamp = jiffies;
		tx_buffer_info->next_to_watch = i;

		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}

	return false;
}
static bool ixgbevf_tx_csum(struct ixgbevf_adapter *adapter,
			    struct ixgbevf_ring *tx_ring,
			    struct sk_buff *skb, u32 tx_flags)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	unsigned int i;
	struct ixgbevf_tx_buffer *tx_buffer_info;
	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL ||
	    (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
		i = tx_ring->next_to_use;
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);

		if (tx_flags & IXGBE_TX_FLAGS_VLAN)
			vlan_macip_lens |= (tx_flags &
					    IXGBE_TX_FLAGS_VLAN_MASK);
		vlan_macip_lens |= (skb_network_offset(skb) <<
				    IXGBE_ADVTXD_MACLEN_SHIFT);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			vlan_macip_lens |= (skb_transport_header(skb) -
					    skb_network_header(skb));

		context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
		context_desc->seqnum_seed = 0;

		type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
				    IXGBE_ADVTXD_DTYP_CTXT);

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			switch (skb->protocol) {
			case __constant_htons(ETH_P_IP):
				type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
					type_tucmd_mlhl |=
						IXGBE_ADVTXD_TUCMD_L4T_TCP;
				break;
			case __constant_htons(ETH_P_IPV6):
				/* XXX what about other V6 headers?? */
				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
					type_tucmd_mlhl |=
						IXGBE_ADVTXD_TUCMD_L4T_TCP;
				break;
			default:
				if (unlikely(net_ratelimit())) {
					pr_warn("partial checksum but "
						"proto=%x!\n", skb->protocol);
				}
				break;
			}
		}

		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
		/* use index zero for tx checksum offload */
		context_desc->mss_l4len_idx = 0;

		tx_buffer_info->time_stamp = jiffies;
		tx_buffer_info->next_to_watch = i;

		adapter->hw_csum_tx_good++;
		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}

	return false;
}
static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
			  struct ixgbevf_ring *tx_ring,
			  struct sk_buff *skb, u32 tx_flags,
			  unsigned int first)
{
	struct pci_dev *pdev = adapter->pdev;
	struct ixgbevf_tx_buffer *tx_buffer_info;
	unsigned int len;
	unsigned int total = skb->len;
	unsigned int offset = 0, size;
	int count = 0;
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int f;
	int i;

	i = tx_ring->next_to_use;

	len = min(skb_headlen(skb), total);
	while (len) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);

		tx_buffer_info->length = size;
		tx_buffer_info->mapped_as_page = false;
		tx_buffer_info->dma = dma_map_single(&adapter->pdev->dev,
						     skb->data + offset,
						     size, DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
			goto dma_error;
		tx_buffer_info->time_stamp = jiffies;
		tx_buffer_info->next_to_watch = i;

		len -= size;
		total -= size;
		offset += size;
		count++;
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	for (f = 0; f < nr_frags; f++) {
		const struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];
		len = min((unsigned int)skb_frag_size(frag), total);
		offset = 0;

		while (len) {
			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);

			tx_buffer_info->length = size;
			tx_buffer_info->dma =
				skb_frag_dma_map(&adapter->pdev->dev, frag,
						 offset, size, DMA_TO_DEVICE);
			tx_buffer_info->mapped_as_page = true;
			if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
				goto dma_error;
			tx_buffer_info->time_stamp = jiffies;
			tx_buffer_info->next_to_watch = i;

			len -= size;
			total -= size;
			offset += size;
			count++;
			i++;
			if (i == tx_ring->count)
				i = 0;
		}
		if (total == 0)
			break;
	}

	if (i == 0)
		i = tx_ring->count - 1;
	else
		i = i - 1;
	tx_ring->tx_buffer_info[i].skb = skb;
	tx_ring->tx_buffer_info[first].next_to_watch = i;

	return count;

dma_error:
	dev_err(&pdev->dev, "TX DMA map failed\n");

	/* clear timestamp and dma mappings for failed tx_buffer_info map */
	tx_buffer_info->dma = 0;
	tx_buffer_info->time_stamp = 0;
	tx_buffer_info->next_to_watch = 0;
	count--;

	/* clear timestamp and dma mappings for remaining portion of packet */
	while (count >= 0) {
		count--;
		i--;
		if (i < 0)
			i += tx_ring->count;
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		ixgbevf_unmap_and_free_tx_resource(adapter, tx_buffer_info);
	}

	return count;
}
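
/*
 * Note: the dma_error path above walks the ring backwards (i wraps via
 * i += tx_ring->count) and unmaps every buffer this packet had already
 * mapped, so a failed skb leaves no stale DMA mappings behind.
 */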
static void ixgbevf_tx_queue(struct ixgbevf_adapter *adapter,
			     struct ixgbevf_ring *tx_ring, int tx_flags,
			     int count, u32 paylen, u8 hdr_len)
{
	union ixgbe_adv_tx_desc *tx_desc = NULL;
	struct ixgbevf_tx_buffer *tx_buffer_info;
	u32 olinfo_status = 0, cmd_type_len = 0;
	unsigned int i;

	u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;

	cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;

	cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;

	if (tx_flags & IXGBE_TX_FLAGS_VLAN)
		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;

	if (tx_flags & IXGBE_TX_FLAGS_TSO) {
		cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;

		olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
			IXGBE_ADVTXD_POPTS_SHIFT;

		/* use index 1 context for tso */
		olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
		if (tx_flags & IXGBE_TX_FLAGS_IPV4)
			olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
				IXGBE_ADVTXD_POPTS_SHIFT;

	} else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
		olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
			IXGBE_ADVTXD_POPTS_SHIFT;

	olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);

	i = tx_ring->next_to_use;
	while (count--) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
		tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
		tx_desc->read.cmd_type_len =
			cpu_to_le32(cmd_type_len | tx_buffer_info->length);
		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);

	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();

	tx_ring->next_to_use = i;
	writel(i, adapter->hw.hw_addr + tx_ring->tail);
}
static int __ixgbevf_maybe_stop_tx(struct net_device *netdev,
				   struct ixgbevf_ring *tx_ring, int size)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	netif_stop_subqueue(netdev, tx_ring->queue_index);
	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available. */
	if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(netdev, tx_ring->queue_index);
	++adapter->restart_queue;
	return 0;
}

static int ixgbevf_maybe_stop_tx(struct net_device *netdev,
				 struct ixgbevf_ring *tx_ring, int size)
{
	if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __ixgbevf_maybe_stop_tx(netdev, tx_ring, size);
}
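
/*
 * Note: the stop-then-recheck dance in __ixgbevf_maybe_stop_tx() closes a
 * race with the Tx completion path: a completion on another CPU may free
 * descriptors between the caller's check and netif_stop_subqueue(), so the
 * queue is restarted immediately if room appeared after the barrier.
 */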
static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbevf_ring *tx_ring;
	unsigned int first;
	unsigned int tx_flags = 0;
	u8 hdr_len = 0;
	int r_idx = 0, tso;
	int count = 0;

	unsigned int f;

	tx_ring = &adapter->tx_ring[r_idx];

	if (vlan_tx_tag_present(skb)) {
		tx_flags |= vlan_tx_tag_get(skb);
		tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= IXGBE_TX_FLAGS_VLAN;
	}

	/* four things can cause us to need a context descriptor */
	if (skb_is_gso(skb) ||
	    (skb->ip_summed == CHECKSUM_PARTIAL) ||
	    (tx_flags & IXGBE_TX_FLAGS_VLAN))
		count++;

	count += TXD_USE_COUNT(skb_headlen(skb));
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]));

	if (ixgbevf_maybe_stop_tx(netdev, tx_ring, count)) {
		adapter->tx_busy++;
		return NETDEV_TX_BUSY;
	}

	first = tx_ring->next_to_use;

	if (skb->protocol == htons(ETH_P_IP))
		tx_flags |= IXGBE_TX_FLAGS_IPV4;
	tso = ixgbevf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (tso)
		tx_flags |= IXGBE_TX_FLAGS_TSO;
	else if (ixgbevf_tx_csum(adapter, tx_ring, skb, tx_flags) &&
		 (skb->ip_summed == CHECKSUM_PARTIAL))
		tx_flags |= IXGBE_TX_FLAGS_CSUM;

	ixgbevf_tx_queue(adapter, tx_ring, tx_flags,
			 ixgbevf_tx_map(adapter, tx_ring, skb, tx_flags, first),
			 skb->len, hdr_len);

	ixgbevf_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);

	return NETDEV_TX_OK;
}
/**
 * ixgbevf_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbevf_set_mac(struct net_device *netdev, void *p)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	if (hw->mac.ops.set_rar)
		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);

	return 0;
}
/**
 * ixgbevf_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
	int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;
	u32 msg[2];

	if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
		max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;

	/* MTU < 68 is an error and causes problems on some kernels */
	if ((new_mtu < 68) || (max_frame > max_possible_frame))
		return -EINVAL;

	hw_dbg(&adapter->hw, "changing MTU from %d to %d\n",
	       netdev->mtu, new_mtu);
	/* must set new MTU before calling down or up */
	netdev->mtu = new_mtu;

	if (!netif_running(netdev)) {
		msg[0] = IXGBE_VF_SET_LPE;
		msg[1] = max_frame;
		hw->mbx.ops.write_posted(hw, msg, 2);
	}

	if (netif_running(netdev))
		ixgbevf_reinit_locked(adapter);

	return 0;
}
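
/*
 * Note: the IXGBE_VF_SET_LPE mailbox message above carries the VF's new
 * maximum frame size to the PF so it can program the matching large-packet
 * settings; when the interface is running, ixgbevf_reinit_locked() instead
 * reprograms the hardware for the new MTU as part of the down/up cycle.
 */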
static void ixgbevf_shutdown(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		ixgbevf_down(adapter);
		ixgbevf_free_irq(adapter);
		ixgbevf_free_all_tx_resources(adapter);
		ixgbevf_free_all_rx_resources(adapter);
	}

	pci_save_state(pdev);

	pci_disable_device(pdev);
}
static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
						   struct rtnl_link_stats64 *stats)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	unsigned int start;
	u64 bytes, packets;
	const struct ixgbevf_ring *ring;
	int i;

	ixgbevf_update_stats(adapter);

	stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		ring = &adapter->rx_ring[i];
		do {
			start = u64_stats_fetch_begin_bh(&ring->syncp);
			bytes = ring->total_bytes;
			packets = ring->total_packets;
		} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
		stats->rx_bytes += bytes;
		stats->rx_packets += packets;
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		ring = &adapter->tx_ring[i];
		do {
			start = u64_stats_fetch_begin_bh(&ring->syncp);
			bytes = ring->total_bytes;
			packets = ring->total_packets;
		} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
		stats->tx_bytes += bytes;
		stats->tx_packets += packets;
	}

	return stats;
}
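
/*
 * Note: the u64_stats_fetch_begin_bh()/retry_bh() loops above implement a
 * seqcount read: if the ring's counters were updated mid-read, the snapshot
 * is retried, so the 64-bit byte/packet counts are read tear-free even on
 * 32-bit machines.
 */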
static int ixgbevf_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (features & NETIF_F_RXCSUM)
		adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
	else
		adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED;

	return 0;
}
static const struct net_device_ops ixgbe_netdev_ops = {
	.ndo_open		= ixgbevf_open,
	.ndo_stop		= ixgbevf_close,
	.ndo_start_xmit		= ixgbevf_xmit_frame,
	.ndo_set_rx_mode	= ixgbevf_set_rx_mode,
	.ndo_get_stats64	= ixgbevf_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= ixgbevf_set_mac,
	.ndo_change_mtu		= ixgbevf_change_mtu,
	.ndo_tx_timeout		= ixgbevf_tx_timeout,
	.ndo_vlan_rx_add_vid	= ixgbevf_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ixgbevf_vlan_rx_kill_vid,
	.ndo_set_features	= ixgbevf_set_features,
};

static void ixgbevf_assign_netdev_ops(struct net_device *dev)
{
	dev->netdev_ops = &ixgbe_netdev_ops;
	ixgbevf_set_ethtool_ops(dev);
	dev->watchdog_timeo = 5 * HZ;
}
/**
 * ixgbevf_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgbevf_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit ixgbevf_probe(struct pci_dev *pdev,
				   const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct ixgbevf_adapter *adapter = NULL;
	struct ixgbe_hw *hw = NULL;
	const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
	static int cards_found;
	int err, pci_using_dac;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
			if (err) {
				dev_err(&pdev->dev, "No usable DMA "
					"configuration, aborting\n");
				goto err_dma;
			}
		}
		pci_using_dac = 0;
	}

	err = pci_request_regions(pdev, ixgbevf_driver_name);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
		goto err_pci_reg;
	}

	pci_set_master(pdev);

#ifdef HAVE_TX_MQ
	netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
				   MAX_TX_QUEUES);
#else
	netdev = alloc_etherdev(sizeof(struct ixgbevf_adapter));
#endif
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);

	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

	/*
	 * call save state here in standalone driver because it relies on
	 * adapter struct to exist, and needs to call netdev_priv
	 */
	pci_save_state(pdev);

	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
			      pci_resource_len(pdev, 0));
	if (!hw->hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	ixgbevf_assign_netdev_ops(netdev);

	adapter->bd_number = cards_found;

	/* Setup hw api */
	memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
	hw->mac.type = ii->mac;

	memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
	       sizeof(struct ixgbe_mbx_operations));

	adapter->flags &= ~IXGBE_FLAG_RX_PS_CAPABLE;
	adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
	adapter->flags |= IXGBE_FLAG_RX_1BUF_CAPABLE;

	/* setup the private structure */
	err = ixgbevf_sw_init(adapter);
	if (err)
		goto err_sw_init;

	/* The HW MAC address was set and/or determined in sw_init */
	memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		pr_err("invalid MAC address\n");
		err = -EIO;
		goto err_sw_init;
	}

	netdev->hw_features = NETIF_F_SG |
			      NETIF_F_IP_CSUM |
			      NETIF_F_IPV6_CSUM |
			      NETIF_F_TSO |
			      NETIF_F_TSO6 |
			      NETIF_F_RXCSUM;

	netdev->features = netdev->hw_features |
			   NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_RX |
			   NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = ixgbevf_watchdog;
	adapter->watchdog_timer.data = (unsigned long)adapter;

	INIT_WORK(&adapter->reset_task, ixgbevf_reset_task);
	INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task);

	err = ixgbevf_init_interrupt_scheme(adapter);
	if (err)
		goto err_sw_init;

	/* pick up the PCI bus settings for reporting later */
	if (hw->mac.ops.get_bus_info)
		hw->mac.ops.get_bus_info(hw);

	strcpy(netdev->name, "eth%d");

	err = register_netdev(netdev);
	if (err)
		goto err_register;

	adapter->netdev_registered = true;

	netif_carrier_off(netdev);

	ixgbevf_init_last_counter_stats(adapter);

	/* print the MAC address */
	hw_dbg(hw, "%pM\n", netdev->dev_addr);

	hw_dbg(hw, "MAC: %d\n", hw->mac.type);

	hw_dbg(hw, "LRO is disabled\n");

	hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
	cards_found++;
	return 0;

err_register:
err_sw_init:
	ixgbevf_reset_interrupt_capability(adapter);
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
/**
 * ixgbevf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbevf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit ixgbevf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	set_bit(__IXGBEVF_DOWN, &adapter->state);

	del_timer_sync(&adapter->watchdog_timer);

	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);

	if (adapter->netdev_registered) {
		unregister_netdev(netdev);
		adapter->netdev_registered = false;
	}

	ixgbevf_reset_interrupt_capability(adapter);

	iounmap(adapter->hw.hw_addr);
	pci_release_regions(pdev);

	hw_dbg(&adapter->hw, "Remove complete\n");

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	free_netdev(netdev);

	pci_disable_device(pdev);
}
static struct pci_driver ixgbevf_driver = {
	.name     = ixgbevf_driver_name,
	.id_table = ixgbevf_pci_tbl,
	.probe    = ixgbevf_probe,
	.remove   = __devexit_p(ixgbevf_remove),
	.shutdown = ixgbevf_shutdown,
};
/**
 * ixgbevf_init_module - Driver Registration Routine
 *
 * ixgbevf_init_module is the first routine called when the driver is
 * loaded.  All it does is register with the PCI subsystem.
 **/
static int __init ixgbevf_init_module(void)
{
	int ret;
	pr_info("%s - version %s\n", ixgbevf_driver_string,
		ixgbevf_driver_version);

	pr_info("%s\n", ixgbevf_copyright);

	ret = pci_register_driver(&ixgbevf_driver);
	return ret;
}

module_init(ixgbevf_init_module);
/**
 * ixgbevf_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbevf_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit ixgbevf_exit_module(void)
{
	pci_unregister_driver(&ixgbevf_driver);
}

#ifdef DEBUG
/**
 * ixgbevf_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
{
	struct ixgbevf_adapter *adapter = hw->back;
	return adapter->netdev->name;
}

#endif
module_exit(ixgbevf_exit_module);

/* ixgbevf_main.c */