/*******************************************************************************

  Intel 82599 Virtual Function driver
  Copyright(c) 1999 - 2010 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

/******************************************************************************
 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
******************************************************************************/
#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>

#include "ixgbevf.h"
char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
	"Intel(R) 82599 Virtual Function";

#define DRV_VERSION "1.0.12-k0"
const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] =
	"Copyright (c) 2009 - 2010 Intel Corporation.";

static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
	[board_82599_vf] = &ixgbevf_vf_info,
};
/* ixgbevf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static struct pci_device_id ixgbevf_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_DEBUG_LEVEL_SHIFT 3
static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector);
static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx,
			       u32 itr_reg);

static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
					   struct ixgbevf_ring *rx_ring,
					   u32 val)
{
	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rx_ring->reg_idx), val);
}
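/*
 * Editor's note (not in the original source): the wmb() above ensures the
 * CPU's descriptor writes reach memory before the tail write tells the
 * device to fetch them; without it, a weakly ordered architecture could
 * let the device see a stale descriptor.
 */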
/**
 * ixgbevf_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 */
static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
			     u8 queue, u8 msix_vector)
{
	u32 ivar, index;
	struct ixgbe_hw *hw = &adapter->hw;
	if (direction == -1) {
		/* other causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
		ivar &= ~0xFF;
		ivar |= msix_vector;
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
	} else {
		/* tx or rx causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		index = ((16 * (queue & 1)) + (8 * direction));
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
		ivar &= ~(0xFF << index);
		ivar |= (msix_vector << index);
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
	}
}
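/*
 * Illustrative note (editor's addition, not in the original source): for
 * an Rx cause (direction = 0) on queue 3, the code above selects
 * VTIVAR(3 >> 1) = VTIVAR(1) and a bit offset of
 * index = 16 * (3 & 1) + 8 * 0 = 16, i.e. bits 16-23 of that register
 * receive the vector number with IXGBE_IVAR_ALLOC_VAL set.
 */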
static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_adapter *adapter,
					       struct ixgbevf_tx_buffer
					       *tx_buffer_info)
{
	if (tx_buffer_info->dma) {
		if (tx_buffer_info->mapped_as_page)
			dma_unmap_page(&adapter->pdev->dev,
				       tx_buffer_info->dma,
				       tx_buffer_info->length,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(&adapter->pdev->dev,
					 tx_buffer_info->dma,
					 tx_buffer_info->length,
					 DMA_TO_DEVICE);
		tx_buffer_info->dma = 0;
	}
	if (tx_buffer_info->skb) {
		dev_kfree_skb_any(tx_buffer_info->skb);
		tx_buffer_info->skb = NULL;
	}
	tx_buffer_info->time_stamp = 0;
	/* tx_buffer_info must be completely set up in the transmit path */
}
static inline bool ixgbevf_check_tx_hang(struct ixgbevf_adapter *adapter,
					 struct ixgbevf_ring *tx_ring,
					 unsigned int eop)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 head, tail;

	/* Detect a transmit hang in hardware, this serializes the
	 * check with the clearing of time_stamp and movement of eop */
	head = readl(hw->hw_addr + tx_ring->head);
	tail = readl(hw->hw_addr + tx_ring->tail);
	adapter->detect_tx_hung = false;
	if ((head != tail) &&
	    tx_ring->tx_buffer_info[eop].time_stamp &&
	    time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ)) {
		/* detected Tx unit hang */
		union ixgbe_adv_tx_desc *tx_desc;
		tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
		printk(KERN_ERR "Detected Tx Unit Hang\n"
		       "  Tx Queue             <%d>\n"
		       "  TDH, TDT             <%x>, <%x>\n"
		       "  next_to_use          <%x>\n"
		       "  next_to_clean        <%x>\n"
		       "tx_buffer_info[next_to_clean]\n"
		       "  time_stamp           <%lx>\n"
		       "  jiffies              <%lx>\n",
		       tx_ring->queue_index,
		       head, tail,
		       tx_ring->next_to_use, eop,
		       tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
		return true;
	}

	return false;
}
#define IXGBE_MAX_TXD_PWR	14
#define IXGBE_MAX_DATA_PER_TXD	(1 << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
			 (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
#ifdef MAX_SKB_FRAGS
#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
	MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1)      /* for context */
#else
#define DESC_NEEDED TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD)
#endif
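/*
 * Illustrative note (editor's addition, not in the original source):
 * TXD_USE_COUNT rounds up to whole 16KB descriptors, so
 * TXD_USE_COUNT(32768) == 2 while TXD_USE_COUNT(32769) == 3, and
 * DESC_NEEDED is the worst-case descriptor count for one skb: its
 * linear data, plus MAX_SKB_FRAGS page fragments, plus one context
 * descriptor.
 */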
static void ixgbevf_tx_timeout(struct net_device *netdev);
/**
 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 * @tx_ring: tx ring to clean
 **/
static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
				 struct ixgbevf_ring *tx_ring)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
	struct ixgbevf_tx_buffer *tx_buffer_info;
	unsigned int i, eop, count = 0;
	unsigned int total_bytes = 0, total_packets = 0;

	i = tx_ring->next_to_clean;
	eop = tx_ring->tx_buffer_info[i].next_to_watch;
	eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);

	while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
	       (count < tx_ring->work_limit)) {
		bool cleaned = false;
		rmb(); /* read buffer_info after eop_desc */
		for ( ; !cleaned; count++) {
			struct sk_buff *skb;
			tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			cleaned = (i == eop);
			skb = tx_buffer_info->skb;

			if (cleaned && skb) {
				unsigned int segs, bytecount;

				/* gso_segs is currently only valid for tcp */
				segs = skb_shinfo(skb)->gso_segs ?: 1;
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * skb_headlen(skb)) +
					    skb->len;
				total_packets += segs;
				total_bytes += bytecount;
			}

			ixgbevf_unmap_and_free_tx_resource(adapter,
							   tx_buffer_info);

			tx_desc->wb.status = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}

		eop = tx_ring->tx_buffer_info[i].next_to_watch;
		eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(count && netif_carrier_ok(netdev) &&
		     (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
#ifdef HAVE_TX_MQ
		if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
		    !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
			netif_wake_subqueue(netdev, tx_ring->queue_index);
			++adapter->restart_queue;
		}
#else
		if (netif_queue_stopped(netdev) &&
		    !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
			netif_wake_queue(netdev);
			++adapter->restart_queue;
		}
#endif
	}

	if (adapter->detect_tx_hung) {
		if (ixgbevf_check_tx_hang(adapter, tx_ring, i)) {
			/* schedule immediate reset if we believe we hung */
			printk(KERN_INFO
			       "tx hang %d detected, resetting adapter\n",
			       adapter->tx_timeout_count + 1);
			ixgbevf_tx_timeout(adapter->netdev);
		}
	}

	/* re-arm the interrupt */
	if ((count >= tx_ring->work_limit) &&
	    (!test_bit(__IXGBEVF_DOWN, &adapter->state))) {
		IXGBE_WRITE_REG(hw, IXGBE_VTEICS, tx_ring->v_idx);
	}

	tx_ring->total_bytes += total_bytes;
	tx_ring->total_packets += total_packets;

	netdev->stats.tx_bytes += total_bytes;
	netdev->stats.tx_packets += total_packets;

	return count < tx_ring->work_limit;
}
/**
 * ixgbevf_receive_skb - Send a completed packet up the stack
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 * @status: hardware indication of status of receive
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 * @rx_desc: rx descriptor
 **/
static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
				struct sk_buff *skb, u8 status,
				struct ixgbevf_ring *ring,
				union ixgbe_adv_rx_desc *rx_desc)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	bool is_vlan = (status & IXGBE_RXD_STAT_VP);
	u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
	int ret;

	if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
		if (adapter->vlgrp && is_vlan)
			vlan_gro_receive(&q_vector->napi,
					 adapter->vlgrp,
					 tag, skb);
		else
			napi_gro_receive(&q_vector->napi, skb);
	} else {
		if (adapter->vlgrp && is_vlan)
			ret = vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
		else
			ret = netif_rx(skb);
	}
}
/**
 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
 * @adapter: address of board private structure
 * @status_err: hardware indication of status of receive
 * @skb: skb currently being received and modified
 **/
static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter,
				       u32 status_err, struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Rx csum disabled */
	if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
		return;

	/* if IP and error */
	if ((status_err & IXGBE_RXD_STAT_IPCS) &&
	    (status_err & IXGBE_RXDADV_ERR_IPE)) {
		adapter->hw_csum_rx_error++;
		return;
	}

	if (!(status_err & IXGBE_RXD_STAT_L4CS))
		return;

	if (status_err & IXGBE_RXDADV_ERR_TCPE) {
		adapter->hw_csum_rx_error++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	adapter->hw_csum_rx_good++;
}
/**
 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 **/
static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
				     struct ixgbevf_ring *rx_ring,
				     int cleaned_count)
{
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbevf_rx_buffer *bi;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = rx_ring->rx_buf_len + NET_IP_ALIGN;

	i = rx_ring->next_to_use;
	bi = &rx_ring->rx_buffer_info[i];

	while (cleaned_count--) {
		rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);

		if (!bi->page_dma &&
		    (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
			if (!bi->page) {
				bi->page = netdev_alloc_page(adapter->netdev);
				if (!bi->page) {
					adapter->alloc_rx_page_failed++;
					goto no_buffers;
				}
				bi->page_offset = 0;
			} else {
				/* use a half page if we're re-using */
				bi->page_offset ^= (PAGE_SIZE / 2);
			}

			bi->page_dma = dma_map_page(&pdev->dev, bi->page,
						    bi->page_offset,
						    (PAGE_SIZE / 2),
						    DMA_FROM_DEVICE);
		}

		skb = bi->skb;
		if (!skb) {
			skb = netdev_alloc_skb(adapter->netdev,
					       bufsz);
			if (!skb) {
				adapter->alloc_rx_buff_failed++;
				goto no_buffers;
			}

			/*
			 * Make buffer alignment 2 beyond a 16 byte boundary
			 * this will result in a 16 byte aligned IP header after
			 * the 14 byte MAC header is removed
			 */
			skb_reserve(skb, NET_IP_ALIGN);

			bi->skb = skb;
		}
		if (!bi->dma) {
			bi->dma = dma_map_single(&pdev->dev, skb->data,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);
		}
		/* Refresh the desc even if buffer_addrs didn't change because
		 * each write-back erases this info. */
		if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
			rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
			rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
		} else {
			rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		bi = &rx_ring->rx_buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		if (i-- == 0)
			i = (rx_ring->count - 1);

		ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i);
	}
}
static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
					     u32 qmask)
{
	u32 mask;
	struct ixgbe_hw *hw = &adapter->hw;

	mask = (qmask & 0xFFFFFFFF);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
}
static inline u16 ixgbevf_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
{
	return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
}

static inline u16 ixgbevf_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
{
	return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
}
static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
				 struct ixgbevf_ring *rx_ring,
				 int *work_done, int work_to_do)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
	struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer;
	struct sk_buff *skb;
	unsigned int i;
	u32 len, staterr;
	u16 hdr_info;
	bool cleaned = false;
	int cleaned_count = 0;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	rx_buffer_info = &rx_ring->rx_buffer_info[i];

	while (staterr & IXGBE_RXD_STAT_DD) {
		u32 upper_len = 0;
		if (*work_done >= work_to_do)
			break;
		(*work_done)++;

		rmb(); /* read descriptor and rx_buffer_info after status DD */
		if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
			hdr_info = le16_to_cpu(ixgbevf_get_hdr_info(rx_desc));
			len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
			       IXGBE_RXDADV_HDRBUFLEN_SHIFT;
			if (hdr_info & IXGBE_RXDADV_SPH)
				adapter->rx_hdr_split++;
			if (len > IXGBEVF_RX_HDR_SIZE)
				len = IXGBEVF_RX_HDR_SIZE;
			upper_len = le16_to_cpu(rx_desc->wb.upper.length);
		} else {
			len = le16_to_cpu(rx_desc->wb.upper.length);
		}
		cleaned = true;
		skb = rx_buffer_info->skb;
		prefetch(skb->data - NET_IP_ALIGN);
		rx_buffer_info->skb = NULL;

		if (rx_buffer_info->dma) {
			dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
					 rx_ring->rx_buf_len,
					 DMA_FROM_DEVICE);
			rx_buffer_info->dma = 0;
			skb_put(skb, len);
		}

		if (upper_len) {
			dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
				       PAGE_SIZE / 2, DMA_FROM_DEVICE);
			rx_buffer_info->page_dma = 0;
			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
					   rx_buffer_info->page,
					   rx_buffer_info->page_offset,
					   upper_len);

			if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
			    (page_count(rx_buffer_info->page) != 1))
				rx_buffer_info->page = NULL;
			else
				get_page(rx_buffer_info->page);

			skb->len += upper_len;
			skb->data_len += upper_len;
			skb->truesize += upper_len;
		}

		i++;
		if (i == rx_ring->count)
			i = 0;

		next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i);
		prefetch(next_rxd);
		cleaned_count++;

		next_buffer = &rx_ring->rx_buffer_info[i];

		if (!(staterr & IXGBE_RXD_STAT_EOP)) {
			if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
				rx_buffer_info->skb = next_buffer->skb;
				rx_buffer_info->dma = next_buffer->dma;
				next_buffer->skb = skb;
				next_buffer->dma = 0;
			} else {
				skb->next = next_buffer->skb;
				skb->next->prev = skb;
			}
			adapter->non_eop_descs++;
			goto next_desc;
		}

		/* ERR_MASK will only have valid bits if EOP set */
		if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		ixgbevf_rx_checksum(adapter, staterr, skb);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		/*
		 * Work around issue of some types of VM to VM loop back
		 * packets not getting split correctly
		 */
		if (staterr & IXGBE_RXD_STAT_LB) {
			u32 header_fixup_len = skb_headlen(skb);
			if (header_fixup_len < 14)
				skb_push(skb, header_fixup_len);
		}
		skb->protocol = eth_type_trans(skb, adapter->netdev);

		ixgbevf_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
			ixgbevf_alloc_rx_buffers(adapter, rx_ring,
						 cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		rx_buffer_info = &rx_ring->rx_buffer_info[i];

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = IXGBE_DESC_UNUSED(rx_ring);

	if (cleaned_count)
		ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count);

	rx_ring->total_packets += total_rx_packets;
	rx_ring->total_bytes += total_rx_bytes;
	adapter->netdev->stats.rx_bytes += total_rx_bytes;
	adapter->netdev->stats.rx_packets += total_rx_packets;

	return cleaned;
}
/**
 * ixgbevf_clean_rxonly - msix (aka one shot) rx clean routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function is optimized for cleaning one queue only on a single
 * q_vector.
 **/
static int ixgbevf_clean_rxonly(struct napi_struct *napi, int budget)
{
	struct ixgbevf_q_vector *q_vector =
		container_of(napi, struct ixgbevf_q_vector, napi);
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_ring *rx_ring = NULL;
	int work_done = 0;
	long r_idx;

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	rx_ring = &(adapter->rx_ring[r_idx]);

	ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget);

	/* If all Rx work done, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		if (adapter->itr_setting & 1)
			ixgbevf_set_itr_msix(q_vector);
		if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
			ixgbevf_irq_enable_queues(adapter, rx_ring->v_idx);
	}

	return work_done;
}
/**
 * ixgbevf_clean_rxonly_many - msix (aka one shot) rx clean routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean more than one rx queue associated with a
 * q_vector.
 **/
static int ixgbevf_clean_rxonly_many(struct napi_struct *napi, int budget)
{
	struct ixgbevf_q_vector *q_vector =
		container_of(napi, struct ixgbevf_q_vector, napi);
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_ring *rx_ring = NULL;
	int work_done = 0, i;
	long r_idx;
	u64 enable_mask = 0;

	/* attempt to distribute budget to each queue fairly, but don't allow
	 * the budget to go below 1 because we'll exit polling */
	budget /= (q_vector->rxr_count ?: 1);
	budget = max(budget, 1);
	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	for (i = 0; i < q_vector->rxr_count; i++) {
		rx_ring = &(adapter->rx_ring[r_idx]);
		ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
		enable_mask |= rx_ring->v_idx;
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
				      r_idx + 1);
	}

#ifndef HAVE_NETDEV_NAPI_LIST
	if (!netif_running(adapter->netdev))
		work_done = 0;

#endif
	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	rx_ring = &(adapter->rx_ring[r_idx]);

	/* If all Rx work done, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		if (adapter->itr_setting & 1)
			ixgbevf_set_itr_msix(q_vector);
		if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
			ixgbevf_irq_enable_queues(adapter, enable_mask);
	}

	return work_done;
}
/**
 * ixgbevf_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
{
	struct ixgbevf_q_vector *q_vector;
	struct ixgbe_hw *hw = &adapter->hw;
	int i, j, q_vectors, v_idx, r_idx;
	u32 mask;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/*
	 * Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		q_vector = adapter->q_vector[v_idx];
		/* XXX for_each_set_bit(...) */
		r_idx = find_first_bit(q_vector->rxr_idx,
				       adapter->num_rx_queues);

		for (i = 0; i < q_vector->rxr_count; i++) {
			j = adapter->rx_ring[r_idx].reg_idx;
			ixgbevf_set_ivar(adapter, 0, j, v_idx);
			r_idx = find_next_bit(q_vector->rxr_idx,
					      adapter->num_rx_queues,
					      r_idx + 1);
		}
		r_idx = find_first_bit(q_vector->txr_idx,
				       adapter->num_tx_queues);

		for (i = 0; i < q_vector->txr_count; i++) {
			j = adapter->tx_ring[r_idx].reg_idx;
			ixgbevf_set_ivar(adapter, 1, j, v_idx);
			r_idx = find_next_bit(q_vector->txr_idx,
					      adapter->num_tx_queues,
					      r_idx + 1);
		}

		/* if this is a tx only vector halve the interrupt rate */
		if (q_vector->txr_count && !q_vector->rxr_count)
			q_vector->eitr = (adapter->eitr_param >> 1);
		else if (q_vector->rxr_count)
			/* rx only */
			q_vector->eitr = adapter->eitr_param;

		ixgbevf_write_eitr(adapter, v_idx, q_vector->eitr);
	}

	ixgbevf_set_ivar(adapter, -1, 1, v_idx);

	/* set up to autoclear timer, and the vectors */
	mask = IXGBE_EIMS_ENABLE_MASK;
	mask &= ~IXGBE_EIMS_OTHER;
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
}
enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};
/**
 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
 * @adapter: pointer to adapter
 * @eitr: eitr setting (ints per sec) to give last timeslice
 * @itr_setting: current throttle rate in ints/second
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
static u8 ixgbevf_update_itr(struct ixgbevf_adapter *adapter,
			     u32 eitr, u8 itr_setting,
			     int packets, int bytes)
{
	unsigned int retval = itr_setting;
	u32 timepassed_us;
	u64 bytes_perint;

	if (packets == 0)
		goto update_itr_done;


	/* simple throttlerate management
	 *    0-20MB/s lowest (100000 ints/s)
	 *   20-100MB/s low   (20000 ints/s)
	 *  100-1249MB/s bulk (8000 ints/s)
	 */
	/* what was last interrupt timeslice? */
	timepassed_us = 1000000/eitr;
	bytes_perint = bytes / timepassed_us; /* bytes/usec */

	switch (itr_setting) {
	case lowest_latency:
		if (bytes_perint > adapter->eitr_low)
			retval = low_latency;
		break;
	case low_latency:
		if (bytes_perint > adapter->eitr_high)
			retval = bulk_latency;
		else if (bytes_perint <= adapter->eitr_low)
			retval = lowest_latency;
		break;
	case bulk_latency:
		if (bytes_perint <= adapter->eitr_high)
			retval = low_latency;
		break;
	}

update_itr_done:
	return retval;
}
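/*
 * Illustrative note (editor's addition, not in the original source): a
 * vector in the low_latency state moves up to bulk_latency only when
 * bytes_perint rises above adapter->eitr_high, and falls back to
 * lowest_latency when it drops to adapter->eitr_low or below; anything
 * in between leaves the state, and thus the interrupt rate, unchanged.
 */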
/**
 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
 * @adapter: pointer to adapter struct
 * @v_idx: vector index into q_vector array
 * @itr_reg: new value to be written in *register* format, not ints/s
 *
 * This function is made to be called by ethtool and by the driver
 * when it needs to update VTEITR registers at runtime.  Hardware
 * specific quirks/differences are taken care of here.
 **/
static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx,
			       u32 itr_reg)
{
	struct ixgbe_hw *hw = &adapter->hw;

	itr_reg = EITR_INTS_PER_SEC_TO_REG(itr_reg);

	/*
	 * set the WDIS bit to not clear the timer bits and cause an
	 * immediate assertion of the interrupt
	 */
	itr_reg |= IXGBE_EITR_CNT_WDIS;

	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
}
static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	u32 new_itr;
	u8 current_itr, ret_itr;
	int i, r_idx, v_idx = q_vector->v_idx;
	struct ixgbevf_ring *rx_ring, *tx_ring;

	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
	for (i = 0; i < q_vector->txr_count; i++) {
		tx_ring = &(adapter->tx_ring[r_idx]);
		ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr,
					     q_vector->tx_itr,
					     tx_ring->total_packets,
					     tx_ring->total_bytes);
		/* if the result for this queue would decrease interrupt
		 * rate for this vector then use that result */
		q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
				    q_vector->tx_itr - 1 : ret_itr);
		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
				      r_idx + 1);
	}

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	for (i = 0; i < q_vector->rxr_count; i++) {
		rx_ring = &(adapter->rx_ring[r_idx]);
		ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr,
					     q_vector->rx_itr,
					     rx_ring->total_packets,
					     rx_ring->total_bytes);
		/* if the result for this queue would decrease interrupt
		 * rate for this vector then use that result */
		q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
				    q_vector->rx_itr - 1 : ret_itr);
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
				      r_idx + 1);
	}

	current_itr = max(q_vector->rx_itr, q_vector->tx_itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = 100000;
		break;
	case low_latency:
		new_itr = 20000; /* aka hwitr = ~200 */
		break;
	case bulk_latency:
	default:
		new_itr = 8000;
		break;
	}

	if (new_itr != q_vector->eitr) {
		u32 itr_reg;

		/* save the algorithm value here, not the smoothed one */
		q_vector->eitr = new_itr;
		/* do an exponential smoothing */
		new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
		itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
		ixgbevf_write_eitr(adapter, v_idx, itr_reg);
	}
}
static irqreturn_t ixgbevf_msix_mbx(int irq, void *data)
{
	struct net_device *netdev = data;
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eicr;
	u32 msg;

	eicr = IXGBE_READ_REG(hw, IXGBE_VTEICS);
	IXGBE_WRITE_REG(hw, IXGBE_VTEICR, eicr);

	if (!hw->mbx.ops.check_for_ack(hw)) {
		/*
		 * checking for the ack clears the PFACK bit.  Place
		 * it back in the v2p_mailbox cache so that anyone
		 * polling for an ack will not miss it.  Also
		 * avoid the read below because the code to read
		 * the mailbox will also clear the ack bit.  This was
		 * causing lost acks.  Just cache the bit and exit
		 * the IRQ handler.
		 */
		hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK;
		goto out;
	}

	/* Not an ack interrupt, go ahead and read the message */
	hw->mbx.ops.read(hw, &msg, 1);

	if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG)
		mod_timer(&adapter->watchdog_timer,
			  round_jiffies(jiffies + 1));

out:
	return IRQ_HANDLED;
}
static irqreturn_t ixgbevf_msix_clean_tx(int irq, void *data)
{
	struct ixgbevf_q_vector *q_vector = data;
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_ring *tx_ring;
	int i, r_idx;

	if (!q_vector->txr_count)
		return IRQ_HANDLED;

	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
	for (i = 0; i < q_vector->txr_count; i++) {
		tx_ring = &(adapter->tx_ring[r_idx]);
		tx_ring->total_bytes = 0;
		tx_ring->total_packets = 0;
		ixgbevf_clean_tx_irq(adapter, tx_ring);
		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
				      r_idx + 1);
	}

	if (adapter->itr_setting & 1)
		ixgbevf_set_itr_msix(q_vector);

	return IRQ_HANDLED;
}
/**
 * ixgbevf_msix_clean_rx - single unshared vector rx clean (all queues)
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 **/
static irqreturn_t ixgbevf_msix_clean_rx(int irq, void *data)
{
	struct ixgbevf_q_vector *q_vector = data;
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbevf_ring *rx_ring;
	int r_idx;
	int i;

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	for (i = 0; i < q_vector->rxr_count; i++) {
		rx_ring = &(adapter->rx_ring[r_idx]);
		rx_ring->total_bytes = 0;
		rx_ring->total_packets = 0;
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
				      r_idx + 1);
	}

	if (!q_vector->rxr_count)
		return IRQ_HANDLED;

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	rx_ring = &(adapter->rx_ring[r_idx]);
	/* disable interrupts on this vector only */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, rx_ring->v_idx);
	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}
static irqreturn_t ixgbevf_msix_clean_many(int irq, void *data)
{
	ixgbevf_msix_clean_rx(irq, data);
	ixgbevf_msix_clean_tx(irq, data);

	return IRQ_HANDLED;
}
static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
				     int r_idx)
{
	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

	set_bit(r_idx, q_vector->rxr_idx);
	q_vector->rxr_count++;
	a->rx_ring[r_idx].v_idx = 1 << v_idx;
}

static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
				     int t_idx)
{
	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

	set_bit(t_idx, q_vector->txr_idx);
	q_vector->txr_count++;
	a->tx_ring[t_idx].v_idx = 1 << v_idx;
}
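/*
 * Editor's note (not in the original source): ring->v_idx is kept as a
 * one-hot mask (1 << v_idx) rather than a plain index so it can be
 * written directly into the per-vector interrupt registers, e.g. VTEIMC
 * in ixgbevf_msix_clean_rx() and VTEICS in ixgbevf_clean_tx_irq().
 */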
/**
 * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible.  You would add new
 * mapping configurations in here.
 **/
static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
{
	int q_vectors;
	int v_start = 0;
	int rxr_idx = 0, txr_idx = 0;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int i, j;
	int rqpv, tqpv;
	int err = 0;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/*
	 * The ideal configuration...
	 * We have enough vectors to map one per queue.
	 */
	if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
		for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
			map_vector_to_rxq(adapter, v_start, rxr_idx);

		for (; txr_idx < txr_remaining; v_start++, txr_idx++)
			map_vector_to_txq(adapter, v_start, txr_idx);
		goto out;
	}

	/*
	 * If we don't have enough vectors for a 1-to-1
	 * mapping, we'll have to group them so there are
	 * multiple queues per vector.
	 */
	/* Re-adjusting *qpv takes care of the remainder. */
	for (i = v_start; i < q_vectors; i++) {
		rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
		for (j = 0; j < rqpv; j++) {
			map_vector_to_rxq(adapter, i, rxr_idx);
			rxr_idx++;
			rxr_remaining--;
		}
	}
	for (i = v_start; i < q_vectors; i++) {
		tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
		for (j = 0; j < tqpv; j++) {
			map_vector_to_txq(adapter, i, txr_idx);
			txr_idx++;
			txr_remaining--;
		}
	}

out:
	return err;
}
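/*
 * Illustrative note (editor's addition, not in the original source): with
 * 2 q_vectors and 5 Rx queues, the grouped path above assigns
 * DIV_ROUND_UP(5, 2) = 3 queues to vector 0 and then
 * DIV_ROUND_UP(2, 1) = 2 queues to vector 1, so the remainder is
 * absorbed by recomputing the per-vector quota on each iteration.
 */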
/**
 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	irqreturn_t (*handler)(int, void *);
	int i, vector, q_vectors, err;
	int ri = 0, ti = 0;

	/* Decrement for Other and TCP Timer vectors */
	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

#define SET_HANDLER(_v) (((_v)->rxr_count && (_v)->txr_count)          \
					  ? &ixgbevf_msix_clean_many : \
			  (_v)->rxr_count ? &ixgbevf_msix_clean_rx   : \
			  (_v)->txr_count ? &ixgbevf_msix_clean_tx   : \
			  NULL)
	for (vector = 0; vector < q_vectors; vector++) {
		handler = SET_HANDLER(adapter->q_vector[vector]);

		if (handler == &ixgbevf_msix_clean_rx) {
			sprintf(adapter->name[vector], "%s-%s-%d",
				netdev->name, "rx", ri++);
		} else if (handler == &ixgbevf_msix_clean_tx) {
			sprintf(adapter->name[vector], "%s-%s-%d",
				netdev->name, "tx", ti++);
		} else if (handler == &ixgbevf_msix_clean_many) {
			sprintf(adapter->name[vector], "%s-%s-%d",
				netdev->name, "TxRx", vector);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(adapter->msix_entries[vector].vector,
				  handler, 0, adapter->name[vector],
				  adapter->q_vector[vector]);
		if (err) {
			hw_dbg(&adapter->hw,
			       "request_irq failed for MSIX interrupt "
			       "Error: %d\n", err);
			goto free_queue_irqs;
		}
	}

	sprintf(adapter->name[vector], "%s:mbx", netdev->name);
	err = request_irq(adapter->msix_entries[vector].vector,
			  &ixgbevf_msix_mbx, 0, adapter->name[vector], netdev);
	if (err) {
		hw_dbg(&adapter->hw,
		       "request_irq for msix_mbx failed: %d\n", err);
		goto free_queue_irqs;
	}

	return 0;

free_queue_irqs:
	for (i = vector - 1; i >= 0; i--)
		free_irq(adapter->msix_entries[--vector].vector,
			 &(adapter->q_vector[i]));
	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
	return err;
}
static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
{
	int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (i = 0; i < q_vectors; i++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
		bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
		bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
		q_vector->rxr_count = 0;
		q_vector->txr_count = 0;
		q_vector->eitr = adapter->eitr_param;
	}
}
/**
 * ixgbevf_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
{
	int err = 0;

	err = ixgbevf_request_msix_irqs(adapter);

	if (err)
		hw_dbg(&adapter->hw,
		       "request_irq failed, Error %d\n", err);

	return err;
}
static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i, q_vectors;

	q_vectors = adapter->num_msix_vectors;

	i = q_vectors - 1;

	free_irq(adapter->msix_entries[i].vector, netdev);
	i--;

	for (; i >= 0; i--) {
		free_irq(adapter->msix_entries[i].vector,
			 adapter->q_vector[i]);
	}

	ixgbevf_reset_q_vectors(adapter);
}
/**
 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
{
	int i;
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);

	IXGBE_WRITE_FLUSH(hw);

	for (i = 0; i < adapter->num_msix_vectors; i++)
		synchronize_irq(adapter->msix_entries[i].vector);
}
/**
 * ixgbevf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter,
				      bool queues, bool flush)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 mask;
	u64 qmask;

	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
	qmask = ~0;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);

	if (queues)
		ixgbevf_irq_enable_queues(adapter, qmask);

	if (flush)
		IXGBE_WRITE_FLUSH(hw);
}
/**
 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
{
	u64 tdba;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 i, j, tdlen, txctrl;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct ixgbevf_ring *ring = &adapter->tx_ring[i];
		j = ring->reg_idx;
		tdba = ring->dma;
		tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
				(tdba & DMA_BIT_MASK(32)));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j), tdlen);
		IXGBE_WRITE_REG(hw, IXGBE_VFTDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFTDT(j), 0);
		adapter->tx_ring[i].head = IXGBE_VFTDH(j);
		adapter->tx_ring[i].tail = IXGBE_VFTDT(j);
		/* Disable Tx Head Writeback RO bit, since this hoses
		 * bookkeeping if things aren't delivered in order.
		 */
		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
		txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
	}
}
#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT	2

static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
{
	struct ixgbevf_ring *rx_ring;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 srrctl;

	rx_ring = &adapter->rx_ring[index];

	srrctl = IXGBE_SRRCTL_DROP_EN;

	if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
		u16 bufsz = IXGBEVF_RXBUFFER_2048;
		/* grow the amount we can receive on large page machines */
		if (bufsz < (PAGE_SIZE / 2))
			bufsz = (PAGE_SIZE / 2);
		/* cap the bufsz at our largest descriptor size */
		bufsz = min((u16)IXGBEVF_MAX_RXBUFFER, bufsz);

		srrctl |= bufsz >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
		srrctl |= ((IXGBEVF_RX_HDR_SIZE <<
			   IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
			   IXGBE_SRRCTL_BSIZEHDR_MASK);
	} else {
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

		if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
			srrctl |= IXGBEVF_RXBUFFER_2048 >>
				IXGBE_SRRCTL_BSIZEPKT_SHIFT;
		else
			srrctl |= rx_ring->rx_buf_len >>
				IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	}
	IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
}
/**
 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
{
	u64 rdba;
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	int i, j;
	u32 rdlen;
	int rx_buf_len;

	/* Decide whether to use packet split mode or not */
	if (netdev->mtu > ETH_DATA_LEN) {
		if (adapter->flags & IXGBE_FLAG_RX_PS_CAPABLE)
			adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
		else
			adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
	} else {
		if (adapter->flags & IXGBE_FLAG_RX_1BUF_CAPABLE)
			adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
		else
			adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
	}

	/* Set the RX buffer length according to the mode */
	if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
		/* PSRTYPE must be initialized in 82599 */
		u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
			IXGBE_PSRTYPE_UDPHDR |
			IXGBE_PSRTYPE_IPV4HDR |
			IXGBE_PSRTYPE_IPV6HDR |
			IXGBE_PSRTYPE_L2HDR;
		IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
		rx_buf_len = IXGBEVF_RX_HDR_SIZE;
	} else {
		IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
		if (netdev->mtu <= ETH_DATA_LEN)
			rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
		else
			rx_buf_len = ALIGN(max_frame, 1024);
	}

	rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		rdba = adapter->rx_ring[i].dma;
		j = adapter->rx_ring[i].reg_idx;
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
				(rdba & DMA_BIT_MASK(32)));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), rdlen);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0);
		adapter->rx_ring[i].head = IXGBE_VFRDH(j);
		adapter->rx_ring[i].tail = IXGBE_VFRDT(j);
		adapter->rx_ring[i].rx_buf_len = rx_buf_len;

		ixgbevf_configure_srrctl(adapter, j);
	}
}
static void ixgbevf_vlan_rx_register(struct net_device *netdev,
				     struct vlan_group *grp)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int i, j;
	u32 ctrl;

	adapter->vlgrp = grp;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		j = adapter->rx_ring[i].reg_idx;
		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
		ctrl |= IXGBE_RXDCTL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), ctrl);
	}
}
static void ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	/* add VID to filter table */
	if (hw->mac.ops.set_vfta)
		hw->mac.ops.set_vfta(hw, vid, 0, true);
}
static void ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
		ixgbevf_irq_disable(adapter);

	vlan_group_set_device(adapter->vlgrp, vid, NULL);

	if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
		ixgbevf_irq_enable(adapter, true, true);

	/* remove VID from filter table */
	if (hw->mac.ops.set_vfta)
		hw->mac.ops.set_vfta(hw, vid, 0, false);
}
static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
{
	ixgbevf_vlan_rx_register(adapter->netdev, adapter->vlgrp);

	if (adapter->vlgrp) {
		u16 vid;
		for (vid = 0; vid < VLAN_N_VID; vid++) {
			if (!vlan_group_get_device(adapter->vlgrp, vid))
				continue;
			ixgbevf_vlan_rx_add_vid(adapter->netdev, vid);
		}
	}
}
/**
 * ixgbevf_set_rx_mode - Multicast set
 * @netdev: network interface device structure
 *
 * The set_rx_method entry point is called whenever the multicast address
 * list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper multicast mode.
 **/
static void ixgbevf_set_rx_mode(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	/* reprogram multicast list */
	if (hw->mac.ops.update_mc_addr_list)
		hw->mac.ops.update_mc_addr_list(hw, netdev);
}
static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
{
	int q_idx;
	struct ixgbevf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		struct napi_struct *napi;
		q_vector = adapter->q_vector[q_idx];
		if (!q_vector->rxr_count)
			continue;
		napi = &q_vector->napi;
		if (q_vector->rxr_count > 1)
			napi->poll = &ixgbevf_clean_rxonly_many;

		napi_enable(napi);
	}
}
static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
{
	int q_idx;
	struct ixgbevf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = adapter->q_vector[q_idx];
		if (!q_vector->rxr_count)
			continue;
		napi_disable(&q_vector->napi);
	}
}
static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	ixgbevf_set_rx_mode(netdev);

	ixgbevf_restore_vlan(adapter);

	ixgbevf_configure_tx(adapter);
	ixgbevf_configure_rx(adapter);
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct ixgbevf_ring *ring = &adapter->rx_ring[i];
		ixgbevf_alloc_rx_buffers(adapter, ring, ring->count);
		ring->next_to_use = ring->count - 1;
		writel(ring->next_to_use, adapter->hw.hw_addr + ring->tail);
	}
}
#define IXGBE_MAX_RX_DESC_POLL 10
static inline void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
						int rxr)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int j = adapter->rx_ring[rxr].reg_idx;
	int k;

	for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
		if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
			break;
		else
			msleep(1);
	}
	if (k >= IXGBE_MAX_RX_DESC_POLL) {
		hw_dbg(hw, "RXDCTL.ENABLE on Rx queue %d "
		       "not set within the polling period\n", rxr);
	}

	ixgbevf_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
				(adapter->rx_ring[rxr].count - 1));
}
static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
{
	/* Only save pre-reset stats if there are some */
	if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
		adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
			adapter->stats.base_vfgprc;
		adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
			adapter->stats.base_vfgptc;
		adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
			adapter->stats.base_vfgorc;
		adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
			adapter->stats.base_vfgotc;
		adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
			adapter->stats.base_vfmprc;
	}
}
static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
	adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
	adapter->stats.last_vfgorc |=
		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
	adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
	adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
	adapter->stats.last_vfgotc |=
		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
	adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);

	adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
	adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
	adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
	adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
	adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
}
static int ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	int i, j = 0;
	int num_rx_rings = adapter->num_rx_queues;
	u32 txdctl, rxdctl;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		/* enable WTHRESH=8 descriptors, to encourage burst writeback */
		txdctl |= (8 << 16);
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
	}

	for (i = 0; i < num_rx_rings; i++) {
		j = adapter->rx_ring[i].reg_idx;
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
		ixgbevf_rx_desc_queue_enable(adapter, i);
	}

	ixgbevf_configure_msix(adapter);

	if (hw->mac.ops.set_rar) {
		if (is_valid_ether_addr(hw->mac.addr))
			hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
		else
			hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
	}

	clear_bit(__IXGBEVF_DOWN, &adapter->state);
	ixgbevf_napi_enable_all(adapter);

	/* enable transmits */
	netif_tx_start_all_queues(netdev);

	ixgbevf_save_reset_stats(adapter);
	ixgbevf_init_last_counter_stats(adapter);

	/* bring the link up in the watchdog, this could race with our first
	 * link up interrupt but shouldn't be a problem */
	adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
	adapter->link_check_timeout = jiffies;
	mod_timer(&adapter->watchdog_timer, jiffies);
	return 0;
}
int ixgbevf_up(struct ixgbevf_adapter *adapter)
{
	int err;
	struct ixgbe_hw *hw = &adapter->hw;

	ixgbevf_configure(adapter);

	err = ixgbevf_up_complete(adapter);

	/* clear any pending interrupts, may auto mask */
	IXGBE_READ_REG(hw, IXGBE_VTEICR);

	ixgbevf_irq_enable(adapter, true, true);

	return err;
}
/**
 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
 * @adapter: board private structure
 * @rx_ring: ring to free buffers from
 **/
static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
				  struct ixgbevf_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	if (!rx_ring->rx_buffer_info)
		return;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct ixgbevf_rx_buffer *rx_buffer_info;

		rx_buffer_info = &rx_ring->rx_buffer_info[i];
		if (rx_buffer_info->dma) {
			dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
					 rx_ring->rx_buf_len,
					 DMA_FROM_DEVICE);
			rx_buffer_info->dma = 0;
		}
		if (rx_buffer_info->skb) {
			struct sk_buff *skb = rx_buffer_info->skb;
			rx_buffer_info->skb = NULL;
			do {
				struct sk_buff *this = skb;
				skb = skb->prev;
				dev_kfree_skb(this);
			} while (skb);
		}
		if (!rx_buffer_info->page)
			continue;
		dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
			       PAGE_SIZE / 2, DMA_FROM_DEVICE);
		rx_buffer_info->page_dma = 0;
		put_page(rx_buffer_info->page);
		rx_buffer_info->page = NULL;
		rx_buffer_info->page_offset = 0;
	}

	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	if (rx_ring->head)
		writel(0, adapter->hw.hw_addr + rx_ring->head);
	if (rx_ring->tail)
		writel(0, adapter->hw.hw_addr + rx_ring->tail);
}
/**
 * ixgbevf_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 * @tx_ring: ring to be cleaned
 **/
static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter,
				  struct ixgbevf_ring *tx_ring)
{
	struct ixgbevf_tx_buffer *tx_buffer_info;
	unsigned long size;
	unsigned int i;

	if (!tx_ring->tx_buffer_info)
		return;

	/* Free all the Tx ring sk_buffs */

	for (i = 0; i < tx_ring->count; i++) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		ixgbevf_unmap_and_free_tx_resource(adapter, tx_buffer_info);
	}

	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_buffer_info, 0, size);

	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (tx_ring->head)
		writel(0, adapter->hw.hw_addr + tx_ring->head);
	if (tx_ring->tail)
		writel(0, adapter->hw.hw_addr + tx_ring->tail);
}
/**
 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbevf_clean_rx_ring(adapter, &adapter->rx_ring[i]);
}
/**
 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbevf_clean_tx_ring(adapter, &adapter->tx_ring[i]);
}
void ixgbevf_down(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 txdctl;
	int i, j;

	/* signal that we are down to the interrupt handler */
	set_bit(__IXGBEVF_DOWN, &adapter->state);
	/* disable receives */

	netif_tx_disable(netdev);

	msleep(10);

	netif_tx_stop_all_queues(netdev);

	ixgbevf_irq_disable(adapter);

	ixgbevf_napi_disable_all(adapter);

	del_timer_sync(&adapter->watchdog_timer);
	/* can't call flush scheduled work here because it can deadlock
	 * if linkwatch_event tries to acquire the rtnl_lock which we are
	 * holding */
	while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK)
		msleep(1);

	/* disable transmits in the hardware now that interrupts are off */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j),
				(txdctl & ~IXGBE_TXDCTL_ENABLE));
	}

	netif_carrier_off(netdev);

	if (!pci_channel_offline(adapter->pdev))
		ixgbevf_reset(adapter);

	ixgbevf_clean_all_tx_rings(adapter);
	ixgbevf_clean_all_rx_rings(adapter);
}
void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	WARN_ON(in_interrupt());

	while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
		msleep(1);

	/*
	 * Check if PF is up before re-init.  If not then skip until
	 * later when the PF is up and ready to service requests from
	 * the VF via mailbox.  If the VF is up and running then the
	 * watchdog task will continue to schedule reset tasks until
	 * the PF is up and running.
	 */
	if (!hw->mac.ops.reset_hw(hw)) {
		ixgbevf_down(adapter);
		ixgbevf_up(adapter);
	}

	clear_bit(__IXGBEVF_RESETTING, &adapter->state);
}
void ixgbevf_reset(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;

	if (hw->mac.ops.reset_hw(hw))
		hw_dbg(hw, "PF still resetting\n");
	else
		hw->mac.ops.init_hw(hw);

	if (is_valid_ether_addr(adapter->hw.mac.addr)) {
		memcpy(netdev->dev_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
		memcpy(netdev->perm_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
	}
}
static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
					 int vectors)
{
	int err, vector_threshold;

	/* We'll want at least 3 (vector_threshold):
	 * 1) TxQ[0] Cleanup
	 * 2) RxQ[0] Cleanup
	 * 3) Other (Link Status Change, etc.)
	 */
	vector_threshold = MIN_MSIX_COUNT;

	/* The more we get, the more we will assign to Tx/Rx Cleanup
	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting irq's.
	 */
	while (vectors >= vector_threshold) {
		err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
				      vectors);
		if (!err) /* Success in acquiring all requested vectors. */
			break;
		else if (err < 0)
			vectors = 0; /* Nasty failure, quit now */
		else /* err == number of vectors we should try again with */
			vectors = err;
	}

	if (vectors < vector_threshold) {
		/* Can't allocate enough MSI-X interrupts?  Oh well.
		 * This just means we'll go with either a single MSI
		 * vector or fall back to legacy interrupts.
		 */
		hw_dbg(&adapter->hw,
		       "Unable to allocate MSI-X interrupts\n");
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else {
		/*
		 * Adjust for only the vectors we'll use, which is minimum
		 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
		 * vectors we were allocated.
		 */
		adapter->num_msix_vectors = vectors;
	}
}
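/*
 * Editor's note (not in the original source): the retry loop above relies
 * on the old pci_enable_msix() contract, where a positive return value is
 * the number of vectors that could have been allocated; requesting, say,
 * 5 vectors when only 3 are available returns 3, and the next pass
 * retries with exactly that count.
 */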
/*
 * ixgbevf_set_num_queues: Allocate queues for device, feature dependent
 * @adapter: board private structure to initialize
 *
 * This is the top level queue allocation routine.  The order here is very
 * important, starting with the "most" number of features turned on at once,
 * and ending with the smallest set of features.  This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fallthrough conditions.
 *
 **/
static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
{
	/* Start with base case */
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_rx_pools = adapter->num_rx_queues;
	adapter->num_rx_queues_per_pool = 1;
}
/**
 * ixgbevf_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.  The polling_netdev array is
 * intended for Multiqueue, but should work fine with a single queue.
 **/
static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
{
	int i;

	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
				   sizeof(struct ixgbevf_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		goto err_tx_ring_allocation;

	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
				   sizeof(struct ixgbevf_ring), GFP_KERNEL);
	if (!adapter->rx_ring)
		goto err_rx_ring_allocation;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		adapter->tx_ring[i].count = adapter->tx_ring_count;
		adapter->tx_ring[i].queue_index = i;
		adapter->tx_ring[i].reg_idx = i;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->rx_ring[i].count = adapter->rx_ring_count;
		adapter->rx_ring[i].queue_index = i;
		adapter->rx_ring[i].reg_idx = i;
	}

	return 0;

err_rx_ring_allocation:
	kfree(adapter->tx_ring);
err_tx_ring_allocation:
	return -ENOMEM;
}

/**
 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
{
	int err = 0;
	int vector, v_budget;

	/*
	 * It's easy to be greedy for MSI-X vectors, but it really
	 * doesn't do us much good if we have a lot more vectors
	 * than CPU's.  So let's be conservative and only ask for
	 * (roughly) twice the number of vectors as there are CPU's.
	 */
	v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
		       (int)(num_online_cpus() * 2)) + NON_Q_VECTORS;

	/* A failure in MSI-X entry allocation isn't fatal, but it does
	 * mean we disable MSI-X capabilities of the adapter. */
	adapter->msix_entries = kcalloc(v_budget,
					sizeof(struct msix_entry), GFP_KERNEL);
	if (!adapter->msix_entries) {
		err = -ENOMEM;
		goto out;
	}

	for (vector = 0; vector < v_budget; vector++)
		adapter->msix_entries[vector].entry = vector;

	ixgbevf_acquire_msix_vectors(adapter, v_budget);

out:
	return err;
}
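
/*
 * Worked example for the budget above, assuming NON_Q_VECTORS is the
 * single "other cause" (mailbox/link) vector: with one Rx and one Tx
 * queue on a 4-CPU box, v_budget = min(1 + 1, 8) + 1 = 3, which is
 * exactly the MIN_MSIX_COUNT floor that ixgbevf_acquire_msix_vectors()
 * enforces, so the VF either gets all three vectors or runs without
 * MSI-X at all.
 */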

/**
 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
{
	int q_idx, num_q_vectors;
	struct ixgbevf_q_vector *q_vector;
	int napi_vectors;
	int (*poll)(struct napi_struct *, int);

	num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	napi_vectors = adapter->num_rx_queues;
	poll = &ixgbevf_clean_rxonly;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
		if (!q_vector)
			goto err_out;
		q_vector->adapter = adapter;
		q_vector->v_idx = q_idx;
		q_vector->eitr = adapter->eitr_param;
		if (q_idx < napi_vectors)
			netif_napi_add(adapter->netdev, &q_vector->napi,
				       (*poll), 64);
		adapter->q_vector[q_idx] = q_vector;
	}

	return 0;

err_out:
	while (q_idx) {
		q_idx--;
		q_vector = adapter->q_vector[q_idx];
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
		adapter->q_vector[q_idx] = NULL;
	}
	return -ENOMEM;
}
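
/*
 * Design note on the unwind above: q_idx is walked back down so that a
 * failure part-way through allocation deletes the NAPI context for, and
 * frees, every q_vector already set up, leaving adapter->q_vector[]
 * all-NULL on the -ENOMEM path.
 */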

/**
 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
{
	int q_idx, num_q_vectors;
	int napi_vectors;

	num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	napi_vectors = adapter->num_rx_queues;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];

		adapter->q_vector[q_idx] = NULL;
		if (q_idx < napi_vectors)
			netif_napi_del(&q_vector->napi);
		kfree(q_vector);
	}
}

/**
 * ixgbevf_reset_interrupt_capability - Reset MSIX setup
 * @adapter: board private structure
 *
 **/
static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
{
	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
}

/**
 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
 * @adapter: board private structure to initialize
 *
 **/
static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
{
	int err;

	/* Number of supported queues */
	ixgbevf_set_num_queues(adapter);

	err = ixgbevf_set_interrupt_capability(adapter);
	if (err) {
		hw_dbg(&adapter->hw,
		       "Unable to setup interrupt capabilities\n");
		goto err_set_interrupt;
	}

	err = ixgbevf_alloc_q_vectors(adapter);
	if (err) {
		hw_dbg(&adapter->hw, "Unable to allocate memory for queue "
		       "vectors\n");
		goto err_alloc_q_vectors;
	}

	err = ixgbevf_alloc_queues(adapter);
	if (err) {
		printk(KERN_ERR "Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, "
	       "Tx Queue count = %u\n",
	       (adapter->num_rx_queues > 1) ? "Enabled" :
	       "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);

	set_bit(__IXGBEVF_DOWN, &adapter->state);

	return 0;
err_alloc_queues:
	ixgbevf_free_q_vectors(adapter);
err_alloc_q_vectors:
	ixgbevf_reset_interrupt_capability(adapter);
err_set_interrupt:
	return err;
}

/**
 * ixgbevf_sw_init - Initialize general software structures
 * (struct ixgbevf_adapter)
 * @adapter: board private structure to initialize
 *
 * ixgbevf_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	int err;

	/* PCI config space info */

	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	hw->mbx.ops.init_params(hw);
	hw->mac.max_tx_queues = MAX_TX_QUEUES;
	hw->mac.max_rx_queues = MAX_RX_QUEUES;
	err = hw->mac.ops.reset_hw(hw);
	if (err) {
		dev_info(&pdev->dev,
			 "PF still in reset state, assigning new address\n");
		dev_hw_addr_random(adapter->netdev, hw->mac.addr);
	} else {
		err = hw->mac.ops.init_hw(hw);
		if (err) {
			printk(KERN_ERR "init_shared_code failed: %d\n", err);
			goto out;
		}
	}

	/* Enable dynamic interrupt throttling rates */
	adapter->eitr_param = 20000;
	adapter->itr_setting = 1;

	/* set defaults for eitr in MegaBytes */
	adapter->eitr_low = 10;
	adapter->eitr_high = 20;

	/* set default ring sizes */
	adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
	adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;

	/* enable rx csum by default */
	adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;

	set_bit(__IXGBEVF_DOWN, &adapter->state);

out:
	return err;
}

#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter)	\
	{							\
		u32 current_counter = IXGBE_READ_REG(hw, reg);	\
		if (current_counter < last_counter)		\
			counter += 0x100000000LL;		\
		last_counter = current_counter;			\
		counter &= 0xFFFFFFFF00000000LL;		\
		counter |= current_counter;			\
	}

#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
	{								 \
		u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb);	 \
		u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb);	 \
		u64 current_counter = (current_counter_msb << 32) |	 \
			current_counter_lsb;				 \
		if (current_counter < last_counter)			 \
			counter += 0x1000000000LL;			 \
		last_counter = current_counter;				 \
		counter &= 0xFFFFFFF000000000LL;			 \
		counter |= current_counter;				 \
	}
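
/*
 * Worked example for the 32-bit variant above: if the previous hardware
 * read (last_counter) was 0xFFFFFFF0 and the current read is 0x00000010,
 * the "current < last" test detects the wrap and credits 0x100000000 to
 * the 64-bit software total; masking with 0xFFFFFFFF00000000 and OR-ing
 * the fresh 32-bit value back in keeps the counter monotonic.  The
 * 36-bit variant applies the same idea with a 0x1000000000 wrap quantum
 * to the octet counters split across LSB/MSB registers.
 */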

/**
 * ixgbevf_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 **/
void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
				adapter->stats.vfgprc);
	UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
				adapter->stats.vfgptc);
	UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
				adapter->stats.last_vfgorc,
				adapter->stats.vfgorc);
	UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
				adapter->stats.last_vfgotc,
				adapter->stats.vfgotc);
	UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
				adapter->stats.vfmprc);

	/* Fill out the OS statistics structure */
	adapter->netdev->stats.multicast = adapter->stats.vfmprc -
		adapter->stats.base_vfmprc;
}

/**
 * ixgbevf_watchdog - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void ixgbevf_watchdog(unsigned long data)
{
	struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
	struct ixgbe_hw *hw = &adapter->hw;
	u64 eics = 0;
	int i;

	/*
	 * Do the watchdog outside of interrupt context due to the lovely
	 * delays that some of the newer hardware requires
	 */

	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		goto watchdog_short_circuit;

	/* get one bit for every active tx/rx interrupt vector */
	for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
		struct ixgbevf_q_vector *qv = adapter->q_vector[i];
		if (qv->rxr_count || qv->txr_count)
			eics |= (1 << i);
	}

	IXGBE_WRITE_REG(hw, IXGBE_VTEICS, (u32)eics);

watchdog_short_circuit:
	schedule_work(&adapter->watchdog_task);
}
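
/*
 * Example of the cause mask built above: with three active queue
 * vectors (indices 0..2) eics ends up as 0x7, and the VTEICS write sets
 * those interrupt causes so each vector's handler runs and ring cleanup
 * (and Tx hang detection) happens even if the hardware has gone quiet.
 */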

/**
 * ixgbevf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void ixgbevf_tx_timeout(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	schedule_work(&adapter->reset_task);
}

static void ixgbevf_reset_task(struct work_struct *work)
{
	struct ixgbevf_adapter *adapter;
	adapter = container_of(work, struct ixgbevf_adapter, reset_task);

	/* If we're already down or resetting, just bail */
	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
	    test_bit(__IXGBEVF_RESETTING, &adapter->state))
		return;

	adapter->tx_timeout_count++;

	ixgbevf_reinit_locked(adapter);
}

/**
 * ixgbevf_watchdog_task - worker thread to bring link up
 * @work: pointer to work_struct containing our data
 **/
static void ixgbevf_watchdog_task(struct work_struct *work)
{
	struct ixgbevf_adapter *adapter = container_of(work,
						       struct ixgbevf_adapter,
						       watchdog_task);
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 link_speed = adapter->link_speed;
	bool link_up = adapter->link_up;

	adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;

	/*
	 * Always check the link on the watchdog because we have
	 * no LSC interrupt
	 */
	if (hw->mac.ops.check_link) {
		if ((hw->mac.ops.check_link(hw, &link_speed,
					    &link_up, false)) != 0) {
			adapter->link_up = link_up;
			adapter->link_speed = link_speed;
			netif_carrier_off(netdev);
			netif_tx_stop_all_queues(netdev);
			schedule_work(&adapter->reset_task);
			goto pf_has_reset;
		}
	} else {
		/* always assume link is up, if no check link
		 * function */
		link_speed = IXGBE_LINK_SPEED_10GB_FULL;
		link_up = true;
	}
	adapter->link_up = link_up;
	adapter->link_speed = link_speed;

	if (link_up) {
		if (!netif_carrier_ok(netdev)) {
			hw_dbg(&adapter->hw, "NIC Link is Up, %u Gbps\n",
			       (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
			       10 : 1);
			netif_carrier_on(netdev);
			netif_tx_wake_all_queues(netdev);
		} else {
			/* Force detection of hung controller */
			adapter->detect_tx_hung = true;
		}
	} else {
		adapter->link_up = false;
		adapter->link_speed = 0;
		if (netif_carrier_ok(netdev)) {
			hw_dbg(&adapter->hw, "NIC Link is Down\n");
			netif_carrier_off(netdev);
			netif_tx_stop_all_queues(netdev);
		}
	}

	ixgbevf_update_stats(adapter);

pf_has_reset:
	/* Force detection of hung controller every watchdog period */
	adapter->detect_tx_hung = true;

	/* Reset the timer */
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer,
			  round_jiffies(jiffies + (2 * HZ)));

	adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
}

/**
 * ixgbevf_free_tx_resources - Free Tx Resources per Queue
 * @adapter: board private structure
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter,
			       struct ixgbevf_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	ixgbevf_clean_tx_ring(adapter, tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
			  tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		if (adapter->tx_ring[i].desc)
			ixgbevf_free_tx_resources(adapter,
						  &adapter->tx_ring[i]);
}

/**
 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
			       struct ixgbevf_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
	tx_ring->tx_buffer_info = vzalloc(size);
	if (!tx_ring->tx_buffer_info)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->work_limit = tx_ring->count;
	return 0;

err:
	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;
	hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit "
	       "descriptor ring\n");
	return -ENOMEM;
}
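
/*
 * Sizing example for the allocation above: each advanced Tx descriptor
 * (union ixgbe_adv_tx_desc) is 16 bytes, so a 512-entry ring needs
 * 512 * 16 = 8192 bytes and ALIGN(..., 4096) leaves it unchanged, while
 * a 520-entry ring (8320 bytes) would round up to 12288.  The 4K
 * rounding assumes only that descriptor rings are mapped in whole pages.
 */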

/**
 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = ixgbevf_setup_tx_resources(adapter, &adapter->tx_ring[i]);
		if (!err)
			continue;
		hw_dbg(&adapter->hw,
		       "Allocation for Tx Queue %u failed\n", i);
		break;
	}

	return err;
}

/**
 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
			       struct ixgbevf_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
	rx_ring->rx_buffer_info = vzalloc(size);
	if (!rx_ring->rx_buffer_info) {
		hw_dbg(&adapter->hw,
		       "Unable to vmalloc buffer memory for "
		       "the receive descriptor ring\n");
		goto alloc_failed;
	}

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);

	if (!rx_ring->desc) {
		hw_dbg(&adapter->hw,
		       "Unable to allocate memory for "
		       "the receive descriptor ring\n");
		vfree(rx_ring->rx_buffer_info);
		rx_ring->rx_buffer_info = NULL;
		goto alloc_failed;
	}

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;
alloc_failed:
	return -ENOMEM;
}

/**
 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = ixgbevf_setup_rx_resources(adapter, &adapter->rx_ring[i]);
		if (!err)
			continue;
		hw_dbg(&adapter->hw,
		       "Allocation for Rx Queue %u failed\n", i);
		break;
	}

	return err;
}

/**
 * ixgbevf_free_rx_resources - Free Rx Resources
 * @adapter: board private structure
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter,
			       struct ixgbevf_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	ixgbevf_clean_rx_ring(adapter, rx_ring);

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
			  rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		if (adapter->rx_ring[i].desc)
			ixgbevf_free_rx_resources(adapter,
						  &adapter->rx_ring[i]);
}

/**
 * ixgbevf_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int ixgbevf_open(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int err;

	/* disallow open during test */
	if (test_bit(__IXGBEVF_TESTING, &adapter->state))
		return -EBUSY;

	if (hw->adapter_stopped) {
		ixgbevf_reset(adapter);
		/* if adapter is still stopped then PF isn't up and
		 * the vf can't start. */
		if (hw->adapter_stopped) {
			err = IXGBE_ERR_MBX;
			printk(KERN_ERR "Unable to start - perhaps the PF"
			       " Driver isn't up yet\n");
			goto err_setup_reset;
		}
	}

	/* allocate transmit descriptors */
	err = ixgbevf_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = ixgbevf_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	ixgbevf_configure(adapter);

	/*
	 * Map the Tx/Rx rings to the vectors we were allotted.
	 * if request_irq will be called in this function map_rings
	 * must be called *before* up_complete
	 */
	ixgbevf_map_rings_to_vectors(adapter);

	err = ixgbevf_up_complete(adapter);
	if (err)
		goto err_up;

	/* clear any pending interrupts, may auto mask */
	IXGBE_READ_REG(hw, IXGBE_VTEICR);
	err = ixgbevf_request_irq(adapter);
	if (err)
		goto err_req_irq;

	ixgbevf_irq_enable(adapter, true, true);

	return 0;

err_req_irq:
	ixgbevf_down(adapter);
err_up:
	ixgbevf_free_irq(adapter);
err_setup_rx:
	ixgbevf_free_all_rx_resources(adapter);
err_setup_tx:
	ixgbevf_free_all_tx_resources(adapter);
	ixgbevf_reset(adapter);

err_setup_reset:
	return err;
}

/**
 * ixgbevf_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int ixgbevf_close(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	ixgbevf_down(adapter);
	ixgbevf_free_irq(adapter);

	ixgbevf_free_all_tx_resources(adapter);
	ixgbevf_free_all_rx_resources(adapter);

	return 0;
}

static int ixgbevf_tso(struct ixgbevf_adapter *adapter,
		       struct ixgbevf_ring *tx_ring,
		       struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	unsigned int i;
	int err;
	struct ixgbevf_tx_buffer *tx_buffer_info;
	u32 vlan_macip_lens = 0, type_tucmd_mlhl;
	u32 mss_l4len_idx, l4len;

	if (skb_is_gso(skb)) {
		if (skb_header_cloned(skb)) {
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (err)
				return err;
		}
		l4len = tcp_hdrlen(skb);
		*hdr_len += l4len;

		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *iph = ip_hdr(skb);
			iph->tot_len = 0;
			iph->check = 0;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
			adapter->hw_tso_ctxt++;
		} else if (skb_is_gso_v6(skb)) {
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check =
			    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					     &ipv6_hdr(skb)->daddr,
					     0, IPPROTO_TCP, 0);
			adapter->hw_tso6_ctxt++;
		}

		i = tx_ring->next_to_use;

		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);

		/* VLAN MACLEN IPLEN */
		if (tx_flags & IXGBE_TX_FLAGS_VLAN)
			vlan_macip_lens |=
				(tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
		vlan_macip_lens |= ((skb_network_offset(skb)) <<
				    IXGBE_ADVTXD_MACLEN_SHIFT);
		*hdr_len += skb_network_offset(skb);
		vlan_macip_lens |=
			(skb_transport_header(skb) - skb_network_header(skb));
		*hdr_len +=
			(skb_transport_header(skb) - skb_network_header(skb));
		context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
		context_desc->seqnum_seed = 0;

		/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
		type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
				   IXGBE_ADVTXD_DTYP_CTXT);

		if (skb->protocol == htons(ETH_P_IP))
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);

		/* MSS L4LEN IDX */
		mss_l4len_idx =
			(skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
		mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
		/* use index 1 for TSO */
		mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
		context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);

		tx_buffer_info->time_stamp = jiffies;
		tx_buffer_info->next_to_watch = i;

		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}

	return false;
}

static bool ixgbevf_tx_csum(struct ixgbevf_adapter *adapter,
			    struct ixgbevf_ring *tx_ring,
			    struct sk_buff *skb, u32 tx_flags)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	unsigned int i;
	struct ixgbevf_tx_buffer *tx_buffer_info;
	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL ||
	    (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
		i = tx_ring->next_to_use;
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);

		if (tx_flags & IXGBE_TX_FLAGS_VLAN)
			vlan_macip_lens |= (tx_flags &
					    IXGBE_TX_FLAGS_VLAN_MASK);
		vlan_macip_lens |= (skb_network_offset(skb) <<
				    IXGBE_ADVTXD_MACLEN_SHIFT);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			vlan_macip_lens |= (skb_transport_header(skb) -
					    skb_network_header(skb));

		context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
		context_desc->seqnum_seed = 0;

		type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
				    IXGBE_ADVTXD_DTYP_CTXT);

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			switch (skb->protocol) {
			case __constant_htons(ETH_P_IP):
				type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
					type_tucmd_mlhl |=
					    IXGBE_ADVTXD_TUCMD_L4T_TCP;
				break;
			case __constant_htons(ETH_P_IPV6):
				/* XXX what about other V6 headers?? */
				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
					type_tucmd_mlhl |=
						IXGBE_ADVTXD_TUCMD_L4T_TCP;
				break;
			default:
				if (unlikely(net_ratelimit())) {
					printk(KERN_WARNING
					       "partial checksum but "
					       "proto=%x!\n",
					       skb->protocol);
				}
				break;
			}
		}

		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
		/* use index zero for tx checksum offload */
		context_desc->mss_l4len_idx = 0;

		tx_buffer_info->time_stamp = jiffies;
		tx_buffer_info->next_to_watch = i;

		adapter->hw_csum_tx_good++;
		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}

	return false;
}

static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
			  struct ixgbevf_ring *tx_ring,
			  struct sk_buff *skb, u32 tx_flags,
			  unsigned int first)
{
	struct pci_dev *pdev = adapter->pdev;
	struct ixgbevf_tx_buffer *tx_buffer_info;
	unsigned int len;
	unsigned int total = skb->len;
	unsigned int offset = 0, size;
	int count = 0;
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int f;
	int i;

	i = tx_ring->next_to_use;

	len = min(skb_headlen(skb), total);
	while (len) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);

		tx_buffer_info->length = size;
		tx_buffer_info->mapped_as_page = false;
		tx_buffer_info->dma = dma_map_single(&adapter->pdev->dev,
						     skb->data + offset,
						     size, DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
			goto dma_error;
		tx_buffer_info->time_stamp = jiffies;
		tx_buffer_info->next_to_watch = i;

		len -= size;
		total -= size;
		offset += size;
		count++;
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	for (f = 0; f < nr_frags; f++) {
		struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];
		len = min((unsigned int)frag->size, total);
		offset = frag->page_offset;

		while (len) {
			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);

			tx_buffer_info->length = size;
			tx_buffer_info->dma = dma_map_page(&adapter->pdev->dev,
							   frag->page,
							   offset,
							   size,
							   DMA_TO_DEVICE);
			tx_buffer_info->mapped_as_page = true;
			if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
				goto dma_error;
			tx_buffer_info->time_stamp = jiffies;
			tx_buffer_info->next_to_watch = i;

			len -= size;
			total -= size;
			offset += size;
			count++;
			i++;
			if (i == tx_ring->count)
				i = 0;
		}
		if (total == 0)
			break;
	}

	if (i == 0)
		i = tx_ring->count - 1;
	else
		i = i - 1;
	tx_ring->tx_buffer_info[i].skb = skb;
	tx_ring->tx_buffer_info[first].next_to_watch = i;

	return count;

dma_error:
	dev_err(&pdev->dev, "TX DMA map failed\n");

	/* clear timestamp and dma mappings for failed tx_buffer_info map */
	tx_buffer_info->dma = 0;
	tx_buffer_info->time_stamp = 0;
	tx_buffer_info->next_to_watch = 0;
	count--;

	/* clear timestamp and dma mappings for remaining portion of packet */
	while (count >= 0) {
		count--;
		i--;
		if (i < 0)
			i += tx_ring->count;
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		ixgbevf_unmap_and_free_tx_resource(adapter, tx_buffer_info);
	}

	return count;
}
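
/*
 * Chunking example for the mapping loops above: IXGBE_MAX_DATA_PER_TXD
 * caps how much data one descriptor may carry (1 << IXGBE_MAX_TXD_PWR;
 * 16KB if IXGBE_MAX_TXD_PWR is 14 as in the PF driver).  A 20KB linear
 * skb would then occupy two tx_buffer_info slots, 16KB + 4KB, and each
 * paged fragment is split the same way before count is returned for
 * ixgbevf_tx_queue() to stamp descriptors.
 */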

static void ixgbevf_tx_queue(struct ixgbevf_adapter *adapter,
			     struct ixgbevf_ring *tx_ring, int tx_flags,
			     int count, u32 paylen, u8 hdr_len)
{
	union ixgbe_adv_tx_desc *tx_desc = NULL;
	struct ixgbevf_tx_buffer *tx_buffer_info;
	u32 olinfo_status = 0, cmd_type_len = 0;
	unsigned int i;

	u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;

	cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;

	cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;

	if (tx_flags & IXGBE_TX_FLAGS_VLAN)
		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;

	if (tx_flags & IXGBE_TX_FLAGS_TSO) {
		cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;

		olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
			IXGBE_ADVTXD_POPTS_SHIFT;

		/* use index 1 context for tso */
		olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
		if (tx_flags & IXGBE_TX_FLAGS_IPV4)
			olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
				IXGBE_ADVTXD_POPTS_SHIFT;

	} else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
		olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
			IXGBE_ADVTXD_POPTS_SHIFT;

	olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);

	i = tx_ring->next_to_use;
	while (count--) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
		tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
		tx_desc->read.cmd_type_len =
			cpu_to_le32(cmd_type_len | tx_buffer_info->length);
		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);

	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();

	tx_ring->next_to_use = i;
	writel(i, adapter->hw.hw_addr + tx_ring->tail);
}
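
/*
 * Ordering note: the wmb() above must sit between the descriptor writes
 * and the writel() of the new tail.  The tail write is the doorbell;
 * once the device observes it, it is free to DMA descriptors
 * immediately, so on weakly ordered architectures the barrier is what
 * keeps it from fetching a half-written descriptor.
 */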

static int __ixgbevf_maybe_stop_tx(struct net_device *netdev,
				   struct ixgbevf_ring *tx_ring, int size)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	netif_stop_subqueue(netdev, tx_ring->queue_index);
	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available. */
	if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(netdev, tx_ring->queue_index);
	++adapter->restart_queue;
	return 0;
}
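
/*
 * The stop-then-recheck sequence above closes a race with the Tx clean
 * path: between deciding the ring is full and the queue actually
 * stopping, another CPU may free descriptors.  The smp_mb() orders the
 * subqueue stop against the re-read of IXGBE_DESC_UNUSED(), so either
 * this path sees the freed room and restarts the queue itself, or the
 * cleaner sees the stopped queue and wakes it.
 */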

static int ixgbevf_maybe_stop_tx(struct net_device *netdev,
				 struct ixgbevf_ring *tx_ring, int size)
{
	if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __ixgbevf_maybe_stop_tx(netdev, tx_ring, size);
}

static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbevf_ring *tx_ring;
	unsigned int first;
	unsigned int tx_flags = 0;
	u8 hdr_len = 0;
	int r_idx = 0, tso;
	int count = 0;
	unsigned int f;

	tx_ring = &adapter->tx_ring[r_idx];

	if (vlan_tx_tag_present(skb)) {
		tx_flags |= vlan_tx_tag_get(skb);
		tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= IXGBE_TX_FLAGS_VLAN;
	}

	/* four things can cause us to need a context descriptor */
	if (skb_is_gso(skb) ||
	    (skb->ip_summed == CHECKSUM_PARTIAL) ||
	    (tx_flags & IXGBE_TX_FLAGS_VLAN))
		count++;

	count += TXD_USE_COUNT(skb_headlen(skb));
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);

	if (ixgbevf_maybe_stop_tx(netdev, tx_ring, count)) {
		adapter->tx_busy++;
		return NETDEV_TX_BUSY;
	}

	first = tx_ring->next_to_use;

	if (skb->protocol == htons(ETH_P_IP))
		tx_flags |= IXGBE_TX_FLAGS_IPV4;
	tso = ixgbevf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (tso)
		tx_flags |= IXGBE_TX_FLAGS_TSO;
	else if (ixgbevf_tx_csum(adapter, tx_ring, skb, tx_flags) &&
		 (skb->ip_summed == CHECKSUM_PARTIAL))
		tx_flags |= IXGBE_TX_FLAGS_CSUM;

	ixgbevf_tx_queue(adapter, tx_ring, tx_flags,
			 ixgbevf_tx_map(adapter, tx_ring, skb, tx_flags, first),
			 skb->len, hdr_len);

	ixgbevf_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);

	return NETDEV_TX_OK;
}
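
/*
 * Descriptor budgeting example for the count logic above: TXD_USE_COUNT
 * is a ceiling divide by the per-descriptor data limit, so a plain
 * 1500-byte linear skb needs one data descriptor, plus one context
 * descriptor whenever TSO, checksum offload or a VLAN tag applies;
 * ixgbevf_maybe_stop_tx() is given that worst-case total before any
 * descriptor is written.
 */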

/**
 * ixgbevf_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbevf_set_mac(struct net_device *netdev, void *p)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	if (hw->mac.ops.set_rar)
		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);

	return 0;
}

/**
 * ixgbevf_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;

	/* MTU < 68 is an error and causes problems on some kernels */
	if ((new_mtu < 68) || (max_frame > MAXIMUM_ETHERNET_VLAN_SIZE))
		return -EINVAL;

	hw_dbg(&adapter->hw, "changing MTU from %d to %d\n",
	       netdev->mtu, new_mtu);
	/* must set new MTU before calling down or up */
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		ixgbevf_reinit_locked(adapter);

	return 0;
}
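
/*
 * Frame-size arithmetic behind the check above: max_frame = MTU +
 * ETH_HLEN (14) + ETH_FCS_LEN (4), so the standard 1500-byte MTU maps
 * to a 1518-byte frame and the largest accepted MTU is
 * MAXIMUM_ETHERNET_VLAN_SIZE - 18.
 */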

static void ixgbevf_shutdown(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		ixgbevf_down(adapter);
		ixgbevf_free_irq(adapter);
		ixgbevf_free_all_tx_resources(adapter);
		ixgbevf_free_all_rx_resources(adapter);
	}

#ifdef CONFIG_PM
	pci_save_state(pdev);
#endif

	pci_disable_device(pdev);
}

static const struct net_device_ops ixgbe_netdev_ops = {
	.ndo_open		= &ixgbevf_open,
	.ndo_stop		= &ixgbevf_close,
	.ndo_start_xmit		= &ixgbevf_xmit_frame,
	.ndo_set_rx_mode	= &ixgbevf_set_rx_mode,
	.ndo_set_multicast_list	= &ixgbevf_set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= &ixgbevf_set_mac,
	.ndo_change_mtu		= &ixgbevf_change_mtu,
	.ndo_tx_timeout		= &ixgbevf_tx_timeout,
	.ndo_vlan_rx_register	= &ixgbevf_vlan_rx_register,
	.ndo_vlan_rx_add_vid	= &ixgbevf_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= &ixgbevf_vlan_rx_kill_vid,
};

static void ixgbevf_assign_netdev_ops(struct net_device *dev)
{
	struct ixgbevf_adapter *adapter;
	adapter = netdev_priv(dev);
	dev->netdev_ops = &ixgbe_netdev_ops;
	ixgbevf_set_ethtool_ops(dev);
	dev->watchdog_timeo = 5 * HZ;
}

/**
 * ixgbevf_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgbevf_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit ixgbevf_probe(struct pci_dev *pdev,
				   const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct ixgbevf_adapter *adapter = NULL;
	struct ixgbe_hw *hw = NULL;
	const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
	static int cards_found;
	int err, pci_using_dac;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
			if (err) {
				dev_err(&pdev->dev, "No usable DMA "
					"configuration, aborting\n");
				goto err_dma;
			}
		}
		pci_using_dac = 0;
	}

	err = pci_request_regions(pdev, ixgbevf_driver_name);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
		goto err_pci_reg;
	}

	pci_set_master(pdev);
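
	/*
	 * DMA policy note for the block above: a 64-bit streaming *and*
	 * coherent mask must both succeed for pci_using_dac to be set
	 * (which later turns on NETIF_F_HIGHDMA); otherwise the driver
	 * retries with 32-bit masks and aborts the probe only when even
	 * those fail.
	 */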

#ifdef HAVE_TX_MQ
	netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
				   MAX_TX_QUEUES);
#else
	netdev = alloc_etherdev(sizeof(struct ixgbevf_adapter));
#endif
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);

	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;

	/*
	 * call save state here in standalone driver because it relies on
	 * adapter struct to exist, and needs to call netdev_priv
	 */
	pci_save_state(pdev);

	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
			      pci_resource_len(pdev, 0));
	if (!hw->hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	ixgbevf_assign_netdev_ops(netdev);

	adapter->bd_number = cards_found;

	/* Setup hw api */
	memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
	hw->mac.type = ii->mac;

	memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
	       sizeof(struct ixgbe_mac_operations));

	adapter->flags &= ~IXGBE_FLAG_RX_PS_CAPABLE;
	adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
	adapter->flags |= IXGBE_FLAG_RX_1BUF_CAPABLE;

	/* setup the private structure */
	err = ixgbevf_sw_init(adapter);

	netdev->features = NETIF_F_SG |
			   NETIF_F_IP_CSUM |
			   NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_RX |
			   NETIF_F_HW_VLAN_FILTER;

	netdev->features |= NETIF_F_IPV6_CSUM;
	netdev->features |= NETIF_F_TSO;
	netdev->features |= NETIF_F_TSO6;
	netdev->features |= NETIF_F_GRO;
	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	/* The HW MAC address was set and/or determined in sw_init */
	memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
	memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		printk(KERN_ERR "invalid MAC address\n");
		err = -EIO;
		goto err_sw_init;
	}

	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = ixgbevf_watchdog;
	adapter->watchdog_timer.data = (unsigned long)adapter;

	INIT_WORK(&adapter->reset_task, ixgbevf_reset_task);
	INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task);

	err = ixgbevf_init_interrupt_scheme(adapter);
	if (err)
		goto err_sw_init;

	/* pick up the PCI bus settings for reporting later */
	if (hw->mac.ops.get_bus_info)
		hw->mac.ops.get_bus_info(hw);

	strcpy(netdev->name, "eth%d");

	err = register_netdev(netdev);
	if (err)
		goto err_register;

	adapter->netdev_registered = true;

	netif_carrier_off(netdev);

	ixgbevf_init_last_counter_stats(adapter);

	/* print the MAC address */
	hw_dbg(hw, "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
	       netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
	       netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]);

	hw_dbg(hw, "MAC: %d\n", hw->mac.type);

	hw_dbg(hw, "LRO is disabled\n");

	hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
	cards_found++;
	return 0;

err_register:
err_sw_init:
	ixgbevf_reset_interrupt_capability(adapter);
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * ixgbevf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbevf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit ixgbevf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	set_bit(__IXGBEVF_DOWN, &adapter->state);

	del_timer_sync(&adapter->watchdog_timer);

	cancel_work_sync(&adapter->watchdog_task);

	flush_scheduled_work();

	if (adapter->netdev_registered) {
		unregister_netdev(netdev);
		adapter->netdev_registered = false;
	}

	ixgbevf_reset_interrupt_capability(adapter);

	iounmap(adapter->hw.hw_addr);
	pci_release_regions(pdev);

	hw_dbg(&adapter->hw, "Remove complete\n");

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	free_netdev(netdev);

	pci_disable_device(pdev);
}

static struct pci_driver ixgbevf_driver = {
	.name     = ixgbevf_driver_name,
	.id_table = ixgbevf_pci_tbl,
	.probe    = ixgbevf_probe,
	.remove   = __devexit_p(ixgbevf_remove),
	.shutdown = ixgbevf_shutdown,
};

/**
 * ixgbevf_init_module - Driver Registration Routine
 *
 * ixgbevf_init_module is the first routine called when the driver is
 * loaded.  All it does is register with the PCI subsystem.
 **/
static int __init ixgbevf_init_module(void)
{
	int ret;
	printk(KERN_INFO "ixgbevf: %s - version %s\n", ixgbevf_driver_string,
	       ixgbevf_driver_version);

	printk(KERN_INFO "%s\n", ixgbevf_copyright);

	ret = pci_register_driver(&ixgbevf_driver);
	return ret;
}

module_init(ixgbevf_init_module);

/**
 * ixgbevf_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbevf_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit ixgbevf_exit_module(void)
{
	pci_unregister_driver(&ixgbevf_driver);
}

/**
 * ixgbevf_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
{
	struct ixgbevf_adapter *adapter = hw->back;
	return adapter->netdev->name;
}

module_exit(ixgbevf_exit_module);

/* ixgbevf_main.c */