/*******************************************************************************

  Intel 82599 Virtual Function driver
  Copyright(c) 1999 - 2012 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
/******************************************************************************
 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
******************************************************************************/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>

#include "ixgbevf.h"
const char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
	"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";

#define DRV_VERSION "2.12.1-k"
const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] =
	"Copyright (c) 2009 - 2012 Intel Corporation.";
static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
	[board_82599_vf] = &ixgbevf_82599_vf_info,
	[board_X540_vf]  = &ixgbevf_X540_vf_info,
};
/* ixgbevf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static DEFINE_PCI_DEVICE_TABLE(ixgbevf_pci_tbl) = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
/* forward decls */
static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter);
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);
static inline void ixgbevf_release_rx_desc(struct ixgbevf_ring *rx_ring,
					   u32 val)
{
	rx_ring->next_to_use = val;

	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}
/**
 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 **/
static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
			     u8 queue, u8 msix_vector)
{
	u32 ivar, index;
	struct ixgbe_hw *hw = &adapter->hw;

	if (direction == -1) {
		/* other causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
		ivar &= ~0xFF;
		ivar |= msix_vector;
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
	} else {
		/* tx or rx causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		index = ((16 * (queue & 1)) + (8 * direction));
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
		ivar &= ~(0xFF << index);
		ivar |= (msix_vector << index);
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
	}
}
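/*
 * A worked example of the IVAR layout programmed above: each VTIVAR
 * register packs four byte-wide entries, one Rx and one Tx cause for
 * each of two queues.  For Tx queue 3 (direction = 1), queue >> 1
 * selects VTIVAR(1) and index = (16 * (3 & 1)) + (8 * 1) = 24, so the
 * vector number lands in bits 31:24 of that register.
 */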
static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
					       struct ixgbevf_tx_buffer
					       *tx_buffer_info)
{
	if (tx_buffer_info->dma) {
		if (tx_buffer_info->mapped_as_page)
			dma_unmap_page(tx_ring->dev,
				       tx_buffer_info->dma,
				       tx_buffer_info->length,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(tx_ring->dev,
					 tx_buffer_info->dma,
					 tx_buffer_info->length,
					 DMA_TO_DEVICE);
		tx_buffer_info->dma = 0;
	}
	if (tx_buffer_info->skb) {
		dev_kfree_skb_any(tx_buffer_info->skb);
		tx_buffer_info->skb = NULL;
	}
	tx_buffer_info->time_stamp = 0;
	/* tx_buffer_info must be completely set up in the transmit path */
}
#define IXGBE_MAX_TXD_PWR	14
#define IXGBE_MAX_DATA_PER_TXD	(1 << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
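/*
 * Worst-case arithmetic for the macros above: IXGBE_MAX_DATA_PER_TXD is
 * 1 << 14 = 16384 bytes, so e.g. TXD_USE_COUNT(60000) = 4 descriptors
 * for a 60KB chunk of data.  The "+ 4" in DESC_NEEDED leaves headroom
 * (roughly: linear skb head, context descriptor, and slack) on top of
 * one descriptor per fragment.
 */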
static void ixgbevf_tx_timeout(struct net_device *netdev);
/**
 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: board private structure
 * @tx_ring: tx ring to clean
 **/
static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
				 struct ixgbevf_ring *tx_ring)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
	struct ixgbevf_tx_buffer *tx_buffer_info;
	unsigned int i, count = 0;
	unsigned int total_bytes = 0, total_packets = 0;

	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		return true;

	i = tx_ring->next_to_clean;
	tx_buffer_info = &tx_ring->tx_buffer_info[i];
	eop_desc = tx_buffer_info->next_to_watch;

	do {
		bool cleaned = false;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		read_barrier_depends();

		/* if DD is not set pending work has not been completed */
		if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buffer_info->next_to_watch = NULL;

		for ( ; !cleaned; count++) {
			struct sk_buff *skb;
			tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
			cleaned = (tx_desc == eop_desc);
			skb = tx_buffer_info->skb;

			if (cleaned && skb) {
				unsigned int segs, bytecount;

				/* gso_segs is currently only valid for tcp */
				segs = skb_shinfo(skb)->gso_segs ?: 1;
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * skb_headlen(skb)) +
					    skb->len;
				total_packets += segs;
				total_bytes += bytecount;
			}

			ixgbevf_unmap_and_free_tx_resource(tx_ring,
							   tx_buffer_info);

			tx_desc->wb.status = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;

			tx_buffer_info = &tx_ring->tx_buffer_info[i];
		}

		eop_desc = tx_buffer_info->next_to_watch;
	} while (count < tx_ring->count);

	tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
		     (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++adapter->restart_queue;
		}
	}

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;

	return count < tx_ring->count;
}
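/*
 * Note on the TSO accounting in the cleanup loop above: the hardware
 * replicates the header for every segment, so bytes on the wire are
 * approximated as (segs - 1) * skb_headlen(skb) + skb->len.  For
 * example, 3 segments with a 66-byte header and skb->len = 3066 are
 * counted as 2 * 66 + 3066 = 3198 bytes.
 */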
/**
 * ixgbevf_receive_skb - Send a completed packet up the stack
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 * @status: hardware indication of status of receive
 * @rx_desc: rx descriptor
 **/
static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
				struct sk_buff *skb, u8 status,
				union ixgbe_adv_rx_desc *rx_desc)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	bool is_vlan = (status & IXGBE_RXD_STAT_VP);
	u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);

	if (is_vlan && test_bit(tag & VLAN_VID_MASK, adapter->active_vlans))
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);

	if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
		napi_gro_receive(&q_vector->napi, skb);
	else
		netif_rx(skb);
}
/**
 * ixgbevf_rx_skb - Helper function to determine proper Rx method
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 * @status: hardware indication of status of receive
 * @rx_desc: rx descriptor
 **/
static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
			   struct sk_buff *skb, u8 status,
			   union ixgbe_adv_rx_desc *rx_desc)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	skb_mark_napi_id(skb, &q_vector->napi);

	if (ixgbevf_qv_busy_polling(q_vector)) {
		netif_receive_skb(skb);
		/* exit early if we busy polled */
		return;
	}
#endif /* CONFIG_NET_RX_BUSY_POLL */

	ixgbevf_receive_skb(q_vector, skb, status, rx_desc);
}
/**
 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
 * @ring: pointer to Rx descriptor ring structure
 * @status_err: hardware indication of status of receive
 * @skb: skb currently being received and modified
 **/
static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
				       u32 status_err, struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Rx csum disabled */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* if IP and error */
	if ((status_err & IXGBE_RXD_STAT_IPCS) &&
	    (status_err & IXGBE_RXDADV_ERR_IPE)) {
		ring->rx_stats.csum_err++;
		return;
	}

	if (!(status_err & IXGBE_RXD_STAT_L4CS))
		return;

	if (status_err & IXGBE_RXDADV_ERR_TCPE) {
		ring->rx_stats.csum_err++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	ring->hw_csum_rx_good++;
}
/**
 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
 * @rx_ring: rx descriptor ring (for a specific queue) to setup buffers on
 **/
static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
				     int cleaned_count)
{
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbevf_rx_buffer *bi;
	unsigned int i = rx_ring->next_to_use;

	while (cleaned_count--) {
		rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
		bi = &rx_ring->rx_buffer_info[i];

		if (!bi->skb) {
			struct sk_buff *skb;

			skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
							rx_ring->rx_buf_len);
			if (!skb)
				goto no_buffers;

			bi->skb = skb;

			bi->dma = dma_map_single(rx_ring->dev, skb->data,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);
			if (dma_mapping_error(rx_ring->dev, bi->dma)) {
				dev_kfree_skb(skb);
				bi->skb = NULL;
				dev_err(rx_ring->dev, "Rx DMA map failed\n");
				break;
			}
		}
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);

		i++;
		if (i == rx_ring->count)
			i = 0;
	}

no_buffers:
	rx_ring->rx_stats.alloc_rx_buff_failed++;
	if (rx_ring->next_to_use != i)
		ixgbevf_release_rx_desc(rx_ring, i);
}
static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
					     u32 qmask)
{
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
}
static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
				struct ixgbevf_ring *rx_ring,
				int budget)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
	struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer;
	struct sk_buff *skb;
	unsigned int i;
	u32 len, staterr;
	int cleaned_count = 0;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	rx_buffer_info = &rx_ring->rx_buffer_info[i];

	while (staterr & IXGBE_RXD_STAT_DD) {
		if (!budget)
			break;
		budget--;

		rmb(); /* read descriptor and rx_buffer_info after status DD */
		len = le16_to_cpu(rx_desc->wb.upper.length);
		skb = rx_buffer_info->skb;
		prefetch(skb->data - NET_IP_ALIGN);
		rx_buffer_info->skb = NULL;

		if (rx_buffer_info->dma) {
			dma_unmap_single(rx_ring->dev, rx_buffer_info->dma,
					 rx_ring->rx_buf_len,
					 DMA_FROM_DEVICE);
			rx_buffer_info->dma = 0;
			skb_put(skb, len);
		}

		i++;
		if (i == rx_ring->count)
			i = 0;

		next_rxd = IXGBEVF_RX_DESC(rx_ring, i);
		prefetch(next_rxd);
		cleaned_count++;

		next_buffer = &rx_ring->rx_buffer_info[i];

		if (!(staterr & IXGBE_RXD_STAT_EOP)) {
			skb->next = next_buffer->skb;
			IXGBE_CB(skb->next)->prev = skb;
			rx_ring->rx_stats.non_eop_descs++;
			goto next_desc;
		}

		/* we should not be chaining buffers, if we did drop the skb */
		if (IXGBE_CB(skb)->prev) {
			do {
				struct sk_buff *this = skb;
				skb = IXGBE_CB(skb)->prev;
				dev_kfree_skb(this);
			} while (skb);
			goto next_desc;
		}

		/* ERR_MASK will only have valid bits if EOP set */
		if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		ixgbevf_rx_checksum(rx_ring, staterr, skb);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		skb->protocol = eth_type_trans(skb, rx_ring->netdev);

		/* Workaround hardware that can't do proper VEPA multicast
		 * source pruning.
		 */
		if ((skb->pkt_type & (PACKET_BROADCAST | PACKET_MULTICAST)) &&
		    ether_addr_equal(rx_ring->netdev->dev_addr,
				     eth_hdr(skb)->h_source)) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		ixgbevf_rx_skb(q_vector, skb, staterr, rx_desc);

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
			ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		rx_buffer_info = &rx_ring->rx_buffer_info[i];

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = ixgbevf_desc_unused(rx_ring);

	if (cleaned_count)
		ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	q_vector->rx.total_packets += total_rx_packets;
	q_vector->rx.total_bytes += total_rx_bytes;

	return total_rx_packets;
}
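/*
 * The packet count returned above feeds the NAPI logic in
 * ixgbevf_poll(): a ring that used up its whole per-ring budget is
 * assumed to still have work pending, so the vector stays in polling
 * mode instead of re-enabling its interrupt.
 */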
/**
 * ixgbevf_poll - NAPI polling callback
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean one or more rings associated with a q_vector.
 **/
static int ixgbevf_poll(struct napi_struct *napi, int budget)
{
	struct ixgbevf_q_vector *q_vector =
		container_of(napi, struct ixgbevf_q_vector, napi);
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_ring *ring;
	int per_ring_budget;
	bool clean_complete = true;

	ixgbevf_for_each_ring(ring, q_vector->tx)
		clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);

#ifdef CONFIG_NET_RX_BUSY_POLL
	if (!ixgbevf_qv_lock_napi(q_vector))
		return budget;
#endif

	/* attempt to distribute budget to each queue fairly, but don't allow
	 * the budget to go below 1 because we'll exit polling
	 */
	if (q_vector->rx.count > 1)
		per_ring_budget = max(budget/q_vector->rx.count, 1);
	else
		per_ring_budget = budget;

	adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
	ixgbevf_for_each_ring(ring, q_vector->rx)
		clean_complete &= (ixgbevf_clean_rx_irq(q_vector, ring,
							per_ring_budget)
				   < per_ring_budget);
	adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;

#ifdef CONFIG_NET_RX_BUSY_POLL
	ixgbevf_qv_unlock_napi(q_vector);
#endif

	/* If all work not completed, return budget and keep polling */
	if (!clean_complete)
		return budget;
	/* all work done, exit the polling mode */
	napi_complete(napi);
	if (adapter->rx_itr_setting & 1)
		ixgbevf_set_itr(q_vector);
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
		ixgbevf_irq_enable_queues(adapter,
					  1 << q_vector->v_idx);

	return 0;
}
/**
 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 **/
void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	int v_idx = q_vector->v_idx;
	u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;

	/*
	 * set the WDIS bit to not clear the timer bits and cause an
	 * immediate assertion of the interrupt
	 */
	itr_reg |= IXGBE_EITR_CNT_WDIS;

	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
}
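/*
 * q_vector->itr is kept in register format, where (itr >> 2) is the
 * interval in usecs; e.g. assuming the usual IXGBE_20K_ITR value of 200,
 * the interval is 50 usecs, i.e. roughly 20000 interrupts/sec.
 */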
#ifdef CONFIG_NET_RX_BUSY_POLL
/* must be called with local_bh_disable()d */
static int ixgbevf_busy_poll_recv(struct napi_struct *napi)
{
	struct ixgbevf_q_vector *q_vector =
			container_of(napi, struct ixgbevf_q_vector, napi);
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_ring *ring;
	int found = 0;

	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		return LL_FLUSH_FAILED;

	if (!ixgbevf_qv_lock_poll(q_vector))
		return LL_FLUSH_BUSY;

	ixgbevf_for_each_ring(ring, q_vector->rx) {
		found = ixgbevf_clean_rx_irq(q_vector, ring, 4);
#ifdef BP_EXTENDED_STATS
		if (found)
			ring->stats.cleaned += found;
		else
			ring->stats.misses++;
#endif
		if (found)
			break;
	}

	ixgbevf_qv_unlock_poll(q_vector);

	return found;
}
#endif /* CONFIG_NET_RX_BUSY_POLL */
/**
 * ixgbevf_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
{
	struct ixgbevf_q_vector *q_vector;
	int q_vectors, v_idx;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	adapter->eims_enable_mask = 0;

	/*
	 * Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		struct ixgbevf_ring *ring;
		q_vector = adapter->q_vector[v_idx];

		ixgbevf_for_each_ring(ring, q_vector->rx)
			ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);

		ixgbevf_for_each_ring(ring, q_vector->tx)
			ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);

		if (q_vector->tx.ring && !q_vector->rx.ring) {
			/* tx only vector */
			if (adapter->tx_itr_setting == 1)
				q_vector->itr = IXGBE_10K_ITR;
			else
				q_vector->itr = adapter->tx_itr_setting;
		} else {
			/* rx or rx/tx vector */
			if (adapter->rx_itr_setting == 1)
				q_vector->itr = IXGBE_20K_ITR;
			else
				q_vector->itr = adapter->rx_itr_setting;
		}

		/* add q_vector eims value to global eims_enable_mask */
		adapter->eims_enable_mask |= 1 << v_idx;

		ixgbevf_write_eitr(q_vector);
	}

	ixgbevf_set_ivar(adapter, -1, 1, v_idx);
	/* setup eims_other and add value to global eims_enable_mask */
	adapter->eims_other = 1 << v_idx;
	adapter->eims_enable_mask |= adapter->eims_other;
}
enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};
/**
 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @ring_container: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
			       struct ixgbevf_ring_container *ring_container)
{
	int bytes = ring_container->total_bytes;
	int packets = ring_container->total_packets;
	u32 timepassed_us;
	u64 bytes_perint;
	u8 itr_setting = ring_container->itr;

	if (packets == 0)
		return;

	/* simple throttlerate management
	 *    0-20MB/s lowest (100000 ints/s)
	 *   20-100MB/s low   (20000 ints/s)
	 *  100-1249MB/s bulk (8000 ints/s)
	 */
	/* what was last interrupt timeslice? */
	timepassed_us = q_vector->itr >> 2;
	bytes_perint = bytes / timepassed_us; /* bytes/usec */

	switch (itr_setting) {
	case lowest_latency:
		if (bytes_perint > 10)
			itr_setting = low_latency;
		break;
	case low_latency:
		if (bytes_perint > 20)
			itr_setting = bulk_latency;
		else if (bytes_perint <= 10)
			itr_setting = lowest_latency;
		break;
	case bulk_latency:
		if (bytes_perint <= 20)
			itr_setting = low_latency;
		break;
	}

	/* clear work counters since we have the values we need */
	ring_container->total_bytes = 0;
	ring_container->total_packets = 0;

	/* write updated itr to ring container */
	ring_container->itr = itr_setting;
}
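/*
 * Rough worked example for the buckets above: assuming q_vector->itr is
 * IXGBE_20K_ITR (200), the timeslice is 200 >> 2 = 50 usecs, so 4000
 * bytes received in one interrupt gives bytes_perint = 80, which bumps
 * a low_latency ring (> 20) up to bulk_latency.
 */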
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
{
	u32 new_itr = q_vector->itr;
	u8 current_itr;

	ixgbevf_update_itr(q_vector, &q_vector->tx);
	ixgbevf_update_itr(q_vector, &q_vector->rx);

	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = IXGBE_100K_ITR;
		break;
	case low_latency:
		new_itr = IXGBE_20K_ITR;
		break;
	case bulk_latency:
	default:
		new_itr = IXGBE_8K_ITR;
		break;
	}

	if (new_itr != q_vector->itr) {
		/* do an exponential smoothing */
		new_itr = (10 * new_itr * q_vector->itr) /
			  ((9 * new_itr) + q_vector->itr);

		/* save the algorithm value here */
		q_vector->itr = new_itr;

		ixgbevf_write_eitr(q_vector);
	}
}
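/*
 * The smoothing step above moves only part of the way per pass: assuming
 * the usual constants IXGBE_20K_ITR = 200 and IXGBE_8K_ITR = 500, a
 * vector at 200 with a bulk_latency target computes
 * (10 * 500 * 200) / (9 * 500 + 200) = 212, so the interrupt rate ramps
 * gradually instead of jumping to the new value.
 */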
static irqreturn_t ixgbevf_msix_other(int irq, void *data)
{
	struct ixgbevf_adapter *adapter = data;
	struct ixgbe_hw *hw = &adapter->hw;

	hw->mac.get_link_status = 1;

	if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer, jiffies);

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);

	return IRQ_HANDLED;
}
/**
 * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues)
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 **/
static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
{
	struct ixgbevf_q_vector *q_vector = data;

	/* EIAM disabled interrupts (on this vector) for us */
	if (q_vector->rx.ring || q_vector->tx.ring)
		napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}
static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
				     int r_idx)
{
	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

	a->rx_ring[r_idx]->next = q_vector->rx.ring;
	q_vector->rx.ring = a->rx_ring[r_idx];
	q_vector->rx.count++;
}
static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
				     int t_idx)
{
	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

	a->tx_ring[t_idx]->next = q_vector->tx.ring;
	q_vector->tx.ring = a->tx_ring[t_idx];
	q_vector->tx.count++;
}
/**
 * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible.  You would add new
 * mapping configurations in here.
 **/
static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
{
	int q_vectors;
	int v_start = 0;
	int rxr_idx = 0, txr_idx = 0;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int i, j;
	int rqpv, tqpv;
	int err = 0;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/*
	 * The ideal configuration...
	 * We have enough vectors to map one per queue.
	 */
	if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
		for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
			map_vector_to_rxq(adapter, v_start, rxr_idx);

		for (; txr_idx < txr_remaining; v_start++, txr_idx++)
			map_vector_to_txq(adapter, v_start, txr_idx);
		goto out;
	}

	/*
	 * If we don't have enough vectors for a 1-to-1
	 * mapping, we'll have to group them so there are
	 * multiple queues per vector.
	 */
	/* Re-adjusting *qpv takes care of the remainder. */
	for (i = v_start; i < q_vectors; i++) {
		rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
		for (j = 0; j < rqpv; j++) {
			map_vector_to_rxq(adapter, i, rxr_idx);
			rxr_idx++;
			rxr_remaining--;
		}
	}
	for (i = v_start; i < q_vectors; i++) {
		tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
		for (j = 0; j < tqpv; j++) {
			map_vector_to_txq(adapter, i, txr_idx);
			txr_idx++;
			txr_remaining--;
		}
	}

out:
	return err;
}
/**
 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	int vector, err;
	int ri = 0, ti = 0;

	for (vector = 0; vector < q_vectors; vector++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector];
		struct msix_entry *entry = &adapter->msix_entries[vector];

		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "TxRx", ri++);
			ti++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "rx", ri++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "tx", ti++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0,
				  q_vector->name, q_vector);
		if (err) {
			hw_dbg(&adapter->hw,
			       "request_irq failed for MSIX interrupt "
			       "Error: %d\n", err);
			goto free_queue_irqs;
		}
	}

	err = request_irq(adapter->msix_entries[vector].vector,
			  &ixgbevf_msix_other, 0, netdev->name, adapter);
	if (err) {
		hw_dbg(&adapter->hw,
		       "request_irq for msix_other failed: %d\n", err);
		goto free_queue_irqs;
	}

	return 0;

free_queue_irqs:
	while (vector) {
		vector--;
		free_irq(adapter->msix_entries[vector].vector,
			 adapter->q_vector[vector]);
	}
	/* This failure is non-recoverable - it indicates the system is
	 * out of MSIX vector resources and the VF driver cannot run
	 * without them.  Set the number of msix vectors to zero
	 * indicating that not enough can be allocated.  The error
	 * will be returned to the user indicating device open failed.
	 * Any further attempts to force the driver to open will also
	 * fail.  The only way to recover is to unload the driver and
	 * reload it again.  If the system has recovered some MSIX
	 * vectors then it may succeed.
	 */
	adapter->num_msix_vectors = 0;
	return err;
}
static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
{
	int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (i = 0; i < q_vectors; i++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
		q_vector->rx.ring = NULL;
		q_vector->tx.ring = NULL;
		q_vector->rx.count = 0;
		q_vector->tx.count = 0;
	}
}
/**
 * ixgbevf_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
{
	int err = 0;

	err = ixgbevf_request_msix_irqs(adapter);

	if (err)
		hw_dbg(&adapter->hw,
		       "request_irq failed, Error %d\n", err);

	return err;
}
static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
{
	int i, q_vectors;

	q_vectors = adapter->num_msix_vectors;
	i = q_vectors - 1;

	free_irq(adapter->msix_entries[i].vector, adapter);
	i--;

	for (; i >= 0; i--) {
		/* free only the irqs that were actually requested */
		if (!adapter->q_vector[i]->rx.ring &&
		    !adapter->q_vector[i]->tx.ring)
			continue;

		free_irq(adapter->msix_entries[i].vector,
			 adapter->q_vector[i]);
	}

	ixgbevf_reset_q_vectors(adapter);
}
/**
 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0);

	IXGBE_WRITE_FLUSH(hw);

	for (i = 0; i < adapter->num_msix_vectors; i++)
		synchronize_irq(adapter->msix_entries[i].vector);
}
/**
 * ixgbevf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
}
/**
 * ixgbevf_configure_tx_ring - Configure 82599 VF Tx ring after Reset
 * @adapter: board private structure
 * @ring: structure containing ring specific data
 *
 * Configure the Tx descriptor ring after a reset.
 **/
static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
				      struct ixgbevf_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64 tdba = ring->dma;
	int wait_loop = 10;
	u32 txdctl = IXGBE_TXDCTL_ENABLE;
	u8 reg_idx = ring->reg_idx;

	/* disable queue to avoid issues while updating state */
	IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
	IXGBE_WRITE_FLUSH(hw);

	IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(reg_idx), tdba & DMA_BIT_MASK(32));
	IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(reg_idx), tdba >> 32);
	IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(reg_idx),
			ring->count * sizeof(union ixgbe_adv_tx_desc));

	/* disable head writeback */
	IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAH(reg_idx), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAL(reg_idx), 0);

	/* enable relaxed ordering */
	IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(reg_idx),
			(IXGBE_DCA_TXCTRL_DESC_RRO_EN |
			 IXGBE_DCA_TXCTRL_DATA_RRO_EN));

	/* reset head and tail pointers */
	IXGBE_WRITE_REG(hw, IXGBE_VFTDH(reg_idx), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFTDT(reg_idx), 0);
	ring->tail = hw->hw_addr + IXGBE_VFTDT(reg_idx);

	/* reset ntu and ntc to place SW in sync with hardware */
	ring->next_to_clean = 0;
	ring->next_to_use = 0;

	/* In order to avoid issues WTHRESH + PTHRESH should always be equal
	 * to or less than the number of on chip descriptors, which is
	 * currently 40.
	 */
	txdctl |= (8 << 16);	/* WTHRESH = 8 */

	/* Setting PTHRESH to 32 improves performance */
	txdctl |= (1 << 8) |	/* HTHRESH = 1 */
		   32;		/* PTHRESH = 32 */

	IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl);

	/* poll to verify queue is enabled */
	do {
		usleep_range(1000, 2000);
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(reg_idx));
	} while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
	if (!wait_loop)
		pr_err("Could not enable Tx Queue %d\n", reg_idx);
}
/**
 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
{
	u32 i;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbevf_configure_tx_ring(adapter, adapter->tx_ring[i]);
}
#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT	2

static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
{
	struct ixgbevf_ring *rx_ring;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 srrctl;

	rx_ring = adapter->rx_ring[index];

	srrctl = IXGBE_SRRCTL_DROP_EN;

	srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

	srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
		  IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
}
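/*
 * SRRCTL.BSIZEPKT is in 1KB granules (IXGBE_SRRCTL_BSIZEPKT_SHIFT = 10):
 * for the default 2K receive buffers, ALIGN(2048, 1024) >> 10 = 2, i.e.
 * a 2KB packet buffer per descriptor.
 */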
static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	/* PSRTYPE must be initialized in 82599 */
	u32 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
		      IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
		      IXGBE_PSRTYPE_L2HDR;

	if (adapter->num_rx_queues > 1)
		psrtype |= 1 << 29;

	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
}
static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	int i;
	u16 rx_buf_len;

	/* notify the PF of our intent to use this size of frame */
	ixgbevf_rlpml_set_vf(hw, max_frame);

	/* PF will allow an extra 4 bytes past for vlan tagged frames */
	max_frame += VLAN_HLEN;

	/*
	 * Allocate buffer sizes that fit well into 32K and
	 * take into account max frame size of 9.5K
	 */
	if ((hw->mac.type == ixgbe_mac_X540_vf) &&
	    (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE))
		rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
	else if (max_frame <= IXGBEVF_RXBUFFER_2K)
		rx_buf_len = IXGBEVF_RXBUFFER_2K;
	else if (max_frame <= IXGBEVF_RXBUFFER_4K)
		rx_buf_len = IXGBEVF_RXBUFFER_4K;
	else if (max_frame <= IXGBEVF_RXBUFFER_8K)
		rx_buf_len = IXGBEVF_RXBUFFER_8K;
	else
		rx_buf_len = IXGBEVF_RXBUFFER_10K;

	for (i = 0; i < adapter->num_rx_queues; i++)
		adapter->rx_ring[i]->rx_buf_len = rx_buf_len;
}
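/*
 * For a standard 1500-byte MTU the math above gives max_frame =
 * 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN) = 1522, so an
 * X540 VF uses MAXIMUM_ETHERNET_VLAN_SIZE (1522) buffers while an 82599
 * VF falls into the IXGBEVF_RXBUFFER_2K bucket.
 */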
#define IXGBEVF_MAX_RX_DESC_POLL 10
static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
				     struct ixgbevf_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
	u32 rxdctl;
	u8 reg_idx = ring->reg_idx;

	rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
	rxdctl &= ~IXGBE_RXDCTL_ENABLE;

	/* write value back with RXDCTL.ENABLE bit cleared */
	IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);

	/* the hardware may take up to 100us to really disable the rx queue */
	do {
		udelay(10);
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
	} while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));

	if (!wait_loop)
		pr_err("RXDCTL.ENABLE queue %d not cleared while polling\n",
		       reg_idx);
}
static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
					 struct ixgbevf_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
	u32 rxdctl;
	u8 reg_idx = ring->reg_idx;

	do {
		usleep_range(1000, 2000);
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
	} while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));

	if (!wait_loop)
		pr_err("RXDCTL.ENABLE queue %d not set while polling\n",
		       reg_idx);
}
*adapter
,
1275 struct ixgbevf_ring
*ring
)
1277 struct ixgbe_hw
*hw
= &adapter
->hw
;
1278 u64 rdba
= ring
->dma
;
1280 u8 reg_idx
= ring
->reg_idx
;
1282 /* disable queue to avoid issues while updating state */
1283 rxdctl
= IXGBE_READ_REG(hw
, IXGBE_VFRXDCTL(reg_idx
));
1284 ixgbevf_disable_rx_queue(adapter
, ring
);
1286 IXGBE_WRITE_REG(hw
, IXGBE_VFRDBAL(reg_idx
), rdba
& DMA_BIT_MASK(32));
1287 IXGBE_WRITE_REG(hw
, IXGBE_VFRDBAH(reg_idx
), rdba
>> 32);
1288 IXGBE_WRITE_REG(hw
, IXGBE_VFRDLEN(reg_idx
),
1289 ring
->count
* sizeof(union ixgbe_adv_rx_desc
));
1291 /* enable relaxed ordering */
1292 IXGBE_WRITE_REG(hw
, IXGBE_VFDCA_RXCTRL(reg_idx
),
1293 IXGBE_DCA_RXCTRL_DESC_RRO_EN
);
1295 /* reset head and tail pointers */
1296 IXGBE_WRITE_REG(hw
, IXGBE_VFRDH(reg_idx
), 0);
1297 IXGBE_WRITE_REG(hw
, IXGBE_VFRDT(reg_idx
), 0);
1298 ring
->tail
= hw
->hw_addr
+ IXGBE_VFRDT(reg_idx
);
1300 /* reset ntu and ntc to place SW in sync with hardwdare */
1301 ring
->next_to_clean
= 0;
1302 ring
->next_to_use
= 0;
1304 ixgbevf_configure_srrctl(adapter
, reg_idx
);
1306 /* prevent DMA from exceeding buffer space available */
1307 rxdctl
&= ~IXGBE_RXDCTL_RLPMLMASK
;
1308 rxdctl
|= ring
->rx_buf_len
| IXGBE_RXDCTL_RLPML_EN
;
1309 rxdctl
|= IXGBE_RXDCTL_ENABLE
| IXGBE_RXDCTL_VME
;
1310 IXGBE_WRITE_REG(hw
, IXGBE_VFRXDCTL(reg_idx
), rxdctl
);
1312 ixgbevf_rx_desc_queue_enable(adapter
, ring
);
1313 ixgbevf_alloc_rx_buffers(ring
, ixgbevf_desc_unused(ring
));
/**
 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
{
	int i;

	ixgbevf_setup_psrtype(adapter);

	/* set_rx_buffer_len must be called before ring initialization */
	ixgbevf_set_rx_buffer_len(adapter);

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */
	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbevf_configure_rx_ring(adapter, adapter->rx_ring[i]);
}
static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
				   __be16 proto, u16 vid)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int err;

	spin_lock_bh(&adapter->mbx_lock);

	/* add VID to filter table */
	err = hw->mac.ops.set_vfta(hw, vid, 0, true);

	spin_unlock_bh(&adapter->mbx_lock);

	/* translate error return types so error makes sense */
	if (err == IXGBE_ERR_MBX)
		return -EIO;

	if (err == IXGBE_ERR_INVALID_ARGUMENT)
		return -EACCES;

	set_bit(vid, adapter->active_vlans);

	return err;
}
static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev,
				    __be16 proto, u16 vid)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int err = -EOPNOTSUPP;

	spin_lock_bh(&adapter->mbx_lock);

	/* remove VID from filter table */
	err = hw->mac.ops.set_vfta(hw, vid, 0, false);

	spin_unlock_bh(&adapter->mbx_lock);

	clear_bit(vid, adapter->active_vlans);

	return err;
}
static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
{
	u16 vid;

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		ixgbevf_vlan_rx_add_vid(adapter->netdev,
					htons(ETH_P_8021Q), vid);
}
static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int count = 0;

	if ((netdev_uc_count(netdev)) > 10) {
		pr_err("Too many unicast filters - No Space\n");
		return -ENOSPC;
	}

	if (!netdev_uc_empty(netdev)) {
		struct netdev_hw_addr *ha;
		netdev_for_each_uc_addr(ha, netdev) {
			hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
			udelay(200);
		}
	} else {
		/*
		 * If the list is empty then send message to PF driver to
		 * clear all macvlans on this VF.
		 */
		hw->mac.ops.set_uc_addr(hw, 0, NULL);
	}

	return count;
}
/**
 * ixgbevf_set_rx_mode - Multicast and unicast set
 * @netdev: network interface device structure
 *
 * The set_rx_method entry point is called whenever the multicast address
 * list, unicast address list or the network interface flags are updated.
 * This routine is responsible for configuring the hardware for proper
 * multicast mode and configuring requested unicast filters.
 **/
static void ixgbevf_set_rx_mode(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	spin_lock_bh(&adapter->mbx_lock);

	/* reprogram multicast list */
	hw->mac.ops.update_mc_addr_list(hw, netdev);

	ixgbevf_write_uc_addr_list(netdev);

	spin_unlock_bh(&adapter->mbx_lock);
}
static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
{
	int q_idx;
	struct ixgbevf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = adapter->q_vector[q_idx];
#ifdef CONFIG_NET_RX_BUSY_POLL
		ixgbevf_qv_init_lock(adapter->q_vector[q_idx]);
#endif
		napi_enable(&q_vector->napi);
	}
}
static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
{
	int q_idx;
	struct ixgbevf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = adapter->q_vector[q_idx];
		napi_disable(&q_vector->napi);
#ifdef CONFIG_NET_RX_BUSY_POLL
		while (!ixgbevf_qv_disable(adapter->q_vector[q_idx])) {
			pr_info("QV %d locked\n", q_idx);
			usleep_range(1000, 20000);
		}
#endif /* CONFIG_NET_RX_BUSY_POLL */
	}
}
static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	unsigned int def_q = 0;
	unsigned int num_tcs = 0;
	unsigned int num_rx_queues = 1;
	int err;

	spin_lock_bh(&adapter->mbx_lock);

	/* fetch queue configuration from the PF */
	err = ixgbevf_get_queues(hw, &num_tcs, &def_q);

	spin_unlock_bh(&adapter->mbx_lock);

	if (err)
		return err;

	if (num_tcs > 1) {
		/* update default Tx ring register index */
		adapter->tx_ring[0]->reg_idx = def_q;

		/* we need as many queues as traffic classes */
		num_rx_queues = num_tcs;
	}

	/* if we have a bad config abort request queue reset */
	if (adapter->num_rx_queues != num_rx_queues) {
		/* force mailbox timeout to prevent further messages */
		hw->mbx.timeout = 0;

		/* wait for watchdog to come around and bail us out */
		adapter->flags |= IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;
	}

	return 0;
}
static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
{
	ixgbevf_configure_dcb(adapter);

	ixgbevf_set_rx_mode(adapter->netdev);

	ixgbevf_restore_vlan(adapter);

	ixgbevf_configure_tx(adapter);
	ixgbevf_configure_rx(adapter);
}
static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
{
	/* Only save pre-reset stats if there are some */
	if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
		adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
			adapter->stats.base_vfgprc;
		adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
			adapter->stats.base_vfgptc;
		adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
			adapter->stats.base_vfgorc;
		adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
			adapter->stats.base_vfgotc;
		adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
			adapter->stats.base_vfmprc;
	}
}
static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
	adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
	adapter->stats.last_vfgorc |=
		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
	adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
	adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
	adapter->stats.last_vfgotc |=
		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
	adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);

	adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
	adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
	adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
	adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
	adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
}
static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int api[] = { ixgbe_mbox_api_11,
		      ixgbe_mbox_api_10,
		      ixgbe_mbox_api_unknown };
	int err = 0, idx = 0;

	spin_lock_bh(&adapter->mbx_lock);

	while (api[idx] != ixgbe_mbox_api_unknown) {
		err = ixgbevf_negotiate_api_version(hw, api[idx]);
		if (!err)
			break;
		idx++;
	}

	spin_unlock_bh(&adapter->mbx_lock);
}
static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;

	ixgbevf_configure_msix(adapter);

	spin_lock_bh(&adapter->mbx_lock);

	if (is_valid_ether_addr(hw->mac.addr))
		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
	else
		hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);

	spin_unlock_bh(&adapter->mbx_lock);

	clear_bit(__IXGBEVF_DOWN, &adapter->state);
	ixgbevf_napi_enable_all(adapter);

	/* enable transmits */
	netif_tx_start_all_queues(netdev);

	ixgbevf_save_reset_stats(adapter);
	ixgbevf_init_last_counter_stats(adapter);

	hw->mac.get_link_status = 1;
	mod_timer(&adapter->watchdog_timer, jiffies);
}
void ixgbevf_up(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	ixgbevf_configure(adapter);

	ixgbevf_up_complete(adapter);

	/* clear any pending interrupts, may auto mask */
	IXGBE_READ_REG(hw, IXGBE_VTEICR);

	ixgbevf_irq_enable(adapter);
}
/**
 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 **/
static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
{
	unsigned long size;
	unsigned int i;

	if (!rx_ring->rx_buffer_info)
		return;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct ixgbevf_rx_buffer *rx_buffer_info;

		rx_buffer_info = &rx_ring->rx_buffer_info[i];
		if (rx_buffer_info->dma) {
			dma_unmap_single(rx_ring->dev, rx_buffer_info->dma,
					 rx_ring->rx_buf_len,
					 DMA_FROM_DEVICE);
			rx_buffer_info->dma = 0;
		}
		if (rx_buffer_info->skb) {
			struct sk_buff *skb = rx_buffer_info->skb;
			rx_buffer_info->skb = NULL;
			do {
				struct sk_buff *this = skb;
				skb = IXGBE_CB(skb)->prev;
				dev_kfree_skb(this);
			} while (skb);
		}
	}

	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);
}
/**
 * ixgbevf_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/
static void ixgbevf_clean_tx_ring(struct ixgbevf_ring *tx_ring)
{
	struct ixgbevf_tx_buffer *tx_buffer_info;
	unsigned long size;
	unsigned int i;

	if (!tx_ring->tx_buffer_info)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
	}

	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_buffer_info, 0, size);

	memset(tx_ring->desc, 0, tx_ring->size);
}
/**
 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbevf_clean_rx_ring(adapter->rx_ring[i]);
}
/**
 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbevf_clean_tx_ring(adapter->tx_ring[i]);
}
void ixgbevf_down(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	/* signal that we are down to the interrupt handler */
	set_bit(__IXGBEVF_DOWN, &adapter->state);

	/* disable all enabled rx queues */
	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]);

	netif_tx_disable(netdev);

	msleep(10);

	netif_tx_stop_all_queues(netdev);

	ixgbevf_irq_disable(adapter);

	ixgbevf_napi_disable_all(adapter);

	del_timer_sync(&adapter->watchdog_timer);
	/* can't call flush scheduled work here because it can deadlock
	 * if linkwatch_event tries to acquire the rtnl_lock which we are
	 * holding */
	while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK)
		msleep(1);

	/* disable transmits in the hardware now that interrupts are off */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		u8 reg_idx = adapter->tx_ring[i]->reg_idx;

		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx),
				IXGBE_TXDCTL_SWFLSH);
	}

	netif_carrier_off(netdev);

	if (!pci_channel_offline(adapter->pdev))
		ixgbevf_reset(adapter);

	ixgbevf_clean_all_tx_rings(adapter);
	ixgbevf_clean_all_rx_rings(adapter);
}
void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
{
	WARN_ON(in_interrupt());

	while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
		msleep(1);

	ixgbevf_down(adapter);
	ixgbevf_up(adapter);

	clear_bit(__IXGBEVF_RESETTING, &adapter->state);
}
void ixgbevf_reset(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;

	if (hw->mac.ops.reset_hw(hw)) {
		hw_dbg(hw, "PF still resetting\n");
	} else {
		hw->mac.ops.init_hw(hw);
		ixgbevf_negotiate_api(adapter);
	}

	if (is_valid_ether_addr(adapter->hw.mac.addr)) {
		memcpy(netdev->dev_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
		memcpy(netdev->perm_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
	}
}
static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
					int vectors)
{
	int err = 0;
	int vector_threshold;

	/* We'll want at least 2 (vector_threshold):
	 * 1) TxQ[0] + RxQ[0] handler
	 * 2) Other (Link Status Change, etc.)
	 */
	vector_threshold = MIN_MSIX_COUNT;

	/* The more we get, the more we will assign to Tx/Rx Cleanup
	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting irq's.
	 */
	while (vectors >= vector_threshold) {
		err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
				      vectors);
		if (!err || err < 0) /* Success or a nasty failure. */
			break;
		else /* err == number of vectors we should try again with */
			vectors = err;
	}

	if (vectors < vector_threshold)
		err = -ENOMEM;

	if (err) {
		dev_err(&adapter->pdev->dev,
			"Unable to allocate MSI-X interrupts\n");
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else {
		/*
		 * Adjust for only the vectors we'll use, which is minimum
		 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
		 * vectors we were allocated.
		 */
		adapter->num_msix_vectors = vectors;
	}

	return err;
}
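/*
 * Note: with this older pci_enable_msix() interface a positive return
 * value is the number of vectors that could have been allocated, which
 * is why the loop above retries with that count; e.g. a request for 5
 * vectors on a host that can only grant 3 returns 3, and the next
 * iteration asks for exactly 3.
 */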
/**
 * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
 * @adapter: board private structure to initialize
 *
 * This is the top level queue allocation routine.  The order here is very
 * important, starting with the "most" number of features turned on at once,
 * and ending with the smallest set of features.  This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fallthrough conditions.
 *
 **/
static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	unsigned int def_q = 0;
	unsigned int num_tcs = 0;
	int err;

	/* Start with base case */
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;

	spin_lock_bh(&adapter->mbx_lock);

	/* fetch queue configuration from the PF */
	err = ixgbevf_get_queues(hw, &num_tcs, &def_q);

	spin_unlock_bh(&adapter->mbx_lock);

	if (err)
		return;

	/* we need as many queues as traffic classes */
	if (num_tcs > 1)
		adapter->num_rx_queues = num_tcs;
}
/**
 * ixgbevf_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.  The polling_netdev array is
 * intended for Multiqueue, but should work fine with a single queue.
 **/
static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
{
	struct ixgbevf_ring *ring;
	int rx = 0, tx = 0;

	for (; tx < adapter->num_tx_queues; tx++) {
		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
		if (!ring)
			goto err_allocation;

		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;
		ring->count = adapter->tx_ring_count;
		ring->queue_index = tx;
		ring->reg_idx = tx;

		adapter->tx_ring[tx] = ring;
	}

	for (; rx < adapter->num_rx_queues; rx++) {
		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
		if (!ring)
			goto err_allocation;

		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		ring->count = adapter->rx_ring_count;
		ring->queue_index = rx;
		ring->reg_idx = rx;

		adapter->rx_ring[rx] = ring;
	}

	return 0;

err_allocation:
	while (tx) {
		kfree(adapter->tx_ring[--tx]);
		adapter->tx_ring[tx] = NULL;
	}

	while (rx) {
		kfree(adapter->rx_ring[--rx]);
		adapter->rx_ring[rx] = NULL;
	}
	return -ENOMEM;
}
/**
 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err = 0;
	int vector, v_budget;

	/*
	 * It's easy to be greedy for MSI-X vectors, but it really
	 * doesn't do us much good if we have a lot more vectors
	 * than CPU's.  So let's be conservative and only ask for
	 * (roughly) the same number of vectors as there are CPU's.
	 * The default is to use pairs of vectors.
	 */
	v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
	v_budget = min_t(int, v_budget, num_online_cpus());
	v_budget += NON_Q_VECTORS;

	/* A failure in MSI-X entry allocation isn't fatal, but it does
	 * mean we disable MSI-X capabilities of the adapter. */
	adapter->msix_entries = kcalloc(v_budget,
					sizeof(struct msix_entry), GFP_KERNEL);
	if (!adapter->msix_entries) {
		err = -ENOMEM;
		goto out;
	}

	for (vector = 0; vector < v_budget; vector++)
		adapter->msix_entries[vector].entry = vector;

	err = ixgbevf_acquire_msix_vectors(adapter, v_budget);
	if (err)
		goto out;

	err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
	if (err)
		goto out;

	err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);

out:
	return err;
}
/**
 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
{
	int q_idx, num_q_vectors;
	struct ixgbevf_q_vector *q_vector;

	num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
		if (!q_vector)
			goto err_out;
		q_vector->adapter = adapter;
		q_vector->v_idx = q_idx;
		netif_napi_add(adapter->netdev, &q_vector->napi,
			       ixgbevf_poll, 64);
#ifdef CONFIG_NET_RX_BUSY_POLL
		napi_hash_add(&q_vector->napi);
#endif
		adapter->q_vector[q_idx] = q_vector;
	}

	return 0;

err_out:
	while (q_idx) {
		q_idx--;
		q_vector = adapter->q_vector[q_idx];
#ifdef CONFIG_NET_RX_BUSY_POLL
		napi_hash_del(&q_vector->napi);
#endif
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
		adapter->q_vector[q_idx] = NULL;
	}
	return -ENOMEM;
}
/**
 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
{
	int q_idx, num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];

		adapter->q_vector[q_idx] = NULL;
#ifdef CONFIG_NET_RX_BUSY_POLL
		napi_hash_del(&q_vector->napi);
#endif
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
	}
}
/**
 * ixgbevf_reset_interrupt_capability - Reset MSIX setup
 * @adapter: board private structure
 *
 **/
static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
{
	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
}
/**
 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
 * @adapter: board private structure to initialize
 *
 **/
static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
{
	int err;

	/* Number of supported queues */
	ixgbevf_set_num_queues(adapter);

	err = ixgbevf_set_interrupt_capability(adapter);
	if (err) {
		hw_dbg(&adapter->hw,
		       "Unable to setup interrupt capabilities\n");
		goto err_set_interrupt;
	}

	err = ixgbevf_alloc_q_vectors(adapter);
	if (err) {
		hw_dbg(&adapter->hw, "Unable to allocate memory for queue "
		       "vectors\n");
		goto err_alloc_q_vectors;
	}

	err = ixgbevf_alloc_queues(adapter);
	if (err) {
		pr_err("Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, "
	       "Tx Queue count = %u\n",
	       (adapter->num_rx_queues > 1) ? "Enabled" :
	       "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);

	set_bit(__IXGBEVF_DOWN, &adapter->state);

	return 0;
err_alloc_queues:
	ixgbevf_free_q_vectors(adapter);
err_alloc_q_vectors:
	ixgbevf_reset_interrupt_capability(adapter);
err_set_interrupt:
	return err;
}
/**
 * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @adapter: board private structure to clear interrupt scheme on
 *
 * We go through and clear interrupt specific resources and reset the structure
 * to pre-load conditions
 **/
static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		kfree(adapter->tx_ring[i]);
		adapter->tx_ring[i] = NULL;
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		kfree(adapter->rx_ring[i]);
		adapter->rx_ring[i] = NULL;
	}

	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;

	ixgbevf_free_q_vectors(adapter);
	ixgbevf_reset_interrupt_capability(adapter);
}
/**
 * ixgbevf_sw_init - Initialize general software structures
 * (struct ixgbevf_adapter)
 * @adapter: board private structure to initialize
 *
 * ixgbevf_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	struct net_device *netdev = adapter->netdev;
	int err;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	hw->mbx.ops.init_params(hw);

	/* assume legacy case in which PF would only give VF 2 queues */
	hw->mac.max_tx_queues = 2;
	hw->mac.max_rx_queues = 2;

	/* lock to protect mailbox accesses */
	spin_lock_init(&adapter->mbx_lock);

	err = hw->mac.ops.reset_hw(hw);
	if (err) {
		dev_info(&pdev->dev,
			 "PF still in reset state.  Is the PF interface up?\n");
	} else {
		err = hw->mac.ops.init_hw(hw);
		if (err) {
			pr_err("init_shared_code failed: %d\n", err);
			goto out;
		}
		ixgbevf_negotiate_api(adapter);
		err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
		if (err)
			dev_info(&pdev->dev, "Error reading MAC address\n");
		else if (is_zero_ether_addr(adapter->hw.mac.addr))
			dev_info(&pdev->dev,
				 "MAC address not assigned by administrator.\n");
		memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
	}

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		dev_info(&pdev->dev, "Assigning random MAC address\n");
		eth_hw_addr_random(netdev);
		memcpy(hw->mac.addr, netdev->dev_addr, netdev->addr_len);
	}

	/* Enable dynamic interrupt throttling rates */
	adapter->rx_itr_setting = 1;
	adapter->tx_itr_setting = 1;

	/* set default ring sizes */
	adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
	adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;

	set_bit(__IXGBEVF_DOWN, &adapter->state);
	return 0;

out:
	return err;
}
#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter)	\
	{							\
		u32 current_counter = IXGBE_READ_REG(hw, reg);	\
		if (current_counter < last_counter)		\
			counter += 0x100000000LL;		\
		last_counter = current_counter;			\
		counter &= 0xFFFFFFFF00000000LL;		\
		counter |= current_counter;			\
	}

#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
	{								 \
		u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb);	 \
		u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb);	 \
		u64 current_counter = (current_counter_msb << 32) |	 \
			current_counter_lsb;				 \
		if (current_counter < last_counter)			 \
			counter += 0x1000000000LL;			 \
		last_counter = current_counter;				 \
		counter &= 0xFFFFFFF000000000LL;			 \
		counter |= current_counter;				 \
	}
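/*
 * Illustrative sketch (compiled out, not driver code): what the 32-bit
 * rollover macro above does.  The hardware register wraps at 2^32, so
 * whenever a fresh reading is smaller than the previous one a wrap must
 * have occurred and 2^32 is credited to the accumulated high half before
 * the low 32 bits are replaced.  Example: last = 0xFFFFFFF0 and
 * current = 0x00000010 advances the 64-bit counter by 0x20.
 */
#if 0
static u64 example_extend_counter(u32 current_counter, u32 *last_counter,
				  u64 counter)
{
	if (current_counter < *last_counter)	/* wrapped since last read */
		counter += 0x100000000LL;
	*last_counter = current_counter;
	counter &= 0xFFFFFFFF00000000LL;	/* keep accumulated high half */
	counter |= current_counter;		/* splice in fresh low half */
	return counter;
}
#endif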
/**
 * ixgbevf_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 **/
void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	if (!adapter->link_up)
		return;

	UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
				adapter->stats.vfgprc);
	UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
				adapter->stats.vfgptc);
	UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
				adapter->stats.last_vfgorc,
				adapter->stats.vfgorc);
	UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
				adapter->stats.last_vfgotc,
				adapter->stats.vfgotc);
	UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
				adapter->stats.vfmprc);

	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->hw_csum_rx_error +=
			adapter->rx_ring[i]->hw_csum_rx_error;
		adapter->hw_csum_rx_good +=
			adapter->rx_ring[i]->hw_csum_rx_good;
		adapter->rx_ring[i]->hw_csum_rx_error = 0;
		adapter->rx_ring[i]->hw_csum_rx_good = 0;
	}
}
/**
 * ixgbevf_watchdog - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void ixgbevf_watchdog(unsigned long data)
{
	struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eics = 0;
	int i;

	/*
	 * Do the watchdog outside of interrupt context due to the lovely
	 * delays that some of the newer hardware requires
	 */

	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		goto watchdog_short_circuit;

	/* get one bit for every active tx/rx interrupt vector */
	for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
		struct ixgbevf_q_vector *qv = adapter->q_vector[i];
		if (qv->rx.ring || qv->tx.ring)
			eics |= 1 << i;
	}

	IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics);

watchdog_short_circuit:
	schedule_work(&adapter->watchdog_task);
}
/**
 * ixgbevf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void ixgbevf_tx_timeout(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	schedule_work(&adapter->reset_task);
}
static void ixgbevf_reset_task(struct work_struct *work)
{
	struct ixgbevf_adapter *adapter;
	adapter = container_of(work, struct ixgbevf_adapter, reset_task);

	/* If we're already down or resetting, just bail */
	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
	    test_bit(__IXGBEVF_RESETTING, &adapter->state))
		return;

	adapter->tx_timeout_count++;

	ixgbevf_reinit_locked(adapter);
}
/**
 * ixgbevf_watchdog_task - worker thread to bring link up
 * @work: pointer to work_struct containing our data
 **/
static void ixgbevf_watchdog_task(struct work_struct *work)
{
	struct ixgbevf_adapter *adapter = container_of(work,
						       struct ixgbevf_adapter,
						       watchdog_task);
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 link_speed = adapter->link_speed;
	bool link_up = adapter->link_up;
	s32 need_reset;

	ixgbevf_queue_reset_subtask(adapter);

	adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;

	/*
	 * Always check the link on the watchdog because we have
	 * no LSC interrupt
	 */
	spin_lock_bh(&adapter->mbx_lock);

	need_reset = hw->mac.ops.check_link(hw, &link_speed, &link_up, false);

	spin_unlock_bh(&adapter->mbx_lock);

	if (need_reset) {
		adapter->link_up = link_up;
		adapter->link_speed = link_speed;
		netif_carrier_off(netdev);
		netif_tx_stop_all_queues(netdev);
		schedule_work(&adapter->reset_task);
		goto pf_has_reset;
	}
	adapter->link_up = link_up;
	adapter->link_speed = link_speed;

	if (link_up) {
		if (!netif_carrier_ok(netdev)) {
			char *link_speed_string;
			switch (link_speed) {
			case IXGBE_LINK_SPEED_10GB_FULL:
				link_speed_string = "10 Gbps";
				break;
			case IXGBE_LINK_SPEED_1GB_FULL:
				link_speed_string = "1 Gbps";
				break;
			case IXGBE_LINK_SPEED_100_FULL:
				link_speed_string = "100 Mbps";
				break;
			default:
				link_speed_string = "unknown speed";
				break;
			}
			dev_info(&adapter->pdev->dev,
				 "NIC Link is Up, %s\n", link_speed_string);
			netif_carrier_on(netdev);
			netif_tx_wake_all_queues(netdev);
		}
	} else {
		adapter->link_up = false;
		adapter->link_speed = 0;
		if (netif_carrier_ok(netdev)) {
			dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
			netif_carrier_off(netdev);
			netif_tx_stop_all_queues(netdev);
		}
	}

	ixgbevf_update_stats(adapter);

pf_has_reset:
	/* Reset the timer */
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer,
			  round_jiffies(jiffies + (2 * HZ)));

	adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
}
/**
 * ixgbevf_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void ixgbevf_free_tx_resources(struct ixgbevf_ring *tx_ring)
{
	ixgbevf_clean_tx_ring(tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!tx_ring->desc)
		return;

	dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc,
			  tx_ring->dma);

	tx_ring->desc = NULL;
}
/**
 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		if (adapter->tx_ring[i]->desc)
			ixgbevf_free_tx_resources(adapter->tx_ring[i]);
}
/**
 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring)
{
	int size;

	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
	tx_ring->tx_buffer_info = vzalloc(size);
	if (!tx_ring->tx_buffer_info)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = dma_alloc_coherent(tx_ring->dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc)
		goto err;

	return 0;

err:
	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;
	dev_err(tx_ring->dev, "Unable to allocate memory for the Tx descriptor ring\n");
	return -ENOMEM;
}
/**
 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = ixgbevf_setup_tx_resources(adapter->tx_ring[i]);
		if (!err)
			continue;
		hw_dbg(&adapter->hw,
		       "Allocation for Tx Queue %u failed\n", i);
		break;
	}

	return err;
}
/**
 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int ixgbevf_setup_rx_resources(struct ixgbevf_ring *rx_ring)
{
	int size;

	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
	rx_ring->rx_buffer_info = vzalloc(size);
	if (!rx_ring->rx_buffer_info)
		goto err;

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = dma_alloc_coherent(rx_ring->dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);
	if (!rx_ring->desc)
		goto err;

	return 0;
err:
	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;
	dev_err(rx_ring->dev, "Unable to allocate memory for the Rx descriptor ring\n");
	return -ENOMEM;
}
/**
 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = ixgbevf_setup_rx_resources(adapter->rx_ring[i]);
		if (!err)
			continue;
		hw_dbg(&adapter->hw,
		       "Allocation for Rx Queue %u failed\n", i);
		break;
	}

	return err;
}
/**
 * ixgbevf_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void ixgbevf_free_rx_resources(struct ixgbevf_ring *rx_ring)
{
	ixgbevf_clean_rx_ring(rx_ring);

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc,
			  rx_ring->dma);

	rx_ring->desc = NULL;
}
/**
 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		if (adapter->rx_ring[i]->desc)
			ixgbevf_free_rx_resources(adapter->rx_ring[i]);
}
/**
 * ixgbevf_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int ixgbevf_open(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int err;

	/* A previous failure to open the device because of a lack of
	 * available MSIX vector resources may have reset the number
	 * of msix vectors variable to zero.  The only way to recover
	 * is to unload/reload the driver and hope that the system has
	 * been able to recover some MSIX vector resources.
	 */
	if (!adapter->num_msix_vectors)
		return -ENOMEM;

	/* disallow open during test */
	if (test_bit(__IXGBEVF_TESTING, &adapter->state))
		return -EBUSY;

	if (hw->adapter_stopped) {
		ixgbevf_reset(adapter);
		/* if adapter is still stopped then PF isn't up and
		 * the vf can't start. */
		if (hw->adapter_stopped) {
			err = IXGBE_ERR_MBX;
			pr_err("Unable to start - perhaps the PF Driver isn't "
			       "up yet\n");
			goto err_setup_reset;
		}
	}

	/* allocate transmit descriptors */
	err = ixgbevf_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = ixgbevf_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	ixgbevf_configure(adapter);

	/*
	 * Map the Tx/Rx rings to the vectors we were allotted.
	 * if request_irq will be called in this function map_rings
	 * must be called *before* up_complete
	 */
	ixgbevf_map_rings_to_vectors(adapter);

	ixgbevf_up_complete(adapter);

	/* clear any pending interrupts, may auto mask */
	IXGBE_READ_REG(hw, IXGBE_VTEICR);
	err = ixgbevf_request_irq(adapter);
	if (err)
		goto err_req_irq;

	ixgbevf_irq_enable(adapter);

	return 0;

err_req_irq:
	ixgbevf_down(adapter);
err_setup_rx:
	ixgbevf_free_all_rx_resources(adapter);
err_setup_tx:
	ixgbevf_free_all_tx_resources(adapter);
	ixgbevf_reset(adapter);

err_setup_reset:
	return err;
}
/**
 * ixgbevf_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int ixgbevf_close(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	ixgbevf_down(adapter);
	ixgbevf_free_irq(adapter);

	ixgbevf_free_all_tx_resources(adapter);
	ixgbevf_free_all_rx_resources(adapter);

	return 0;
}
static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;

	if (!(adapter->flags & IXGBEVF_FLAG_QUEUE_RESET_REQUESTED))
		return;

	adapter->flags &= ~IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;

	/* if interface is down do nothing */
	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
	    test_bit(__IXGBEVF_RESETTING, &adapter->state))
		return;

	/* Hardware has to reinitialize queues and interrupts to
	 * match packet buffer alignment. Unfortunately, the
	 * hardware is not flexible enough to do this dynamically.
	 */
	if (netif_running(dev))
		ixgbevf_close(dev);

	ixgbevf_clear_interrupt_scheme(adapter);
	ixgbevf_init_interrupt_scheme(adapter);

	if (netif_running(dev))
		ixgbevf_open(dev);
}
static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
				u32 vlan_macip_lens, u32 type_tucmd,
				u32 mss_l4len_idx)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	u16 i = tx_ring->next_to_use;

	context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

	context_desc->vlan_macip_lens	= cpu_to_le32(vlan_macip_lens);
	context_desc->seqnum_seed	= 0;
	context_desc->type_tucmd_mlhl	= cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx	= cpu_to_le32(mss_l4len_idx);
}
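/*
 * Illustrative sketch (compiled out, not driver code): how the callers
 * below pack vlan_macip_lens for the context descriptor, assuming the
 * usual 82599 layout (IP header length in the low bits, MAC header
 * length at IXGBE_ADVTXD_MACLEN_SHIFT, VLAN tag in the upper bits).  A
 * plain Ethernet + IPv4 frame gives 20 | (14 << IXGBE_ADVTXD_MACLEN_SHIFT).
 */
#if 0
static u32 example_vlan_macip_lens(u32 ip_hlen, u32 mac_hlen,
				   u32 vlan_tx_flags)
{
	u32 w = ip_hlen;			/* skb_network_header_len() */

	w |= mac_hlen << IXGBE_ADVTXD_MACLEN_SHIFT;	/* skb_network_offset() */
	w |= vlan_tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;	/* tag in upper bits */
	return w;
}
#endif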
static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
		       struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
{
	u32 vlan_macip_lens, type_tucmd;
	u32 mss_l4len_idx, l4len;

	if (!skb_is_gso(skb))
		return 0;

	if (skb_header_cloned(skb)) {
		int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (err)
			return err;
	}

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);
		iph->tot_len = 0;
		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
							 iph->daddr, 0,
							 IPPROTO_TCP, 0);
		type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
	} else if (skb_is_gso_v6(skb)) {
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check =
		    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
				     &ipv6_hdr(skb)->daddr,
				     0, IPPROTO_TCP, 0);
	}

	/* compute header lengths */
	l4len = tcp_hdrlen(skb);
	*hdr_len = skb_transport_offset(skb) + l4len;

	/* mss_l4len_id: use 1 as index for TSO */
	mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
	mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;

	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
	vlan_macip_lens = skb_network_header_len(skb);
	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;

	ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
			    type_tucmd, mss_l4len_idx);

	return 1;
}
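/*
 * Illustrative sketch (compiled out, not driver code): the mss_l4len_idx
 * word built in ixgbevf_tso() above.  It packs the TCP header length,
 * the MSS and the context index; index 1 is reserved for TSO as the
 * in-line comment notes, e.g. a 20-byte TCP header with a 1448-byte MSS.
 */
#if 0
static u32 example_mss_l4len_idx(u32 l4len, u32 mss)
{
	u32 w = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;	/* e.g. 20 */

	w |= mss << IXGBE_ADVTXD_MSS_SHIFT;		/* e.g. 1448 */
	w |= 1 << IXGBE_ADVTXD_IDX_SHIFT;		/* context index 1 = TSO */
	return w;
}
#endif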
static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
			    struct sk_buff *skb, u32 tx_flags)
{
	u32 vlan_macip_lens = 0;
	u32 mss_l4len_idx = 0;
	u32 type_tucmd = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		u8 l4_hdr = 0;
		switch (skb->protocol) {
		case __constant_htons(ETH_P_IP):
			vlan_macip_lens |= skb_network_header_len(skb);
			type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
			l4_hdr = ip_hdr(skb)->protocol;
			break;
		case __constant_htons(ETH_P_IPV6):
			vlan_macip_lens |= skb_network_header_len(skb);
			l4_hdr = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			if (unlikely(net_ratelimit())) {
				dev_warn(tx_ring->dev,
					 "partial checksum but proto=%x!\n",
					 skb->protocol);
			}
			break;
		}

		switch (l4_hdr) {
		case IPPROTO_TCP:
			type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
			mss_l4len_idx = tcp_hdrlen(skb) <<
					IXGBE_ADVTXD_L4LEN_SHIFT;
			break;
		case IPPROTO_SCTP:
			type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
			mss_l4len_idx = sizeof(struct sctphdr) <<
					IXGBE_ADVTXD_L4LEN_SHIFT;
			break;
		case IPPROTO_UDP:
			mss_l4len_idx = sizeof(struct udphdr) <<
					IXGBE_ADVTXD_L4LEN_SHIFT;
			break;
		default:
			if (unlikely(net_ratelimit())) {
				dev_warn(tx_ring->dev,
					 "partial checksum but l4 proto=%x!\n",
					 l4_hdr);
			}
			break;
		}
	}

	/* vlan_macip_lens: MACLEN, VLAN tag */
	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;

	ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
			    type_tucmd, mss_l4len_idx);

	return (skb->ip_summed == CHECKSUM_PARTIAL);
}
static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
			  struct sk_buff *skb, u32 tx_flags)
{
	struct ixgbevf_tx_buffer *tx_buffer_info;
	unsigned int len;
	unsigned int total = skb->len;
	unsigned int offset = 0, size;
	int count = 0;
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int f;
	int i;

	i = tx_ring->next_to_use;

	len = min(skb_headlen(skb), total);
	while (len) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);

		tx_buffer_info->length = size;
		tx_buffer_info->mapped_as_page = false;
		tx_buffer_info->dma = dma_map_single(tx_ring->dev,
						     skb->data + offset,
						     size, DMA_TO_DEVICE);
		if (dma_mapping_error(tx_ring->dev, tx_buffer_info->dma))
			goto dma_error;

		len -= size;
		total -= size;
		offset += size;
		count++;
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	for (f = 0; f < nr_frags; f++) {
		const struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];
		len = min((unsigned int)skb_frag_size(frag), total);
		offset = 0;

		while (len) {
			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);

			tx_buffer_info->length = size;
			tx_buffer_info->dma =
				skb_frag_dma_map(tx_ring->dev, frag,
						 offset, size, DMA_TO_DEVICE);
			if (dma_mapping_error(tx_ring->dev,
					      tx_buffer_info->dma))
				goto dma_error;
			tx_buffer_info->mapped_as_page = true;

			len -= size;
			total -= size;
			offset += size;
			count++;
			i++;
			if (i == tx_ring->count)
				i = 0;
		}
		if (total == 0)
			break;
	}

	if (i == 0)
		i = tx_ring->count - 1;
	else
		i = i - 1;
	tx_ring->tx_buffer_info[i].skb = skb;

	return count;

dma_error:
	dev_err(tx_ring->dev, "TX DMA map failed\n");

	/* clear timestamp and dma mappings for failed tx_buffer_info map */
	tx_buffer_info->dma = 0;
	count--;

	/* clear timestamp and dma mappings for remaining portion of packet */
	while (count >= 0) {
		count--;
		i--;
		if (i < 0)
			i += tx_ring->count;
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
	}

	return count;
}
static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags,
			     int count, unsigned int first, u32 paylen,
			     u8 hdr_len)
{
	union ixgbe_adv_tx_desc *tx_desc = NULL;
	struct ixgbevf_tx_buffer *tx_buffer_info;
	u32 olinfo_status = 0, cmd_type_len = 0;
	int i;

	u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;

	cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;

	cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;

	if (tx_flags & IXGBE_TX_FLAGS_VLAN)
		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;

	if (tx_flags & IXGBE_TX_FLAGS_CSUM)
		olinfo_status |= IXGBE_ADVTXD_POPTS_TXSM;

	if (tx_flags & IXGBE_TX_FLAGS_TSO) {
		cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;

		/* use index 1 context for tso */
		olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
		if (tx_flags & IXGBE_TX_FLAGS_IPV4)
			olinfo_status |= IXGBE_ADVTXD_POPTS_IXSM;
	}

	/*
	 * Check Context must be set if Tx switch is enabled, which it
	 * always is for case where virtual functions are running
	 */
	olinfo_status |= IXGBE_ADVTXD_CC;

	olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);

	i = tx_ring->next_to_use;
	while (count--) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
		tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
		tx_desc->read.cmd_type_len =
			cpu_to_le32(cmd_type_len | tx_buffer_info->length);
		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);

	tx_ring->tx_buffer_info[first].time_stamp = jiffies;

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();

	tx_ring->tx_buffer_info[first].next_to_watch = tx_desc;
	tx_ring->next_to_use = i;
}
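/*
 * Illustrative sketch (compiled out, not driver code): the producer-side
 * publish order used above.  Descriptors are written first, wmb() orders
 * those writes, and only afterwards does the caller ring the doorbell
 * (the writel() to tx_ring->tail in ixgbevf_xmit_frame), so the device
 * can never fetch a half-written descriptor.
 */
#if 0
static void example_publish_tx_desc(struct ixgbevf_ring *ring,
				    union ixgbe_adv_tx_desc *desc, u32 cmd)
{
	desc->read.cmd_type_len |= cpu_to_le32(cmd);	/* 1. fill descriptor */
	wmb();						/* 2. order the writes */
	writel(ring->next_to_use, ring->tail);		/* 3. ring the doorbell */
}
#endif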
static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available. */
	if (likely(ixgbevf_desc_unused(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
	++tx_ring->tx_stats.restart_queue;

	return 0;
}
static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
{
	if (likely(ixgbevf_desc_unused(tx_ring) >= size))
		return 0;
	return __ixgbevf_maybe_stop_tx(tx_ring, size);
}
static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbevf_ring *tx_ring;
	int tso;
	u16 r_idx = skb->queue_mapping;
	unsigned int tx_flags = 0;
	u16 first;
	u8 hdr_len = 0;
	u16 count = TXD_USE_COUNT(skb_headlen(skb));
#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
	unsigned short f;
#endif
	u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL);
	if (!dst_mac || is_link_local_ether_addr(dst_mac)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	tx_ring = adapter->tx_ring[r_idx];

	/*
	 * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
	 *       + 2 desc gap to keep tail from touching head,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
#else
	count += skb_shinfo(skb)->nr_frags;
#endif
	if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}

	if (vlan_tx_tag_present(skb)) {
		tx_flags |= vlan_tx_tag_get(skb);
		tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= IXGBE_TX_FLAGS_VLAN;
	}

	first = tx_ring->next_to_use;

	if (skb->protocol == htons(ETH_P_IP))
		tx_flags |= IXGBE_TX_FLAGS_IPV4;
	tso = ixgbevf_tso(tx_ring, skb, tx_flags, &hdr_len);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (tso)
		tx_flags |= IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_CSUM;
	else if (ixgbevf_tx_csum(tx_ring, skb, tx_flags))
		tx_flags |= IXGBE_TX_FLAGS_CSUM;

	ixgbevf_tx_queue(tx_ring, tx_flags,
			 ixgbevf_tx_map(tx_ring, skb, tx_flags),
			 first, skb->len, hdr_len);

	writel(tx_ring->next_to_use, tx_ring->tail);

	ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);

	return NETDEV_TX_OK;
}
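/*
 * Illustrative sketch (compiled out, not driver code): the worst-case
 * descriptor budget checked above.  The linear area costs one descriptor
 * per IXGBE_MAX_DATA_PER_TXD bytes, each fragment costs at least one,
 * and "+ 3" covers one context descriptor plus the two-descriptor gap
 * that keeps tail from touching head.
 */
#if 0
static u16 example_desc_needed(const struct sk_buff *skb)
{
	u16 count = TXD_USE_COUNT(skb_headlen(skb));	/* linear data */

	count += skb_shinfo(skb)->nr_frags;	/* PAGE_SIZE <= max-data case */
	return count + 3;			/* context desc + tail/head gap */
}
#endif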
/**
 * ixgbevf_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbevf_set_mac(struct net_device *netdev, void *p)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	spin_lock_bh(&adapter->mbx_lock);

	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);

	spin_unlock_bh(&adapter->mbx_lock);

	return 0;
}
/**
 * ixgbevf_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
	int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;

	switch (adapter->hw.api_version) {
	case ixgbe_mbox_api_11:
		max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
		break;
	default:
		if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
			max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
		break;
	}

	/* MTU < 68 is an error and causes problems on some kernels */
	if ((new_mtu < 68) || (max_frame > max_possible_frame))
		return -EINVAL;

	hw_dbg(&adapter->hw, "changing MTU from %d to %d\n",
	       netdev->mtu, new_mtu);
	/* must set new MTU before calling down or up */
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		ixgbevf_reinit_locked(adapter);

	return 0;
}
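/*
 * Illustrative sketch (compiled out, not driver code): the frame-size
 * arithmetic above.  The MTU covers only the L3 payload; the wire frame
 * adds the Ethernet header and FCS, so new_mtu = 1500 yields
 * max_frame = 1500 + 14 + 4 = 1518, which must fit the limit the
 * negotiated mailbox API version allows.
 */
#if 0
static int example_max_frame(int new_mtu)
{
	return new_mtu + ETH_HLEN + ETH_FCS_LEN;	/* 1500 -> 1518 */
}
#endif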
static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		ixgbevf_down(adapter);
		ixgbevf_free_irq(adapter);
		ixgbevf_free_all_tx_resources(adapter);
		ixgbevf_free_all_rx_resources(adapter);
		rtnl_unlock();
	}

	ixgbevf_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;

#endif
	pci_disable_device(pdev);

	return 0;
}
#ifdef CONFIG_PM
static int ixgbevf_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	u32 err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/*
	 * pci_restore_state clears dev->state_saved so call
	 * pci_save_state to restore it.
	 */
	pci_save_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	ixgbevf_reset(adapter);

	rtnl_lock();
	err = ixgbevf_init_interrupt_scheme(adapter);
	rtnl_unlock();
	if (err) {
		dev_err(&pdev->dev, "Cannot initialize interrupts\n");
		return err;
	}

	if (netif_running(netdev)) {
		err = ixgbevf_open(netdev);
		if (err)
			return err;
	}

	netif_device_attach(netdev);

	return err;
}

#endif /* CONFIG_PM */
static void ixgbevf_shutdown(struct pci_dev *pdev)
{
	ixgbevf_suspend(pdev, PMSG_SUSPEND);
}
static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
						   struct rtnl_link_stats64 *stats)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	unsigned int start;
	u64 bytes, packets;
	const struct ixgbevf_ring *ring;
	int i;

	ixgbevf_update_stats(adapter);

	stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		ring = adapter->rx_ring[i];
		do {
			start = u64_stats_fetch_begin_bh(&ring->syncp);
			bytes = ring->stats.bytes;
			packets = ring->stats.packets;
		} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
		stats->rx_bytes += bytes;
		stats->rx_packets += packets;
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		ring = adapter->tx_ring[i];
		do {
			start = u64_stats_fetch_begin_bh(&ring->syncp);
			bytes = ring->stats.bytes;
			packets = ring->stats.packets;
		} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
		stats->tx_bytes += bytes;
		stats->tx_packets += packets;
	}

	return stats;
}
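/*
 * Illustrative sketch (compiled out, not driver code): the u64_stats
 * snapshot loop used above.  On 32-bit SMP a 64-bit counter can tear
 * between its two halves, so the reader retries until it observes a
 * consistent begin/retry sequence count; on 64-bit kernels the helpers
 * compile away.  The function name is hypothetical.
 */
#if 0
static void example_read_ring_stats(const struct ixgbevf_ring *ring,
				    u64 *bytes, u64 *packets)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_bh(&ring->syncp);
		*bytes = ring->stats.bytes;
		*packets = ring->stats.packets;
	} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
}
#endif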
static const struct net_device_ops ixgbevf_netdev_ops = {
	.ndo_open		= ixgbevf_open,
	.ndo_stop		= ixgbevf_close,
	.ndo_start_xmit		= ixgbevf_xmit_frame,
	.ndo_set_rx_mode	= ixgbevf_set_rx_mode,
	.ndo_get_stats64	= ixgbevf_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= ixgbevf_set_mac,
	.ndo_change_mtu		= ixgbevf_change_mtu,
	.ndo_tx_timeout		= ixgbevf_tx_timeout,
	.ndo_vlan_rx_add_vid	= ixgbevf_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ixgbevf_vlan_rx_kill_vid,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= ixgbevf_busy_poll_recv,
#endif
};
static void ixgbevf_assign_netdev_ops(struct net_device *dev)
{
	dev->netdev_ops = &ixgbevf_netdev_ops;
	ixgbevf_set_ethtool_ops(dev);
	dev->watchdog_timeo = 5 * HZ;
}
/**
 * ixgbevf_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgbevf_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct ixgbevf_adapter *adapter = NULL;
	struct ixgbe_hw *hw = NULL;
	const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
	static int cards_found;
	int err, pci_using_dac;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
	} else {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "No usable DMA "
				"configuration, aborting\n");
			goto err_dma;
		}
		pci_using_dac = 0;
	}

	err = pci_request_regions(pdev, ixgbevf_driver_name);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
		goto err_pci_reg;
	}

	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
				   MAX_TX_QUEUES);
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);

	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

	/*
	 * call save state here in standalone driver because it relies on
	 * adapter struct to exist, and needs to call netdev_priv
	 */
	pci_save_state(pdev);

	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
			      pci_resource_len(pdev, 0));
	if (!hw->hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	ixgbevf_assign_netdev_ops(netdev);

	adapter->bd_number = cards_found;

	/* Setup hw api */
	memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
	hw->mac.type  = ii->mac;

	memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
	       sizeof(struct ixgbe_mbx_operations));

	/* setup the private structure */
	err = ixgbevf_sw_init(adapter);
	if (err)
		goto err_sw_init;

	/* The HW MAC address was set and/or determined in sw_init */
	if (!is_valid_ether_addr(netdev->dev_addr)) {
		pr_err("invalid MAC address\n");
		err = -EIO;
		goto err_sw_init;
	}

	netdev->hw_features = NETIF_F_SG |
			      NETIF_F_IP_CSUM |
			      NETIF_F_IPV6_CSUM |
			      NETIF_F_TSO |
			      NETIF_F_TSO6 |
			      NETIF_F_RXCSUM;

	netdev->features = netdev->hw_features |
			   NETIF_F_HW_VLAN_CTAG_TX |
			   NETIF_F_HW_VLAN_CTAG_RX |
			   NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = ixgbevf_watchdog;
	adapter->watchdog_timer.data = (unsigned long)adapter;

	INIT_WORK(&adapter->reset_task, ixgbevf_reset_task);
	INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task);

	err = ixgbevf_init_interrupt_scheme(adapter);
	if (err)
		goto err_sw_init;

	strcpy(netdev->name, "eth%d");

	err = register_netdev(netdev);
	if (err)
		goto err_register;

	netif_carrier_off(netdev);

	ixgbevf_init_last_counter_stats(adapter);

	/* print the MAC address */
	hw_dbg(hw, "%pM\n", netdev->dev_addr);

	hw_dbg(hw, "MAC: %d\n", hw->mac.type);

	hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
	cards_found++;
	return 0;

err_register:
	ixgbevf_clear_interrupt_scheme(adapter);
err_sw_init:
	ixgbevf_reset_interrupt_capability(adapter);
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
/**
 * ixgbevf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbevf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void ixgbevf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	set_bit(__IXGBEVF_DOWN, &adapter->state);

	del_timer_sync(&adapter->watchdog_timer);

	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);

	if (netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(netdev);

	ixgbevf_clear_interrupt_scheme(adapter);
	ixgbevf_reset_interrupt_capability(adapter);

	iounmap(adapter->hw.hw_addr);
	pci_release_regions(pdev);

	hw_dbg(&adapter->hw, "Remove complete\n");

	free_netdev(netdev);

	pci_disable_device(pdev);
}
/**
 * ixgbevf_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 **/
static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
						  pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		ixgbevf_down(adapter);

	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * ixgbevf_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the ixgbevf_resume routine.
 **/
static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);

	ixgbevf_reset(adapter);

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * ixgbevf_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the ixgbevf_resume routine.
 **/
static void ixgbevf_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev))
		ixgbevf_up(adapter);

	netif_device_attach(netdev);
}
/* PCI Error Recovery (ERS) */
static const struct pci_error_handlers ixgbevf_err_handler = {
	.error_detected = ixgbevf_io_error_detected,
	.slot_reset = ixgbevf_io_slot_reset,
	.resume = ixgbevf_io_resume,
};
static struct pci_driver ixgbevf_driver = {
	.name     = ixgbevf_driver_name,
	.id_table = ixgbevf_pci_tbl,
	.probe    = ixgbevf_probe,
	.remove   = ixgbevf_remove,
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend  = ixgbevf_suspend,
	.resume   = ixgbevf_resume,
#endif
	.shutdown = ixgbevf_shutdown,
	.err_handler = &ixgbevf_err_handler
};
/**
 * ixgbevf_init_module - Driver Registration Routine
 *
 * ixgbevf_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init ixgbevf_init_module(void)
{
	int ret;
	pr_info("%s - version %s\n", ixgbevf_driver_string,
		ixgbevf_driver_version);

	pr_info("%s\n", ixgbevf_copyright);

	ret = pci_register_driver(&ixgbevf_driver);
	return ret;
}

module_init(ixgbevf_init_module);
/**
 * ixgbevf_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbevf_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit ixgbevf_exit_module(void)
{
	pci_unregister_driver(&ixgbevf_driver);
}

#ifdef DEBUG
/**
 * ixgbevf_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
{
	struct ixgbevf_adapter *adapter = hw->back;
	return adapter->netdev->name;
}

#endif
module_exit(ixgbevf_exit_module);

/* ixgbevf_main.c */