/*******************************************************************************

  Intel 82599 Virtual Function driver
  Copyright(c) 1999 - 2014 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

/******************************************************************************
 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
******************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>

#include "ixgbevf.h"

const char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
	"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";

#define DRV_VERSION "2.12.1-k"
const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] =
	"Copyright (c) 2009 - 2012 Intel Corporation.";

static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
	[board_82599_vf] = &ixgbevf_82599_vf_info,
	[board_X540_vf]  = &ixgbevf_X540_vf_info,
};

/* ixgbevf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id ixgbevf_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf },
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit Virtual Function Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter);
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);

static void ixgbevf_remove_adapter(struct ixgbe_hw *hw)
{
	struct ixgbevf_adapter *adapter = hw->back;

	if (!hw->hw_addr)
		return;
	hw->hw_addr = NULL;
	dev_err(&adapter->pdev->dev, "Adapter removed\n");
	if (test_bit(__IXGBEVF_WORK_INIT, &adapter->state))
		schedule_work(&adapter->watchdog_task);
}

static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg)
{
	u32 value;

	/* The following check not only optimizes a bit by not
	 * performing a read on the status register when the
	 * register just read was a status register read that
	 * returned IXGBE_FAILED_READ_REG. It also blocks any
	 * potential recursion.
	 */
	if (reg == IXGBE_VFSTATUS) {
		ixgbevf_remove_adapter(hw);
		return;
	}
	value = ixgbevf_read_reg(hw, IXGBE_VFSTATUS);
	if (value == IXGBE_FAILED_READ_REG)
		ixgbevf_remove_adapter(hw);
}

u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg)
{
	u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
	u32 value;

	if (IXGBE_REMOVED(reg_addr))
		return IXGBE_FAILED_READ_REG;
	value = readl(reg_addr + reg);
	if (unlikely(value == IXGBE_FAILED_READ_REG))
		ixgbevf_check_remove(hw, reg);
	return value;
}
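
/* Reader's note (assumption, not spelled out in the code): on PCIe a read
 * from a surprise-removed device typically completes as all 1s, which is
 * the pattern IXGBE_FAILED_READ_REG is taken to encode here.  Since a
 * healthy register can legitimately read as all Fs too, the suspicion is
 * confirmed above by re-reading VFSTATUS before declaring the adapter gone.
 */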

/**
 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 */
static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
			     u8 queue, u8 msix_vector)
{
	u32 ivar, index;
	struct ixgbe_hw *hw = &adapter->hw;
	if (direction == -1) {
		/* other causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
		ivar &= ~0xFF;
		ivar |= msix_vector;
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
	} else {
		/* tx or rx causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		index = ((16 * (queue & 1)) + (8 * direction));
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
		ivar &= ~(0xFF << index);
		ivar |= (msix_vector << index);
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
	}
}

static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
					struct ixgbevf_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		dev_kfree_skb_any(tx_buffer->skb);
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_single(tx_ring->dev,
					 dma_unmap_addr(tx_buffer, dma),
					 dma_unmap_len(tx_buffer, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buffer, len)) {
		dma_unmap_page(tx_ring->dev,
			       dma_unmap_addr(tx_buffer, dma),
			       dma_unmap_len(tx_buffer, len),
			       DMA_TO_DEVICE);
	}
	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	dma_unmap_len_set(tx_buffer, len, 0);
	/* tx_buffer must be completely set up in the transmit path */
}

#define IXGBE_MAX_TXD_PWR	14
#define IXGBE_MAX_DATA_PER_TXD	(1 << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
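
/* Worked example for the macros above: IXGBE_MAX_DATA_PER_TXD is
 * 1 << 14 = 16384 bytes, so a single 32K fragment costs
 * TXD_USE_COUNT(32768) = DIV_ROUND_UP(32768, 16384) = 2 descriptors.
 * DESC_NEEDED budgets one descriptor per possible page fragment plus
 * slack; the +4 is assumed to cover the skb head, a context descriptor
 * and spill-over, when deciding whether the queue must be stopped.
 */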

static void ixgbevf_tx_timeout(struct net_device *netdev);

/**
 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: structure containing interrupt and ring information
 * @tx_ring: tx ring to clean
 **/
static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
				 struct ixgbevf_ring *tx_ring)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_tx_buffer *tx_buffer;
	union ixgbe_adv_tx_desc *tx_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int budget = tx_ring->count / 2;
	unsigned int i = tx_ring->next_to_clean;

	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		return true;

	tx_buffer = &tx_ring->tx_buffer_info[i];
	tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	do {
		union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		read_barrier_depends();

		/* if DD is not set pending work has not been completed */
		if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buffer->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buffer->bytecount;
		total_packets += tx_buffer->gso_segs;

		/* free the skb */
		dev_kfree_skb_any(tx_buffer->skb);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		tx_buffer->skb = NULL;
		dma_unmap_len_set(tx_buffer, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buffer = tx_ring->tx_buffer_info;
				tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buffer, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buffer, dma),
					       dma_unmap_len(tx_buffer, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buffer, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buffer = tx_ring->tx_buffer_info;
			tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
		}

		/* issue prefetch for next Tx descriptor */
		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();

		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}

/**
 * ixgbevf_rx_skb - Helper function to determine proper Rx method
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 **/
static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
			   struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	skb_mark_napi_id(skb, &q_vector->napi);

	if (ixgbevf_qv_busy_polling(q_vector)) {
		netif_receive_skb(skb);
		/* exit early if we busy polled */
		return;
	}
#endif /* CONFIG_NET_RX_BUSY_POLL */
	if (!(q_vector->adapter->flags & IXGBE_FLAG_IN_NETPOLL))
		napi_gro_receive(&q_vector->napi, skb);
	else
		netif_rx(skb);
}

/* ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
 * @ring: structure containing ring specific data
 * @rx_desc: current Rx descriptor being processed
 * @skb: skb currently being received and modified
 */
static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
				       union ixgbe_adv_rx_desc *rx_desc,
				       struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Rx csum disabled */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* if IP and error */
	if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
	    ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
		ring->rx_stats.csum_err++;
		return;
	}

	if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS))
		return;

	if (ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
		ring->rx_stats.csum_err++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
}

/* ixgbevf_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the checksum, VLAN, protocol, and other fields within
 * the skb.
 */
static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring,
				       union ixgbe_adv_rx_desc *rx_desc,
				       struct sk_buff *skb)
{
	ixgbevf_rx_checksum(rx_ring, rx_desc, skb);

	if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
		u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
		unsigned long *active_vlans = netdev_priv(rx_ring->netdev);

		if (test_bit(vid & VLAN_VID_MASK, active_vlans))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
	}

	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}

static bool ixgbevf_alloc_mapped_skb(struct ixgbevf_ring *rx_ring,
				     struct ixgbevf_rx_buffer *bi)
{
	struct sk_buff *skb = bi->skb;
	dma_addr_t dma = bi->dma;

	if (unlikely(skb))
		return true;

	skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
					rx_ring->rx_buf_len);
	if (unlikely(!skb)) {
		rx_ring->rx_stats.alloc_rx_buff_failed++;
		return false;
	}

	dma = dma_map_single(rx_ring->dev, skb->data,
			     rx_ring->rx_buf_len, DMA_FROM_DEVICE);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		dev_kfree_skb_any(skb);

		rx_ring->rx_stats.alloc_rx_buff_failed++;
		return false;
	}

	bi->skb = skb;
	bi->dma = dma;

	return true;
}

/**
 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
 * @rx_ring: rx descriptor ring (for a specific queue) to setup buffers on
 * @cleaned_count: number of buffers to replace
 **/
static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
				     u16 cleaned_count)
{
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbevf_rx_buffer *bi;
	unsigned int i = rx_ring->next_to_use;

	/* nothing to do or no valid netdev defined */
	if (!cleaned_count || !rx_ring->netdev)
		return;

	rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer_info[i];
	i -= rx_ring->count;

	do {
		if (!ixgbevf_alloc_mapped_skb(rx_ring, bi))
			break;

		/* Refresh the desc even if pkt_addr didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = IXGBEVF_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer_info;
			i -= rx_ring->count;
		}

		/* clear the hdr_addr for the next_to_use descriptor */
		rx_desc->read.hdr_addr = 0;

		cleaned_count--;
	} while (cleaned_count);

	i += rx_ring->count;

	if (rx_ring->next_to_use != i) {
		/* record the next descriptor to use */
		rx_ring->next_to_use = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		ixgbevf_write_tail(rx_ring, i);
	}
}
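
/* Indexing idiom used above (and in ixgbevf_clean_tx_irq): on entry i is
 * biased by -count via unsigned wraparound, so the hot-loop wrap test is
 * simply "if (unlikely(!i))" rather than a compare against ring->count;
 * the bias is removed again with "i += rx_ring->count" before the value
 * is stored back into next_to_use.
 */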

static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
					     u32 qmask)
{
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
}

static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
				struct ixgbevf_ring *rx_ring,
				int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	u16 cleaned_count = ixgbevf_desc_unused(rx_ring);
	u16 i;

	i = rx_ring->next_to_clean;

	do {
		union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
		struct ixgbevf_rx_buffer *rx_buffer;
		struct sk_buff *skb;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
			ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
		rx_buffer = &rx_ring->rx_buffer_info[i];

		if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_DD))
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * RXD_STAT_DD bit is set
		 */
		rmb();

		skb = rx_buffer->skb;

		/* pull the header of the skb in */
		__skb_put(skb, le16_to_cpu(rx_desc->wb.upper.length));

		dma_unmap_single(rx_ring->dev, rx_buffer->dma,
				 rx_ring->rx_buf_len,
				 DMA_FROM_DEVICE);

		/* clear skb reference in buffer info structure */
		rx_buffer->skb = NULL;
		rx_buffer->dma = 0;

		cleaned_count++;

		i++;
		if (i == rx_ring->count)
			i = 0;

		next_rxd = IXGBEVF_RX_DESC(rx_ring, i);
		prefetch(next_rxd);

		if (!(ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))) {
			skb->next = rx_ring->rx_buffer_info[i].skb;
			IXGBE_CB(skb->next)->prev = skb;
			rx_ring->rx_stats.non_eop_descs++;
			continue;
		}

		/* we should not be chaining buffers, if we did drop the skb */
		if (IXGBE_CB(skb)->prev) {
			do {
				struct sk_buff *this = skb;
				skb = IXGBE_CB(skb)->prev;
				dev_kfree_skb(this);
			} while (skb);
			continue;
		}

		/* ERR_MASK will only have valid bits if EOP set */
		if (unlikely(ixgbevf_test_staterr(rx_desc,
					IXGBE_RXDADV_ERR_FRAME_ERR_MASK))) {
			dev_kfree_skb_irq(skb);
			continue;
		}

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		/* Workaround hardware that can't do proper VEPA multicast
		 * source pruning.
		 */
		if ((skb->pkt_type == PACKET_BROADCAST ||
		     skb->pkt_type == PACKET_MULTICAST) &&
		    ether_addr_equal(rx_ring->netdev->dev_addr,
				     eth_hdr(skb)->h_source)) {
			dev_kfree_skb_irq(skb);
			continue;
		}

		/* populate checksum, VLAN, and protocol */
		ixgbevf_process_skb_fields(rx_ring, rx_desc, skb);

		ixgbevf_rx_skb(q_vector, skb);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	rx_ring->next_to_clean = i;
	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	q_vector->rx.total_packets += total_rx_packets;
	q_vector->rx.total_bytes += total_rx_bytes;

	if (cleaned_count)
		ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);

	return total_rx_packets;
}
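
/* Design note: this Rx path allocates one full rx_buf_len skb per
 * descriptor (ixgbevf_alloc_mapped_skb above), so a frame should never
 * legitimately span multiple buffers.  Non-EOP buffers are therefore only
 * linked together through IXGBE_CB()->prev so the whole chain can be
 * found and freed once EOP arrives; chains are dropped, never delivered.
 */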

/**
 * ixgbevf_poll - NAPI polling callback
 * @napi: napi struct with our device's info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean one or more rings associated with a q_vector.
 **/
static int ixgbevf_poll(struct napi_struct *napi, int budget)
{
	struct ixgbevf_q_vector *q_vector =
		container_of(napi, struct ixgbevf_q_vector, napi);
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_ring *ring;
	int per_ring_budget;
	bool clean_complete = true;

	ixgbevf_for_each_ring(ring, q_vector->tx)
		clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);

#ifdef CONFIG_NET_RX_BUSY_POLL
	if (!ixgbevf_qv_lock_napi(q_vector))
		return budget;
#endif

	/* attempt to distribute budget to each queue fairly, but don't allow
	 * the budget to go below 1 because we'll exit polling */
	if (q_vector->rx.count > 1)
		per_ring_budget = max(budget/q_vector->rx.count, 1);
	else
		per_ring_budget = budget;

	adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
	ixgbevf_for_each_ring(ring, q_vector->rx)
		clean_complete &= (ixgbevf_clean_rx_irq(q_vector, ring,
							per_ring_budget)
				   < per_ring_budget);
	adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;

#ifdef CONFIG_NET_RX_BUSY_POLL
	ixgbevf_qv_unlock_napi(q_vector);
#endif

	/* If all work not completed, return budget and keep polling */
	if (!clean_complete)
		return budget;
	/* all work done, exit the polling mode */
	napi_complete(napi);
	if (adapter->rx_itr_setting & 1)
		ixgbevf_set_itr(q_vector);
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
	    !test_bit(__IXGBEVF_REMOVING, &adapter->state))
		ixgbevf_irq_enable_queues(adapter,
					  1 << q_vector->v_idx);

	return 0;
}
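
/* Budget split example: with the NAPI weight of 64 used at
 * netif_napi_add() time and three Rx rings sharing one vector,
 * per_ring_budget = max(64 / 3, 1) = 21; a vector with a single Rx ring
 * keeps the full 64-packet budget.
 */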

/**
 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 */
void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	int v_idx = q_vector->v_idx;
	u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;

	/* set the WDIS bit to not clear the timer bits and cause an
	 * immediate assertion of the interrupt
	 */
	itr_reg |= IXGBE_EITR_CNT_WDIS;

	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
}

#ifdef CONFIG_NET_RX_BUSY_POLL
/* must be called with local_bh_disable()d */
static int ixgbevf_busy_poll_recv(struct napi_struct *napi)
{
	struct ixgbevf_q_vector *q_vector =
		container_of(napi, struct ixgbevf_q_vector, napi);
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_ring *ring;
	int found = 0;

	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		return LL_FLUSH_FAILED;

	if (!ixgbevf_qv_lock_poll(q_vector))
		return LL_FLUSH_BUSY;

	ixgbevf_for_each_ring(ring, q_vector->rx) {
		found = ixgbevf_clean_rx_irq(q_vector, ring, 4);
#ifdef BP_EXTENDED_STATS
		if (found)
			ring->stats.cleaned += found;
		else
			ring->stats.misses++;
#endif
		if (found)
			break;
	}

	ixgbevf_qv_unlock_poll(q_vector);

	return found;
}
#endif /* CONFIG_NET_RX_BUSY_POLL */

/**
 * ixgbevf_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
{
	struct ixgbevf_q_vector *q_vector;
	int q_vectors, v_idx;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	adapter->eims_enable_mask = 0;

	/* Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		struct ixgbevf_ring *ring;
		q_vector = adapter->q_vector[v_idx];

		ixgbevf_for_each_ring(ring, q_vector->rx)
			ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);

		ixgbevf_for_each_ring(ring, q_vector->tx)
			ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);

		if (q_vector->tx.ring && !q_vector->rx.ring) {
			/* tx only vector */
			if (adapter->tx_itr_setting == 1)
				q_vector->itr = IXGBE_10K_ITR;
			else
				q_vector->itr = adapter->tx_itr_setting;
		} else {
			/* rx or rx/tx vector */
			if (adapter->rx_itr_setting == 1)
				q_vector->itr = IXGBE_20K_ITR;
			else
				q_vector->itr = adapter->rx_itr_setting;
		}

		/* add q_vector eims value to global eims_enable_mask */
		adapter->eims_enable_mask |= 1 << v_idx;

		ixgbevf_write_eitr(q_vector);
	}

	ixgbevf_set_ivar(adapter, -1, 1, v_idx);
	/* setup eims_other and add value to global eims_enable_mask */
	adapter->eims_other = 1 << v_idx;
	adapter->eims_enable_mask |= adapter->eims_other;
}

enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};

/**
 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @ring_container: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
			       struct ixgbevf_ring_container *ring_container)
{
	int bytes = ring_container->total_bytes;
	int packets = ring_container->total_packets;
	u32 timepassed_us;
	u64 bytes_perint;
	u8 itr_setting = ring_container->itr;

	if (packets == 0)
		return;

	/* simple throttle rate management
	 *    0-20MB/s lowest (100000 ints/s)
	 *   20-100MB/s low   (20000 ints/s)
	 *  100-1249MB/s bulk (8000 ints/s)
	 */
	/* what was last interrupt timeslice? */
	timepassed_us = q_vector->itr >> 2;
	bytes_perint = bytes / timepassed_us; /* bytes/usec */

	switch (itr_setting) {
	case lowest_latency:
		if (bytes_perint > 10)
			itr_setting = low_latency;
		break;
	case low_latency:
		if (bytes_perint > 20)
			itr_setting = bulk_latency;
		else if (bytes_perint <= 10)
			itr_setting = lowest_latency;
		break;
	case bulk_latency:
		if (bytes_perint <= 20)
			itr_setting = low_latency;
		break;
	}

	/* clear work counters since we have the values we need */
	ring_container->total_bytes = 0;
	ring_container->total_packets = 0;

	/* write updated itr to ring container */
	ring_container->itr = itr_setting;
}
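
/* Worked example: q_vector->itr >> 2 converts the stored EITR value to
 * microseconds, so at IXGBE_20K_ITR the timeslice is 50 us (20000 ints/s).
 * Receiving 4000 bytes in that window gives bytes_perint = 4000 / 50 = 80,
 * which is > 20 and therefore promotes a low_latency ring to bulk_latency.
 */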

static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
{
	u32 new_itr = q_vector->itr;
	u8 current_itr;

	ixgbevf_update_itr(q_vector, &q_vector->tx);
	ixgbevf_update_itr(q_vector, &q_vector->rx);

	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = IXGBE_100K_ITR;
		break;
	case low_latency:
		new_itr = IXGBE_20K_ITR;
		break;
	case bulk_latency:
	default:
		new_itr = IXGBE_8K_ITR;
		break;
	}

	if (new_itr != q_vector->itr) {
		/* do an exponential smoothing */
		new_itr = (10 * new_itr * q_vector->itr) /
			  ((9 * new_itr) + q_vector->itr);

		/* save the algorithm value here */
		q_vector->itr = new_itr;

		ixgbevf_write_eitr(q_vector);
	}
}
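
/* Smoothing example, assuming the usual define values (IXGBE_20K_ITR = 200,
 * IXGBE_8K_ITR = 500, consistent with the >> 2 usec conversion above):
 * dropping from 20K ints/s toward 8K gives
 *	new_itr = (10 * 500 * 200) / ((9 * 500) + 200) = 1000000 / 4700 ~= 212
 * i.e. the register only creeps toward the target, damping oscillation
 * between latency classes.
 */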

static irqreturn_t ixgbevf_msix_other(int irq, void *data)
{
	struct ixgbevf_adapter *adapter = data;
	struct ixgbe_hw *hw = &adapter->hw;

	hw->mac.get_link_status = 1;

	if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
	    !test_bit(__IXGBEVF_REMOVING, &adapter->state))
		mod_timer(&adapter->watchdog_timer, jiffies);

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);

	return IRQ_HANDLED;
}

/**
 * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues)
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 **/
static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
{
	struct ixgbevf_q_vector *q_vector = data;

	/* EIAM disabled interrupts (on this vector) for us */
	if (q_vector->rx.ring || q_vector->tx.ring)
		napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
				     int r_idx)
{
	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

	a->rx_ring[r_idx]->next = q_vector->rx.ring;
	q_vector->rx.ring = a->rx_ring[r_idx];
	q_vector->rx.count++;
}

static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
				     int t_idx)
{
	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

	a->tx_ring[t_idx]->next = q_vector->tx.ring;
	q_vector->tx.ring = a->tx_ring[t_idx];
	q_vector->tx.count++;
}

/**
 * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible.  You would add new
 * mapping configurations in here.
 **/
static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
{
	int q_vectors;
	int v_start = 0;
	int rxr_idx = 0, txr_idx = 0;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int i, j;
	int rqpv, tqpv;
	int err = 0;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/* The ideal configuration...
	 * We have enough vectors to map one per queue.
	 */
	if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
		for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
			map_vector_to_rxq(adapter, v_start, rxr_idx);

		for (; txr_idx < txr_remaining; v_start++, txr_idx++)
			map_vector_to_txq(adapter, v_start, txr_idx);
		goto out;
	}

	/* If we don't have enough vectors for a 1-to-1
	 * mapping, we'll have to group them so there are
	 * multiple queues per vector.
	 */
	/* Re-adjusting *qpv takes care of the remainder. */
	for (i = v_start; i < q_vectors; i++) {
		rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
		for (j = 0; j < rqpv; j++) {
			map_vector_to_rxq(adapter, i, rxr_idx);
			rxr_idx++;
			rxr_remaining--;
		}
	}
	for (i = v_start; i < q_vectors; i++) {
		tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
		for (j = 0; j < tqpv; j++) {
			map_vector_to_txq(adapter, i, txr_idx);
			txr_idx++;
			txr_remaining--;
		}
	}

out:
	return err;
}
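
/* Distribution example: 4 Rx rings over 3 vectors walks i = 0..2 with
 * rqpv = DIV_ROUND_UP(4, 3) = 2, then DIV_ROUND_UP(2, 2) = 1, then
 * DIV_ROUND_UP(1, 1) = 1, i.e. the rings land 2/1/1 with the remainder
 * absorbed by the earliest vectors.
 */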

/**
 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	int vector, err;
	int ri = 0, ti = 0;

	for (vector = 0; vector < q_vectors; vector++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector];
		struct msix_entry *entry = &adapter->msix_entries[vector];

		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "TxRx", ri++);
			ti++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "rx", ri++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "tx", ti++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0,
				  q_vector->name, q_vector);
		if (err) {
			hw_dbg(&adapter->hw,
			       "request_irq failed for MSIX interrupt Error: %d\n",
			       err);
			goto free_queue_irqs;
		}
	}

	err = request_irq(adapter->msix_entries[vector].vector,
			  &ixgbevf_msix_other, 0, netdev->name, adapter);
	if (err) {
		hw_dbg(&adapter->hw,
		       "request_irq for msix_other failed: %d\n", err);
		goto free_queue_irqs;
	}

	return 0;

free_queue_irqs:
	while (vector) {
		vector--;
		free_irq(adapter->msix_entries[vector].vector,
			 adapter->q_vector[vector]);
	}
	/* This failure is non-recoverable - it indicates the system is
	 * out of MSIX vector resources and the VF driver cannot run
	 * without them.  Set the number of msix vectors to zero
	 * indicating that not enough can be allocated.  The error
	 * will be returned to the user indicating device open failed.
	 * Any further attempts to force the driver to open will also
	 * fail.  The only way to recover is to unload the driver and
	 * reload it again.  If the system has recovered some MSIX
	 * vectors then it may succeed.
	 */
	adapter->num_msix_vectors = 0;
	return err;
}

static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
{
	int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (i = 0; i < q_vectors; i++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
		q_vector->rx.ring = NULL;
		q_vector->tx.ring = NULL;
		q_vector->rx.count = 0;
		q_vector->tx.count = 0;
	}
}

/**
 * ixgbevf_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
{
	int err;

	err = ixgbevf_request_msix_irqs(adapter);

	if (err)
		hw_dbg(&adapter->hw, "request_irq failed, Error %d\n", err);

	return err;
}

static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
{
	int i, q_vectors;

	q_vectors = adapter->num_msix_vectors;
	i = q_vectors - 1;

	free_irq(adapter->msix_entries[i].vector, adapter);
	i--;

	for (; i >= 0; i--) {
		/* free only the irqs that were actually requested */
		if (!adapter->q_vector[i]->rx.ring &&
		    !adapter->q_vector[i]->tx.ring)
			continue;

		free_irq(adapter->msix_entries[i].vector,
			 adapter->q_vector[i]);
	}

	ixgbevf_reset_q_vectors(adapter);
}

/**
 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0);

	IXGBE_WRITE_FLUSH(hw);

	for (i = 0; i < adapter->num_msix_vectors; i++)
		synchronize_irq(adapter->msix_entries[i].vector);
}

/**
 * ixgbevf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
}

/**
 * ixgbevf_configure_tx_ring - Configure 82599 VF Tx ring after Reset
 * @adapter: board private structure
 * @ring: structure containing ring specific data
 *
 * Configure the Tx descriptor ring after a reset.
 **/
static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
				      struct ixgbevf_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64 tdba = ring->dma;
	int wait_loop = 10;
	u32 txdctl = IXGBE_TXDCTL_ENABLE;
	u8 reg_idx = ring->reg_idx;

	/* disable queue to avoid issues while updating state */
	IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
	IXGBE_WRITE_FLUSH(hw);

	IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(reg_idx), tdba & DMA_BIT_MASK(32));
	IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(reg_idx), tdba >> 32);
	IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(reg_idx),
			ring->count * sizeof(union ixgbe_adv_tx_desc));

	/* disable head writeback */
	IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAH(reg_idx), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAL(reg_idx), 0);

	/* enable relaxed ordering */
	IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(reg_idx),
			(IXGBE_DCA_TXCTRL_DESC_RRO_EN |
			 IXGBE_DCA_TXCTRL_DATA_RRO_EN));

	/* reset head and tail pointers */
	IXGBE_WRITE_REG(hw, IXGBE_VFTDH(reg_idx), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFTDT(reg_idx), 0);
	ring->tail = adapter->io_addr + IXGBE_VFTDT(reg_idx);

	/* reset ntu and ntc to place SW in sync with hardware */
	ring->next_to_clean = 0;
	ring->next_to_use = 0;

	/* In order to avoid issues WTHRESH + PTHRESH should always be equal
	 * to or less than the number of on chip descriptors, which is
	 * currently 40.
	 */
	txdctl |= (8 << 16);    /* WTHRESH = 8 */

	/* Setting PTHRESH to 32 improves performance */
	txdctl |= (1 << 8) |    /* HTHRESH = 1 */
		   32;          /* PTHRESH = 32 */

	IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl);

	/* poll to verify queue is enabled */
	do {
		usleep_range(1000, 2000);
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(reg_idx));
	} while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
	if (!wait_loop)
		pr_err("Could not enable Tx Queue %d\n", reg_idx);
}
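
/* TXDCTL field sketch, inferred from the shifts above (verify against the
 * 82599 datasheet): PTHRESH lives in bits 6:0, HTHRESH in bits 14:8 and
 * WTHRESH in bits 22:16, so the value written is 32 | (1 << 8) | (8 << 16).
 * WTHRESH = 8 batches descriptor write-backs; head write-back is disabled
 * separately above by clearing TDWBAL/TDWBAH.
 */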

/**
 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
{
	u32 i;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbevf_configure_tx_ring(adapter, adapter->tx_ring[i]);
}

#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT	2

static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
{
	struct ixgbevf_ring *rx_ring;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 srrctl;

	rx_ring = adapter->rx_ring[index];

	srrctl = IXGBE_SRRCTL_DROP_EN;

	srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

	srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
		  IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
}

static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	/* PSRTYPE must be initialized in 82599 */
	u32 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
		      IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
		      IXGBE_PSRTYPE_L2HDR;

	if (adapter->num_rx_queues > 1)
		psrtype |= 1 << 29;

	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
}

static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	int i;
	u16 rx_buf_len;

	/* notify the PF of our intent to use this size of frame */
	ixgbevf_rlpml_set_vf(hw, max_frame);

	/* PF will allow an extra 4 bytes past for vlan tagged frames */
	max_frame += VLAN_HLEN;

	/* Allocate buffer sizes that fit well into 32K and
	 * take into account max frame size of 9.5K
	 */
	if ((hw->mac.type == ixgbe_mac_X540_vf) &&
	    (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE))
		rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
	else if (max_frame <= IXGBEVF_RXBUFFER_2K)
		rx_buf_len = IXGBEVF_RXBUFFER_2K;
	else if (max_frame <= IXGBEVF_RXBUFFER_4K)
		rx_buf_len = IXGBEVF_RXBUFFER_4K;
	else if (max_frame <= IXGBEVF_RXBUFFER_8K)
		rx_buf_len = IXGBEVF_RXBUFFER_8K;
	else
		rx_buf_len = IXGBEVF_RXBUFFER_10K;

	for (i = 0; i < adapter->num_rx_queues; i++)
		adapter->rx_ring[i]->rx_buf_len = rx_buf_len;
}
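
/* Example: with the standard 1500-byte MTU, max_frame is first
 * 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) = 1518 when announced to the PF,
 * then 1522 after adding VLAN_HLEN.  Assuming MAXIMUM_ETHERNET_VLAN_SIZE
 * is the usual 1522, an X540 VF fits it exactly, while an 82599 VF falls
 * through to the 2K buffer class.
 */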

#define IXGBEVF_MAX_RX_DESC_POLL 10
static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
				     struct ixgbevf_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
	u32 rxdctl;
	u8 reg_idx = ring->reg_idx;

	if (IXGBE_REMOVED(hw->hw_addr))
		return;
	rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
	rxdctl &= ~IXGBE_RXDCTL_ENABLE;

	/* write value back with RXDCTL.ENABLE bit cleared */
	IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);

	/* the hardware may take up to 100us to really disable the rx queue */
	do {
		udelay(10);
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
	} while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));

	if (!wait_loop)
		pr_err("RXDCTL.ENABLE queue %d not cleared while polling\n",
		       reg_idx);
}

static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
					 struct ixgbevf_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
	u32 rxdctl;
	u8 reg_idx = ring->reg_idx;

	if (IXGBE_REMOVED(hw->hw_addr))
		return;
	do {
		usleep_range(1000, 2000);
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
	} while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));

	if (!wait_loop)
		pr_err("RXDCTL.ENABLE queue %d not set while polling\n",
		       reg_idx);
}

static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
				      struct ixgbevf_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64 rdba = ring->dma;
	u32 rxdctl;
	u8 reg_idx = ring->reg_idx;

	/* disable queue to avoid issues while updating state */
	rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
	ixgbevf_disable_rx_queue(adapter, ring);

	IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(reg_idx), rdba & DMA_BIT_MASK(32));
	IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(reg_idx), rdba >> 32);
	IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(reg_idx),
			ring->count * sizeof(union ixgbe_adv_rx_desc));

	/* enable relaxed ordering */
	IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
			IXGBE_DCA_RXCTRL_DESC_RRO_EN);

	/* reset head and tail pointers */
	IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFRDT(reg_idx), 0);
	ring->tail = adapter->io_addr + IXGBE_VFRDT(reg_idx);

	/* reset ntu and ntc to place SW in sync with hardware */
	ring->next_to_clean = 0;
	ring->next_to_use = 0;

	ixgbevf_configure_srrctl(adapter, reg_idx);

	/* prevent DMA from exceeding buffer space available */
	rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
	rxdctl |= ring->rx_buf_len | IXGBE_RXDCTL_RLPML_EN;
	rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
	IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);

	ixgbevf_rx_desc_queue_enable(adapter, ring);
	ixgbevf_alloc_rx_buffers(ring, ixgbevf_desc_unused(ring));
}

/**
 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
{
	int i;

	ixgbevf_setup_psrtype(adapter);

	/* set_rx_buffer_len must be called before ring initialization */
	ixgbevf_set_rx_buffer_len(adapter);

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbevf_configure_rx_ring(adapter, adapter->rx_ring[i]);
}

static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
				   __be16 proto, u16 vid)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int err;

	spin_lock_bh(&adapter->mbx_lock);

	/* add VID to filter table */
	err = hw->mac.ops.set_vfta(hw, vid, 0, true);

	spin_unlock_bh(&adapter->mbx_lock);

	/* translate error return types so error makes sense */
	if (err == IXGBE_ERR_MBX)
		return -EIO;

	if (err == IXGBE_ERR_INVALID_ARGUMENT)
		return -EACCES;

	set_bit(vid, adapter->active_vlans);

	return err;
}

static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev,
				    __be16 proto, u16 vid)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int err = -EOPNOTSUPP;

	spin_lock_bh(&adapter->mbx_lock);

	/* remove VID from filter table */
	err = hw->mac.ops.set_vfta(hw, vid, 0, false);

	spin_unlock_bh(&adapter->mbx_lock);

	clear_bit(vid, adapter->active_vlans);

	return err;
}

static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
{
	u16 vid;

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		ixgbevf_vlan_rx_add_vid(adapter->netdev,
					htons(ETH_P_8021Q), vid);
}

static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int count = 0;

	if ((netdev_uc_count(netdev)) > 10) {
		pr_err("Too many unicast filters - No Space\n");
		return -ENOSPC;
	}

	if (!netdev_uc_empty(netdev)) {
		struct netdev_hw_addr *ha;
		netdev_for_each_uc_addr(ha, netdev) {
			hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
			udelay(200);
		}
	} else {
		/* If the list is empty then send message to PF driver to
		 * clear all macvlans on this VF.
		 */
		hw->mac.ops.set_uc_addr(hw, 0, NULL);
	}

	return count;
}

/**
 * ixgbevf_set_rx_mode - Multicast and unicast set
 * @netdev: network interface device structure
 *
 * The set_rx_method entry point is called whenever the multicast address
 * list, unicast address list or the network interface flags are updated.
 * This routine is responsible for configuring the hardware for proper
 * multicast mode and configuring requested unicast filters.
 **/
static void ixgbevf_set_rx_mode(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	spin_lock_bh(&adapter->mbx_lock);

	/* reprogram multicast list */
	hw->mac.ops.update_mc_addr_list(hw, netdev);

	ixgbevf_write_uc_addr_list(netdev);

	spin_unlock_bh(&adapter->mbx_lock);
}

static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
{
	int q_idx;
	struct ixgbevf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = adapter->q_vector[q_idx];
#ifdef CONFIG_NET_RX_BUSY_POLL
		ixgbevf_qv_init_lock(adapter->q_vector[q_idx]);
#endif
		napi_enable(&q_vector->napi);
	}
}

static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
{
	int q_idx;
	struct ixgbevf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = adapter->q_vector[q_idx];
		napi_disable(&q_vector->napi);
#ifdef CONFIG_NET_RX_BUSY_POLL
		while (!ixgbevf_qv_disable(adapter->q_vector[q_idx])) {
			pr_info("QV %d locked\n", q_idx);
			usleep_range(1000, 20000);
		}
#endif /* CONFIG_NET_RX_BUSY_POLL */
	}
}

static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	unsigned int def_q = 0;
	unsigned int num_tcs = 0;
	unsigned int num_rx_queues = 1;
	int err;

	spin_lock_bh(&adapter->mbx_lock);

	/* fetch queue configuration from the PF */
	err = ixgbevf_get_queues(hw, &num_tcs, &def_q);

	spin_unlock_bh(&adapter->mbx_lock);

	if (err)
		return err;

	if (num_tcs > 1) {
		/* update default Tx ring register index */
		adapter->tx_ring[0]->reg_idx = def_q;

		/* we need as many queues as traffic classes */
		num_rx_queues = num_tcs;
	}

	/* if we have a bad config abort request queue reset */
	if (adapter->num_rx_queues != num_rx_queues) {
		/* force mailbox timeout to prevent further messages */
		hw->mbx.timeout = 0;

		/* wait for watchdog to come around and bail us out */
		adapter->flags |= IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;
	}

	return 0;
}

static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
{
	ixgbevf_configure_dcb(adapter);

	ixgbevf_set_rx_mode(adapter->netdev);

	ixgbevf_restore_vlan(adapter);

	ixgbevf_configure_tx(adapter);
	ixgbevf_configure_rx(adapter);
}

static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
{
	/* Only save pre-reset stats if there are some */
	if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
		adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
			adapter->stats.base_vfgprc;
		adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
			adapter->stats.base_vfgptc;
		adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
			adapter->stats.base_vfgorc;
		adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
			adapter->stats.base_vfgotc;
		adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
			adapter->stats.base_vfmprc;
	}
}

static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
	adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
	adapter->stats.last_vfgorc |=
		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
	adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
	adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
	adapter->stats.last_vfgotc |=
		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
	adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);

	adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
	adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
	adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
	adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
	adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
}

static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int api[] = { ixgbe_mbox_api_11,
		      ixgbe_mbox_api_10,
		      ixgbe_mbox_api_unknown };
	int err = 0, idx = 0;

	spin_lock_bh(&adapter->mbx_lock);

	while (api[idx] != ixgbe_mbox_api_unknown) {
		err = ixgbevf_negotiate_api_version(hw, api[idx]);
		if (!err)
			break;
		idx++;
	}

	spin_unlock_bh(&adapter->mbx_lock);
}

static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;

	ixgbevf_configure_msix(adapter);

	spin_lock_bh(&adapter->mbx_lock);

	if (is_valid_ether_addr(hw->mac.addr))
		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
	else
		hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);

	spin_unlock_bh(&adapter->mbx_lock);

	smp_mb__before_atomic();
	clear_bit(__IXGBEVF_DOWN, &adapter->state);
	ixgbevf_napi_enable_all(adapter);

	/* enable transmits */
	netif_tx_start_all_queues(netdev);

	ixgbevf_save_reset_stats(adapter);
	ixgbevf_init_last_counter_stats(adapter);

	hw->mac.get_link_status = 1;
	mod_timer(&adapter->watchdog_timer, jiffies);
}

void ixgbevf_up(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	ixgbevf_configure(adapter);

	ixgbevf_up_complete(adapter);

	/* clear any pending interrupts, may auto mask */
	IXGBE_READ_REG(hw, IXGBE_VTEICR);

	ixgbevf_irq_enable(adapter);
}

/**
 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 **/
static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
{
	unsigned long size;
	unsigned int i;

	if (!rx_ring->rx_buffer_info)
		return;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct ixgbevf_rx_buffer *rx_buffer_info;

		rx_buffer_info = &rx_ring->rx_buffer_info[i];
		if (rx_buffer_info->dma) {
			dma_unmap_single(rx_ring->dev, rx_buffer_info->dma,
					 rx_ring->rx_buf_len,
					 DMA_FROM_DEVICE);
			rx_buffer_info->dma = 0;
		}
		if (rx_buffer_info->skb) {
			struct sk_buff *skb = rx_buffer_info->skb;
			rx_buffer_info->skb = NULL;
			do {
				struct sk_buff *this = skb;
				skb = IXGBE_CB(skb)->prev;
				dev_kfree_skb(this);
			} while (skb);
		}
	}

	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);
}

/**
 * ixgbevf_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/
static void ixgbevf_clean_tx_ring(struct ixgbevf_ring *tx_ring)
{
	struct ixgbevf_tx_buffer *tx_buffer_info;
	unsigned long size;
	unsigned int i;

	if (!tx_ring->tx_buffer_info)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
	}

	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_buffer_info, 0, size);

	memset(tx_ring->desc, 0, tx_ring->size);
}

/**
 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbevf_clean_rx_ring(adapter->rx_ring[i]);
}

/**
 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbevf_clean_tx_ring(adapter->tx_ring[i]);
}

void ixgbevf_down(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	/* signal that we are down to the interrupt handler */
	if (test_and_set_bit(__IXGBEVF_DOWN, &adapter->state))
		return; /* do nothing if already down */

	/* disable all enabled rx queues */
	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]);

	netif_tx_disable(netdev);

	msleep(10);

	netif_tx_stop_all_queues(netdev);

	ixgbevf_irq_disable(adapter);

	ixgbevf_napi_disable_all(adapter);

	del_timer_sync(&adapter->watchdog_timer);
	/* can't call flush scheduled work here because it can deadlock
	 * if linkwatch_event tries to acquire the rtnl_lock which we are
	 * holding */
	while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK)
		msleep(1);

	/* disable transmits in the hardware now that interrupts are off */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		u8 reg_idx = adapter->tx_ring[i]->reg_idx;

		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx),
				IXGBE_TXDCTL_SWFLSH);
	}

	netif_carrier_off(netdev);

	if (!pci_channel_offline(adapter->pdev))
		ixgbevf_reset(adapter);

	ixgbevf_clean_all_tx_rings(adapter);
	ixgbevf_clean_all_rx_rings(adapter);
}

void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
{
	WARN_ON(in_interrupt());

	while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
		msleep(1);

	ixgbevf_down(adapter);
	ixgbevf_up(adapter);

	clear_bit(__IXGBEVF_RESETTING, &adapter->state);
}

void ixgbevf_reset(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;

	if (hw->mac.ops.reset_hw(hw)) {
		hw_dbg(hw, "PF still resetting\n");
	} else {
		hw->mac.ops.init_hw(hw);
		ixgbevf_negotiate_api(adapter);
	}

	if (is_valid_ether_addr(adapter->hw.mac.addr)) {
		memcpy(netdev->dev_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
		memcpy(netdev->perm_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
	}
}

static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
					int vectors)
{
	int vector_threshold;

	/* We'll want at least 2 (vector_threshold):
	 * 1) TxQ[0] + RxQ[0] handler
	 * 2) Other (Link Status Change, etc.)
	 */
	vector_threshold = MIN_MSIX_COUNT;

	/* The more we get, the more we will assign to Tx/Rx Cleanup
	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting irq's.
	 */
	vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					vector_threshold, vectors);

	if (vectors < 0) {
		dev_err(&adapter->pdev->dev,
			"Unable to allocate MSI-X interrupts\n");
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
		return vectors;
	}

	/* Adjust for only the vectors we'll use, which is minimum
	 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
	 * vectors we were allocated.
	 */
	adapter->num_msix_vectors = vectors;

	return 0;
}
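
/* pci_enable_msix_range() semantics relied on above: it returns a vector
 * count in [vector_threshold, vectors] on success (possibly fewer than
 * requested) or a negative errno, so a short allocation is accepted and
 * recorded in num_msix_vectors rather than treated as a failure.
 */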

/**
 * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
 * @adapter: board private structure to initialize
 *
 * This is the top level queue allocation routine.  The order here is very
 * important, starting with the "most" number of features turned on at once,
 * and ending with the smallest set of features.  This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fallthrough conditions.
 *
 **/
static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	unsigned int def_q = 0;
	unsigned int num_tcs = 0;
	int err;

	/* Start with base case */
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;

	spin_lock_bh(&adapter->mbx_lock);

	/* fetch queue configuration from the PF */
	err = ixgbevf_get_queues(hw, &num_tcs, &def_q);

	spin_unlock_bh(&adapter->mbx_lock);

	if (err)
		return;

	/* we need as many queues as traffic classes */
	if (num_tcs > 1)
		adapter->num_rx_queues = num_tcs;
}

/**
 * ixgbevf_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.  The polling_netdev array is
 * intended for Multiqueue, but should work fine with a single queue.
 **/
static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
{
	struct ixgbevf_ring *ring;
	int rx = 0, tx = 0;

	for (; tx < adapter->num_tx_queues; tx++) {
		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
		if (!ring)
			goto err_allocation;

		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;
		ring->count = adapter->tx_ring_count;
		ring->queue_index = tx;
		ring->reg_idx = tx;

		adapter->tx_ring[tx] = ring;
	}

	for (; rx < adapter->num_rx_queues; rx++) {
		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
		if (!ring)
			goto err_allocation;

		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		ring->count = adapter->rx_ring_count;
		ring->queue_index = rx;
		ring->reg_idx = rx;

		adapter->rx_ring[rx] = ring;
	}

	return 0;

err_allocation:
	while (tx) {
		kfree(adapter->tx_ring[--tx]);
		adapter->tx_ring[tx] = NULL;
	}

	while (rx) {
		kfree(adapter->rx_ring[--rx]);
		adapter->rx_ring[rx] = NULL;
	}
	return -ENOMEM;
}

/**
 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err = 0;
	int vector, v_budget;

	/* It's easy to be greedy for MSI-X vectors, but it really
	 * doesn't do us much good if we have a lot more vectors
	 * than CPU's.  So let's be conservative and only ask for
	 * (roughly) the same number of vectors as there are CPU's.
	 * The default is to use pairs of vectors.
	 */
	v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
	v_budget = min_t(int, v_budget, num_online_cpus());
	v_budget += NON_Q_VECTORS;

	/* A failure in MSI-X entry allocation isn't fatal, but it does
	 * mean we disable MSI-X capabilities of the adapter. */
	adapter->msix_entries = kcalloc(v_budget,
					sizeof(struct msix_entry), GFP_KERNEL);
	if (!adapter->msix_entries) {
		err = -ENOMEM;
		goto out;
	}

	for (vector = 0; vector < v_budget; vector++)
		adapter->msix_entries[vector].entry = vector;

	err = ixgbevf_acquire_msix_vectors(adapter, v_budget);
	if (err)
		goto out;

	err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
	if (err)
		goto out;

	err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);

out:
	return err;
}
/**
 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
{
        int q_idx, num_q_vectors;
        struct ixgbevf_q_vector *q_vector;

        num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

        for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
                q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
                if (!q_vector)
                        goto err_out;
                q_vector->adapter = adapter;
                q_vector->v_idx = q_idx;
                netif_napi_add(adapter->netdev, &q_vector->napi,
                               ixgbevf_poll, 64);
#ifdef CONFIG_NET_RX_BUSY_POLL
                napi_hash_add(&q_vector->napi);
#endif
                adapter->q_vector[q_idx] = q_vector;
        }

        return 0;

err_out:
        while (q_idx) {
                q_idx--;
                q_vector = adapter->q_vector[q_idx];
#ifdef CONFIG_NET_RX_BUSY_POLL
                napi_hash_del(&q_vector->napi);
#endif
                netif_napi_del(&q_vector->napi);
                adapter->q_vector[q_idx] = NULL;
        }
        return -ENOMEM;
}
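/* Each q_vector owns one NAPI context; the weight of 64 passed to
 * netif_napi_add() above is the conventional per-poll packet budget
 * used across the Intel Ethernet drivers of this era.
 */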
/**
 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
{
        int q_idx, num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

        for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
                struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];

                adapter->q_vector[q_idx] = NULL;
#ifdef CONFIG_NET_RX_BUSY_POLL
                napi_hash_del(&q_vector->napi);
#endif
                netif_napi_del(&q_vector->napi);
                kfree(q_vector);
        }
}
/**
 * ixgbevf_reset_interrupt_capability - Reset MSIX setup
 * @adapter: board private structure
 *
 **/
static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
{
        pci_disable_msix(adapter->pdev);
        kfree(adapter->msix_entries);
        adapter->msix_entries = NULL;
}
/**
 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
 * @adapter: board private structure to initialize
 *
 **/
static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
{
        int err;

        /* Number of supported queues */
        ixgbevf_set_num_queues(adapter);

        err = ixgbevf_set_interrupt_capability(adapter);
        if (err) {
                hw_dbg(&adapter->hw,
                       "Unable to setup interrupt capabilities\n");
                goto err_set_interrupt;
        }

        err = ixgbevf_alloc_q_vectors(adapter);
        if (err) {
                hw_dbg(&adapter->hw, "Unable to allocate memory for queue "
                       "vectors\n");
                goto err_alloc_q_vectors;
        }

        err = ixgbevf_alloc_queues(adapter);
        if (err) {
                pr_err("Unable to allocate memory for queues\n");
                goto err_alloc_queues;
        }

        hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, "
               "Tx Queue count = %u\n",
               (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
               adapter->num_rx_queues, adapter->num_tx_queues);

        set_bit(__IXGBEVF_DOWN, &adapter->state);

        return 0;
err_alloc_queues:
        ixgbevf_free_q_vectors(adapter);
err_alloc_q_vectors:
        ixgbevf_reset_interrupt_capability(adapter);
err_set_interrupt:
        return err;
}
/**
 * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @adapter: board private structure to clear interrupt scheme on
 *
 * We go through and clear interrupt specific resources and reset the structure
 * to pre-load conditions
 **/
static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
{
        int i;

        for (i = 0; i < adapter->num_tx_queues; i++) {
                kfree(adapter->tx_ring[i]);
                adapter->tx_ring[i] = NULL;
        }
        for (i = 0; i < adapter->num_rx_queues; i++) {
                kfree(adapter->rx_ring[i]);
                adapter->rx_ring[i] = NULL;
        }

        adapter->num_tx_queues = 0;
        adapter->num_rx_queues = 0;

        ixgbevf_free_q_vectors(adapter);
        ixgbevf_reset_interrupt_capability(adapter);
}
/**
 * ixgbevf_sw_init - Initialize general software structures
 * (struct ixgbevf_adapter)
 * @adapter: board private structure to initialize
 *
 * ixgbevf_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;
        struct pci_dev *pdev = adapter->pdev;
        struct net_device *netdev = adapter->netdev;
        int err;

        /* PCI config space info */
        hw->vendor_id = pdev->vendor;
        hw->device_id = pdev->device;
        hw->revision_id = pdev->revision;
        hw->subsystem_vendor_id = pdev->subsystem_vendor;
        hw->subsystem_device_id = pdev->subsystem_device;

        hw->mbx.ops.init_params(hw);

        /* assume legacy case in which PF would only give VF 2 queues */
        hw->mac.max_tx_queues = 2;
        hw->mac.max_rx_queues = 2;

        /* lock to protect mailbox accesses */
        spin_lock_init(&adapter->mbx_lock);

        err = hw->mac.ops.reset_hw(hw);
        if (err) {
                dev_info(&pdev->dev,
                         "PF still in reset state.  Is the PF interface up?\n");
        } else {
                err = hw->mac.ops.init_hw(hw);
                if (err) {
                        pr_err("init_shared_code failed: %d\n", err);
                        goto out;
                }
                ixgbevf_negotiate_api(adapter);
                err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
                if (err)
                        dev_info(&pdev->dev, "Error reading MAC address\n");
                else if (is_zero_ether_addr(adapter->hw.mac.addr))
                        dev_info(&pdev->dev,
                                 "MAC address not assigned by administrator.\n");
                memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
        }

        if (!is_valid_ether_addr(netdev->dev_addr)) {
                dev_info(&pdev->dev, "Assigning random MAC address\n");
                eth_hw_addr_random(netdev);
                memcpy(hw->mac.addr, netdev->dev_addr, netdev->addr_len);
        }

        /* Enable dynamic interrupt throttling rates */
        adapter->rx_itr_setting = 1;
        adapter->tx_itr_setting = 1;

        /* set default ring sizes */
        adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
        adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;

        set_bit(__IXGBEVF_DOWN, &adapter->state);
        return 0;

out:
        return err;
}
#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter)            \
        {                                                              \
                u32 current_counter = IXGBE_READ_REG(hw, reg);         \
                if (current_counter < last_counter)                    \
                        counter += 0x100000000LL;                      \
                last_counter = current_counter;                        \
                counter &= 0xFFFFFFFF00000000LL;                       \
                counter |= current_counter;                            \
        }

#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
        {                                                                \
                u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb);   \
                u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb);   \
                u64 current_counter = (current_counter_msb << 32) |      \
                        current_counter_lsb;                             \
                if (current_counter < last_counter)                      \
                        counter += 0x1000000000LL;                       \
                last_counter = current_counter;                          \
                counter &= 0xFFFFFFF000000000LL;                         \
                counter |= current_counter;                              \
        }
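/* Wraparound example for the macros above: the VF counter registers are
 * only 32 bits wide, so if last_counter was 0xFFFFFFF0 and the register
 * now reads 0x00000010, the hardware must have wrapped.  Adding
 * 0x100000000 carries one into the software-maintained upper half of
 * the 64-bit counter, after which the fresh low 32 bits are spliced
 * back in; the 36-bit variant does the same with a 2^36 carry.
 */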
/**
 * ixgbevf_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 **/
void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;
        int i;

        if (!adapter->link_up)
                return;

        UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
                                adapter->stats.vfgprc);
        UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
                                adapter->stats.vfgptc);
        UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
                                adapter->stats.last_vfgorc,
                                adapter->stats.vfgorc);
        UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
                                adapter->stats.last_vfgotc,
                                adapter->stats.vfgotc);
        UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
                                adapter->stats.vfmprc);

        for (i = 0; i < adapter->num_rx_queues; i++) {
                adapter->hw_csum_rx_error +=
                        adapter->rx_ring[i]->hw_csum_rx_error;
                adapter->rx_ring[i]->hw_csum_rx_error = 0;
        }
}
/**
 * ixgbevf_watchdog - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void ixgbevf_watchdog(unsigned long data)
{
        struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
        struct ixgbe_hw *hw = &adapter->hw;
        u32 eics = 0;
        int i;

        /*
         * Do the watchdog outside of interrupt context due to the lovely
         * delays that some of the newer hardware requires
         */

        if (test_bit(__IXGBEVF_DOWN, &adapter->state))
                goto watchdog_short_circuit;

        /* get one bit for every active tx/rx interrupt vector */
        for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
                struct ixgbevf_q_vector *qv = adapter->q_vector[i];
                if (qv->rx.ring || qv->tx.ring)
                        eics |= 1 << i;
        }

        IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics);

watchdog_short_circuit:
        schedule_work(&adapter->watchdog_task);
}
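/* VTEICS is the VF "interrupt cause set" register: writing the bitmask
 * built above requests an interrupt on every vector that has a ring
 * attached, so each active queue pair gets serviced at least once per
 * watchdog tick even if a real interrupt was lost along the way.
 */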
/**
 * ixgbevf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void ixgbevf_tx_timeout(struct net_device *netdev)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);

        /* Do the reset outside of interrupt context */
        schedule_work(&adapter->reset_task);
}
static void ixgbevf_reset_task(struct work_struct *work)
{
        struct ixgbevf_adapter *adapter;
        adapter = container_of(work, struct ixgbevf_adapter, reset_task);

        /* If we're already down or resetting, just bail */
        if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
            test_bit(__IXGBEVF_REMOVING, &adapter->state) ||
            test_bit(__IXGBEVF_RESETTING, &adapter->state))
                return;

        adapter->tx_timeout_count++;

        ixgbevf_reinit_locked(adapter);
}
/**
 * ixgbevf_watchdog_task - worker thread to bring link up
 * @work: pointer to work_struct containing our data
 **/
static void ixgbevf_watchdog_task(struct work_struct *work)
{
        struct ixgbevf_adapter *adapter = container_of(work,
                                                       struct ixgbevf_adapter,
                                                       watchdog_task);
        struct net_device *netdev = adapter->netdev;
        struct ixgbe_hw *hw = &adapter->hw;
        u32 link_speed = adapter->link_speed;
        bool link_up = adapter->link_up;
        s32 need_reset;

        if (IXGBE_REMOVED(hw->hw_addr)) {
                if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
                        rtnl_lock();
                        ixgbevf_down(adapter);
                        rtnl_unlock();
                }
                return;
        }

        ixgbevf_queue_reset_subtask(adapter);

        adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;

        /*
         * Always check the link on the watchdog because we have
         * no LSC interrupt
         */
        spin_lock_bh(&adapter->mbx_lock);

        need_reset = hw->mac.ops.check_link(hw, &link_speed, &link_up, false);

        spin_unlock_bh(&adapter->mbx_lock);

        if (need_reset) {
                adapter->link_up = link_up;
                adapter->link_speed = link_speed;
                netif_carrier_off(netdev);
                netif_tx_stop_all_queues(netdev);
                schedule_work(&adapter->reset_task);
                goto pf_has_reset;
        }
        adapter->link_up = link_up;
        adapter->link_speed = link_speed;

        if (link_up) {
                if (!netif_carrier_ok(netdev)) {
                        char *link_speed_string;

                        switch (link_speed) {
                        case IXGBE_LINK_SPEED_10GB_FULL:
                                link_speed_string = "10 Gbps";
                                break;
                        case IXGBE_LINK_SPEED_1GB_FULL:
                                link_speed_string = "1 Gbps";
                                break;
                        case IXGBE_LINK_SPEED_100_FULL:
                                link_speed_string = "100 Mbps";
                                break;
                        default:
                                link_speed_string = "unknown speed";
                                break;
                        }
                        dev_info(&adapter->pdev->dev,
                                 "NIC Link is Up, %s\n", link_speed_string);
                        netif_carrier_on(netdev);
                        netif_tx_wake_all_queues(netdev);
                }
        } else {
                adapter->link_up = false;
                adapter->link_speed = 0;
                if (netif_carrier_ok(netdev)) {
                        dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
                        netif_carrier_off(netdev);
                        netif_tx_stop_all_queues(netdev);
                }
        }

        ixgbevf_update_stats(adapter);

pf_has_reset:
        /* Reset the timer */
        if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
            !test_bit(__IXGBEVF_REMOVING, &adapter->state))
                mod_timer(&adapter->watchdog_timer,
                          round_jiffies(jiffies + (2 * HZ)));

        adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
}
/**
 * ixgbevf_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void ixgbevf_free_tx_resources(struct ixgbevf_ring *tx_ring)
{
        ixgbevf_clean_tx_ring(tx_ring);

        vfree(tx_ring->tx_buffer_info);
        tx_ring->tx_buffer_info = NULL;

        /* if not set, then don't free */
        if (!tx_ring->desc)
                return;

        dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc,
                          tx_ring->dma);

        tx_ring->desc = NULL;
}
/**
 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
{
        int i;

        for (i = 0; i < adapter->num_tx_queues; i++)
                if (adapter->tx_ring[i]->desc)
                        ixgbevf_free_tx_resources(adapter->tx_ring[i]);
}
/**
 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring)
{
        int size;

        size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
        tx_ring->tx_buffer_info = vzalloc(size);
        if (!tx_ring->tx_buffer_info)
                goto err;

        /* round up to nearest 4K */
        tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
        tx_ring->size = ALIGN(tx_ring->size, 4096);

        tx_ring->desc = dma_alloc_coherent(tx_ring->dev, tx_ring->size,
                                           &tx_ring->dma, GFP_KERNEL);
        if (!tx_ring->desc)
                goto err;

        return 0;

err:
        vfree(tx_ring->tx_buffer_info);
        tx_ring->tx_buffer_info = NULL;
        dev_err(tx_ring->dev,
                "Unable to allocate memory for the Tx descriptor ring\n");
        return -ENOMEM;
}
/**
 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
{
        int i, err = 0;

        for (i = 0; i < adapter->num_tx_queues; i++) {
                err = ixgbevf_setup_tx_resources(adapter->tx_ring[i]);
                if (!err)
                        continue;
                hw_dbg(&adapter->hw,
                       "Allocation for Tx Queue %u failed\n", i);
                break;
        }

        return err;
}
/**
 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int ixgbevf_setup_rx_resources(struct ixgbevf_ring *rx_ring)
{
        int size;

        size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
        rx_ring->rx_buffer_info = vzalloc(size);
        if (!rx_ring->rx_buffer_info)
                goto err;

        /* Round up to nearest 4K */
        rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
        rx_ring->size = ALIGN(rx_ring->size, 4096);

        rx_ring->desc = dma_alloc_coherent(rx_ring->dev, rx_ring->size,
                                           &rx_ring->dma, GFP_KERNEL);
        if (!rx_ring->desc)
                goto err;

        return 0;
err:
        vfree(rx_ring->rx_buffer_info);
        rx_ring->rx_buffer_info = NULL;
        dev_err(rx_ring->dev, "Unable to allocate memory for the Rx descriptor ring\n");
        return -ENOMEM;
}
/**
 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
{
        int i, err = 0;

        for (i = 0; i < adapter->num_rx_queues; i++) {
                err = ixgbevf_setup_rx_resources(adapter->rx_ring[i]);
                if (!err)
                        continue;
                hw_dbg(&adapter->hw,
                       "Allocation for Rx Queue %u failed\n", i);
                break;
        }

        return err;
}
/**
 * ixgbevf_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void ixgbevf_free_rx_resources(struct ixgbevf_ring *rx_ring)
{
        ixgbevf_clean_rx_ring(rx_ring);

        vfree(rx_ring->rx_buffer_info);
        rx_ring->rx_buffer_info = NULL;

        dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc,
                          rx_ring->dma);

        rx_ring->desc = NULL;
}
/**
 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
{
        int i;

        for (i = 0; i < adapter->num_rx_queues; i++)
                if (adapter->rx_ring[i]->desc)
                        ixgbevf_free_rx_resources(adapter->rx_ring[i]);
}
/**
 * ixgbevf_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int ixgbevf_open(struct net_device *netdev)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
        int err;

        /* A previous failure to open the device because of a lack of
         * available MSIX vector resources may have reset the number
         * of msix vectors variable to zero.  The only way to recover
         * is to unload/reload the driver and hope that the system has
         * been able to recover some MSIX vector resources.
         */
        if (!adapter->num_msix_vectors)
                return -ENOMEM;

        /* disallow open during test */
        if (test_bit(__IXGBEVF_TESTING, &adapter->state))
                return -EBUSY;

        if (hw->adapter_stopped) {
                ixgbevf_reset(adapter);
                /* if adapter is still stopped then PF isn't up and
                 * the vf can't start. */
                if (hw->adapter_stopped) {
                        err = IXGBE_ERR_MBX;
                        pr_err("Unable to start - perhaps the PF Driver isn't "
                               "up yet\n");
                        goto err_setup_reset;
                }
        }

        /* allocate transmit descriptors */
        err = ixgbevf_setup_all_tx_resources(adapter);
        if (err)
                goto err_setup_tx;

        /* allocate receive descriptors */
        err = ixgbevf_setup_all_rx_resources(adapter);
        if (err)
                goto err_setup_rx;

        ixgbevf_configure(adapter);

        /*
         * Map the Tx/Rx rings to the vectors we were allotted.
         * if request_irq will be called in this function map_rings
         * must be called *before* up_complete
         */
        ixgbevf_map_rings_to_vectors(adapter);

        ixgbevf_up_complete(adapter);

        /* clear any pending interrupts, may auto mask */
        IXGBE_READ_REG(hw, IXGBE_VTEICR);
        err = ixgbevf_request_irq(adapter);
        if (err)
                goto err_req_irq;

        ixgbevf_irq_enable(adapter);

        return 0;

err_req_irq:
        ixgbevf_down(adapter);
err_setup_rx:
        ixgbevf_free_all_rx_resources(adapter);
err_setup_tx:
        ixgbevf_free_all_tx_resources(adapter);
        ixgbevf_reset(adapter);

err_setup_reset:

        return err;
}
/**
 * ixgbevf_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int ixgbevf_close(struct net_device *netdev)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);

        ixgbevf_down(adapter);
        ixgbevf_free_irq(adapter);

        ixgbevf_free_all_tx_resources(adapter);
        ixgbevf_free_all_rx_resources(adapter);

        return 0;
}
static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter)
{
        struct net_device *dev = adapter->netdev;

        if (!(adapter->flags & IXGBEVF_FLAG_QUEUE_RESET_REQUESTED))
                return;

        adapter->flags &= ~IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;

        /* if interface is down do nothing */
        if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
            test_bit(__IXGBEVF_RESETTING, &adapter->state))
                return;

        /* Hardware has to reinitialize queues and interrupts to
         * match packet buffer alignment. Unfortunately, the
         * hardware is not flexible enough to do this dynamically.
         */
        if (netif_running(dev))
                ixgbevf_close(dev);

        ixgbevf_clear_interrupt_scheme(adapter);
        ixgbevf_init_interrupt_scheme(adapter);

        if (netif_running(dev))
                ixgbevf_open(dev);
}
static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
                                u32 vlan_macip_lens, u32 type_tucmd,
                                u32 mss_l4len_idx)
{
        struct ixgbe_adv_tx_context_desc *context_desc;
        u16 i = tx_ring->next_to_use;

        context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);

        i++;
        tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

        /* set bits to identify this as an advanced context descriptor */
        type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

        context_desc->vlan_macip_lens   = cpu_to_le32(vlan_macip_lens);
        context_desc->seqnum_seed       = 0;
        context_desc->type_tucmd_mlhl   = cpu_to_le32(type_tucmd);
        context_desc->mss_l4len_idx     = cpu_to_le32(mss_l4len_idx);
}
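/* A context descriptor occupies a regular slot in the Tx ring (hence
 * the next_to_use advance above) but carries no packet data; it
 * programs the offload parameters (header lengths, checksum and TSO
 * options) that subsequent data descriptors refer to by index.
 */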
static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
                       struct ixgbevf_tx_buffer *first,
                       u8 *hdr_len)
{
        struct sk_buff *skb = first->skb;
        u32 vlan_macip_lens, type_tucmd;
        u32 mss_l4len_idx, l4len;
        int err;

        if (skb->ip_summed != CHECKSUM_PARTIAL)
                return 0;

        if (!skb_is_gso(skb))
                return 0;

        err = skb_cow_head(skb, 0);
        if (err < 0)
                return err;

        /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
        type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;

        if (skb->protocol == htons(ETH_P_IP)) {
                struct iphdr *iph = ip_hdr(skb);
                iph->tot_len = 0;
                iph->check = 0;
                tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
                                                         iph->daddr, 0,
                                                         IPPROTO_TCP, 0);
                type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
                first->tx_flags |= IXGBE_TX_FLAGS_TSO |
                                   IXGBE_TX_FLAGS_CSUM |
                                   IXGBE_TX_FLAGS_IPV4;
        } else if (skb_is_gso_v6(skb)) {
                ipv6_hdr(skb)->payload_len = 0;
                tcp_hdr(skb)->check =
                    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
                                     &ipv6_hdr(skb)->daddr,
                                     0, IPPROTO_TCP, 0);
                first->tx_flags |= IXGBE_TX_FLAGS_TSO |
                                   IXGBE_TX_FLAGS_CSUM;
        }

        /* compute header lengths */
        l4len = tcp_hdrlen(skb);
        *hdr_len = skb_transport_offset(skb) + l4len;

        /* update gso size and bytecount with header size */
        first->gso_segs = skb_shinfo(skb)->gso_segs;
        first->bytecount += (first->gso_segs - 1) * *hdr_len;

        /* mss_l4len_id: use 1 as index for TSO */
        mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
        mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
        mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;

        /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
        vlan_macip_lens = skb_network_header_len(skb);
        vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
        vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;

        ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
                            type_tucmd, mss_l4len_idx);

        return 1;
}
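/* Why the checksum fields are seeded above: for TSO the hardware
 * computes the final TCP checksum of each segment, but it expects the
 * checksum field to already hold the complemented pseudo-header sum
 * computed over a zero length (hence csum_tcpudp_magic() and
 * csum_ipv6_magic() with len 0).  The IPv4 tot_len and check fields
 * are zeroed for the same reason: the hardware rewrites them per
 * segment.
 */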
static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
                            struct ixgbevf_tx_buffer *first)
{
        struct sk_buff *skb = first->skb;
        u32 vlan_macip_lens = 0;
        u32 mss_l4len_idx = 0;
        u32 type_tucmd = 0;

        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                u8 l4_hdr = 0;

                switch (skb->protocol) {
                case htons(ETH_P_IP):
                        vlan_macip_lens |= skb_network_header_len(skb);
                        type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
                        l4_hdr = ip_hdr(skb)->protocol;
                        break;
                case htons(ETH_P_IPV6):
                        vlan_macip_lens |= skb_network_header_len(skb);
                        l4_hdr = ipv6_hdr(skb)->nexthdr;
                        break;
                default:
                        if (unlikely(net_ratelimit())) {
                                dev_warn(tx_ring->dev,
                                         "partial checksum but proto=%x!\n",
                                         first->protocol);
                        }
                        break;
                }

                switch (l4_hdr) {
                case IPPROTO_TCP:
                        type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
                        mss_l4len_idx = tcp_hdrlen(skb) <<
                                        IXGBE_ADVTXD_L4LEN_SHIFT;
                        break;
                case IPPROTO_SCTP:
                        type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
                        mss_l4len_idx = sizeof(struct sctphdr) <<
                                        IXGBE_ADVTXD_L4LEN_SHIFT;
                        break;
                case IPPROTO_UDP:
                        mss_l4len_idx = sizeof(struct udphdr) <<
                                        IXGBE_ADVTXD_L4LEN_SHIFT;
                        break;
                default:
                        if (unlikely(net_ratelimit())) {
                                dev_warn(tx_ring->dev,
                                         "partial checksum but l4 proto=%x!\n",
                                         l4_hdr);
                        }
                        break;
                }

                /* update TX checksum flag */
                first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
        }

        /* vlan_macip_lens: MACLEN, VLAN tag */
        vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
        vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;

        ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
                            type_tucmd, mss_l4len_idx);
}
static __le32 ixgbevf_tx_cmd_type(u32 tx_flags)
{
        /* set type for advanced descriptor with frame checksum insertion */
        __le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
                                      IXGBE_ADVTXD_DCMD_IFCS |
                                      IXGBE_ADVTXD_DCMD_DEXT);

        /* set HW vlan bit if vlan is present */
        if (tx_flags & IXGBE_TX_FLAGS_VLAN)
                cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);

        /* set segmentation enable bits for TSO/FSO */
        if (tx_flags & IXGBE_TX_FLAGS_TSO)
                cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);

        return cmd_type;
}
static void ixgbevf_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
                                     u32 tx_flags, unsigned int paylen)
{
        __le32 olinfo_status = cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT);

        /* enable L4 checksum for TSO and TX checksum offload */
        if (tx_flags & IXGBE_TX_FLAGS_CSUM)
                olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM);

        /* enable IPv4 checksum for TSO */
        if (tx_flags & IXGBE_TX_FLAGS_IPV4)
                olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);

        /* use index 1 context for TSO/FSO/FCOE */
        if (tx_flags & IXGBE_TX_FLAGS_TSO)
                olinfo_status |= cpu_to_le32(1 << IXGBE_ADVTXD_IDX_SHIFT);

        /* Check Context must be set if Tx switch is enabled, which it
         * always is for case where virtual functions are running
         */
        olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC);

        tx_desc->read.olinfo_status = olinfo_status;
}
static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
                           struct ixgbevf_tx_buffer *first,
                           const u8 hdr_len)
{
        dma_addr_t dma;
        struct sk_buff *skb = first->skb;
        struct ixgbevf_tx_buffer *tx_buffer;
        union ixgbe_adv_tx_desc *tx_desc;
        struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
        unsigned int data_len = skb->data_len;
        unsigned int size = skb_headlen(skb);
        unsigned int paylen = skb->len - hdr_len;
        u32 tx_flags = first->tx_flags;
        __le32 cmd_type;
        u16 i = tx_ring->next_to_use;

        tx_desc = IXGBEVF_TX_DESC(tx_ring, i);

        ixgbevf_tx_olinfo_status(tx_desc, tx_flags, paylen);
        cmd_type = ixgbevf_tx_cmd_type(tx_flags);

        dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
        if (dma_mapping_error(tx_ring->dev, dma))
                goto dma_error;

        /* record length, and DMA address */
        dma_unmap_len_set(first, len, size);
        dma_unmap_addr_set(first, dma, dma);

        tx_desc->read.buffer_addr = cpu_to_le64(dma);

        for (;;) {
                while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
                        tx_desc->read.cmd_type_len =
                                cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD);

                        i++;
                        tx_desc++;
                        if (i == tx_ring->count) {
                                tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
                                i = 0;
                        }

                        dma += IXGBE_MAX_DATA_PER_TXD;
                        size -= IXGBE_MAX_DATA_PER_TXD;

                        tx_desc->read.buffer_addr = cpu_to_le64(dma);
                        tx_desc->read.olinfo_status = 0;
                }

                if (likely(!data_len))
                        break;

                tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);

                i++;
                tx_desc++;
                if (i == tx_ring->count) {
                        tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
                        i = 0;
                }

                size = skb_frag_size(frag);
                data_len -= size;

                dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
                                       DMA_TO_DEVICE);
                if (dma_mapping_error(tx_ring->dev, dma))
                        goto dma_error;

                tx_buffer = &tx_ring->tx_buffer_info[i];
                dma_unmap_len_set(tx_buffer, len, size);
                dma_unmap_addr_set(tx_buffer, dma, dma);

                tx_desc->read.buffer_addr = cpu_to_le64(dma);
                tx_desc->read.olinfo_status = 0;

                frag++;
        }

        /* write last descriptor with RS and EOP bits */
        cmd_type |= cpu_to_le32(size) | cpu_to_le32(IXGBE_TXD_CMD);
        tx_desc->read.cmd_type_len = cmd_type;

        /* set the timestamp */
        first->time_stamp = jiffies;

        /* Force memory writes to complete before letting h/w know there
         * are new descriptors to fetch.  (Only applicable for weak-ordered
         * memory model archs, such as IA-64).
         *
         * We also need this memory barrier (wmb) to make certain all of the
         * status bits have been updated before next_to_watch is written.
         */
        wmb();

        /* set next_to_watch value indicating a packet is present */
        first->next_to_watch = tx_desc;

        i++;
        if (i == tx_ring->count)
                i = 0;

        tx_ring->next_to_use = i;

        /* notify HW of packet */
        ixgbevf_write_tail(tx_ring, i);

        return;
dma_error:
        dev_err(tx_ring->dev, "TX DMA map failed\n");

        /* clear dma mappings for failed tx_buffer_info map */
        for (;;) {
                tx_buffer = &tx_ring->tx_buffer_info[i];
                ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer);
                if (tx_buffer == first)
                        break;
                if (i == 0)
                        i = tx_ring->count;
                i--;
        }

        tx_ring->next_to_use = i;
}
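/* Sizing note for the splitting loop above: IXGBE_MAX_DATA_PER_TXD is
 * 16KB (1 << 14) in the ixgbe family headers, so e.g. a 40000-byte
 * linear region is emitted as three descriptors of 16384 + 16384 +
 * 7232 bytes; only the final descriptor of the packet carries the
 * RS/EOP bits.
 */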
static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
{
        netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
        /* Herbert's original patch had:
         *  smp_mb__after_netif_stop_queue();
         * but since that doesn't exist yet, just open code it. */
        smp_mb();

        /* We need to check again in a case another CPU has just
         * made room available. */
        if (likely(ixgbevf_desc_unused(tx_ring) < size))
                return -EBUSY;

        /* A reprieve! - use start_queue because it doesn't call schedule */
        netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
        ++tx_ring->tx_stats.restart_queue;

        return 0;
}
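/* The stop-then-recheck dance above closes a race with the Tx cleanup
 * path: between the "not enough descriptors" test in the transmit hot
 * path and netif_stop_subqueue(), the cleanup interrupt may free
 * enough entries and never see a stopped queue.  The smp_mb() orders
 * the stop flag against the re-read of the free-descriptor count so
 * that at least one side is guaranteed to notice the other.
 */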
static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
{
        if (likely(ixgbevf_desc_unused(tx_ring) >= size))
                return 0;
        return __ixgbevf_maybe_stop_tx(tx_ring, size);
}
static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        struct ixgbevf_tx_buffer *first;
        struct ixgbevf_ring *tx_ring;
        int tso;
        u32 tx_flags = 0;
        u16 count = TXD_USE_COUNT(skb_headlen(skb));
#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
        unsigned short f;
#endif
        u8 hdr_len = 0;
        u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL);

        if (!dst_mac || is_link_local_ether_addr(dst_mac)) {
                dev_kfree_skb(skb);
                return NETDEV_TX_OK;
        }

        tx_ring = adapter->tx_ring[skb->queue_mapping];

        /*
         * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
         *       + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
         *       + 2 desc gap to keep tail from touching head,
         *       + 1 desc for context descriptor,
         * otherwise try next time
         */
#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
        for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
                count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
#else
        count += skb_shinfo(skb)->nr_frags;
#endif
        if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
                tx_ring->tx_stats.tx_busy++;
                return NETDEV_TX_BUSY;
        }
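        /* Descriptor budget example for the check above: an skb with a
         * 1448-byte linear area and 3 page fragments needs 1 + 3 = 4
         * data descriptors, and "count + 3" then reserves the context
         * descriptor plus the 2-descriptor head/tail gap listed in the
         * comment, so the queue is stopped unless 7 slots are free.
         */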
        /* record the location of the first descriptor for this packet */
        first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
        first->skb = skb;
        first->bytecount = skb->len;
        first->gso_segs = 1;

        if (vlan_tx_tag_present(skb)) {
                tx_flags |= vlan_tx_tag_get(skb);
                tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
                tx_flags |= IXGBE_TX_FLAGS_VLAN;
        }

        /* record initial flags and protocol */
        first->tx_flags = tx_flags;
        first->protocol = vlan_get_protocol(skb);

        tso = ixgbevf_tso(tx_ring, first, &hdr_len);
        if (tso < 0)
                goto out_drop;
        else if (!tso)
                ixgbevf_tx_csum(tx_ring, first);

        ixgbevf_tx_map(tx_ring, first, hdr_len);

        ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);

        return NETDEV_TX_OK;

out_drop:
        dev_kfree_skb_any(first->skb);
        first->skb = NULL;

        return NETDEV_TX_OK;
}
/**
 * ixgbevf_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbevf_set_mac(struct net_device *netdev, void *p)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
        struct sockaddr *addr = p;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
        memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

        spin_lock_bh(&adapter->mbx_lock);

        hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);

        spin_unlock_bh(&adapter->mbx_lock);

        return 0;
}
/**
 * ixgbevf_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
        int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;

        switch (adapter->hw.api_version) {
        case ixgbe_mbox_api_11:
                max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
                break;
        default:
                if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
                        max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
                break;
        }

        /* MTU < 68 is an error and causes problems on some kernels */
        if ((new_mtu < 68) || (max_frame > max_possible_frame))
                return -EINVAL;

        hw_dbg(&adapter->hw, "changing MTU from %d to %d\n",
               netdev->mtu, new_mtu);
        /* must set new MTU before calling down or up */
        netdev->mtu = new_mtu;

        if (netif_running(netdev))
                ixgbevf_reinit_locked(adapter);

        return 0;
}
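/* Frame-size arithmetic for the check above: a standard 1500-byte MTU
 * gives max_frame = 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) = 1518,
 * which fits the default MAXIMUM_ETHERNET_VLAN_SIZE limit; jumbo MTUs
 * are only accepted once the PF has negotiated mailbox API 1.1 or the
 * device is an X540 VF, since only then is IXGBE_MAX_JUMBO_FRAME_SIZE
 * usable.
 */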
static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
#ifdef CONFIG_PM
        int retval = 0;
#endif

        netif_device_detach(netdev);

        if (netif_running(netdev)) {
                rtnl_lock();
                ixgbevf_down(adapter);
                ixgbevf_free_irq(adapter);
                ixgbevf_free_all_tx_resources(adapter);
                ixgbevf_free_all_rx_resources(adapter);
                rtnl_unlock();
        }

        ixgbevf_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PM
        retval = pci_save_state(pdev);
        if (retval)
                return retval;

#endif
        if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
                pci_disable_device(pdev);

        return 0;
}
#ifdef CONFIG_PM
static int ixgbevf_resume(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        u32 err;

        pci_restore_state(pdev);
        /*
         * pci_restore_state clears dev->state_saved so call
         * pci_save_state to restore it.
         */
        pci_save_state(pdev);

        err = pci_enable_device_mem(pdev);
        if (err) {
                dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
                return err;
        }
        smp_mb__before_atomic();
        clear_bit(__IXGBEVF_DISABLED, &adapter->state);
        pci_set_master(pdev);

        ixgbevf_reset(adapter);

        rtnl_lock();
        err = ixgbevf_init_interrupt_scheme(adapter);
        rtnl_unlock();
        if (err) {
                dev_err(&pdev->dev, "Cannot initialize interrupts\n");
                return err;
        }

        if (netif_running(netdev)) {
                err = ixgbevf_open(netdev);
                if (err)
                        return err;
        }

        netif_device_attach(netdev);

        return err;
}

#endif /* CONFIG_PM */
static void ixgbevf_shutdown(struct pci_dev *pdev)
{
        ixgbevf_suspend(pdev, PMSG_SUSPEND);
}
static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
                                                   struct rtnl_link_stats64 *stats)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        unsigned int start;
        u64 bytes, packets;
        const struct ixgbevf_ring *ring;
        int i;

        ixgbevf_update_stats(adapter);

        stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;

        for (i = 0; i < adapter->num_rx_queues; i++) {
                ring = adapter->rx_ring[i];
                do {
                        start = u64_stats_fetch_begin_irq(&ring->syncp);
                        bytes = ring->stats.bytes;
                        packets = ring->stats.packets;
                } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
                stats->rx_bytes += bytes;
                stats->rx_packets += packets;
        }

        for (i = 0; i < adapter->num_tx_queues; i++) {
                ring = adapter->tx_ring[i];
                do {
                        start = u64_stats_fetch_begin_irq(&ring->syncp);
                        bytes = ring->stats.bytes;
                        packets = ring->stats.packets;
                } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
                stats->tx_bytes += bytes;
                stats->tx_packets += packets;
        }

        return stats;
}
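/* The do/while loops above use the u64_stats seqcount: the fetch is
 * retried whenever the writer (the ring's hot path) bumped the
 * sequence during the read, so 64-bit byte/packet pairs are returned
 * consistently even on 32-bit kernels where the update is not atomic.
 */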
static const struct net_device_ops ixgbevf_netdev_ops = {
        .ndo_open               = ixgbevf_open,
        .ndo_stop               = ixgbevf_close,
        .ndo_start_xmit         = ixgbevf_xmit_frame,
        .ndo_set_rx_mode        = ixgbevf_set_rx_mode,
        .ndo_get_stats64        = ixgbevf_get_stats,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_mac_address    = ixgbevf_set_mac,
        .ndo_change_mtu         = ixgbevf_change_mtu,
        .ndo_tx_timeout         = ixgbevf_tx_timeout,
        .ndo_vlan_rx_add_vid    = ixgbevf_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = ixgbevf_vlan_rx_kill_vid,
#ifdef CONFIG_NET_RX_BUSY_POLL
        .ndo_busy_poll          = ixgbevf_busy_poll_recv,
#endif
};
static void ixgbevf_assign_netdev_ops(struct net_device *dev)
{
        dev->netdev_ops = &ixgbevf_netdev_ops;
        ixgbevf_set_ethtool_ops(dev);
        dev->watchdog_timeo = 5 * HZ;
}
/**
 * ixgbevf_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgbevf_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        struct net_device *netdev;
        struct ixgbevf_adapter *adapter = NULL;
        struct ixgbe_hw *hw = NULL;
        const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
        int err, pci_using_dac;

        err = pci_enable_device(pdev);
        if (err)
                return err;

        if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
                pci_using_dac = 1;
        } else {
                err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
                if (err) {
                        dev_err(&pdev->dev,
                                "No usable DMA configuration, aborting\n");
                        goto err_dma;
                }
                pci_using_dac = 0;
        }

        err = pci_request_regions(pdev, ixgbevf_driver_name);
        if (err) {
                dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
                goto err_pci_reg;
        }

        pci_set_master(pdev);

        netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
                                   MAX_TX_QUEUES);
        if (!netdev) {
                err = -ENOMEM;
                goto err_alloc_etherdev;
        }

        SET_NETDEV_DEV(netdev, &pdev->dev);

        pci_set_drvdata(pdev, netdev);
        adapter = netdev_priv(netdev);

        adapter->netdev = netdev;
        adapter->pdev = pdev;
        hw = &adapter->hw;
        hw->back = adapter;
        adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

        /*
         * call save state here in standalone driver because it relies on
         * adapter struct to exist, and needs to call netdev_priv
         */
        pci_save_state(pdev);

        hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
                              pci_resource_len(pdev, 0));
        adapter->io_addr = hw->hw_addr;
        if (!hw->hw_addr) {
                err = -EIO;
                goto err_ioremap;
        }

        ixgbevf_assign_netdev_ops(netdev);

        /* Setup hw api */
        memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
        hw->mac.type = ii->mac;

        memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
               sizeof(struct ixgbe_mbx_operations));

        /* setup the private structure */
        err = ixgbevf_sw_init(adapter);
        if (err)
                goto err_sw_init;

        /* The HW MAC address was set and/or determined in sw_init */
        if (!is_valid_ether_addr(netdev->dev_addr)) {
                pr_err("invalid MAC address\n");
                err = -EIO;
                goto err_sw_init;
        }

        netdev->hw_features = NETIF_F_SG |
                              NETIF_F_IP_CSUM |
                              NETIF_F_IPV6_CSUM |
                              NETIF_F_TSO |
                              NETIF_F_TSO6 |
                              NETIF_F_RXCSUM;

        netdev->features = netdev->hw_features |
                           NETIF_F_HW_VLAN_CTAG_TX |
                           NETIF_F_HW_VLAN_CTAG_RX |
                           NETIF_F_HW_VLAN_CTAG_FILTER;

        netdev->vlan_features |= NETIF_F_TSO;
        netdev->vlan_features |= NETIF_F_TSO6;
        netdev->vlan_features |= NETIF_F_IP_CSUM;
        netdev->vlan_features |= NETIF_F_IPV6_CSUM;
        netdev->vlan_features |= NETIF_F_SG;

        if (pci_using_dac)
                netdev->features |= NETIF_F_HIGHDMA;

        netdev->priv_flags |= IFF_UNICAST_FLT;

        init_timer(&adapter->watchdog_timer);
        adapter->watchdog_timer.function = ixgbevf_watchdog;
        adapter->watchdog_timer.data = (unsigned long)adapter;

        if (IXGBE_REMOVED(hw->hw_addr)) {
                err = -EIO;
                goto err_sw_init;
        }
        INIT_WORK(&adapter->reset_task, ixgbevf_reset_task);
        INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task);
        set_bit(__IXGBEVF_WORK_INIT, &adapter->state);

        err = ixgbevf_init_interrupt_scheme(adapter);
        if (err)
                goto err_sw_init;

        strcpy(netdev->name, "eth%d");

        err = register_netdev(netdev);
        if (err)
                goto err_register;

        netif_carrier_off(netdev);

        ixgbevf_init_last_counter_stats(adapter);

        /* print the MAC address */
        hw_dbg(hw, "%pM\n", netdev->dev_addr);

        hw_dbg(hw, "MAC: %d\n", hw->mac.type);

        hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
        return 0;

err_register:
        ixgbevf_clear_interrupt_scheme(adapter);
err_sw_init:
        ixgbevf_reset_interrupt_capability(adapter);
        iounmap(adapter->io_addr);
err_ioremap:
        free_netdev(netdev);
err_alloc_etherdev:
        pci_release_regions(pdev);
err_pci_reg:
err_dma:
        /* adapter is still NULL if alloc_etherdev_mq() failed */
        if (!adapter || !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
                pci_disable_device(pdev);
        return err;
}
/**
 * ixgbevf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbevf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void ixgbevf_remove(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);

        set_bit(__IXGBEVF_REMOVING, &adapter->state);

        del_timer_sync(&adapter->watchdog_timer);

        cancel_work_sync(&adapter->reset_task);
        cancel_work_sync(&adapter->watchdog_task);

        if (netdev->reg_state == NETREG_REGISTERED)
                unregister_netdev(netdev);

        ixgbevf_clear_interrupt_scheme(adapter);
        ixgbevf_reset_interrupt_capability(adapter);

        iounmap(adapter->io_addr);
        pci_release_regions(pdev);

        hw_dbg(&adapter->hw, "Remove complete\n");

        /* disable the device before the adapter memory goes away */
        if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
                pci_disable_device(pdev);

        free_netdev(netdev);
}
/**
 * ixgbevf_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 **/
static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
                                                  pci_channel_state_t state)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);

        if (!test_bit(__IXGBEVF_WORK_INIT, &adapter->state))
                return PCI_ERS_RESULT_DISCONNECT;

        rtnl_lock();
        netif_device_detach(netdev);

        if (state == pci_channel_io_perm_failure) {
                rtnl_unlock();
                return PCI_ERS_RESULT_DISCONNECT;
        }

        if (netif_running(netdev))
                ixgbevf_down(adapter);

        if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
                pci_disable_device(pdev);
        rtnl_unlock();

        /* Request a slot reset. */
        return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * ixgbevf_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.  Implementation
 * resembles the first-half of the ixgbevf_resume routine.
 **/
static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);

        if (pci_enable_device_mem(pdev)) {
                dev_err(&pdev->dev,
                        "Cannot re-enable PCI device after reset.\n");
                return PCI_ERS_RESULT_DISCONNECT;
        }

        smp_mb__before_atomic();
        clear_bit(__IXGBEVF_DISABLED, &adapter->state);
        pci_set_master(pdev);

        ixgbevf_reset(adapter);

        return PCI_ERS_RESULT_RECOVERED;
}
/**
 * ixgbevf_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it is OK to resume normal operation.  Implementation resembles the
 * second-half of the ixgbevf_resume routine.
 **/
static void ixgbevf_io_resume(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);

        if (netif_running(netdev))
                ixgbevf_up(adapter);

        netif_device_attach(netdev);
}
/* PCI Error Recovery (ERS) */
static const struct pci_error_handlers ixgbevf_err_handler = {
        .error_detected = ixgbevf_io_error_detected,
        .slot_reset = ixgbevf_io_slot_reset,
        .resume = ixgbevf_io_resume,
};
static struct pci_driver ixgbevf_driver = {
        .name     = ixgbevf_driver_name,
        .id_table = ixgbevf_pci_tbl,
        .probe    = ixgbevf_probe,
        .remove   = ixgbevf_remove,
#ifdef CONFIG_PM
        /* Power Management Hooks */
        .suspend  = ixgbevf_suspend,
        .resume   = ixgbevf_resume,
#endif
        .shutdown = ixgbevf_shutdown,
        .err_handler = &ixgbevf_err_handler
};
/**
 * ixgbevf_init_module - Driver Registration Routine
 *
 * ixgbevf_init_module is the first routine called when the driver is
 * loaded.  All it does is register with the PCI subsystem.
 **/
static int __init ixgbevf_init_module(void)
{
        int ret;

        pr_info("%s - version %s\n", ixgbevf_driver_string,
                ixgbevf_driver_version);

        pr_info("%s\n", ixgbevf_copyright);

        ret = pci_register_driver(&ixgbevf_driver);
        return ret;
}

module_init(ixgbevf_init_module);
/**
 * ixgbevf_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbevf_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit ixgbevf_exit_module(void)
{
        pci_unregister_driver(&ixgbevf_driver);
}

#ifdef DEBUG
/**
 * ixgbevf_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
{
        struct ixgbevf_adapter *adapter = hw->back;
        return adapter->netdev->name;
}

#endif
module_exit(ixgbevf_exit_module);

/* ixgbevf_main.c */