/*******************************************************************************

  Intel 82599 Virtual Function driver
  Copyright(c) 1999 - 2010 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

/******************************************************************************
 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
******************************************************************************/
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>

#include "ixgbevf.h"

char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
	"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";

#define DRV_VERSION "2.1.0-k"
const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] =
	"Copyright (c) 2009 - 2010 Intel Corporation.";

static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
	[board_82599_vf] = &ixgbevf_82599_vf_info,
	[board_X540_vf]  = &ixgbevf_X540_vf_info,
};

/* ixgbevf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static struct pci_device_id ixgbevf_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF),
	 board_82599_vf},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF),
	 board_X540_vf},

	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_DEBUG_LEVEL_SHIFT 3

/* forward decls */
static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector);
static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx,
			       u32 itr_reg);

static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
					   struct ixgbevf_ring *rx_ring,
					   u32 val)
{
	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rx_ring->reg_idx), val);
}

/**
 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 **/
static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
			     u8 queue, u8 msix_vector)
{
	u32 ivar, index;
	struct ixgbe_hw *hw = &adapter->hw;
	if (direction == -1) {
		/* other causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
		ivar &= ~0xFF;
		ivar |= msix_vector;
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
	} else {
		/* tx or rx causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		index = ((16 * (queue & 1)) + (8 * direction));
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
		ivar &= ~(0xFF << index);
		ivar |= (msix_vector << index);
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
	}
}

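/*
 * Worked example (editorial addition, not from the original source):
 * each VTIVAR register packs four 8-bit vector entries covering two
 * queues, so Rx queue 3 (direction 0) computes
 * index = 16 * (3 & 1) + 8 * 0 = 16 and lands in byte 2 of VTIVAR(1);
 * the Tx cause for the same queue sits 8 bits higher.
 */
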
static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_adapter *adapter,
					       struct ixgbevf_tx_buffer
					       *tx_buffer_info)
{
	if (tx_buffer_info->dma) {
		if (tx_buffer_info->mapped_as_page)
			dma_unmap_page(&adapter->pdev->dev,
				       tx_buffer_info->dma,
				       tx_buffer_info->length,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(&adapter->pdev->dev,
					 tx_buffer_info->dma,
					 tx_buffer_info->length,
					 DMA_TO_DEVICE);
		tx_buffer_info->dma = 0;
	}
	if (tx_buffer_info->skb) {
		dev_kfree_skb_any(tx_buffer_info->skb);
		tx_buffer_info->skb = NULL;
	}
	tx_buffer_info->time_stamp = 0;
	/* tx_buffer_info must be completely set up in the transmit path */
}

#define IXGBE_MAX_TXD_PWR	14
#define IXGBE_MAX_DATA_PER_TXD	(1 << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
			 (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
#ifdef MAX_SKB_FRAGS
#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
	MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1)	/* for context */
#else
#define DESC_NEEDED TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD)
#endif

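/*
 * Worked example (editorial addition, not from the original source):
 * TXD_USE_COUNT() rounds a buffer up to 16KB (IXGBE_MAX_DATA_PER_TXD)
 * chunks, so a 20000-byte buffer needs (20000 >> 14) + 1 = 2
 * descriptors.  DESC_NEEDED is then the worst case for one skb: one
 * chunk for skb->data, one per page fragment, plus one for a context
 * descriptor.
 */
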
static void ixgbevf_tx_timeout(struct net_device *netdev);

/**
 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 * @tx_ring: tx ring to clean
 **/
static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
				 struct ixgbevf_ring *tx_ring)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
	struct ixgbevf_tx_buffer *tx_buffer_info;
	unsigned int i, eop, count = 0;
	unsigned int total_bytes = 0, total_packets = 0;

	i = tx_ring->next_to_clean;
	eop = tx_ring->tx_buffer_info[i].next_to_watch;
	eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);

	while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
	       (count < tx_ring->work_limit)) {
		bool cleaned = false;
		rmb(); /* read buffer_info after eop_desc */
		for ( ; !cleaned; count++) {
			struct sk_buff *skb;
			tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			cleaned = (i == eop);
			skb = tx_buffer_info->skb;

			if (cleaned && skb) {
				unsigned int segs, bytecount;

				/* gso_segs is currently only valid for tcp */
				segs = skb_shinfo(skb)->gso_segs ?: 1;
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * skb_headlen(skb)) +
					    skb->len;
				total_packets += segs;
				total_bytes += bytecount;
			}

			ixgbevf_unmap_and_free_tx_resource(adapter,
							   tx_buffer_info);

			tx_desc->wb.status = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}

		eop = tx_ring->tx_buffer_info[i].next_to_watch;
		eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(count && netif_carrier_ok(netdev) &&
		     (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
#ifdef HAVE_TX_MQ
		if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
		    !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
			netif_wake_subqueue(netdev, tx_ring->queue_index);
			++adapter->restart_queue;
		}
#else
		if (netif_queue_stopped(netdev) &&
		    !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
			netif_wake_queue(netdev);
			++adapter->restart_queue;
		}
#endif
	}

	/* re-arm the interrupt */
	if ((count >= tx_ring->work_limit) &&
	    (!test_bit(__IXGBEVF_DOWN, &adapter->state))) {
		IXGBE_WRITE_REG(hw, IXGBE_VTEICS, tx_ring->v_idx);
	}

	tx_ring->total_bytes += total_bytes;
	tx_ring->total_packets += total_packets;

	netdev->stats.tx_bytes += total_bytes;
	netdev->stats.tx_packets += total_packets;

	return count < tx_ring->work_limit;
}

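/*
 * Note (editorial addition, not from the original source): the queue
 * is only restarted once at least TX_WAKE_THRESHOLD (2 * DESC_NEEDED)
 * descriptors are free, i.e. room for two worst-case frames, which
 * avoids rapid stop/wake thrashing around the threshold.
 */
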
/**
 * ixgbevf_receive_skb - Send a completed packet up the stack
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 * @status: hardware indication of status of receive
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 * @rx_desc: rx descriptor
 **/
static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
				struct sk_buff *skb, u8 status,
				struct ixgbevf_ring *ring,
				union ixgbe_adv_rx_desc *rx_desc)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	bool is_vlan = (status & IXGBE_RXD_STAT_VP);

	if (is_vlan) {
		u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);

		__vlan_hwaccel_put_tag(skb, tag);
	}

	if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
		napi_gro_receive(&q_vector->napi, skb);
	else
		netif_rx(skb);
}

/**
 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
 * @adapter: address of board private structure
 * @status_err: hardware indication of status of receive
 * @skb: skb currently being received and modified
 **/
static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter,
				       u32 status_err, struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Rx csum disabled */
	if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
		return;

	/* if IP and error */
	if ((status_err & IXGBE_RXD_STAT_IPCS) &&
	    (status_err & IXGBE_RXDADV_ERR_IPE)) {
		adapter->hw_csum_rx_error++;
		return;
	}

	if (!(status_err & IXGBE_RXD_STAT_L4CS))
		return;

	if (status_err & IXGBE_RXDADV_ERR_TCPE) {
		adapter->hw_csum_rx_error++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	adapter->hw_csum_rx_good++;
}

/**
 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 **/
static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
				     struct ixgbevf_ring *rx_ring,
				     int cleaned_count)
{
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbevf_rx_buffer *bi;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = rx_ring->rx_buf_len + NET_IP_ALIGN;

	i = rx_ring->next_to_use;
	bi = &rx_ring->rx_buffer_info[i];

	while (cleaned_count--) {
		rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);

		if (!bi->page_dma &&
		    (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
			if (!bi->page) {
				bi->page = netdev_alloc_page(adapter->netdev);
				if (!bi->page) {
					adapter->alloc_rx_page_failed++;
					goto no_buffers;
				}
				bi->page_offset = 0;
			} else {
				/* use a half page if we're re-using */
				bi->page_offset ^= (PAGE_SIZE / 2);
			}

			bi->page_dma = dma_map_page(&pdev->dev, bi->page,
						    bi->page_offset,
						    (PAGE_SIZE / 2),
						    DMA_FROM_DEVICE);
		}

		skb = bi->skb;
		if (!skb) {
			skb = netdev_alloc_skb(adapter->netdev,
					       bufsz);

			if (!skb) {
				adapter->alloc_rx_buff_failed++;
				goto no_buffers;
			}

			/*
			 * Make buffer alignment 2 beyond a 16 byte boundary
			 * this will result in a 16 byte aligned IP header after
			 * the 14 byte MAC header is removed
			 */
			skb_reserve(skb, NET_IP_ALIGN);

			bi->skb = skb;
		}
		if (!bi->dma) {
			bi->dma = dma_map_single(&pdev->dev, skb->data,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);
		}
		/* Refresh the desc even if buffer_addrs didn't change because
		 * each write-back erases this info. */
		if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
			rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
			rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
		} else {
			rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		bi = &rx_ring->rx_buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		if (i-- == 0)
			i = (rx_ring->count - 1);

		ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i);
	}
}

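/*
 * Note (editorial addition, not from the original source): the tail
 * write above uses i - 1 rather than next_to_use itself, presumably so
 * the not-yet-filled slot at next_to_use is never handed to the
 * hardware as a valid descriptor.
 */
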
static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
					     u64 qmask)
{
	u32 mask;
	struct ixgbe_hw *hw = &adapter->hw;

	mask = (qmask & 0xFFFFFFFF);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
}

static inline u16 ixgbevf_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
{
	return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
}

static inline u16 ixgbevf_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
{
	return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
}

static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
				 struct ixgbevf_ring *rx_ring,
				 int *work_done, int work_to_do)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
	struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer;
	struct sk_buff *skb;
	unsigned int i;
	u32 len, staterr;
	u16 hdr_info;
	bool cleaned = false;
	int cleaned_count = 0;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	rx_buffer_info = &rx_ring->rx_buffer_info[i];

	while (staterr & IXGBE_RXD_STAT_DD) {
		u32 upper_len = 0;
		if (*work_done >= work_to_do)
			break;
		(*work_done)++;

		rmb(); /* read descriptor and rx_buffer_info after status DD */
		if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
			hdr_info = le16_to_cpu(ixgbevf_get_hdr_info(rx_desc));
			len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
			       IXGBE_RXDADV_HDRBUFLEN_SHIFT;
			if (hdr_info & IXGBE_RXDADV_SPH)
				adapter->rx_hdr_split++;
			if (len > IXGBEVF_RX_HDR_SIZE)
				len = IXGBEVF_RX_HDR_SIZE;
			upper_len = le16_to_cpu(rx_desc->wb.upper.length);
		} else {
			len = le16_to_cpu(rx_desc->wb.upper.length);
		}
		cleaned = true;
		skb = rx_buffer_info->skb;
		prefetch(skb->data - NET_IP_ALIGN);
		rx_buffer_info->skb = NULL;

		if (rx_buffer_info->dma) {
			dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
					 rx_ring->rx_buf_len,
					 DMA_FROM_DEVICE);
			rx_buffer_info->dma = 0;
			skb_put(skb, len);
		}

		if (upper_len) {
			dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
				       PAGE_SIZE / 2, DMA_FROM_DEVICE);
			rx_buffer_info->page_dma = 0;
			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
					   rx_buffer_info->page,
					   rx_buffer_info->page_offset,
					   upper_len);

			if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
			    (page_count(rx_buffer_info->page) != 1))
				rx_buffer_info->page = NULL;
			else
				get_page(rx_buffer_info->page);

			skb->len += upper_len;
			skb->data_len += upper_len;
			skb->truesize += upper_len;
		}

		i++;
		if (i == rx_ring->count)
			i = 0;

		next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i);
		prefetch(next_rxd);
		cleaned_count++;

		next_buffer = &rx_ring->rx_buffer_info[i];

		if (!(staterr & IXGBE_RXD_STAT_EOP)) {
			if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
				rx_buffer_info->skb = next_buffer->skb;
				rx_buffer_info->dma = next_buffer->dma;
				next_buffer->skb = skb;
				next_buffer->dma = 0;
			} else {
				skb->next = next_buffer->skb;
				skb->next->prev = skb;
			}
			adapter->non_eop_descs++;
			goto next_desc;
		}

		/* ERR_MASK will only have valid bits if EOP set */
		if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		ixgbevf_rx_checksum(adapter, staterr, skb);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		/*
		 * Work around issue of some types of VM to VM loop back
		 * packets not getting split correctly
		 */
		if (staterr & IXGBE_RXD_STAT_LB) {
			u32 header_fixup_len = skb_headlen(skb);
			if (header_fixup_len < 14)
				skb_push(skb, header_fixup_len);
		}
		skb->protocol = eth_type_trans(skb, adapter->netdev);

		ixgbevf_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
			ixgbevf_alloc_rx_buffers(adapter, rx_ring,
						 cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		rx_buffer_info = &rx_ring->rx_buffer_info[i];

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = IXGBE_DESC_UNUSED(rx_ring);

	if (cleaned_count)
		ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count);

	rx_ring->total_packets += total_rx_packets;
	rx_ring->total_bytes += total_rx_bytes;
	adapter->netdev->stats.rx_bytes += total_rx_bytes;
	adapter->netdev->stats.rx_packets += total_rx_packets;

	return cleaned;
}

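/*
 * Note (editorial addition, not from the original source): refilling
 * in batches of IXGBEVF_RX_BUFFER_WRITE descriptors amortizes the MMIO
 * tail-register write, which costs far more than the per-descriptor
 * bookkeeping itself.
 */
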
/**
 * ixgbevf_clean_rxonly - msix (aka one shot) rx clean routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function is optimized for cleaning one queue only on a single
 * q_vector!!!
 **/
static int ixgbevf_clean_rxonly(struct napi_struct *napi, int budget)
{
	struct ixgbevf_q_vector *q_vector =
		container_of(napi, struct ixgbevf_q_vector, napi);
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_ring *rx_ring = NULL;
	int work_done = 0;
	long r_idx;

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	rx_ring = &(adapter->rx_ring[r_idx]);

	ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget);

	/* If all Rx work done, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		if (adapter->itr_setting & 1)
			ixgbevf_set_itr_msix(q_vector);
		if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
			ixgbevf_irq_enable_queues(adapter, rx_ring->v_idx);
	}

	return work_done;
}

/**
 * ixgbevf_clean_rxonly_many - msix (aka one shot) rx clean routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean more than one rx queue associated with a
 * q_vector.
 **/
static int ixgbevf_clean_rxonly_many(struct napi_struct *napi, int budget)
{
	struct ixgbevf_q_vector *q_vector =
		container_of(napi, struct ixgbevf_q_vector, napi);
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_ring *rx_ring = NULL;
	int work_done = 0, i;
	long r_idx;
	u64 enable_mask = 0;

	/* attempt to distribute budget to each queue fairly, but don't allow
	 * the budget to go below 1 because we'll exit polling */
	budget /= (q_vector->rxr_count ?: 1);
	budget = max(budget, 1);
	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	for (i = 0; i < q_vector->rxr_count; i++) {
		rx_ring = &(adapter->rx_ring[r_idx]);
		ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
		enable_mask |= rx_ring->v_idx;
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
				      r_idx + 1);
	}

#ifndef HAVE_NETDEV_NAPI_LIST
	if (!netif_running(adapter->netdev))
		work_done = 0;

#endif
	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	rx_ring = &(adapter->rx_ring[r_idx]);

	/* If all Rx work done, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		if (adapter->itr_setting & 1)
			ixgbevf_set_itr_msix(q_vector);
		if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
			ixgbevf_irq_enable_queues(adapter, enable_mask);
	}

	return work_done;
}

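/*
 * Worked example (editorial addition, not from the original source):
 * with the NAPI weight of 64 used by this driver and a vector serving
 * three rings, each ring is polled with budget = max(64 / 3, 1) = 21
 * packets per pass.
 */
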
/**
 * ixgbevf_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
{
	struct ixgbevf_q_vector *q_vector;
	struct ixgbe_hw *hw = &adapter->hw;
	int i, j, q_vectors, v_idx, r_idx;
	u32 mask;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/*
	 * Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		q_vector = adapter->q_vector[v_idx];
		/* XXX for_each_set_bit(...) */
		r_idx = find_first_bit(q_vector->rxr_idx,
				       adapter->num_rx_queues);

		for (i = 0; i < q_vector->rxr_count; i++) {
			j = adapter->rx_ring[r_idx].reg_idx;
			ixgbevf_set_ivar(adapter, 0, j, v_idx);
			r_idx = find_next_bit(q_vector->rxr_idx,
					      adapter->num_rx_queues,
					      r_idx + 1);
		}
		r_idx = find_first_bit(q_vector->txr_idx,
				       adapter->num_tx_queues);

		for (i = 0; i < q_vector->txr_count; i++) {
			j = adapter->tx_ring[r_idx].reg_idx;
			ixgbevf_set_ivar(adapter, 1, j, v_idx);
			r_idx = find_next_bit(q_vector->txr_idx,
					      adapter->num_tx_queues,
					      r_idx + 1);
		}

		/* if this is a tx only vector halve the interrupt rate */
		if (q_vector->txr_count && !q_vector->rxr_count)
			/* tx only vector */
			q_vector->eitr = (adapter->eitr_param >> 1);
		else if (q_vector->rxr_count)
			/* rx or rx/tx vector */
			q_vector->eitr = adapter->eitr_param;

		ixgbevf_write_eitr(adapter, v_idx, q_vector->eitr);
	}

	ixgbevf_set_ivar(adapter, -1, 1, v_idx);

	/* set up to autoclear timer, and the vectors */
	mask = IXGBE_EIMS_ENABLE_MASK;
	mask &= ~IXGBE_EIMS_OTHER;
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
}

enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};

/**
 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
 * @adapter: pointer to adapter
 * @eitr: eitr setting (ints per sec) to give last timeslice
 * @itr_setting: current throttle rate in ints/second
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
static u8 ixgbevf_update_itr(struct ixgbevf_adapter *adapter,
			     u32 eitr, u8 itr_setting,
			     int packets, int bytes)
{
	unsigned int retval = itr_setting;
	u32 timepassed_us;
	u64 bytes_perint;

	if (packets == 0)
		goto update_itr_done;


	/* simple throttlerate management
	 *    0-20MB/s lowest (100000 ints/s)
	 *   20-100MB/s low   (20000 ints/s)
	 *  100-1249MB/s bulk (8000 ints/s)
	 */
	/* what was last interrupt timeslice? */
	timepassed_us = 1000000/eitr;
	bytes_perint = bytes / timepassed_us; /* bytes/usec */

	switch (itr_setting) {
	case lowest_latency:
		if (bytes_perint > adapter->eitr_low)
			retval = low_latency;
		break;
	case low_latency:
		if (bytes_perint > adapter->eitr_high)
			retval = bulk_latency;
		else if (bytes_perint <= adapter->eitr_low)
			retval = lowest_latency;
		break;
	case bulk_latency:
		if (bytes_perint <= adapter->eitr_high)
			retval = low_latency;
		break;
	}

update_itr_done:
	return retval;
}

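/*
 * Worked example (editorial addition, not from the original source):
 * at eitr = 8000 ints/s the timeslice is 1000000 / 8000 = 125 us, so
 * 150000 bytes received in that interval gives bytes_perint = 1200
 * bytes/us, comfortably above typical eitr_high thresholds and
 * therefore bulk_latency.
 */
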
/**
 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
 * @adapter: pointer to adapter struct
 * @v_idx: vector index into q_vector array
 * @itr_reg: new value to be written in *register* format, not ints/s
 *
 * This function is made to be called by ethtool and by the driver
 * when it needs to update VTEITR registers at runtime.  Hardware
 * specific quirks/differences are taken care of here.
 **/
static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx,
			       u32 itr_reg)
{
	struct ixgbe_hw *hw = &adapter->hw;

	itr_reg = EITR_INTS_PER_SEC_TO_REG(itr_reg);

	/*
	 * set the WDIS bit to not clear the timer bits and cause an
	 * immediate assertion of the interrupt
	 */
	itr_reg |= IXGBE_EITR_CNT_WDIS;

	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
}

static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	u32 new_itr;
	u8 current_itr, ret_itr;
	int i, r_idx, v_idx = q_vector->v_idx;
	struct ixgbevf_ring *rx_ring, *tx_ring;

	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
	for (i = 0; i < q_vector->txr_count; i++) {
		tx_ring = &(adapter->tx_ring[r_idx]);
		ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr,
					     q_vector->tx_itr,
					     tx_ring->total_packets,
					     tx_ring->total_bytes);
		/* if the result for this queue would decrease interrupt
		 * rate for this vector then use that result */
		q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
				    q_vector->tx_itr - 1 : ret_itr);
		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
				      r_idx + 1);
	}

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	for (i = 0; i < q_vector->rxr_count; i++) {
		rx_ring = &(adapter->rx_ring[r_idx]);
		ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr,
					     q_vector->rx_itr,
					     rx_ring->total_packets,
					     rx_ring->total_bytes);
		/* if the result for this queue would decrease interrupt
		 * rate for this vector then use that result */
		q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
				    q_vector->rx_itr - 1 : ret_itr);
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
				      r_idx + 1);
	}

	current_itr = max(q_vector->rx_itr, q_vector->tx_itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = 100000;
		break;
	case low_latency:
		new_itr = 20000; /* aka hwitr = ~200 */
		break;
	case bulk_latency:
	default:
		new_itr = 8000;
		break;
	}

	if (new_itr != q_vector->eitr) {
		u32 itr_reg;

		/* save the algorithm value here, not the smoothed one */
		q_vector->eitr = new_itr;
		/* do an exponential smoothing */
		new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
		itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
		ixgbevf_write_eitr(adapter, v_idx, itr_reg);
	}
}

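/*
 * Note (editorial addition, not from the original source): because
 * q_vector->eitr is assigned before the 90/10 blend, the smoothing
 * expression above reduces to new_itr itself and appears to be a
 * no-op as written.
 */
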
static irqreturn_t ixgbevf_msix_mbx(int irq, void *data)
{
	struct net_device *netdev = data;
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eicr;
	u32 msg;

	eicr = IXGBE_READ_REG(hw, IXGBE_VTEICS);
	IXGBE_WRITE_REG(hw, IXGBE_VTEICR, eicr);

	if (!hw->mbx.ops.check_for_ack(hw)) {
		/*
		 * checking for the ack clears the PFACK bit.  Place
		 * it back in the v2p_mailbox cache so that anyone
		 * polling for an ack will not miss it.  Also
		 * avoid the read below because the code to read
		 * the mailbox will also clear the ack bit.  This was
		 * causing lost acks.  Just cache the bit and exit
		 * the IRQ handler.
		 */
		hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK;
		goto out;
	}

	/* Not an ack interrupt, go ahead and read the message */
	hw->mbx.ops.read(hw, &msg, 1);

	if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG)
		mod_timer(&adapter->watchdog_timer,
			  round_jiffies(jiffies + 1));

out:
	return IRQ_HANDLED;
}

static irqreturn_t ixgbevf_msix_clean_tx(int irq, void *data)
{
	struct ixgbevf_q_vector *q_vector = data;
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_ring *tx_ring;
	int i, r_idx;

	if (!q_vector->txr_count)
		return IRQ_HANDLED;

	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
	for (i = 0; i < q_vector->txr_count; i++) {
		tx_ring = &(adapter->tx_ring[r_idx]);
		tx_ring->total_bytes = 0;
		tx_ring->total_packets = 0;
		ixgbevf_clean_tx_irq(adapter, tx_ring);
		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
				      r_idx + 1);
	}

	if (adapter->itr_setting & 1)
		ixgbevf_set_itr_msix(q_vector);

	return IRQ_HANDLED;
}

/**
 * ixgbevf_msix_clean_rx - single unshared vector rx clean (all queues)
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 **/
static irqreturn_t ixgbevf_msix_clean_rx(int irq, void *data)
{
	struct ixgbevf_q_vector *q_vector = data;
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbevf_ring *rx_ring;
	int r_idx;
	int i;

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	for (i = 0; i < q_vector->rxr_count; i++) {
		rx_ring = &(adapter->rx_ring[r_idx]);
		rx_ring->total_bytes = 0;
		rx_ring->total_packets = 0;
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
				      r_idx + 1);
	}

	if (!q_vector->rxr_count)
		return IRQ_HANDLED;

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	rx_ring = &(adapter->rx_ring[r_idx]);
	/* disable interrupts on this vector only */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, rx_ring->v_idx);
	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

static irqreturn_t ixgbevf_msix_clean_many(int irq, void *data)
{
	ixgbevf_msix_clean_rx(irq, data);
	ixgbevf_msix_clean_tx(irq, data);

	return IRQ_HANDLED;
}

static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
				     int r_idx)
{
	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

	set_bit(r_idx, q_vector->rxr_idx);
	q_vector->rxr_count++;
	a->rx_ring[r_idx].v_idx = 1 << v_idx;
}

static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
				     int t_idx)
{
	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

	set_bit(t_idx, q_vector->txr_idx);
	q_vector->txr_count++;
	a->tx_ring[t_idx].v_idx = 1 << v_idx;
}

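/*
 * Note (editorial addition, not from the original source):
 * ring->v_idx is used as a bit mask for the VTEICS/VTEIMS/VTEIMC
 * registers, hence the "1 << v_idx" encoding rather than storing the
 * raw vector index.
 */
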
/**
 * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible.  You would add new
 * mapping configurations in here.
 **/
static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
{
	int q_vectors;
	int v_start = 0;
	int rxr_idx = 0, txr_idx = 0;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int i, j;
	int rqpv, tqpv;
	int err = 0;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/*
	 * The ideal configuration...
	 * We have enough vectors to map one per queue.
	 */
	if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
		for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
			map_vector_to_rxq(adapter, v_start, rxr_idx);

		for (; txr_idx < txr_remaining; v_start++, txr_idx++)
			map_vector_to_txq(adapter, v_start, txr_idx);
		goto out;
	}

	/*
	 * If we don't have enough vectors for a 1-to-1
	 * mapping, we'll have to group them so there are
	 * multiple queues per vector.
	 */
	/* Re-adjusting *qpv takes care of the remainder. */
	for (i = v_start; i < q_vectors; i++) {
		rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
		for (j = 0; j < rqpv; j++) {
			map_vector_to_rxq(adapter, i, rxr_idx);
			rxr_idx++;
			rxr_remaining--;
		}
	}
	for (i = v_start; i < q_vectors; i++) {
		tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
		for (j = 0; j < tqpv; j++) {
			map_vector_to_txq(adapter, i, txr_idx);
			txr_idx++;
			txr_remaining--;
		}
	}

out:
	return err;
}

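/*
 * Worked example (editorial addition, not from the original source):
 * with 2 q_vectors and 3 Rx rings, vector 0 takes
 * DIV_ROUND_UP(3, 2) = 2 rings and vector 1 takes
 * DIV_ROUND_UP(1, 1) = 1 ring.
 */
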
/**
 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	irqreturn_t (*handler)(int, void *);
	int i, vector, q_vectors, err;
	int ri = 0, ti = 0;

	/* Decrement for Other and TCP Timer vectors */
	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

#define SET_HANDLER(_v) (((_v)->rxr_count && (_v)->txr_count)          \
					  ? &ixgbevf_msix_clean_many : \
			  (_v)->rxr_count ? &ixgbevf_msix_clean_rx   : \
			  (_v)->txr_count ? &ixgbevf_msix_clean_tx   : \
			  NULL)
	for (vector = 0; vector < q_vectors; vector++) {
		handler = SET_HANDLER(adapter->q_vector[vector]);

		if (handler == &ixgbevf_msix_clean_rx) {
			sprintf(adapter->name[vector], "%s-%s-%d",
				netdev->name, "rx", ri++);
		} else if (handler == &ixgbevf_msix_clean_tx) {
			sprintf(adapter->name[vector], "%s-%s-%d",
				netdev->name, "tx", ti++);
		} else if (handler == &ixgbevf_msix_clean_many) {
			sprintf(adapter->name[vector], "%s-%s-%d",
				netdev->name, "TxRx", vector);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(adapter->msix_entries[vector].vector,
				  handler, 0, adapter->name[vector],
				  adapter->q_vector[vector]);
		if (err) {
			hw_dbg(&adapter->hw,
			       "request_irq failed for MSIX interrupt "
			       "Error: %d\n", err);
			goto free_queue_irqs;
		}
	}

	sprintf(adapter->name[vector], "%s:mbx", netdev->name);
	err = request_irq(adapter->msix_entries[vector].vector,
			  &ixgbevf_msix_mbx, 0, adapter->name[vector], netdev);
	if (err) {
		hw_dbg(&adapter->hw,
		       "request_irq for msix_mbx failed: %d\n", err);
		goto free_queue_irqs;
	}

	return 0;

free_queue_irqs:
	for (i = vector - 1; i >= 0; i--)
		free_irq(adapter->msix_entries[i].vector,
			 adapter->q_vector[i]);
	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
	return err;
}

static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
{
	int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (i = 0; i < q_vectors; i++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
		bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
		bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
		q_vector->rxr_count = 0;
		q_vector->txr_count = 0;
		q_vector->eitr = adapter->eitr_param;
	}
}

/**
 * ixgbevf_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
{
	int err = 0;

	err = ixgbevf_request_msix_irqs(adapter);

	if (err)
		hw_dbg(&adapter->hw,
		       "request_irq failed, Error %d\n", err);

	return err;
}

static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i, q_vectors;

	q_vectors = adapter->num_msix_vectors;

	i = q_vectors - 1;

	free_irq(adapter->msix_entries[i].vector, netdev);
	i--;

	for (; i >= 0; i--) {
		free_irq(adapter->msix_entries[i].vector,
			 adapter->q_vector[i]);
	}

	ixgbevf_reset_q_vectors(adapter);
}

/**
 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
{
	int i;
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);

	IXGBE_WRITE_FLUSH(hw);

	for (i = 0; i < adapter->num_msix_vectors; i++)
		synchronize_irq(adapter->msix_entries[i].vector);
}

/**
 * ixgbevf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter,
				      bool queues, bool flush)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 mask;
	u64 qmask;

	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
	qmask = ~0;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);

	if (queues)
		ixgbevf_irq_enable_queues(adapter, qmask);

	if (flush)
		IXGBE_WRITE_FLUSH(hw);
}

/**
 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
{
	u64 tdba;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 i, j, tdlen, txctrl;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct ixgbevf_ring *ring = &adapter->tx_ring[i];
		j = ring->reg_idx;
		tdba = ring->dma;
		tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
				(tdba & DMA_BIT_MASK(32)));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j), tdlen);
		IXGBE_WRITE_REG(hw, IXGBE_VFTDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFTDT(j), 0);
		adapter->tx_ring[i].head = IXGBE_VFTDH(j);
		adapter->tx_ring[i].tail = IXGBE_VFTDT(j);
		/* Disable Tx Head Writeback RO bit, since this hoses
		 * bookkeeping if things aren't delivered in order.
		 */
		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
		txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
	}
}

#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT	2

static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
{
	struct ixgbevf_ring *rx_ring;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 srrctl;

	rx_ring = &adapter->rx_ring[index];

	srrctl = IXGBE_SRRCTL_DROP_EN;

	if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
		u16 bufsz = IXGBEVF_RXBUFFER_2048;
		/* grow the amount we can receive on large page machines */
		if (bufsz < (PAGE_SIZE / 2))
			bufsz = (PAGE_SIZE / 2);
		/* cap the bufsz at our largest descriptor size */
		bufsz = min((u16)IXGBEVF_MAX_RXBUFFER, bufsz);

		srrctl |= bufsz >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
		srrctl |= ((IXGBEVF_RX_HDR_SIZE <<
			   IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
			   IXGBE_SRRCTL_BSIZEHDR_MASK);
	} else {
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

		if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
			srrctl |= IXGBEVF_RXBUFFER_2048 >>
				IXGBE_SRRCTL_BSIZEPKT_SHIFT;
		else
			srrctl |= rx_ring->rx_buf_len >>
				IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	}
	IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
}

/**
 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
{
	u64 rdba;
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	int i, j;
	u32 rdlen;
	int rx_buf_len;

	/* Decide whether to use packet split mode or not */
	if (netdev->mtu > ETH_DATA_LEN) {
		if (adapter->flags & IXGBE_FLAG_RX_PS_CAPABLE)
			adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
		else
			adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
	} else {
		if (adapter->flags & IXGBE_FLAG_RX_1BUF_CAPABLE)
			adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
		else
			adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
	}

	/* Set the RX buffer length according to the mode */
	if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
		/* PSRTYPE must be initialized in 82599 */
		u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
			IXGBE_PSRTYPE_UDPHDR |
			IXGBE_PSRTYPE_IPV4HDR |
			IXGBE_PSRTYPE_IPV6HDR |
			IXGBE_PSRTYPE_L2HDR;
		IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
		rx_buf_len = IXGBEVF_RX_HDR_SIZE;
	} else {
		IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
		if (netdev->mtu <= ETH_DATA_LEN)
			rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
		else
			rx_buf_len = ALIGN(max_frame, 1024);
	}

	rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		rdba = adapter->rx_ring[i].dma;
		j = adapter->rx_ring[i].reg_idx;
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
				(rdba & DMA_BIT_MASK(32)));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), rdlen);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0);
		adapter->rx_ring[i].head = IXGBE_VFRDH(j);
		adapter->rx_ring[i].tail = IXGBE_VFRDT(j);
		adapter->rx_ring[i].rx_buf_len = rx_buf_len;

		ixgbevf_configure_srrctl(adapter, j);
	}
}

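/*
 * Worked example (editorial addition, not from the original source):
 * in single-buffer mode with a 9000-byte MTU, max_frame =
 * 9000 + 14 + 4 = 9018 and rx_buf_len is rounded up to
 * ALIGN(9018, 1024) = 9216 bytes.
 */
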
static void ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	/* add VID to filter table */
	if (hw->mac.ops.set_vfta)
		hw->mac.ops.set_vfta(hw, vid, 0, true);
	set_bit(vid, adapter->active_vlans);
}

static void ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	/* remove VID from filter table */
	if (hw->mac.ops.set_vfta)
		hw->mac.ops.set_vfta(hw, vid, 0, false);
	clear_bit(vid, adapter->active_vlans);
}

static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
{
	u16 vid;

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		ixgbevf_vlan_rx_add_vid(adapter->netdev, vid);
}

static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int count = 0;

	if ((netdev_uc_count(netdev)) > 10) {
		printk(KERN_ERR "Too many unicast filters - No Space\n");
		return -ENOSPC;
	}

	if (!netdev_uc_empty(netdev)) {
		struct netdev_hw_addr *ha;
		netdev_for_each_uc_addr(ha, netdev) {
			hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
			udelay(200);
		}
	} else {
		/*
		 * If the list is empty then send message to PF driver to
		 * clear all macvlans on this VF.
		 */
		hw->mac.ops.set_uc_addr(hw, 0, NULL);
	}

	return count;
}

/**
 * ixgbevf_set_rx_mode - Multicast set
 * @netdev: network interface device structure
 *
 * The set_rx_method entry point is called whenever the multicast address
 * list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper multicast mode.
 **/
static void ixgbevf_set_rx_mode(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	/* reprogram multicast list */
	if (hw->mac.ops.update_mc_addr_list)
		hw->mac.ops.update_mc_addr_list(hw, netdev);

	ixgbevf_write_uc_addr_list(netdev);
}

static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
{
	int q_idx;
	struct ixgbevf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		struct napi_struct *napi;
		q_vector = adapter->q_vector[q_idx];
		if (!q_vector->rxr_count)
			continue;
		napi = &q_vector->napi;
		if (q_vector->rxr_count > 1)
			napi->poll = &ixgbevf_clean_rxonly_many;

		napi_enable(napi);
	}
}

static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
{
	int q_idx;
	struct ixgbevf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = adapter->q_vector[q_idx];
		if (!q_vector->rxr_count)
			continue;
		napi_disable(&q_vector->napi);
	}
}

static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	ixgbevf_set_rx_mode(netdev);

	ixgbevf_restore_vlan(adapter);

	ixgbevf_configure_tx(adapter);
	ixgbevf_configure_rx(adapter);
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct ixgbevf_ring *ring = &adapter->rx_ring[i];
		ixgbevf_alloc_rx_buffers(adapter, ring, ring->count);
		ring->next_to_use = ring->count - 1;
		writel(ring->next_to_use, adapter->hw.hw_addr + ring->tail);
	}
}

#define IXGBE_MAX_RX_DESC_POLL 10
static inline void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
						int rxr)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int j = adapter->rx_ring[rxr].reg_idx;
	int k;

	for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
		if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
			break;
		else
			msleep(1);
	}
	if (k >= IXGBE_MAX_RX_DESC_POLL) {
		hw_dbg(hw, "RXDCTL.ENABLE on Rx queue %d "
		       "not set within the polling period\n", rxr);
	}

	ixgbevf_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
				(adapter->rx_ring[rxr].count - 1));
}

static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
{
	/* Only save pre-reset stats if there are some */
	if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
		adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
			adapter->stats.base_vfgprc;
		adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
			adapter->stats.base_vfgptc;
		adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
			adapter->stats.base_vfgorc;
		adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
			adapter->stats.base_vfgotc;
		adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
			adapter->stats.base_vfmprc;
	}
}

static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
	adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
	adapter->stats.last_vfgorc |=
		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
	adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
	adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
	adapter->stats.last_vfgotc |=
		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
	adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);

	adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
	adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
	adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
	adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
	adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
}

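/*
 * Note (editorial addition, not from the original source): the
 * good-octets counters (VFGORC/VFGOTC) are wider than 32 bits, so the
 * LSB and MSB halves are read from separate registers and spliced into
 * one 64-bit value above.
 */
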
static int ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	int i, j = 0;
	int num_rx_rings = adapter->num_rx_queues;
	u32 txdctl, rxdctl;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		/* enable WTHRESH=8 descriptors, to encourage burst writeback */
		txdctl |= (8 << 16);
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
	}

	for (i = 0; i < num_rx_rings; i++) {
		j = adapter->rx_ring[i].reg_idx;
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
		if (hw->mac.type == ixgbe_mac_X540_vf) {
			rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
			rxdctl |= ((netdev->mtu + ETH_HLEN + ETH_FCS_LEN) |
				   IXGBE_RXDCTL_RLPML_EN);
		}
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
		ixgbevf_rx_desc_queue_enable(adapter, i);
	}

	ixgbevf_configure_msix(adapter);

	if (hw->mac.ops.set_rar) {
		if (is_valid_ether_addr(hw->mac.addr))
			hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
		else
			hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
	}

	clear_bit(__IXGBEVF_DOWN, &adapter->state);
	ixgbevf_napi_enable_all(adapter);

	/* enable transmits */
	netif_tx_start_all_queues(netdev);

	ixgbevf_save_reset_stats(adapter);
	ixgbevf_init_last_counter_stats(adapter);

	/* bring the link up in the watchdog, this could race with our first
	 * link up interrupt but shouldn't be a problem */
	adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
	adapter->link_check_timeout = jiffies;
	mod_timer(&adapter->watchdog_timer, jiffies);

	return 0;
}

int ixgbevf_up(struct ixgbevf_adapter *adapter)
{
	int err;
	struct ixgbe_hw *hw = &adapter->hw;

	ixgbevf_configure(adapter);

	err = ixgbevf_up_complete(adapter);

	/* clear any pending interrupts, may auto mask */
	IXGBE_READ_REG(hw, IXGBE_VTEICR);

	ixgbevf_irq_enable(adapter, true, true);

	return err;
}

/**
 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
 * @adapter: board private structure
 * @rx_ring: ring to free buffers from
 **/
static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
				  struct ixgbevf_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	if (!rx_ring->rx_buffer_info)
		return;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct ixgbevf_rx_buffer *rx_buffer_info;

		rx_buffer_info = &rx_ring->rx_buffer_info[i];
		if (rx_buffer_info->dma) {
			dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
					 rx_ring->rx_buf_len,
					 DMA_FROM_DEVICE);
			rx_buffer_info->dma = 0;
		}
		if (rx_buffer_info->skb) {
			struct sk_buff *skb = rx_buffer_info->skb;
			rx_buffer_info->skb = NULL;
			do {
				struct sk_buff *this = skb;
				skb = skb->prev;
				dev_kfree_skb(this);
			} while (skb);
		}
		if (!rx_buffer_info->page)
			continue;
		dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
			       PAGE_SIZE / 2, DMA_FROM_DEVICE);
		rx_buffer_info->page_dma = 0;
		put_page(rx_buffer_info->page);
		rx_buffer_info->page = NULL;
		rx_buffer_info->page_offset = 0;
	}

	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	if (rx_ring->head)
		writel(0, adapter->hw.hw_addr + rx_ring->head);
	if (rx_ring->tail)
		writel(0, adapter->hw.hw_addr + rx_ring->tail);
}

/**
 * ixgbevf_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 * @tx_ring: ring to be cleaned
 **/
static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter,
				  struct ixgbevf_ring *tx_ring)
{
	struct ixgbevf_tx_buffer *tx_buffer_info;
	unsigned long size;
	unsigned int i;

	if (!tx_ring->tx_buffer_info)
		return;

	/* Free all the Tx ring sk_buffs */

	for (i = 0; i < tx_ring->count; i++) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		ixgbevf_unmap_and_free_tx_resource(adapter, tx_buffer_info);
	}

	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_buffer_info, 0, size);

	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (tx_ring->head)
		writel(0, adapter->hw.hw_addr + tx_ring->head);
	if (tx_ring->tail)
		writel(0, adapter->hw.hw_addr + tx_ring->tail);
}

/**
 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbevf_clean_rx_ring(adapter, &adapter->rx_ring[i]);
}

/**
 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbevf_clean_tx_ring(adapter, &adapter->tx_ring[i]);
}

void ixgbevf_down(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 txdctl;
	int i, j;

	/* signal that we are down to the interrupt handler */
	set_bit(__IXGBEVF_DOWN, &adapter->state);
	/* disable receives */

	netif_tx_disable(netdev);

	msleep(10);

	netif_tx_stop_all_queues(netdev);

	ixgbevf_irq_disable(adapter);

	ixgbevf_napi_disable_all(adapter);

	del_timer_sync(&adapter->watchdog_timer);
	/* can't call flush scheduled work here because it can deadlock
	 * if linkwatch_event tries to acquire the rtnl_lock which we are
	 * holding */
	while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK)
		msleep(1);

	/* disable transmits in the hardware now that interrupts are off */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j),
				(txdctl & ~IXGBE_TXDCTL_ENABLE));
	}

	netif_carrier_off(netdev);

	if (!pci_channel_offline(adapter->pdev))
		ixgbevf_reset(adapter);

	ixgbevf_clean_all_tx_rings(adapter);
	ixgbevf_clean_all_rx_rings(adapter);
}

void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	WARN_ON(in_interrupt());

	while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
		msleep(1);

	/*
	 * Check if PF is up before re-init.  If not then skip until
	 * later when the PF is up and ready to service requests from
	 * the VF via mailbox.  If the VF is up and running then the
	 * watchdog task will continue to schedule reset tasks until
	 * the PF is up and running.
	 */
	if (!hw->mac.ops.reset_hw(hw)) {
		ixgbevf_down(adapter);
		ixgbevf_up(adapter);
	}

	clear_bit(__IXGBEVF_RESETTING, &adapter->state);
}

void ixgbevf_reset(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;

	if (hw->mac.ops.reset_hw(hw))
		hw_dbg(hw, "PF still resetting\n");
	else
		hw->mac.ops.init_hw(hw);

	if (is_valid_ether_addr(adapter->hw.mac.addr)) {
		memcpy(netdev->dev_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
		memcpy(netdev->perm_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
	}
}

static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
					 int vectors)
{
	int err, vector_threshold;

	/* We'll want at least 3 (vector_threshold):
	 * 1) TxQ[0] Cleanup
	 * 2) RxQ[0] Cleanup
	 * 3) Other (Link Status Change, etc.)
	 */
	vector_threshold = MIN_MSIX_COUNT;

	/* The more we get, the more we will assign to Tx/Rx Cleanup
	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting irq's.
	 */
	while (vectors >= vector_threshold) {
		err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
				      vectors);
		if (!err) /* Success in acquiring all requested vectors. */
			break;
		else if (err < 0)
			vectors = 0; /* Nasty failure, quit now */
		else /* err == number of vectors we should try again with */
			vectors = err;
	}

	if (vectors < vector_threshold) {
		/* Can't allocate enough MSI-X interrupts?  Oh well.
		 * This just means we'll go with either a single MSI
		 * vector or fall back to legacy interrupts.
		 */
		hw_dbg(&adapter->hw,
		       "Unable to allocate MSI-X interrupts\n");
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else {
		/*
		 * Adjust for only the vectors we'll use, which is minimum
		 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
		 * vectors we were allocated.
		 */
		adapter->num_msix_vectors = vectors;
	}
}

/**
 * ixgbevf_set_num_queues: Allocate queues for device, feature dependent
 * @adapter: board private structure to initialize
 *
 * This is the top level queue allocation routine.  The order here is very
 * important, starting with the "most" number of features turned on at once,
 * and ending with the smallest set of features.  This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fallthrough conditions.
 *
 **/
static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
{
	/* Start with base case */
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_rx_pools = adapter->num_rx_queues;
	adapter->num_rx_queues_per_pool = 1;
}

/**
 * ixgbevf_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.  The polling_netdev array is
 * intended for Multiqueue, but should work fine with a single queue.
 **/
static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
{
	int i;

	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
				   sizeof(struct ixgbevf_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		goto err_tx_ring_allocation;

	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
				   sizeof(struct ixgbevf_ring), GFP_KERNEL);
	if (!adapter->rx_ring)
		goto err_rx_ring_allocation;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		adapter->tx_ring[i].count = adapter->tx_ring_count;
		adapter->tx_ring[i].queue_index = i;
		adapter->tx_ring[i].reg_idx = i;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->rx_ring[i].count = adapter->rx_ring_count;
		adapter->rx_ring[i].queue_index = i;
		adapter->rx_ring[i].reg_idx = i;
	}

	return 0;

err_rx_ring_allocation:
	kfree(adapter->tx_ring);
err_tx_ring_allocation:
	return -ENOMEM;
}

/**
 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
{
	int err = 0;
	int vector, v_budget;

	/*
	 * It's easy to be greedy for MSI-X vectors, but it really
	 * doesn't do us much good if we have a lot more vectors
	 * than CPU's.  So let's be conservative and only ask for
	 * (roughly) twice the number of vectors as there are CPU's.
	 */
	v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
		       (int)(num_online_cpus() * 2)) + NON_Q_VECTORS;

	/* A failure in MSI-X entry allocation isn't fatal, but it does
	 * mean we disable MSI-X capabilities of the adapter. */
	adapter->msix_entries = kcalloc(v_budget,
					sizeof(struct msix_entry), GFP_KERNEL);
	if (!adapter->msix_entries) {
		err = -ENOMEM;
		goto out;
	}

	for (vector = 0; vector < v_budget; vector++)
		adapter->msix_entries[vector].entry = vector;

	ixgbevf_acquire_msix_vectors(adapter, v_budget);

out:
	return err;
}

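/*
 * Worked example (editorial addition, not from the original source):
 * with one Rx and one Tx queue on a 4-CPU system, v_budget =
 * min(2, 8) + NON_Q_VECTORS, i.e. the two queue vectors plus the
 * mailbox/other vector.
 */
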
/**
 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
{
	int q_idx, num_q_vectors;
	struct ixgbevf_q_vector *q_vector;
	int napi_vectors;
	int (*poll)(struct napi_struct *, int);

	num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	napi_vectors = adapter->num_rx_queues;
	poll = &ixgbevf_clean_rxonly;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
		if (!q_vector)
			goto err_out;
		q_vector->adapter = adapter;
		q_vector->v_idx = q_idx;
		q_vector->eitr = adapter->eitr_param;
		if (q_idx < napi_vectors)
			netif_napi_add(adapter->netdev, &q_vector->napi,
				       (*poll), 64);
		adapter->q_vector[q_idx] = q_vector;
	}

	return 0;

err_out:
	while (q_idx) {
		q_idx--;
		q_vector = adapter->q_vector[q_idx];
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
		adapter->q_vector[q_idx] = NULL;
	}
	return -ENOMEM;
}

/**
 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
{
	int q_idx, num_q_vectors;
	int napi_vectors;

	num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	napi_vectors = adapter->num_rx_queues;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];

		adapter->q_vector[q_idx] = NULL;
		if (q_idx < napi_vectors)
			netif_napi_del(&q_vector->napi);
		kfree(q_vector);
	}
}
/**
 * ixgbevf_reset_interrupt_capability - Reset MSIX setup
 * @adapter: board private structure
 **/
static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
{
	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
}
/**
 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
 * @adapter: board private structure to initialize
 **/
static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
{
	int err;

	/* Number of supported queues */
	ixgbevf_set_num_queues(adapter);

	err = ixgbevf_set_interrupt_capability(adapter);
	if (err) {
		hw_dbg(&adapter->hw,
		       "Unable to setup interrupt capabilities\n");
		goto err_set_interrupt;
	}

	err = ixgbevf_alloc_q_vectors(adapter);
	if (err) {
		hw_dbg(&adapter->hw, "Unable to allocate memory for queue "
		       "vectors\n");
		goto err_alloc_q_vectors;
	}

	err = ixgbevf_alloc_queues(adapter);
	if (err) {
		printk(KERN_ERR "Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, "
	       "Tx Queue count = %u\n",
	       (adapter->num_rx_queues > 1) ? "Enabled" :
	       "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);

	set_bit(__IXGBEVF_DOWN, &adapter->state);

	return 0;
err_alloc_queues:
	ixgbevf_free_q_vectors(adapter);
err_alloc_q_vectors:
	ixgbevf_reset_interrupt_capability(adapter);
err_set_interrupt:
	return err;
}
/**
 * ixgbevf_sw_init - Initialize general software structures
 * (struct ixgbevf_adapter)
 * @adapter: board private structure to initialize
 *
 * ixgbevf_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	int err;

	/* PCI config space info */

	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	hw->mbx.ops.init_params(hw);
	hw->mac.max_tx_queues = MAX_TX_QUEUES;
	hw->mac.max_rx_queues = MAX_RX_QUEUES;
	err = hw->mac.ops.reset_hw(hw);
	if (err) {
		dev_info(&pdev->dev,
			 "PF still in reset state, assigning new address\n");
		dev_hw_addr_random(adapter->netdev, hw->mac.addr);
	} else {
		err = hw->mac.ops.init_hw(hw);
		if (err) {
			printk(KERN_ERR "init_shared_code failed: %d\n", err);
			goto out;
		}
	}

	/* Enable dynamic interrupt throttling rates */
	adapter->eitr_param = 20000;
	adapter->itr_setting = 1;

	/* set defaults for eitr in MegaBytes */
	adapter->eitr_low = 10;
	adapter->eitr_high = 20;

	/* set default ring sizes */
	adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
	adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;

	/* enable rx csum by default */
	adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;

	set_bit(__IXGBEVF_DOWN, &adapter->state);

out:
	return err;
}
#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter)	\
	{							\
		u32 current_counter = IXGBE_READ_REG(hw, reg);	\
		if (current_counter < last_counter)		\
			counter += 0x100000000LL;		\
		last_counter = current_counter;			\
		counter &= 0xFFFFFFFF00000000LL;		\
		counter |= current_counter;			\
	}

#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
	{								 \
		u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb);	 \
		u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb);	 \
		u64 current_counter = (current_counter_msb << 32) |	 \
			current_counter_lsb;				 \
		if (current_counter < last_counter)			 \
			counter += 0x1000000000LL;			 \
		last_counter = current_counter;				 \
		counter &= 0xFFFFFFF000000000LL;			 \
		counter |= current_counter;				 \
	}
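
/*
 * Worked example of the wrap handling above (illustrative values): if
 * last_counter was 0xFFFFFFF0 and the register now reads 0x00000010,
 * current_counter < last_counter flags the rollover, 2^32 (0x100000000)
 * is added to the carry bits of the 64-bit software counter, and the
 * low 32 bits are then replaced with the fresh hardware value.  The VF
 * octet counters are only 36 bits wide, hence the second macro's 2^36
 * wrap constant.
 */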
/**
 * ixgbevf_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 **/
void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
				adapter->stats.vfgprc);
	UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
				adapter->stats.vfgptc);
	UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
				adapter->stats.last_vfgorc,
				adapter->stats.vfgorc);
	UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
				adapter->stats.last_vfgotc,
				adapter->stats.vfgotc);
	UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
				adapter->stats.vfmprc);

	/* Fill out the OS statistics structure */
	adapter->netdev->stats.multicast = adapter->stats.vfmprc -
		adapter->stats.base_vfmprc;
}
/**
 * ixgbevf_watchdog - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void ixgbevf_watchdog(unsigned long data)
{
	struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
	struct ixgbe_hw *hw = &adapter->hw;
	u64 eics = 0;
	int i;

	/*
	 * Do the watchdog outside of interrupt context due to the lovely
	 * delays that some of the newer hardware requires
	 */

	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		goto watchdog_short_circuit;

	/* get one bit for every active tx/rx interrupt vector */
	for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
		struct ixgbevf_q_vector *qv = adapter->q_vector[i];
		if (qv->rxr_count || qv->txr_count)
			eics |= (1 << i);
	}

	IXGBE_WRITE_REG(hw, IXGBE_VTEICS, (u32)eics);

watchdog_short_circuit:
	schedule_work(&adapter->watchdog_task);
}
/**
 * ixgbevf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void ixgbevf_tx_timeout(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	schedule_work(&adapter->reset_task);
}

static void ixgbevf_reset_task(struct work_struct *work)
{
	struct ixgbevf_adapter *adapter;
	adapter = container_of(work, struct ixgbevf_adapter, reset_task);

	/* If we're already down or resetting, just bail */
	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
	    test_bit(__IXGBEVF_RESETTING, &adapter->state))
		return;

	adapter->tx_timeout_count++;

	ixgbevf_reinit_locked(adapter);
}
/**
 * ixgbevf_watchdog_task - worker thread to bring link up
 * @work: pointer to work_struct containing our data
 **/
static void ixgbevf_watchdog_task(struct work_struct *work)
{
	struct ixgbevf_adapter *adapter = container_of(work,
						       struct ixgbevf_adapter,
						       watchdog_task);
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 link_speed = adapter->link_speed;
	bool link_up = adapter->link_up;

	adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;

	/*
	 * Always check the link on the watchdog because we have
	 * no LSC interrupt
	 */
	if (hw->mac.ops.check_link) {
		if ((hw->mac.ops.check_link(hw, &link_speed,
					    &link_up, false)) != 0) {
			adapter->link_up = link_up;
			adapter->link_speed = link_speed;
			netif_carrier_off(netdev);
			netif_tx_stop_all_queues(netdev);
			schedule_work(&adapter->reset_task);
			goto pf_has_reset;
		}
	} else {
		/* always assume link is up, if no check link
		 * function */
		link_speed = IXGBE_LINK_SPEED_10GB_FULL;
		link_up = true;
	}
	adapter->link_up = link_up;
	adapter->link_speed = link_speed;

	if (link_up) {
		if (!netif_carrier_ok(netdev)) {
			hw_dbg(&adapter->hw, "NIC Link is Up, %u Gbps\n",
			       (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
			       10 : 1);
			netif_carrier_on(netdev);
			netif_tx_wake_all_queues(netdev);
		}
	} else {
		adapter->link_up = false;
		adapter->link_speed = 0;
		if (netif_carrier_ok(netdev)) {
			hw_dbg(&adapter->hw, "NIC Link is Down\n");
			netif_carrier_off(netdev);
			netif_tx_stop_all_queues(netdev);
		}
	}

	ixgbevf_update_stats(adapter);

pf_has_reset:
	/* Reset the timer */
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer,
			  round_jiffies(jiffies + (2 * HZ)));

	adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
}
/**
 * ixgbevf_free_tx_resources - Free Tx Resources per Queue
 * @adapter: board private structure
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter,
			       struct ixgbevf_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	ixgbevf_clean_tx_ring(adapter, tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
			  tx_ring->dma);

	tx_ring->desc = NULL;
}
/**
 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		if (adapter->tx_ring[i].desc)
			ixgbevf_free_tx_resources(adapter,
						  &adapter->tx_ring[i]);
}
/**
 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
			       struct ixgbevf_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
	tx_ring->tx_buffer_info = vzalloc(size);
	if (!tx_ring->tx_buffer_info)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->work_limit = tx_ring->count;
	return 0;

err:
	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;
	hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit "
	       "descriptor ring\n");
	return -ENOMEM;
}
/**
 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = ixgbevf_setup_tx_resources(adapter, &adapter->tx_ring[i]);
		if (!err)
			continue;
		hw_dbg(&adapter->hw,
		       "Allocation for Tx Queue %u failed\n", i);
		break;
	}

	return err;
}
/**
 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
			       struct ixgbevf_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
	rx_ring->rx_buffer_info = vzalloc(size);
	if (!rx_ring->rx_buffer_info) {
		hw_dbg(&adapter->hw,
		       "Unable to vmalloc buffer memory for "
		       "the receive descriptor ring\n");
		goto alloc_failed;
	}

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);

	if (!rx_ring->desc) {
		hw_dbg(&adapter->hw,
		       "Unable to allocate memory for "
		       "the receive descriptor ring\n");
		vfree(rx_ring->rx_buffer_info);
		rx_ring->rx_buffer_info = NULL;
		goto alloc_failed;
	}

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;
alloc_failed:
	return -ENOMEM;
}
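
/*
 * Two different allocators are used above by design: rx_buffer_info is
 * host-only bookkeeping, so virtually contiguous vzalloc() memory is
 * fine, while the descriptor ring itself is fetched by the device via
 * DMA and therefore comes from dma_alloc_coherent(), which returns a
 * physically contiguous region and its bus address in rx_ring->dma.
 */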
/**
 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = ixgbevf_setup_rx_resources(adapter, &adapter->rx_ring[i]);
		if (!err)
			continue;
		hw_dbg(&adapter->hw,
		       "Allocation for Rx Queue %u failed\n", i);
		break;
	}

	return err;
}
/**
 * ixgbevf_free_rx_resources - Free Rx Resources
 * @adapter: board private structure
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter,
			       struct ixgbevf_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	ixgbevf_clean_rx_ring(adapter, rx_ring);

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
			  rx_ring->dma);

	rx_ring->desc = NULL;
}
/**
 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		if (adapter->rx_ring[i].desc)
			ixgbevf_free_rx_resources(adapter,
						  &adapter->rx_ring[i]);
}
/**
 * ixgbevf_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int ixgbevf_open(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int err;

	/* disallow open during test */
	if (test_bit(__IXGBEVF_TESTING, &adapter->state))
		return -EBUSY;

	if (hw->adapter_stopped) {
		ixgbevf_reset(adapter);
		/* if adapter is still stopped then PF isn't up and
		 * the vf can't start. */
		if (hw->adapter_stopped) {
			err = IXGBE_ERR_MBX;
			printk(KERN_ERR "Unable to start - perhaps the PF"
			       " Driver isn't up yet\n");
			goto err_setup_reset;
		}
	}

	/* allocate transmit descriptors */
	err = ixgbevf_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = ixgbevf_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	ixgbevf_configure(adapter);

	/*
	 * Map the Tx/Rx rings to the vectors we were allotted.
	 * if request_irq will be called in this function map_rings
	 * must be called *before* up_complete
	 */
	ixgbevf_map_rings_to_vectors(adapter);

	err = ixgbevf_up_complete(adapter);
	if (err)
		goto err_up;

	/* clear any pending interrupts, may auto mask */
	IXGBE_READ_REG(hw, IXGBE_VTEICR);
	err = ixgbevf_request_irq(adapter);
	if (err)
		goto err_req_irq;

	ixgbevf_irq_enable(adapter, true, true);

	return 0;

err_req_irq:
	ixgbevf_down(adapter);
err_up:
	ixgbevf_free_irq(adapter);
err_setup_rx:
	ixgbevf_free_all_rx_resources(adapter);
err_setup_tx:
	ixgbevf_free_all_tx_resources(adapter);
	ixgbevf_reset(adapter);

err_setup_reset:

	return err;
}
/**
 * ixgbevf_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int ixgbevf_close(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	ixgbevf_down(adapter);
	ixgbevf_free_irq(adapter);

	ixgbevf_free_all_tx_resources(adapter);
	ixgbevf_free_all_rx_resources(adapter);

	return 0;
}
static int ixgbevf_tso(struct ixgbevf_adapter *adapter,
		       struct ixgbevf_ring *tx_ring,
		       struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	unsigned int i;
	int err;
	struct ixgbevf_tx_buffer *tx_buffer_info;
	u32 vlan_macip_lens = 0, type_tucmd_mlhl;
	u32 mss_l4len_idx, l4len;

	if (skb_is_gso(skb)) {
		if (skb_header_cloned(skb)) {
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (err)
				return err;
		}
		l4len = tcp_hdrlen(skb);
		*hdr_len += l4len;

		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *iph = ip_hdr(skb);
			iph->tot_len = 0;
			iph->check = 0;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
			adapter->hw_tso_ctxt++;
		} else if (skb_is_gso_v6(skb)) {
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check =
			    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					     &ipv6_hdr(skb)->daddr,
					     0, IPPROTO_TCP, 0);
			adapter->hw_tso6_ctxt++;
		}

		i = tx_ring->next_to_use;

		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);

		/* VLAN MACLEN IPLEN */
		if (tx_flags & IXGBE_TX_FLAGS_VLAN)
			vlan_macip_lens |=
				(tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
		vlan_macip_lens |= ((skb_network_offset(skb)) <<
				    IXGBE_ADVTXD_MACLEN_SHIFT);
		*hdr_len += skb_network_offset(skb);
		vlan_macip_lens |=
			(skb_transport_header(skb) - skb_network_header(skb));
		*hdr_len +=
			(skb_transport_header(skb) - skb_network_header(skb));
		context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
		context_desc->seqnum_seed = 0;

		/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
		type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
				   IXGBE_ADVTXD_DTYP_CTXT);

		if (skb->protocol == htons(ETH_P_IP))
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);

		/* MSS L4LEN IDX */
		mss_l4len_idx =
			(skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
		mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
		/* use index 1 for TSO */
		mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
		context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);

		tx_buffer_info->time_stamp = jiffies;
		tx_buffer_info->next_to_watch = i;

		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}

	return false;
}
static bool ixgbevf_tx_csum(struct ixgbevf_adapter *adapter,
			    struct ixgbevf_ring *tx_ring,
			    struct sk_buff *skb, u32 tx_flags)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	unsigned int i;
	struct ixgbevf_tx_buffer *tx_buffer_info;
	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL ||
	    (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
		i = tx_ring->next_to_use;
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);

		if (tx_flags & IXGBE_TX_FLAGS_VLAN)
			vlan_macip_lens |= (tx_flags &
					    IXGBE_TX_FLAGS_VLAN_MASK);
		vlan_macip_lens |= (skb_network_offset(skb) <<
				    IXGBE_ADVTXD_MACLEN_SHIFT);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			vlan_macip_lens |= (skb_transport_header(skb) -
					    skb_network_header(skb));

		context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
		context_desc->seqnum_seed = 0;

		type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
				    IXGBE_ADVTXD_DTYP_CTXT);

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			switch (skb->protocol) {
			case __constant_htons(ETH_P_IP):
				type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
					type_tucmd_mlhl |=
					    IXGBE_ADVTXD_TUCMD_L4T_TCP;
				break;
			case __constant_htons(ETH_P_IPV6):
				/* XXX what about other V6 headers?? */
				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
					type_tucmd_mlhl |=
						IXGBE_ADVTXD_TUCMD_L4T_TCP;
				break;
			default:
				if (unlikely(net_ratelimit())) {
					printk(KERN_WARNING
					       "partial checksum but "
					       "proto=%x!\n",
					       skb->protocol);
				}
				break;
			}
		}

		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
		/* use index zero for tx checksum offload */
		context_desc->mss_l4len_idx = 0;

		tx_buffer_info->time_stamp = jiffies;
		tx_buffer_info->next_to_watch = i;

		adapter->hw_csum_tx_good++;
		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}

	return false;
}
static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
			  struct ixgbevf_ring *tx_ring,
			  struct sk_buff *skb, u32 tx_flags,
			  unsigned int first)
{
	struct pci_dev *pdev = adapter->pdev;
	struct ixgbevf_tx_buffer *tx_buffer_info;
	unsigned int len;
	unsigned int total = skb->len;
	unsigned int offset = 0, size;
	int count = 0;
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int f;
	int i;

	i = tx_ring->next_to_use;

	len = min(skb_headlen(skb), total);
	while (len) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);

		tx_buffer_info->length = size;
		tx_buffer_info->mapped_as_page = false;
		tx_buffer_info->dma = dma_map_single(&adapter->pdev->dev,
						     skb->data + offset,
						     size, DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
			goto dma_error;
		tx_buffer_info->time_stamp = jiffies;
		tx_buffer_info->next_to_watch = i;

		len -= size;
		total -= size;
		offset += size;
		count++;
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	for (f = 0; f < nr_frags; f++) {
		struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];
		len = min((unsigned int)frag->size, total);
		offset = frag->page_offset;

		while (len) {
			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);

			tx_buffer_info->length = size;
			tx_buffer_info->dma = dma_map_page(&adapter->pdev->dev,
							   frag->page,
							   offset,
							   size,
							   DMA_TO_DEVICE);
			tx_buffer_info->mapped_as_page = true;
			if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
				goto dma_error;
			tx_buffer_info->time_stamp = jiffies;
			tx_buffer_info->next_to_watch = i;

			len -= size;
			total -= size;
			offset += size;
			count++;
			i++;
			if (i == tx_ring->count)
				i = 0;
		}
		if (total == 0)
			break;
	}

	if (i == 0)
		i = tx_ring->count - 1;
	else
		i = i - 1;
	tx_ring->tx_buffer_info[i].skb = skb;
	tx_ring->tx_buffer_info[first].next_to_watch = i;

	return count;

dma_error:
	dev_err(&pdev->dev, "TX DMA map failed\n");

	/* clear timestamp and dma mappings for failed tx_buffer_info map */
	tx_buffer_info->dma = 0;
	tx_buffer_info->time_stamp = 0;
	tx_buffer_info->next_to_watch = 0;
	count--;

	/* clear timestamp and dma mappings for remaining portion of packet */
	while (count >= 0) {
		count--;
		i--;
		if (i < 0)
			i += tx_ring->count;
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		ixgbevf_unmap_and_free_tx_resource(adapter, tx_buffer_info);
	}

	return count;
}
static void ixgbevf_tx_queue(struct ixgbevf_adapter *adapter,
			     struct ixgbevf_ring *tx_ring, int tx_flags,
			     int count, u32 paylen, u8 hdr_len)
{
	union ixgbe_adv_tx_desc *tx_desc = NULL;
	struct ixgbevf_tx_buffer *tx_buffer_info;
	u32 olinfo_status = 0, cmd_type_len = 0;
	unsigned int i;

	u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;

	cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;

	cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;

	if (tx_flags & IXGBE_TX_FLAGS_VLAN)
		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;

	if (tx_flags & IXGBE_TX_FLAGS_TSO) {
		cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;

		olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
			IXGBE_ADVTXD_POPTS_SHIFT;

		/* use index 1 context for tso */
		olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
		if (tx_flags & IXGBE_TX_FLAGS_IPV4)
			olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
				IXGBE_ADVTXD_POPTS_SHIFT;

	} else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
		olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
			IXGBE_ADVTXD_POPTS_SHIFT;

	olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);

	i = tx_ring->next_to_use;
	while (count--) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
		tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
		tx_desc->read.cmd_type_len =
			cpu_to_le32(cmd_type_len | tx_buffer_info->length);
		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);

	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();

	tx_ring->next_to_use = i;
	writel(i, adapter->hw.hw_addr + tx_ring->tail);
}
static int __ixgbevf_maybe_stop_tx(struct net_device *netdev,
				   struct ixgbevf_ring *tx_ring, int size)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	netif_stop_subqueue(netdev, tx_ring->queue_index);
	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available. */
	if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(netdev, tx_ring->queue_index);
	++adapter->restart_queue;
	return 0;
}

static int ixgbevf_maybe_stop_tx(struct net_device *netdev,
				 struct ixgbevf_ring *tx_ring, int size)
{
	if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __ixgbevf_maybe_stop_tx(netdev, tx_ring, size);
}
static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbevf_ring *tx_ring;
	unsigned int first;
	unsigned int tx_flags = 0;
	u8 hdr_len = 0;
	int r_idx = 0, tso;
	int count = 0;

	unsigned int f;

	tx_ring = &adapter->tx_ring[r_idx];

	if (vlan_tx_tag_present(skb)) {
		tx_flags |= vlan_tx_tag_get(skb);
		tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= IXGBE_TX_FLAGS_VLAN;
	}

	/* four things can cause us to need a context descriptor */
	if (skb_is_gso(skb) ||
	    (skb->ip_summed == CHECKSUM_PARTIAL) ||
	    (tx_flags & IXGBE_TX_FLAGS_VLAN))
		count++;

	count += TXD_USE_COUNT(skb_headlen(skb));
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);

	if (ixgbevf_maybe_stop_tx(netdev, tx_ring, count)) {
		adapter->tx_busy++;
		return NETDEV_TX_BUSY;
	}

	first = tx_ring->next_to_use;

	if (skb->protocol == htons(ETH_P_IP))
		tx_flags |= IXGBE_TX_FLAGS_IPV4;
	tso = ixgbevf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (tso)
		tx_flags |= IXGBE_TX_FLAGS_TSO;
	else if (ixgbevf_tx_csum(adapter, tx_ring, skb, tx_flags) &&
		 (skb->ip_summed == CHECKSUM_PARTIAL))
		tx_flags |= IXGBE_TX_FLAGS_CSUM;

	ixgbevf_tx_queue(adapter, tx_ring, tx_flags,
			 ixgbevf_tx_map(adapter, tx_ring, skb, tx_flags, first),
			 skb->len, hdr_len);

	ixgbevf_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);

	return NETDEV_TX_OK;
}
/**
 * ixgbevf_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbevf_set_mac(struct net_device *netdev, void *p)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	if (hw->mac.ops.set_rar)
		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);

	return 0;
}
/**
 * ixgbevf_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
	int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;
	u32 msg[2];

	if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
		max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;

	/* MTU < 68 is an error and causes problems on some kernels */
	if ((new_mtu < 68) || (max_frame > max_possible_frame))
		return -EINVAL;

	hw_dbg(&adapter->hw, "changing MTU from %d to %d\n",
	       netdev->mtu, new_mtu);
	/* must set new MTU before calling down or up */
	netdev->mtu = new_mtu;

	msg[0] = IXGBE_VF_SET_LPE;
	msg[1] = max_frame;
	hw->mbx.ops.write_posted(hw, msg, 2);

	if (netif_running(netdev))
		ixgbevf_reinit_locked(adapter);

	return 0;
}
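
/*
 * The VF cannot program the PF's max-frame-size configuration directly,
 * so the new size is requested over the PF/VF mailbox above: an
 * IXGBE_VF_SET_LPE message carrying max_frame, sent with the posted
 * mailbox write op rather than a register write.
 */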
static void ixgbevf_shutdown(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		ixgbevf_down(adapter);
		ixgbevf_free_irq(adapter);
		ixgbevf_free_all_tx_resources(adapter);
		ixgbevf_free_all_rx_resources(adapter);
	}

	pci_save_state(pdev);

	pci_disable_device(pdev);
}
static const struct net_device_ops ixgbe_netdev_ops = {
	.ndo_open		= ixgbevf_open,
	.ndo_stop		= ixgbevf_close,
	.ndo_start_xmit		= ixgbevf_xmit_frame,
	.ndo_set_rx_mode	= ixgbevf_set_rx_mode,
	.ndo_set_multicast_list	= ixgbevf_set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= ixgbevf_set_mac,
	.ndo_change_mtu		= ixgbevf_change_mtu,
	.ndo_tx_timeout		= ixgbevf_tx_timeout,
	.ndo_vlan_rx_add_vid	= ixgbevf_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ixgbevf_vlan_rx_kill_vid,
};

static void ixgbevf_assign_netdev_ops(struct net_device *dev)
{
	dev->netdev_ops = &ixgbe_netdev_ops;
	ixgbevf_set_ethtool_ops(dev);
	dev->watchdog_timeo = 5 * HZ;
}
/**
 * ixgbevf_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgbevf_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit ixgbevf_probe(struct pci_dev *pdev,
				   const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct ixgbevf_adapter *adapter = NULL;
	struct ixgbe_hw *hw = NULL;
	const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
	static int cards_found;
	int err, pci_using_dac;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
			if (err) {
				dev_err(&pdev->dev, "No usable DMA "
					"configuration, aborting\n");
				goto err_dma;
			}
		}
		pci_using_dac = 0;
	}

	err = pci_request_regions(pdev, ixgbevf_driver_name);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
		goto err_pci_reg;
	}

	pci_set_master(pdev);

#ifdef HAVE_TX_MQ
	netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
				   MAX_TX_QUEUES);
#else
	netdev = alloc_etherdev(sizeof(struct ixgbevf_adapter));
#endif
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);

	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;

	/*
	 * call save state here in standalone driver because it relies on
	 * adapter struct to exist, and needs to call netdev_priv
	 */
	pci_save_state(pdev);

	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
			      pci_resource_len(pdev, 0));
	if (!hw->hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	ixgbevf_assign_netdev_ops(netdev);

	adapter->bd_number = cards_found;

	/* Setup hw api */
	memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
	hw->mac.type = ii->mac;

	memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
	       sizeof(struct ixgbe_mbx_operations));

	adapter->flags &= ~IXGBE_FLAG_RX_PS_CAPABLE;
	adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
	adapter->flags |= IXGBE_FLAG_RX_1BUF_CAPABLE;

	/* setup the private structure */
	err = ixgbevf_sw_init(adapter);
	if (err)
		goto err_sw_init;

	netdev->features = NETIF_F_SG |
			   NETIF_F_IP_CSUM |
			   NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_RX |
			   NETIF_F_HW_VLAN_FILTER;

	netdev->features |= NETIF_F_IPV6_CSUM;
	netdev->features |= NETIF_F_TSO;
	netdev->features |= NETIF_F_TSO6;
	netdev->features |= NETIF_F_GRO;
	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	/* The HW MAC address was set and/or determined in sw_init */
	memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
	memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		printk(KERN_ERR "invalid MAC address\n");
		err = -EIO;
		goto err_sw_init;
	}

	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = ixgbevf_watchdog;
	adapter->watchdog_timer.data = (unsigned long)adapter;

	INIT_WORK(&adapter->reset_task, ixgbevf_reset_task);
	INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task);

	err = ixgbevf_init_interrupt_scheme(adapter);
	if (err)
		goto err_sw_init;

	/* pick up the PCI bus settings for reporting later */
	if (hw->mac.ops.get_bus_info)
		hw->mac.ops.get_bus_info(hw);

	strcpy(netdev->name, "eth%d");

	err = register_netdev(netdev);
	if (err)
		goto err_register;

	adapter->netdev_registered = true;

	netif_carrier_off(netdev);

	ixgbevf_init_last_counter_stats(adapter);

	/* print the MAC address */
	hw_dbg(hw, "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
	       netdev->dev_addr[0],
	       netdev->dev_addr[1],
	       netdev->dev_addr[2],
	       netdev->dev_addr[3],
	       netdev->dev_addr[4],
	       netdev->dev_addr[5]);

	hw_dbg(hw, "MAC: %d\n", hw->mac.type);

	hw_dbg(hw, "LRO is disabled\n");

	hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
	cards_found++;
	return 0;

err_register:
err_sw_init:
	ixgbevf_reset_interrupt_capability(adapter);
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
/**
 * ixgbevf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbevf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit ixgbevf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	set_bit(__IXGBEVF_DOWN, &adapter->state);

	del_timer_sync(&adapter->watchdog_timer);

	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);

	if (adapter->netdev_registered) {
		unregister_netdev(netdev);
		adapter->netdev_registered = false;
	}

	ixgbevf_reset_interrupt_capability(adapter);

	iounmap(adapter->hw.hw_addr);
	pci_release_regions(pdev);

	hw_dbg(&adapter->hw, "Remove complete\n");

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	free_netdev(netdev);

	pci_disable_device(pdev);
}
static struct pci_driver ixgbevf_driver = {
	.name     = ixgbevf_driver_name,
	.id_table = ixgbevf_pci_tbl,
	.probe    = ixgbevf_probe,
	.remove   = __devexit_p(ixgbevf_remove),
	.shutdown = ixgbevf_shutdown,
};
/**
 * ixgbevf_init_module - Driver Registration Routine
 *
 * ixgbevf_init_module is the first routine called when the driver is
 * loaded.  All it does is register with the PCI subsystem.
 **/
static int __init ixgbevf_init_module(void)
{
	int ret;
	printk(KERN_INFO "ixgbevf: %s - version %s\n", ixgbevf_driver_string,
	       ixgbevf_driver_version);

	printk(KERN_INFO "%s\n", ixgbevf_copyright);

	ret = pci_register_driver(&ixgbevf_driver);
	return ret;
}

module_init(ixgbevf_init_module);
/**
 * ixgbevf_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbevf_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit ixgbevf_exit_module(void)
{
	pci_unregister_driver(&ixgbevf_driver);
}

/**
 * ixgbevf_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
{
	struct ixgbevf_adapter *adapter = hw->back;
	return adapter->netdev->name;
}

module_exit(ixgbevf_exit_module);

/* ixgbevf_main.c */