ixgbevf: make the first tx_buffer a repository for most of the skb info

drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c

/*******************************************************************************

  Intel 82599 Virtual Function driver
  Copyright(c) 1999 - 2012 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/


/******************************************************************************
 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
******************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>

#include "ixgbevf.h"

const char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
	"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";

#define DRV_VERSION "2.12.1-k"
const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] =
	"Copyright (c) 2009 - 2012 Intel Corporation.";

static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
	[board_82599_vf] = &ixgbevf_82599_vf_info,
	[board_X540_vf]  = &ixgbevf_X540_vf_info,
};

/* ixgbevf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static DEFINE_PCI_DEVICE_TABLE(ixgbevf_pci_tbl) = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf },
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

/* forward decls */
static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter);
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);

static inline void ixgbevf_release_rx_desc(struct ixgbevf_ring *rx_ring,
					   u32 val)
{
	rx_ring->next_to_use = val;

	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}

/**
 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 */
static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
			     u8 queue, u8 msix_vector)
{
	u32 ivar, index;
	struct ixgbe_hw *hw = &adapter->hw;
	if (direction == -1) {
		/* other causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
		ivar &= ~0xFF;
		ivar |= msix_vector;
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
	} else {
		/* tx or rx causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
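		/* each 32-bit VTIVAR register covers two queues, with an
		 * 8-bit entry per cause: queue >> 1 picks the register and
		 * 16 * (queue & 1) + 8 * direction picks the byte (Rx in
		 * the low byte, Tx in the next byte of each 16-bit half)
		 */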
		index = ((16 * (queue & 1)) + (8 * direction));
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
		ivar &= ~(0xFF << index);
		ivar |= (msix_vector << index);
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
	}
}

static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
					       struct ixgbevf_tx_buffer
					       *tx_buffer_info)
{
	if (tx_buffer_info->dma) {
		if (tx_buffer_info->tx_flags & IXGBE_TX_FLAGS_MAPPED_AS_PAGE)
			dma_unmap_page(tx_ring->dev,
				       tx_buffer_info->dma,
				       tx_buffer_info->length,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(tx_ring->dev,
					 tx_buffer_info->dma,
					 tx_buffer_info->length,
					 DMA_TO_DEVICE);
		tx_buffer_info->dma = 0;
	}
	if (tx_buffer_info->skb) {
		dev_kfree_skb_any(tx_buffer_info->skb);
		tx_buffer_info->skb = NULL;
	}
	tx_buffer_info->time_stamp = 0;
	/* tx_buffer_info must be completely set up in the transmit path */
}

#define IXGBE_MAX_TXD_PWR	14
#define IXGBE_MAX_DATA_PER_TXD	(1 << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
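/* roughly one descriptor per fragment, with the "+ 4" leaving slack for
 * the skb head and a context descriptor rather than being an exact bound
 */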
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)

static void ixgbevf_tx_timeout(struct net_device *netdev);

/**
 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: structure containing interrupt and ring information
 * @tx_ring: tx ring to clean
 **/
static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
				 struct ixgbevf_ring *tx_ring)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_tx_buffer *tx_buffer;
	union ixgbe_adv_tx_desc *tx_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int budget = tx_ring->count / 2;
	unsigned int i = tx_ring->next_to_clean;

	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		return true;

	tx_buffer = &tx_ring->tx_buffer_info[i];
	tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
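	/* bias the index by -count so the wrap check below is a simple
	 * test against zero; the real ring index is i + count
	 */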
	i -= tx_ring->count;

	do {
		union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		read_barrier_depends();

		/* if DD is not set pending work has not been completed */
		if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buffer->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buffer->bytecount;
		total_packets += tx_buffer->gso_segs;

		/* clear tx_buffer data */
		ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_desc->wb.status = 0;

			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buffer = tx_ring->tx_buffer_info;
				tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
			}

			ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer);
		}

		tx_desc->wb.status = 0;

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buffer = tx_ring->tx_buffer_info;
			tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
		}

		/* issue prefetch for next Tx descriptor */
		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;

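	/* restart the queue only once at least twice the worst-case
	 * descriptor need is free, so it is not stopped again right away
	 */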
#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();

		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}

/**
 * ixgbevf_receive_skb - Send a completed packet up the stack
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 * @status: hardware indication of status of receive
 * @rx_desc: rx descriptor
 **/
static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
				struct sk_buff *skb, u8 status,
				union ixgbe_adv_rx_desc *rx_desc)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	bool is_vlan = (status & IXGBE_RXD_STAT_VP);
	u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);

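	/* forward the VLAN tag only if the hardware flagged one and it
	 * belongs to a VLAN registered in this adapter's active filter set
	 */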
	if (is_vlan && test_bit(tag & VLAN_VID_MASK, adapter->active_vlans))
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);

	if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
		napi_gro_receive(&q_vector->napi, skb);
	else
		netif_rx(skb);
}

/**
 * ixgbevf_rx_skb - Helper function to determine proper Rx method
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 * @status: hardware indication of status of receive
 * @rx_desc: rx descriptor
 **/
static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
			   struct sk_buff *skb, u8 status,
			   union ixgbe_adv_rx_desc *rx_desc)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	skb_mark_napi_id(skb, &q_vector->napi);

	if (ixgbevf_qv_busy_polling(q_vector)) {
		netif_receive_skb(skb);
		/* exit early if we busy polled */
		return;
	}
#endif /* CONFIG_NET_RX_BUSY_POLL */

	ixgbevf_receive_skb(q_vector, skb, status, rx_desc);
}

/**
 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
 * @ring: pointer to Rx descriptor ring structure
 * @status_err: hardware indication of status of receive
 * @skb: skb currently being received and modified
 **/
static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
				       u32 status_err, struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Rx csum disabled */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* if IP and error */
	if ((status_err & IXGBE_RXD_STAT_IPCS) &&
	    (status_err & IXGBE_RXDADV_ERR_IPE)) {
		ring->rx_stats.csum_err++;
		return;
	}

	if (!(status_err & IXGBE_RXD_STAT_L4CS))
		return;

	if (status_err & IXGBE_RXDADV_ERR_TCPE) {
		ring->rx_stats.csum_err++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
}

/**
 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
 * @rx_ring: rx descriptor ring (for a specific queue) to set up buffers on
 * @cleaned_count: number of buffers to replace
 **/
static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
				     int cleaned_count)
{
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbevf_rx_buffer *bi;
	unsigned int i = rx_ring->next_to_use;

	while (cleaned_count--) {
		rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
		bi = &rx_ring->rx_buffer_info[i];

		if (!bi->skb) {
			struct sk_buff *skb;

			skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
							rx_ring->rx_buf_len);
			if (!skb)
				goto no_buffers;

			bi->skb = skb;

			bi->dma = dma_map_single(rx_ring->dev, skb->data,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);
			if (dma_mapping_error(rx_ring->dev, bi->dma)) {
				dev_kfree_skb(skb);
				bi->skb = NULL;
				dev_err(rx_ring->dev, "Rx DMA map failed\n");
				break;
			}
		}
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);

		i++;
		if (i == rx_ring->count)
			i = 0;
	}

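/* note: this label is also reached by falling out of the loop on success,
 * so alloc_rx_buff_failed is incremented on every call through here
 */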
no_buffers:
	rx_ring->rx_stats.alloc_rx_buff_failed++;
	if (rx_ring->next_to_use != i)
		ixgbevf_release_rx_desc(rx_ring, i);
}

static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
					     u32 qmask)
{
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
}

static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
				struct ixgbevf_ring *rx_ring,
				int budget)
{
	union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
	struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer;
	struct sk_buff *skb;
	unsigned int i;
	u32 len, staterr;
	int cleaned_count = 0;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	rx_buffer_info = &rx_ring->rx_buffer_info[i];

	while (staterr & IXGBE_RXD_STAT_DD) {
		if (!budget)
			break;
		budget--;

		rmb(); /* read descriptor and rx_buffer_info after status DD */
		len = le16_to_cpu(rx_desc->wb.upper.length);
		skb = rx_buffer_info->skb;
		prefetch(skb->data - NET_IP_ALIGN);
		rx_buffer_info->skb = NULL;

		if (rx_buffer_info->dma) {
			dma_unmap_single(rx_ring->dev, rx_buffer_info->dma,
					 rx_ring->rx_buf_len,
					 DMA_FROM_DEVICE);
			rx_buffer_info->dma = 0;
			skb_put(skb, len);
		}

		i++;
		if (i == rx_ring->count)
			i = 0;

		next_rxd = IXGBEVF_RX_DESC(rx_ring, i);
		prefetch(next_rxd);
		cleaned_count++;

		next_buffer = &rx_ring->rx_buffer_info[i];

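		/* a descriptor without EOP set means the packet spans
		 * several buffers; link the skbs through the prev pointer
		 * in the control block so the whole chain can be freed
		 * once the EOP descriptor is seen
		 */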
		if (!(staterr & IXGBE_RXD_STAT_EOP)) {
			skb->next = next_buffer->skb;
			IXGBE_CB(skb->next)->prev = skb;
			rx_ring->rx_stats.non_eop_descs++;
			goto next_desc;
		}

		/* we should not be chaining buffers, if we did drop the skb */
		if (IXGBE_CB(skb)->prev) {
			do {
				struct sk_buff *this = skb;
				skb = IXGBE_CB(skb)->prev;
				dev_kfree_skb(this);
			} while (skb);
			goto next_desc;
		}

		/* ERR_MASK will only have valid bits if EOP set */
		if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		ixgbevf_rx_checksum(rx_ring, staterr, skb);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		skb->protocol = eth_type_trans(skb, rx_ring->netdev);

		/* Workaround hardware that can't do proper VEPA multicast
		 * source pruning.
		 */
		if ((skb->pkt_type & (PACKET_BROADCAST | PACKET_MULTICAST)) &&
		    ether_addr_equal(rx_ring->netdev->dev_addr,
				     eth_hdr(skb)->h_source)) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		ixgbevf_rx_skb(q_vector, skb, staterr, rx_desc);

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
			ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		rx_buffer_info = &rx_ring->rx_buffer_info[i];

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = ixgbevf_desc_unused(rx_ring);

	if (cleaned_count)
		ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	q_vector->rx.total_packets += total_rx_packets;
	q_vector->rx.total_bytes += total_rx_bytes;

	return total_rx_packets;
}

/**
 * ixgbevf_poll - NAPI polling callback
 * @napi: napi struct with our device's info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean one or more rings associated with a
 * q_vector.
 **/
static int ixgbevf_poll(struct napi_struct *napi, int budget)
{
	struct ixgbevf_q_vector *q_vector =
		container_of(napi, struct ixgbevf_q_vector, napi);
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_ring *ring;
	int per_ring_budget;
	bool clean_complete = true;

	ixgbevf_for_each_ring(ring, q_vector->tx)
		clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);

#ifdef CONFIG_NET_RX_BUSY_POLL
	if (!ixgbevf_qv_lock_napi(q_vector))
		return budget;
#endif

	/* attempt to distribute budget to each queue fairly, but don't allow
	 * the budget to go below 1 because we'll exit polling */
	if (q_vector->rx.count > 1)
		per_ring_budget = max(budget/q_vector->rx.count, 1);
	else
		per_ring_budget = budget;

	adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
	ixgbevf_for_each_ring(ring, q_vector->rx)
		clean_complete &= (ixgbevf_clean_rx_irq(q_vector, ring,
							per_ring_budget)
				   < per_ring_budget);
	adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;

#ifdef CONFIG_NET_RX_BUSY_POLL
	ixgbevf_qv_unlock_napi(q_vector);
#endif

	/* If all work not completed, return budget and keep polling */
	if (!clean_complete)
		return budget;
	/* all work done, exit the polling mode */
	napi_complete(napi);
	if (adapter->rx_itr_setting & 1)
		ixgbevf_set_itr(q_vector);
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
		ixgbevf_irq_enable_queues(adapter,
					  1 << q_vector->v_idx);

	return 0;
}

/**
 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 */
void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	int v_idx = q_vector->v_idx;
	u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;

	/*
	 * set the WDIS bit to not clear the timer bits and cause an
	 * immediate assertion of the interrupt
	 */
	itr_reg |= IXGBE_EITR_CNT_WDIS;

	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
}

#ifdef CONFIG_NET_RX_BUSY_POLL
/* must be called with local_bh_disable()d */
static int ixgbevf_busy_poll_recv(struct napi_struct *napi)
{
	struct ixgbevf_q_vector *q_vector =
		container_of(napi, struct ixgbevf_q_vector, napi);
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_ring *ring;
	int found = 0;

	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		return LL_FLUSH_FAILED;

	if (!ixgbevf_qv_lock_poll(q_vector))
		return LL_FLUSH_BUSY;

	ixgbevf_for_each_ring(ring, q_vector->rx) {
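		/* busy polling favors latency over batching, so clean with
		 * a tiny budget of a few packets per pass
		 */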
		found = ixgbevf_clean_rx_irq(q_vector, ring, 4);
#ifdef BP_EXTENDED_STATS
		if (found)
			ring->stats.cleaned += found;
		else
			ring->stats.misses++;
#endif
		if (found)
			break;
	}

	ixgbevf_qv_unlock_poll(q_vector);

	return found;
}
#endif /* CONFIG_NET_RX_BUSY_POLL */

/**
 * ixgbevf_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
{
	struct ixgbevf_q_vector *q_vector;
	int q_vectors, v_idx;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	adapter->eims_enable_mask = 0;

	/*
	 * Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		struct ixgbevf_ring *ring;
		q_vector = adapter->q_vector[v_idx];

		ixgbevf_for_each_ring(ring, q_vector->rx)
			ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);

		ixgbevf_for_each_ring(ring, q_vector->tx)
			ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);

		if (q_vector->tx.ring && !q_vector->rx.ring) {
			/* tx only vector */
			if (adapter->tx_itr_setting == 1)
				q_vector->itr = IXGBE_10K_ITR;
			else
				q_vector->itr = adapter->tx_itr_setting;
		} else {
			/* rx or rx/tx vector */
			if (adapter->rx_itr_setting == 1)
				q_vector->itr = IXGBE_20K_ITR;
			else
				q_vector->itr = adapter->rx_itr_setting;
		}

		/* add q_vector eims value to global eims_enable_mask */
		adapter->eims_enable_mask |= 1 << v_idx;

		ixgbevf_write_eitr(q_vector);
	}

	ixgbevf_set_ivar(adapter, -1, 1, v_idx);
	/* setup eims_other and add value to global eims_enable_mask */
	adapter->eims_other = 1 << v_idx;
	adapter->eims_enable_mask |= adapter->eims_other;
}

enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};

/**
 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @ring_container: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
			       struct ixgbevf_ring_container *ring_container)
{
	int bytes = ring_container->total_bytes;
	int packets = ring_container->total_packets;
	u32 timepassed_us;
	u64 bytes_perint;
	u8 itr_setting = ring_container->itr;

	if (packets == 0)
		return;

	/* simple throttle rate management
	 *    0-20MB/s lowest (100000 ints/s)
	 *   20-100MB/s low   (20000 ints/s)
	 *  100-1249MB/s bulk (8000 ints/s)
	 */
	/* what was last interrupt timeslice? */
	timepassed_us = q_vector->itr >> 2;
	bytes_perint = bytes / timepassed_us; /* bytes/usec */

	switch (itr_setting) {
	case lowest_latency:
		if (bytes_perint > 10)
			itr_setting = low_latency;
		break;
	case low_latency:
		if (bytes_perint > 20)
			itr_setting = bulk_latency;
		else if (bytes_perint <= 10)
			itr_setting = lowest_latency;
		break;
	case bulk_latency:
		if (bytes_perint <= 20)
			itr_setting = low_latency;
		break;
	}

	/* clear work counters since we have the values we need */
	ring_container->total_bytes = 0;
	ring_container->total_packets = 0;

	/* write updated itr to ring container */
	ring_container->itr = itr_setting;
}

static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
{
	u32 new_itr = q_vector->itr;
	u8 current_itr;

	ixgbevf_update_itr(q_vector, &q_vector->tx);
	ixgbevf_update_itr(q_vector, &q_vector->rx);

	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = IXGBE_100K_ITR;
		break;
	case low_latency:
		new_itr = IXGBE_20K_ITR;
		break;
	case bulk_latency:
	default:
		new_itr = IXGBE_8K_ITR;
		break;
	}

	if (new_itr != q_vector->itr) {
		/* do an exponential smoothing */
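		/* equivalently, blend the interrupt *rates*:
		 * 1/itr_next = 0.9 * (1/itr_old) + 0.1 * (1/itr_target),
		 * so the interval drifts gradually toward the target
		 */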
		new_itr = (10 * new_itr * q_vector->itr) /
			  ((9 * new_itr) + q_vector->itr);

		/* save the algorithm value here */
		q_vector->itr = new_itr;

		ixgbevf_write_eitr(q_vector);
	}
}

static irqreturn_t ixgbevf_msix_other(int irq, void *data)
{
	struct ixgbevf_adapter *adapter = data;
	struct ixgbe_hw *hw = &adapter->hw;

	hw->mac.get_link_status = 1;

	if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer, jiffies);

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);

	return IRQ_HANDLED;
}

/**
 * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues)
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 **/
static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
{
	struct ixgbevf_q_vector *q_vector = data;

	/* EIAM disabled interrupts (on this vector) for us */
	if (q_vector->rx.ring || q_vector->tx.ring)
		napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
				     int r_idx)
{
	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

	a->rx_ring[r_idx]->next = q_vector->rx.ring;
	q_vector->rx.ring = a->rx_ring[r_idx];
	q_vector->rx.count++;
}

static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
				     int t_idx)
{
	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

	a->tx_ring[t_idx]->next = q_vector->tx.ring;
	q_vector->tx.ring = a->tx_ring[t_idx];
	q_vector->tx.count++;
}

/**
 * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible.  You would add new
 * mapping configurations in here.
 **/
static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
{
	int q_vectors;
	int v_start = 0;
	int rxr_idx = 0, txr_idx = 0;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int i, j;
	int rqpv, tqpv;
	int err = 0;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/*
	 * The ideal configuration...
	 * We have enough vectors to map one per queue.
	 */
	if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
		for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
			map_vector_to_rxq(adapter, v_start, rxr_idx);

		for (; txr_idx < txr_remaining; v_start++, txr_idx++)
			map_vector_to_txq(adapter, v_start, txr_idx);
		goto out;
	}

	/*
	 * If we don't have enough vectors for a 1-to-1
	 * mapping, we'll have to group them so there are
	 * multiple queues per vector.
	 */
	/* Re-adjusting *qpv takes care of the remainder. */
	for (i = v_start; i < q_vectors; i++) {
		rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
		for (j = 0; j < rqpv; j++) {
			map_vector_to_rxq(adapter, i, rxr_idx);
			rxr_idx++;
			rxr_remaining--;
		}
	}
	for (i = v_start; i < q_vectors; i++) {
		tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
		for (j = 0; j < tqpv; j++) {
			map_vector_to_txq(adapter, i, txr_idx);
			txr_idx++;
			txr_remaining--;
		}
	}

out:
	return err;
}

/**
 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	int vector, err;
	int ri = 0, ti = 0;

	for (vector = 0; vector < q_vectors; vector++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector];
		struct msix_entry *entry = &adapter->msix_entries[vector];

		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "TxRx", ri++);
			ti++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "rx", ri++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "tx", ti++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0,
				  q_vector->name, q_vector);
		if (err) {
			hw_dbg(&adapter->hw,
			       "request_irq failed for MSIX interrupt "
			       "Error: %d\n", err);
			goto free_queue_irqs;
		}
	}

	err = request_irq(adapter->msix_entries[vector].vector,
			  &ixgbevf_msix_other, 0, netdev->name, adapter);
	if (err) {
		hw_dbg(&adapter->hw,
		       "request_irq for msix_other failed: %d\n", err);
		goto free_queue_irqs;
	}

	return 0;

free_queue_irqs:
	while (vector) {
		vector--;
		free_irq(adapter->msix_entries[vector].vector,
			 adapter->q_vector[vector]);
	}
	/* This failure is non-recoverable - it indicates the system is
	 * out of MSIX vector resources and the VF driver cannot run
	 * without them.  Set the number of msix vectors to zero
	 * indicating that not enough can be allocated.  The error
	 * will be returned to the user indicating device open failed.
	 * Any further attempts to force the driver to open will also
	 * fail.  The only way to recover is to unload the driver and
	 * reload it again.  If the system has recovered some MSIX
	 * vectors then it may succeed.
	 */
	adapter->num_msix_vectors = 0;
	return err;
}

static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
{
	int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (i = 0; i < q_vectors; i++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
		q_vector->rx.ring = NULL;
		q_vector->tx.ring = NULL;
		q_vector->rx.count = 0;
		q_vector->tx.count = 0;
	}
}

/**
 * ixgbevf_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
{
	int err = 0;

	err = ixgbevf_request_msix_irqs(adapter);

	if (err)
		hw_dbg(&adapter->hw,
		       "request_irq failed, Error %d\n", err);

	return err;
}

static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
{
	int i, q_vectors;

	q_vectors = adapter->num_msix_vectors;
	i = q_vectors - 1;

	free_irq(adapter->msix_entries[i].vector, adapter);
	i--;

	for (; i >= 0; i--) {
		/* free only the irqs that were actually requested */
		if (!adapter->q_vector[i]->rx.ring &&
		    !adapter->q_vector[i]->tx.ring)
			continue;

		free_irq(adapter->msix_entries[i].vector,
			 adapter->q_vector[i]);
	}

	ixgbevf_reset_q_vectors(adapter);
}

/**
 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0);

	IXGBE_WRITE_FLUSH(hw);

	for (i = 0; i < adapter->num_msix_vectors; i++)
		synchronize_irq(adapter->msix_entries[i].vector);
}

/**
 * ixgbevf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

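	/* EIAM auto-masks a vector when its interrupt fires, EIAC lets the
	 * hardware auto-clear the cause, and EIMS unmasks it; enable all
	 * three for every vector in eims_enable_mask
	 */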
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
}

/**
 * ixgbevf_configure_tx_ring - Configure 82599 VF Tx ring after Reset
 * @adapter: board private structure
 * @ring: structure containing ring specific data
 *
 * Configure the Tx descriptor ring after a reset.
 **/
static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
				      struct ixgbevf_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64 tdba = ring->dma;
	int wait_loop = 10;
	u32 txdctl = IXGBE_TXDCTL_ENABLE;
	u8 reg_idx = ring->reg_idx;

	/* disable queue to avoid issues while updating state */
	IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
	IXGBE_WRITE_FLUSH(hw);

	IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(reg_idx), tdba & DMA_BIT_MASK(32));
	IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(reg_idx), tdba >> 32);
	IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(reg_idx),
			ring->count * sizeof(union ixgbe_adv_tx_desc));

	/* disable head writeback */
	IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAH(reg_idx), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAL(reg_idx), 0);

	/* enable relaxed ordering */
	IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(reg_idx),
			(IXGBE_DCA_TXCTRL_DESC_RRO_EN |
			 IXGBE_DCA_TXCTRL_DATA_RRO_EN));

	/* reset head and tail pointers */
	IXGBE_WRITE_REG(hw, IXGBE_VFTDH(reg_idx), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFTDT(reg_idx), 0);
	ring->tail = hw->hw_addr + IXGBE_VFTDT(reg_idx);

	/* reset ntu and ntc to place SW in sync with hardware */
	ring->next_to_clean = 0;
	ring->next_to_use = 0;

	/* In order to avoid issues WTHRESH + PTHRESH should always be equal
	 * to or less than the number of on chip descriptors, which is
	 * currently 40.
	 */
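	/* the shifts below place HTHRESH at bit 8 and WTHRESH at bit 16 of
	 * TXDCTL, while PTHRESH occupies the low bits
	 */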
	txdctl |= (8 << 16);    /* WTHRESH = 8 */

	/* Setting PTHRESH to 32 improves performance */
	txdctl |= (1 << 8) |    /* HTHRESH = 1 */
		   32;          /* PTHRESH = 32 */

	IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl);

	/* poll to verify queue is enabled */
	do {
		usleep_range(1000, 2000);
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(reg_idx));
	} while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
	if (!wait_loop)
		pr_err("Could not enable Tx Queue %d\n", reg_idx);
}

/**
 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
{
	u32 i;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbevf_configure_tx_ring(adapter, adapter->tx_ring[i]);
}

#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT	2

static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
{
	struct ixgbevf_ring *rx_ring;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 srrctl;

	rx_ring = adapter->rx_ring[index];

	srrctl = IXGBE_SRRCTL_DROP_EN;

	srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

	srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
		  IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
}

static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	/* PSRTYPE must be initialized in 82599 */
	u32 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
		      IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
		      IXGBE_PSRTYPE_L2HDR;

	if (adapter->num_rx_queues > 1)
		psrtype |= 1 << 29;

	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
}

static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	int i;
	u16 rx_buf_len;

	/* notify the PF of our intent to use this size of frame */
	ixgbevf_rlpml_set_vf(hw, max_frame);

	/* PF will allow an extra 4 bytes past for vlan tagged frames */
	max_frame += VLAN_HLEN;

	/*
	 * Allocate buffer sizes that fit well into 32K and
	 * take into account max frame size of 9.5K
	 */
	if ((hw->mac.type == ixgbe_mac_X540_vf) &&
	    (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE))
		rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
	else if (max_frame <= IXGBEVF_RXBUFFER_2K)
		rx_buf_len = IXGBEVF_RXBUFFER_2K;
	else if (max_frame <= IXGBEVF_RXBUFFER_4K)
		rx_buf_len = IXGBEVF_RXBUFFER_4K;
	else if (max_frame <= IXGBEVF_RXBUFFER_8K)
		rx_buf_len = IXGBEVF_RXBUFFER_8K;
	else
		rx_buf_len = IXGBEVF_RXBUFFER_10K;

	for (i = 0; i < adapter->num_rx_queues; i++)
		adapter->rx_ring[i]->rx_buf_len = rx_buf_len;
}

#define IXGBEVF_MAX_RX_DESC_POLL 10
static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
				     struct ixgbevf_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
	u32 rxdctl;
	u8 reg_idx = ring->reg_idx;

	rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
	rxdctl &= ~IXGBE_RXDCTL_ENABLE;

	/* write value back with RXDCTL.ENABLE bit cleared */
	IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);

	/* the hardware may take up to 100us to really disable the rx queue */
	do {
		udelay(10);
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
	} while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));

	if (!wait_loop)
		pr_err("RXDCTL.ENABLE queue %d not cleared while polling\n",
		       reg_idx);
}

static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
					 struct ixgbevf_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
	u32 rxdctl;
	u8 reg_idx = ring->reg_idx;

	do {
		usleep_range(1000, 2000);
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
	} while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));

	if (!wait_loop)
		pr_err("RXDCTL.ENABLE queue %d not set while polling\n",
		       reg_idx);
}

static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
				      struct ixgbevf_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64 rdba = ring->dma;
	u32 rxdctl;
	u8 reg_idx = ring->reg_idx;

	/* disable queue to avoid issues while updating state */
	rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
	ixgbevf_disable_rx_queue(adapter, ring);

	IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(reg_idx), rdba & DMA_BIT_MASK(32));
	IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(reg_idx), rdba >> 32);
	IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(reg_idx),
			ring->count * sizeof(union ixgbe_adv_rx_desc));

	/* enable relaxed ordering */
	IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
			IXGBE_DCA_RXCTRL_DESC_RRO_EN);

	/* reset head and tail pointers */
	IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFRDT(reg_idx), 0);
	ring->tail = hw->hw_addr + IXGBE_VFRDT(reg_idx);

	/* reset ntu and ntc to place SW in sync with hardware */
	ring->next_to_clean = 0;
	ring->next_to_use = 0;

	ixgbevf_configure_srrctl(adapter, reg_idx);

	/* prevent DMA from exceeding buffer space available */
	rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
	rxdctl |= ring->rx_buf_len | IXGBE_RXDCTL_RLPML_EN;
	rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
	IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);

	ixgbevf_rx_desc_queue_enable(adapter, ring);
	ixgbevf_alloc_rx_buffers(ring, ixgbevf_desc_unused(ring));
}

/**
 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
{
	int i;

	ixgbevf_setup_psrtype(adapter);

	/* set_rx_buffer_len must be called before ring initialization */
	ixgbevf_set_rx_buffer_len(adapter);

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbevf_configure_rx_ring(adapter, adapter->rx_ring[i]);
}

static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
				   __be16 proto, u16 vid)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int err;

	spin_lock_bh(&adapter->mbx_lock);

	/* add VID to filter table */
	err = hw->mac.ops.set_vfta(hw, vid, 0, true);

	spin_unlock_bh(&adapter->mbx_lock);

	/* translate error return types so error makes sense */
	if (err == IXGBE_ERR_MBX)
		return -EIO;

	if (err == IXGBE_ERR_INVALID_ARGUMENT)
		return -EACCES;

	set_bit(vid, adapter->active_vlans);

	return err;
}

static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev,
				    __be16 proto, u16 vid)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int err = -EOPNOTSUPP;

	spin_lock_bh(&adapter->mbx_lock);

	/* remove VID from filter table */
	err = hw->mac.ops.set_vfta(hw, vid, 0, false);

	spin_unlock_bh(&adapter->mbx_lock);

	clear_bit(vid, adapter->active_vlans);

	return err;
}

static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
{
	u16 vid;

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		ixgbevf_vlan_rx_add_vid(adapter->netdev,
					htons(ETH_P_8021Q), vid);
}

static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int count = 0;

	if ((netdev_uc_count(netdev)) > 10) {
		pr_err("Too many unicast filters - No Space\n");
		return -ENOSPC;
	}

	if (!netdev_uc_empty(netdev)) {
		struct netdev_hw_addr *ha;
		netdev_for_each_uc_addr(ha, netdev) {
			hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
			udelay(200);
		}
	} else {
		/*
		 * If the list is empty then send message to PF driver to
		 * clear all macvlans on this VF.
		 */
		hw->mac.ops.set_uc_addr(hw, 0, NULL);
	}

	return count;
}

/**
 * ixgbevf_set_rx_mode - Multicast and unicast set
 * @netdev: network interface device structure
 *
 * The set_rx_method entry point is called whenever the multicast address
 * list, unicast address list or the network interface flags are updated.
 * This routine is responsible for configuring the hardware for proper
 * multicast mode and configuring requested unicast filters.
 **/
static void ixgbevf_set_rx_mode(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	spin_lock_bh(&adapter->mbx_lock);

	/* reprogram multicast list */
	hw->mac.ops.update_mc_addr_list(hw, netdev);

	ixgbevf_write_uc_addr_list(netdev);

	spin_unlock_bh(&adapter->mbx_lock);
}

static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
{
	int q_idx;
	struct ixgbevf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = adapter->q_vector[q_idx];
#ifdef CONFIG_NET_RX_BUSY_POLL
		ixgbevf_qv_init_lock(adapter->q_vector[q_idx]);
#endif
		napi_enable(&q_vector->napi);
	}
}

static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
{
	int q_idx;
	struct ixgbevf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = adapter->q_vector[q_idx];
		napi_disable(&q_vector->napi);
#ifdef CONFIG_NET_RX_BUSY_POLL
		while (!ixgbevf_qv_disable(adapter->q_vector[q_idx])) {
			pr_info("QV %d locked\n", q_idx);
			usleep_range(1000, 20000);
		}
#endif /* CONFIG_NET_RX_BUSY_POLL */
	}
}

static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	unsigned int def_q = 0;
	unsigned int num_tcs = 0;
	unsigned int num_rx_queues = 1;
	int err;

	spin_lock_bh(&adapter->mbx_lock);

	/* fetch queue configuration from the PF */
	err = ixgbevf_get_queues(hw, &num_tcs, &def_q);

	spin_unlock_bh(&adapter->mbx_lock);

	if (err)
		return err;

	if (num_tcs > 1) {
		/* update default Tx ring register index */
		adapter->tx_ring[0]->reg_idx = def_q;

		/* we need as many queues as traffic classes */
		num_rx_queues = num_tcs;
	}

	/* if we have a bad config abort request queue reset */
	if (adapter->num_rx_queues != num_rx_queues) {
		/* force mailbox timeout to prevent further messages */
		hw->mbx.timeout = 0;

		/* wait for watchdog to come around and bail us out */
		adapter->flags |= IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;
	}

	return 0;
}

static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
{
	ixgbevf_configure_dcb(adapter);

	ixgbevf_set_rx_mode(adapter->netdev);

	ixgbevf_restore_vlan(adapter);

	ixgbevf_configure_tx(adapter);
	ixgbevf_configure_rx(adapter);
}

static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
{
	/* Only save pre-reset stats if there are some */
	if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
		adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
			adapter->stats.base_vfgprc;
		adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
			adapter->stats.base_vfgptc;
		adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
			adapter->stats.base_vfgorc;
		adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
			adapter->stats.base_vfgotc;
		adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
			adapter->stats.base_vfmprc;
	}
}

static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
	adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
	adapter->stats.last_vfgorc |=
		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
	adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
	adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
	adapter->stats.last_vfgotc |=
		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
	adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);

	adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
	adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
	adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
	adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
	adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
}

static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int api[] = { ixgbe_mbox_api_11,
		      ixgbe_mbox_api_10,
		      ixgbe_mbox_api_unknown };
	int err = 0, idx = 0;

	spin_lock_bh(&adapter->mbx_lock);

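	/* walk the list from newest to oldest mailbox API and settle on
	 * the first version the PF accepts
	 */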
1584 while (api[idx] != ixgbe_mbox_api_unknown) {
1585 err = ixgbevf_negotiate_api_version(hw, api[idx]);
1586 if (!err)
1587 break;
1588 idx++;
1589 }
1590
55fdd45b 1591 spin_unlock_bh(&adapter->mbx_lock);
31186785
AD
1592}
1593
795180d8 1594static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
92915f71
GR
1595{
1596 struct net_device *netdev = adapter->netdev;
1597 struct ixgbe_hw *hw = &adapter->hw;
92915f71
GR
1598
1599 ixgbevf_configure_msix(adapter);
1600
55fdd45b 1601 spin_lock_bh(&adapter->mbx_lock);
1c55ed76 1602
92fe0bf7
GR
1603 if (is_valid_ether_addr(hw->mac.addr))
1604 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
1605 else
1606 hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
92915f71 1607
55fdd45b 1608 spin_unlock_bh(&adapter->mbx_lock);
1c55ed76 1609
92915f71
GR
1610 clear_bit(__IXGBEVF_DOWN, &adapter->state);
1611 ixgbevf_napi_enable_all(adapter);
1612
1613 /* enable transmits */
1614 netif_tx_start_all_queues(netdev);
1615
33bd9f60
GR
1616 ixgbevf_save_reset_stats(adapter);
1617 ixgbevf_init_last_counter_stats(adapter);
1618
4b2cd27f 1619 hw->mac.get_link_status = 1;
92915f71 1620 mod_timer(&adapter->watchdog_timer, jiffies);
92915f71
GR
1621}
1622
795180d8 1623void ixgbevf_up(struct ixgbevf_adapter *adapter)
92915f71 1624{
92915f71
GR
1625 struct ixgbe_hw *hw = &adapter->hw;
1626
1627 ixgbevf_configure(adapter);
1628
795180d8 1629 ixgbevf_up_complete(adapter);
92915f71
GR
1630
1631 /* clear any pending interrupts, may auto mask */
1632 IXGBE_READ_REG(hw, IXGBE_VTEICR);
1633
5f3600eb 1634 ixgbevf_irq_enable(adapter);
92915f71
GR
1635}
1636
1637/**
1638 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
92915f71
GR
1639 * @rx_ring: ring to free buffers from
1640 **/
05d063aa 1641static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
92915f71 1642{
92915f71
GR
1643 unsigned long size;
1644 unsigned int i;
1645
c0456c23
GR
1646 if (!rx_ring->rx_buffer_info)
1647 return;
92915f71 1648
c0456c23 1649 /* Free all the Rx ring sk_buffs */
92915f71
GR
1650 for (i = 0; i < rx_ring->count; i++) {
1651 struct ixgbevf_rx_buffer *rx_buffer_info;
1652
1653 rx_buffer_info = &rx_ring->rx_buffer_info[i];
1654 if (rx_buffer_info->dma) {
05d063aa 1655 dma_unmap_single(rx_ring->dev, rx_buffer_info->dma,
92915f71 1656 rx_ring->rx_buf_len,
2a1f8794 1657 DMA_FROM_DEVICE);
92915f71
GR
1658 rx_buffer_info->dma = 0;
1659 }
1660 if (rx_buffer_info->skb) {
1661 struct sk_buff *skb = rx_buffer_info->skb;
1662 rx_buffer_info->skb = NULL;
1663 do {
1664 struct sk_buff *this = skb;
5c60f81a 1665 skb = IXGBE_CB(skb)->prev;
92915f71
GR
1666 dev_kfree_skb(this);
1667 } while (skb);
1668 }
92915f71
GR
1669 }
1670
1671 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
1672 memset(rx_ring->rx_buffer_info, 0, size);
1673
1674 /* Zero out the descriptor ring */
1675 memset(rx_ring->desc, 0, rx_ring->size);
92915f71
GR
1676}
1677
1678/**
1679 * ixgbevf_clean_tx_ring - Free Tx Buffers
1680 * @tx_ring: ring to be cleaned
1681 **/
1682static void ixgbevf_clean_tx_ring(struct ixgbevf_ring *tx_ring)
1683{
1684 struct ixgbevf_tx_buffer *tx_buffer_info;
1685 unsigned long size;
1686 unsigned int i;
1687
1688 if (!tx_ring->tx_buffer_info)
1689 return;
1690
1691 /* Free all the Tx ring sk_buffs */
1692 for (i = 0; i < tx_ring->count; i++) {
1693 tx_buffer_info = &tx_ring->tx_buffer_info[i];
1694 ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
1695 }
1696
1697 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
1698 memset(tx_ring->tx_buffer_info, 0, size);
1699
1700 memset(tx_ring->desc, 0, tx_ring->size);
1701}
1702
1703/**
1704 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
1705 * @adapter: board private structure
1706 **/
1707static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
1708{
1709 int i;
1710
1711 for (i = 0; i < adapter->num_rx_queues; i++)
1712 ixgbevf_clean_rx_ring(adapter->rx_ring[i]);
1713}
1714
1715/**
1716 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
1717 * @adapter: board private structure
1718 **/
1719static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
1720{
1721 int i;
1722
1723 for (i = 0; i < adapter->num_tx_queues; i++)
1724 ixgbevf_clean_tx_ring(adapter->tx_ring[i]);
1725}
1726
1727void ixgbevf_down(struct ixgbevf_adapter *adapter)
1728{
1729 struct net_device *netdev = adapter->netdev;
1730 struct ixgbe_hw *hw = &adapter->hw;
1731 int i;
1732
1733 /* signal that we are down to the interrupt handler */
1734 set_bit(__IXGBEVF_DOWN, &adapter->state);
1735
1736 /* disable all enabled rx queues */
1737 for (i = 0; i < adapter->num_rx_queues; i++)
1738 ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]);
1739
1740 netif_tx_disable(netdev);
1741
1742 msleep(10);
1743
1744 netif_tx_stop_all_queues(netdev);
1745
1746 ixgbevf_irq_disable(adapter);
1747
1748 ixgbevf_napi_disable_all(adapter);
1749
1750 del_timer_sync(&adapter->watchdog_timer);
1751 /* can't call flush scheduled work here because it can deadlock
1752 * if linkwatch_event tries to acquire the rtnl_lock which we are
1753 * holding */
1754 while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK)
1755 msleep(1);
1756
1757 /* disable transmits in the hardware now that interrupts are off */
1758 for (i = 0; i < adapter->num_tx_queues; i++) {
1759 u8 reg_idx = adapter->tx_ring[i]->reg_idx;
1760
1761 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx),
1762 IXGBE_TXDCTL_SWFLSH);
1763 }
1764
1765 netif_carrier_off(netdev);
1766
1767 if (!pci_channel_offline(adapter->pdev))
1768 ixgbevf_reset(adapter);
1769
1770 ixgbevf_clean_all_tx_rings(adapter);
1771 ixgbevf_clean_all_rx_rings(adapter);
1772}
1773
1774void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
1775{
1776 WARN_ON(in_interrupt());
1777
1778 while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
1779 msleep(1);
1780
1781 ixgbevf_down(adapter);
1782 ixgbevf_up(adapter);
1783
1784 clear_bit(__IXGBEVF_RESETTING, &adapter->state);
1785}
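/* __IXGBEVF_RESETTING acts as a simple mutex here: a second reset
 * request naps in 1 ms steps until the in-flight down/up cycle clears
 * the bit. */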
1786
1787void ixgbevf_reset(struct ixgbevf_adapter *adapter)
1788{
1789 struct ixgbe_hw *hw = &adapter->hw;
1790 struct net_device *netdev = adapter->netdev;
1791
1792 if (hw->mac.ops.reset_hw(hw)) {
1793 hw_dbg(hw, "PF still resetting\n");
1794 } else {
1795 hw->mac.ops.init_hw(hw);
1796 ixgbevf_negotiate_api(adapter);
1797 }
1798
1799 if (is_valid_ether_addr(adapter->hw.mac.addr)) {
1800 memcpy(netdev->dev_addr, adapter->hw.mac.addr,
1801 netdev->addr_len);
1802 memcpy(netdev->perm_addr, adapter->hw.mac.addr,
1803 netdev->addr_len);
1804 }
1805}
1806
1807static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
1808 int vectors)
1809{
1810 int err = 0;
1811 int vector_threshold;
1812
1813 /* We'll want at least 2 (vector_threshold):
1814 * 1) TxQ[0] + RxQ[0] handler
1815 * 2) Other (Link Status Change, etc.)
1816 */
1817 vector_threshold = MIN_MSIX_COUNT;
1818
1819 /* The more we get, the more we will assign to Tx/Rx Cleanup
1820 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
1821 * Right now, we simply care about how many we'll get; we'll
1822 * set them up later while requesting irq's.
1823 */
1824 while (vectors >= vector_threshold) {
1825 err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
1826 vectors);
1827 if (!err || err < 0) /* Success or a nasty failure. */
1828 break;
1829 else /* err == number of vectors we should try again with */
1830 vectors = err;
1831 }
1832
1833 if (vectors < vector_threshold)
1834 err = -ENOMEM;
1835
1836 if (err) {
1837 dev_err(&adapter->pdev->dev,
1838 "Unable to allocate MSI-X interrupts\n");
1839 kfree(adapter->msix_entries);
1840 adapter->msix_entries = NULL;
1841 } else {
1842 /*
1843 * Adjust for only the vectors we'll use, which is minimum
1844 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
1845 * vectors we were allocated.
1846 */
1847 adapter->num_msix_vectors = vectors;
1848 }
1849
1850 return err;
1851}
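/* For reference: the legacy pci_enable_msix() used above returns 0 on
 * success, a negative errno on hard failure, or a positive count of the
 * vectors that could be allocated when the request was too large -- which
 * the retry loop feeds straight back in as the new request size.  Later
 * kernels express the same pattern with pci_enable_msix_range(). */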
1852
1853/**
1854 * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
1855 * @adapter: board private structure to initialize
1856 *
1857 * This is the top level queue allocation routine. The order here is very
1858 * important, starting with the largest combination of features turned on
1859 * at once, and ending with the smallest set of features. This way large
1860 * combinations can be allocated if they're turned on, and smaller
1861 * combinations are the fallthrough conditions.
1862 *
1863 **/
1864static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
1865{
1866 struct ixgbe_hw *hw = &adapter->hw;
1867 unsigned int def_q = 0;
1868 unsigned int num_tcs = 0;
1869 int err;
1870
1871 /* Start with base case */
1872 adapter->num_rx_queues = 1;
1873 adapter->num_tx_queues = 1;
1874
1875 spin_lock_bh(&adapter->mbx_lock);
1876
1877 /* fetch queue configuration from the PF */
1878 err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
1879
1880 spin_unlock_bh(&adapter->mbx_lock);
1881
1882 if (err)
1883 return;
1884
1885 /* we need as many queues as traffic classes */
1886 if (num_tcs > 1)
1887 adapter->num_rx_queues = num_tcs;
1888}
1889
1890/**
1891 * ixgbevf_alloc_queues - Allocate memory for all rings
1892 * @adapter: board private structure to initialize
1893 *
1894 * We allocate one ring per queue at run-time since we don't know the
1895 * number of queues at compile-time.
1897 **/
1898static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
1899{
1900 struct ixgbevf_ring *ring;
1901 int rx = 0, tx = 0;
1902
1903 for (; tx < adapter->num_tx_queues; tx++) {
1904 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
1905 if (!ring)
1906 goto err_allocation;
1907
1908 ring->dev = &adapter->pdev->dev;
1909 ring->netdev = adapter->netdev;
1910 ring->count = adapter->tx_ring_count;
1911 ring->queue_index = tx;
1912 ring->reg_idx = tx;
1913
1914 adapter->tx_ring[tx] = ring;
1915 }
1916
1917 for (; rx < adapter->num_rx_queues; rx++) {
1918 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
1919 if (!ring)
1920 goto err_allocation;
1921
1922 ring->dev = &adapter->pdev->dev;
1923 ring->netdev = adapter->netdev;
1924
1925 ring->count = adapter->rx_ring_count;
1926 ring->queue_index = rx;
1927 ring->reg_idx = rx;
1928
1929 adapter->rx_ring[rx] = ring;
1930 }
1931
1932 return 0;
1933
1934err_allocation:
1935 while (tx) {
1936 kfree(adapter->tx_ring[--tx]);
1937 adapter->tx_ring[tx] = NULL;
1938 }
1939
1940 while (rx) {
1941 kfree(adapter->rx_ring[--rx]);
1942 adapter->rx_ring[rx] = NULL;
1943 }
1944 return -ENOMEM;
1945}
1946
1947/**
1948 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
1949 * @adapter: board private structure to initialize
1950 *
1951 * Attempt to configure the interrupts using the best available
1952 * capabilities of the hardware and the kernel.
1953 **/
1954static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
1955{
1956 struct net_device *netdev = adapter->netdev;
1957 int err = 0;
1958 int vector, v_budget;
1959
1960 /*
1961 * It's easy to be greedy for MSI-X vectors, but it really
1962 * doesn't do us much good if we have a lot more vectors
1963 * than CPUs. So let's be conservative and only ask for
1964 * (roughly) the same number of vectors as there are CPUs.
1965 * The default is to use pairs of vectors.
1966 */
1967 v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
1968 v_budget = min_t(int, v_budget, num_online_cpus());
1969 v_budget += NON_Q_VECTORS;
1970
1971 /* A failure in MSI-X entry allocation isn't fatal, but it does
1972 * mean we disable MSI-X capabilities of the adapter. */
1973 adapter->msix_entries = kcalloc(v_budget,
1974 sizeof(struct msix_entry), GFP_KERNEL);
1975 if (!adapter->msix_entries) {
1976 err = -ENOMEM;
1977 goto out;
1978 }
1979
1980 for (vector = 0; vector < v_budget; vector++)
1981 adapter->msix_entries[vector].entry = vector;
1982
1983 err = ixgbevf_acquire_msix_vectors(adapter, v_budget);
1984 if (err)
1985 goto out;
1986
1987 err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
1988 if (err)
1989 goto out;
1990
1991 err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
1992
1993out:
1994 return err;
1995}
1996
1997/**
1998 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
1999 * @adapter: board private structure to initialize
2000 *
2001 * We allocate one q_vector per queue interrupt. If allocation fails we
2002 * return -ENOMEM.
2003 **/
2004static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
2005{
2006 int q_idx, num_q_vectors;
2007 struct ixgbevf_q_vector *q_vector;
2008
2009 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2010
2011 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
2012 q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
2013 if (!q_vector)
2014 goto err_out;
2015 q_vector->adapter = adapter;
2016 q_vector->v_idx = q_idx;
2017 netif_napi_add(adapter->netdev, &q_vector->napi,
2018 ixgbevf_poll, 64);
2019#ifdef CONFIG_NET_RX_BUSY_POLL
2020 napi_hash_add(&q_vector->napi);
2021#endif
2022 adapter->q_vector[q_idx] = q_vector;
2023 }
2024
2025 return 0;
2026
2027err_out:
2028 while (q_idx) {
2029 q_idx--;
2030 q_vector = adapter->q_vector[q_idx];
2031#ifdef CONFIG_NET_RX_BUSY_POLL
2032 napi_hash_del(&q_vector->napi);
2033#endif
2034 netif_napi_del(&q_vector->napi);
2035 kfree(q_vector);
2036 adapter->q_vector[q_idx] = NULL;
2037 }
2038 return -ENOMEM;
2039}
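/* netif_napi_add() above registers each vector's poll routine with the
 * stock NAPI weight of 64, i.e. one poll invocation may clean at most 64
 * packets before yielding back to the softirq loop. */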
2040
2041/**
2042 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
2043 * @adapter: board private structure to initialize
2044 *
2045 * This function frees the memory allocated to the q_vectors. In addition if
2046 * NAPI is enabled it will delete any references to the NAPI struct prior
2047 * to freeing the q_vector.
2048 **/
2049static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
2050{
2051 int q_idx, num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2052
2053 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
2054 struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];
2055
2056 adapter->q_vector[q_idx] = NULL;
2057#ifdef CONFIG_NET_RX_BUSY_POLL
2058 napi_hash_del(&q_vector->napi);
2059#endif
2060 netif_napi_del(&q_vector->napi);
2061 kfree(q_vector);
2062 }
2063}
2064
2065/**
2066 * ixgbevf_reset_interrupt_capability - Reset MSIX setup
2067 * @adapter: board private structure
2068 *
2069 **/
2070static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
2071{
2072 pci_disable_msix(adapter->pdev);
2073 kfree(adapter->msix_entries);
2074 adapter->msix_entries = NULL;
2075}
2076
2077/**
2078 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
2079 * @adapter: board private structure to initialize
2080 *
2081 **/
2082static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
2083{
2084 int err;
2085
2086 /* Number of supported queues */
2087 ixgbevf_set_num_queues(adapter);
2088
2089 err = ixgbevf_set_interrupt_capability(adapter);
2090 if (err) {
2091 hw_dbg(&adapter->hw,
2092 "Unable to setup interrupt capabilities\n");
2093 goto err_set_interrupt;
2094 }
2095
2096 err = ixgbevf_alloc_q_vectors(adapter);
2097 if (err) {
2098 hw_dbg(&adapter->hw, "Unable to allocate memory for queue "
2099 "vectors\n");
2100 goto err_alloc_q_vectors;
2101 }
2102
2103 err = ixgbevf_alloc_queues(adapter);
2104 if (err) {
2105 pr_err("Unable to allocate memory for queues\n");
2106 goto err_alloc_queues;
2107 }
2108
2109 hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, "
2110 "Tx Queue count = %u\n",
2111 (adapter->num_rx_queues > 1) ? "Enabled" :
2112 "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
2113
2114 set_bit(__IXGBEVF_DOWN, &adapter->state);
2115
2116 return 0;
2117err_alloc_queues:
2118 ixgbevf_free_q_vectors(adapter);
2119err_alloc_q_vectors:
2120 ixgbevf_reset_interrupt_capability(adapter);
2121err_set_interrupt:
2122 return err;
2123}
2124
2125/**
2126 * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings
2127 * @adapter: board private structure to clear interrupt scheme on
2128 *
2129 * We go through and clear interrupt specific resources and reset the structure
2130 * to pre-load conditions
2131 **/
2132static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
2133{
2134 int i;
2135
2136 for (i = 0; i < adapter->num_tx_queues; i++) {
2137 kfree(adapter->tx_ring[i]);
2138 adapter->tx_ring[i] = NULL;
2139 }
2140 for (i = 0; i < adapter->num_rx_queues; i++) {
2141 kfree(adapter->rx_ring[i]);
2142 adapter->rx_ring[i] = NULL;
2143 }
2144
2145 adapter->num_tx_queues = 0;
2146 adapter->num_rx_queues = 0;
2147
2148 ixgbevf_free_q_vectors(adapter);
2149 ixgbevf_reset_interrupt_capability(adapter);
2150}
2151
2152/**
2153 * ixgbevf_sw_init - Initialize general software structures
2154 * (struct ixgbevf_adapter)
2155 * @adapter: board private structure to initialize
2156 *
2157 * ixgbevf_sw_init initializes the Adapter private data structure.
2158 * Fields are initialized based on PCI device information and
2159 * OS network device settings (MTU size).
2160 **/
2161static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
2162{
2163 struct ixgbe_hw *hw = &adapter->hw;
2164 struct pci_dev *pdev = adapter->pdev;
2165 struct net_device *netdev = adapter->netdev;
2166 int err;
2167
2168 /* PCI config space info */
2169
2170 hw->vendor_id = pdev->vendor;
2171 hw->device_id = pdev->device;
2172 hw->revision_id = pdev->revision;
2173 hw->subsystem_vendor_id = pdev->subsystem_vendor;
2174 hw->subsystem_device_id = pdev->subsystem_device;
2175
2176 hw->mbx.ops.init_params(hw);
2177
2178 /* assume legacy case in which PF would only give VF 2 queues */
2179 hw->mac.max_tx_queues = 2;
2180 hw->mac.max_rx_queues = 2;
2181
2182 /* lock to protect mailbox accesses */
2183 spin_lock_init(&adapter->mbx_lock);
2184
2185 err = hw->mac.ops.reset_hw(hw);
2186 if (err) {
2187 dev_info(&pdev->dev,
2188 "PF still in reset state. Is the PF interface up?\n");
2189 } else {
2190 err = hw->mac.ops.init_hw(hw);
2191 if (err) {
2192 pr_err("init_hw failed: %d\n", err);
2193 goto out;
2194 }
2195 ixgbevf_negotiate_api(adapter);
2196 err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
2197 if (err)
2198 dev_info(&pdev->dev, "Error reading MAC address\n");
2199 else if (is_zero_ether_addr(adapter->hw.mac.addr))
2200 dev_info(&pdev->dev,
2201 "MAC address not assigned by administrator.\n");
2202 memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
2203 }
2204
2205 if (!is_valid_ether_addr(netdev->dev_addr)) {
2206 dev_info(&pdev->dev, "Assigning random MAC address\n");
2207 eth_hw_addr_random(netdev);
2208 memcpy(hw->mac.addr, netdev->dev_addr, netdev->addr_len);
2209 }
2210
2211 /* Enable dynamic interrupt throttling rates */
2212 adapter->rx_itr_setting = 1;
2213 adapter->tx_itr_setting = 1;
2214
2215 /* set default ring sizes */
2216 adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
2217 adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;
2218
2219 set_bit(__IXGBEVF_DOWN, &adapter->state);
2220 return 0;
2221
2222out:
2223 return err;
2224}
2225
2226#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \
2227 { \
2228 u32 current_counter = IXGBE_READ_REG(hw, reg); \
2229 if (current_counter < last_counter) \
2230 counter += 0x100000000LL; \
2231 last_counter = current_counter; \
2232 counter &= 0xFFFFFFFF00000000LL; \
2233 counter |= current_counter; \
2234 }
2235
2236#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
2237 { \
2238 u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \
2239 u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \
2240 u64 current_counter = (current_counter_msb << 32) | \
2241 current_counter_lsb; \
2242 if (current_counter < last_counter) \
2243 counter += 0x1000000000LL; \
2244 last_counter = current_counter; \
2245 counter &= 0xFFFFFFF000000000LL; \
2246 counter |= current_counter; \
2247 }
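/* Both macros fold a free-running hardware register into a 64-bit
 * software counter: a raw value lower than the last sample means the
 * register wrapped, so the high-order software bits advance and the
 * fresh low-order bits are merged back in.  A minimal sketch of the
 * 32-bit case, outside the macro:
 *
 *	u32 cur = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
 *	if (cur < last)
 *		total += 0x100000000LL;		// wrapped past 2^32
 *	last = cur;
 *	total = (total & 0xFFFFFFFF00000000LL) | cur;
 *
 * The 36-bit variant does the same with the count split across an
 * LSB/MSB register pair, wrapping every 2^36 instead. */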
2248/**
2249 * ixgbevf_update_stats - Update the board statistics counters.
2250 * @adapter: board private structure
2251 **/
2252void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
2253{
2254 struct ixgbe_hw *hw = &adapter->hw;
2255 int i;
2256
2257 if (!adapter->link_up)
2258 return;
2259
2260 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
2261 adapter->stats.vfgprc);
2262 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
2263 adapter->stats.vfgptc);
2264 UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
2265 adapter->stats.last_vfgorc,
2266 adapter->stats.vfgorc);
2267 UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
2268 adapter->stats.last_vfgotc,
2269 adapter->stats.vfgotc);
2270 UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
2271 adapter->stats.vfmprc);
2272
2273 for (i = 0; i < adapter->num_rx_queues; i++) {
2274 adapter->hw_csum_rx_error +=
2275 adapter->rx_ring[i]->hw_csum_rx_error;
2276 adapter->rx_ring[i]->hw_csum_rx_error = 0;
2277 }
2278}
2279
2280/**
2281 * ixgbevf_watchdog - Timer Call-back
2282 * @data: pointer to adapter cast into an unsigned long
2283 **/
2284static void ixgbevf_watchdog(unsigned long data)
2285{
2286 struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
2287 struct ixgbe_hw *hw = &adapter->hw;
2288 u32 eics = 0;
2289 int i;
2290
2291 /*
2292 * Do the watchdog outside of interrupt context due to the lovely
2293 * delays that some of the newer hardware requires
2294 */
2295
2296 if (test_bit(__IXGBEVF_DOWN, &adapter->state))
2297 goto watchdog_short_circuit;
2298
2299 /* get one bit for every active tx/rx interrupt vector */
2300 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
2301 struct ixgbevf_q_vector *qv = adapter->q_vector[i];
2302 if (qv->rx.ring || qv->tx.ring)
2303 eics |= 1 << i;
2304 }
2305
2306 IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics);
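/* VTEICS is the VF's "interrupt cause set" register: writing a vector's
 * bit raises that MSI-X interrupt in software, so the watchdog above
 * periodically kicks every vector that owns a Tx or Rx ring. */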
2307
2308watchdog_short_circuit:
2309 schedule_work(&adapter->watchdog_task);
2310}
2311
2312/**
2313 * ixgbevf_tx_timeout - Respond to a Tx Hang
2314 * @netdev: network interface device structure
2315 **/
2316static void ixgbevf_tx_timeout(struct net_device *netdev)
2317{
2318 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2319
2320 /* Do the reset outside of interrupt context */
2321 schedule_work(&adapter->reset_task);
2322}
2323
2324static void ixgbevf_reset_task(struct work_struct *work)
2325{
2326 struct ixgbevf_adapter *adapter;
2327 adapter = container_of(work, struct ixgbevf_adapter, reset_task);
2328
2329 /* If we're already down or resetting, just bail */
2330 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
2331 test_bit(__IXGBEVF_RESETTING, &adapter->state))
2332 return;
2333
2334 adapter->tx_timeout_count++;
2335
2336 ixgbevf_reinit_locked(adapter);
2337}
2338
2339/**
2340 * ixgbevf_watchdog_task - worker thread to bring link up
2341 * @work: pointer to work_struct containing our data
2342 **/
2343static void ixgbevf_watchdog_task(struct work_struct *work)
2344{
2345 struct ixgbevf_adapter *adapter = container_of(work,
2346 struct ixgbevf_adapter,
2347 watchdog_task);
2348 struct net_device *netdev = adapter->netdev;
2349 struct ixgbe_hw *hw = &adapter->hw;
2350 u32 link_speed = adapter->link_speed;
2351 bool link_up = adapter->link_up;
2352 s32 need_reset;
2353
2354 ixgbevf_queue_reset_subtask(adapter);
2355
2356 adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
2357
2358 /*
2359 * Always check the link on the watchdog because we have
2360 * no LSC interrupt
2361 */
2362 spin_lock_bh(&adapter->mbx_lock);
2363
2364 need_reset = hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
2365
2366 spin_unlock_bh(&adapter->mbx_lock);
2367
2368 if (need_reset) {
2369 adapter->link_up = link_up;
2370 adapter->link_speed = link_speed;
2371 netif_carrier_off(netdev);
2372 netif_tx_stop_all_queues(netdev);
2373 schedule_work(&adapter->reset_task);
2374 goto pf_has_reset;
92915f71
GR
2375 }
2376 adapter->link_up = link_up;
2377 adapter->link_speed = link_speed;
2378
2379 if (link_up) {
2380 if (!netif_carrier_ok(netdev)) {
2381 char *link_speed_string;
2382 switch (link_speed) {
2383 case IXGBE_LINK_SPEED_10GB_FULL:
2384 link_speed_string = "10 Gbps";
2385 break;
2386 case IXGBE_LINK_SPEED_1GB_FULL:
2387 link_speed_string = "1 Gbps";
2388 break;
2389 case IXGBE_LINK_SPEED_100_FULL:
2390 link_speed_string = "100 Mbps";
2391 break;
2392 default:
2393 link_speed_string = "unknown speed";
2394 break;
2395 }
2396 dev_info(&adapter->pdev->dev,
2397 "NIC Link is Up, %s\n", link_speed_string);
2398 netif_carrier_on(netdev);
2399 netif_tx_wake_all_queues(netdev);
2400 }
2401 } else {
2402 adapter->link_up = false;
2403 adapter->link_speed = 0;
2404 if (netif_carrier_ok(netdev)) {
2405 dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
2406 netif_carrier_off(netdev);
2407 netif_tx_stop_all_queues(netdev);
2408 }
2409 }
2410
2411 ixgbevf_update_stats(adapter);
2412
2413pf_has_reset:
2414 /* Reset the timer */
2415 if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
2416 mod_timer(&adapter->watchdog_timer,
2417 round_jiffies(jiffies + (2 * HZ)));
2418
2419 adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
2420}
2421
2422/**
2423 * ixgbevf_free_tx_resources - Free Tx Resources per Queue
2424 * @tx_ring: Tx descriptor ring for a specific queue
2425 *
2426 * Free all transmit software resources
2427 **/
2428void ixgbevf_free_tx_resources(struct ixgbevf_ring *tx_ring)
2429{
2430 ixgbevf_clean_tx_ring(tx_ring);
2431
2432 vfree(tx_ring->tx_buffer_info);
2433 tx_ring->tx_buffer_info = NULL;
2434
2435 /* if not set, then don't free */
2436 if (!tx_ring->desc)
2437 return;
2438
2439 dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc,
2440 tx_ring->dma);
2441
2442 tx_ring->desc = NULL;
2443}
2444
2445/**
2446 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
2447 * @adapter: board private structure
2448 *
2449 * Free all transmit software resources
2450 **/
2451static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
2452{
2453 int i;
2454
2455 for (i = 0; i < adapter->num_tx_queues; i++)
2456 if (adapter->tx_ring[i]->desc)
2457 ixgbevf_free_tx_resources(adapter->tx_ring[i]);
2458}
2459
2460/**
2461 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
2462 * @tx_ring: tx descriptor ring (for a specific queue) to setup
2463 *
2464 * Return 0 on success, negative on failure
2465 **/
2466int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring)
2467{
2468 int size;
2469
2470 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
2471 tx_ring->tx_buffer_info = vzalloc(size);
2472 if (!tx_ring->tx_buffer_info)
2473 goto err;
2474
2475 /* round up to nearest 4K */
2476 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
2477 tx_ring->size = ALIGN(tx_ring->size, 4096);
2478
2479 tx_ring->desc = dma_alloc_coherent(tx_ring->dev, tx_ring->size,
2480 &tx_ring->dma, GFP_KERNEL);
2481 if (!tx_ring->desc)
2482 goto err;
2483
2484 return 0;
2485
2486err:
2487 vfree(tx_ring->tx_buffer_info);
2488 tx_ring->tx_buffer_info = NULL;
2489 dev_err(tx_ring->dev,
2490 "Unable to allocate memory for the transmit descriptor ring\n");
2491 return -ENOMEM;
2492}
2493
2494/**
2495 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
2496 * @adapter: board private structure
2497 *
2498 * If this function returns with an error, then it's possible one or
2499 * more of the rings is populated (while the rest are not). It is the
2500 * callers duty to clean those orphaned rings.
2501 *
2502 * Return 0 on success, negative on failure
2503 **/
2504static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
2505{
2506 int i, err = 0;
2507
2508 for (i = 0; i < adapter->num_tx_queues; i++) {
2509 err = ixgbevf_setup_tx_resources(adapter->tx_ring[i]);
2510 if (!err)
2511 continue;
2512 hw_dbg(&adapter->hw,
2513 "Allocation for Tx Queue %u failed\n", i);
2514 break;
2515 }
2516
2517 return err;
2518}
2519
2520/**
2521 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
2522 * @rx_ring: rx descriptor ring (for a specific queue) to setup
2523 *
2524 * Returns 0 on success, negative on failure
2525 **/
2526int ixgbevf_setup_rx_resources(struct ixgbevf_ring *rx_ring)
2527{
2528 int size;
2529
2530 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
2531 rx_ring->rx_buffer_info = vzalloc(size);
2532 if (!rx_ring->rx_buffer_info)
2533 goto err;
2534
2535 /* Round up to nearest 4K */
2536 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
2537 rx_ring->size = ALIGN(rx_ring->size, 4096);
2538
2539 rx_ring->desc = dma_alloc_coherent(rx_ring->dev, rx_ring->size,
2540 &rx_ring->dma, GFP_KERNEL);
2541
2542 if (!rx_ring->desc)
2543 goto err;
2544
2545 return 0;
2546err:
2547 vfree(rx_ring->rx_buffer_info);
2548 rx_ring->rx_buffer_info = NULL;
2549 dev_err(rx_ring->dev, "Unable to allocate memory for the Rx descriptor ring\n");
2550 return -ENOMEM;
2551}
2552
2553/**
2554 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
2555 * @adapter: board private structure
2556 *
2557 * If this function returns with an error, then it's possible one or
2558 * more of the rings is populated (while the rest are not). It is the
2559 * callers duty to clean those orphaned rings.
2560 *
2561 * Return 0 on success, negative on failure
2562 **/
2563static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
2564{
2565 int i, err = 0;
2566
2567 for (i = 0; i < adapter->num_rx_queues; i++) {
2568 err = ixgbevf_setup_rx_resources(adapter->rx_ring[i]);
2569 if (!err)
2570 continue;
2571 hw_dbg(&adapter->hw,
2572 "Allocation for Rx Queue %u failed\n", i);
2573 break;
2574 }
2575 return err;
2576}
2577
2578/**
2579 * ixgbevf_free_rx_resources - Free Rx Resources
2580 * @rx_ring: ring to clean the resources from
2581 *
2582 * Free all receive software resources
2583 **/
2584void ixgbevf_free_rx_resources(struct ixgbevf_ring *rx_ring)
2585{
2586 ixgbevf_clean_rx_ring(rx_ring);
2587
2588 vfree(rx_ring->rx_buffer_info);
2589 rx_ring->rx_buffer_info = NULL;
2590
2591 dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc,
2592 rx_ring->dma);
2593
2594 rx_ring->desc = NULL;
2595}
2596
2597/**
2598 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
2599 * @adapter: board private structure
2600 *
2601 * Free all receive software resources
2602 **/
2603static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
2604{
2605 int i;
2606
2607 for (i = 0; i < adapter->num_rx_queues; i++)
2608 if (adapter->rx_ring[i]->desc)
2609 ixgbevf_free_rx_resources(adapter->rx_ring[i]);
2610}
2611
2612/**
2613 * ixgbevf_open - Called when a network interface is made active
2614 * @netdev: network interface device structure
2615 *
2616 * Returns 0 on success, negative value on failure
2617 *
2618 * The open entry point is called when a network interface is made
2619 * active by the system (IFF_UP). At this point all resources needed
2620 * for transmit and receive operations are allocated, the interrupt
2621 * handler is registered with the OS, the watchdog timer is started,
2622 * and the stack is notified that the interface is ready.
2623 **/
2624static int ixgbevf_open(struct net_device *netdev)
2625{
2626 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2627 struct ixgbe_hw *hw = &adapter->hw;
2628 int err;
2629
2630 /* A previous failure to open the device because of a lack of
2631 * available MSIX vector resources may have reset the number
2632 * of msix vectors variable to zero. The only way to recover
2633 * is to unload/reload the driver and hope that the system has
2634 * been able to recover some MSIX vector resources.
2635 */
2636 if (!adapter->num_msix_vectors)
2637 return -ENOMEM;
2638
2639 /* disallow open during test */
2640 if (test_bit(__IXGBEVF_TESTING, &adapter->state))
2641 return -EBUSY;
2642
2643 if (hw->adapter_stopped) {
2644 ixgbevf_reset(adapter);
2645 /* if adapter is still stopped then PF isn't up and
2646 * the vf can't start. */
2647 if (hw->adapter_stopped) {
2648 err = IXGBE_ERR_MBX;
2649 pr_err("Unable to start - perhaps the PF Driver isn't "
2650 "up yet\n");
2651 goto err_setup_reset;
2652 }
2653 }
2654
2655 /* allocate transmit descriptors */
2656 err = ixgbevf_setup_all_tx_resources(adapter);
2657 if (err)
2658 goto err_setup_tx;
2659
2660 /* allocate receive descriptors */
2661 err = ixgbevf_setup_all_rx_resources(adapter);
2662 if (err)
2663 goto err_setup_rx;
2664
2665 ixgbevf_configure(adapter);
2666
2667 /*
2668 * Map the Tx/Rx rings to the vectors we were allotted.
2669 * if request_irq will be called in this function map_rings
2670 * must be called *before* up_complete
2671 */
2672 ixgbevf_map_rings_to_vectors(adapter);
2673
2674 ixgbevf_up_complete(adapter);
2675
2676 /* clear any pending interrupts, may auto mask */
2677 IXGBE_READ_REG(hw, IXGBE_VTEICR);
2678 err = ixgbevf_request_irq(adapter);
2679 if (err)
2680 goto err_req_irq;
2681
2682 ixgbevf_irq_enable(adapter);
2683
2684 return 0;
2685
2686err_req_irq:
2687 ixgbevf_down(adapter);
2688err_setup_rx:
2689 ixgbevf_free_all_rx_resources(adapter);
2690err_setup_tx:
2691 ixgbevf_free_all_tx_resources(adapter);
2692 ixgbevf_reset(adapter);
2693
2694err_setup_reset:
2695
2696 return err;
2697}
2698
2699/**
2700 * ixgbevf_close - Disables a network interface
2701 * @netdev: network interface device structure
2702 *
2703 * Returns 0, this is not allowed to fail
2704 *
2705 * The close entry point is called when an interface is de-activated
2706 * by the OS. The hardware is still under the drivers control, but
2707 * needs to be disabled. A global MAC reset is issued to stop the
2708 * hardware, and all transmit and receive resources are freed.
2709 **/
2710static int ixgbevf_close(struct net_device *netdev)
2711{
2712 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2713
2714 ixgbevf_down(adapter);
2715 ixgbevf_free_irq(adapter);
2716
2717 ixgbevf_free_all_tx_resources(adapter);
2718 ixgbevf_free_all_rx_resources(adapter);
2719
2720 return 0;
2721}
2722
2723static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter)
2724{
2725 struct net_device *dev = adapter->netdev;
2726
2727 if (!(adapter->flags & IXGBEVF_FLAG_QUEUE_RESET_REQUESTED))
2728 return;
2729
2730 adapter->flags &= ~IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;
2731
2732 /* if interface is down do nothing */
2733 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
2734 test_bit(__IXGBEVF_RESETTING, &adapter->state))
2735 return;
2736
2737 /* Hardware has to reinitialize queues and interrupts to
2738 * match packet buffer alignment. Unfortunately, the
2739 * hardware is not flexible enough to do this dynamically.
2740 */
2741 if (netif_running(dev))
2742 ixgbevf_close(dev);
2743
2744 ixgbevf_clear_interrupt_scheme(adapter);
2745 ixgbevf_init_interrupt_scheme(adapter);
2746
2747 if (netif_running(dev))
2748 ixgbevf_open(dev);
2749}
2750
2751static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
2752 u32 vlan_macip_lens, u32 type_tucmd,
2753 u32 mss_l4len_idx)
2754{
2755 struct ixgbe_adv_tx_context_desc *context_desc;
2756 u16 i = tx_ring->next_to_use;
2757
2758 context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);
2759
2760 i++;
2761 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2762
2763 /* set bits to identify this as an advanced context descriptor */
2764 type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
2765
2766 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
2767 context_desc->seqnum_seed = 0;
2768 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
2769 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
2770}
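/* A context descriptor carries no packet data; it programs the offload
 * engine (header lengths, VLAN tag, MSS) for the data descriptors that
 * follow it on the ring, which is why it is written with the DEXT bit
 * and the CTXT descriptor type above rather than queued as a buffer. */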
2771
2772static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
2773 struct ixgbevf_tx_buffer *first,
2774 u8 *hdr_len)
2775{
2776 struct sk_buff *skb = first->skb;
2777 u32 vlan_macip_lens, type_tucmd;
2778 u32 mss_l4len_idx, l4len;
2779
2780 if (!skb_is_gso(skb))
2781 return 0;
2782
2783 if (skb_header_cloned(skb)) {
2784 int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2785 if (err)
2786 return err;
2787 }
2788
2789 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
2790 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
2791
2792 if (skb->protocol == htons(ETH_P_IP)) {
2793 struct iphdr *iph = ip_hdr(skb);
2794 iph->tot_len = 0;
2795 iph->check = 0;
2796 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2797 iph->daddr, 0,
2798 IPPROTO_TCP,
2799 0);
2800 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
2801 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
2802 IXGBE_TX_FLAGS_CSUM |
2803 IXGBE_TX_FLAGS_IPV4;
2804 } else if (skb_is_gso_v6(skb)) {
2805 ipv6_hdr(skb)->payload_len = 0;
2806 tcp_hdr(skb)->check =
2807 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2808 &ipv6_hdr(skb)->daddr,
2809 0, IPPROTO_TCP, 0);
2810 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
2811 IXGBE_TX_FLAGS_CSUM;
2812 }
2813
2814 /* compute header lengths */
2815 l4len = tcp_hdrlen(skb);
2816 *hdr_len = skb_transport_offset(skb) + l4len;
2818
2819 /* update gso size and bytecount with header size */
2820 first->gso_segs = skb_shinfo(skb)->gso_segs;
2821 first->bytecount += (first->gso_segs - 1) * *hdr_len;
2822
2823 /* mss_l4len_id: use 1 as index for TSO */
2824 mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
2825 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
2826 mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
2827
2828 /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
2829 vlan_macip_lens = skb_network_header_len(skb);
2830 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
2831 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
2832
2833 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
2834 type_tucmd, mss_l4len_idx);
2835
2836 return 1;
2837}
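/* The ~csum_tcpudp_magic()/~csum_ipv6_magic() calls above seed the TCP
 * checksum field with the inverted pseudo-header sum, computed with a
 * zero length because the hardware folds in each segment's own length
 * as it splits the payload -- the usual contract for advanced-descriptor
 * TSO on this family. */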
2838
2839static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
2840 struct ixgbevf_tx_buffer *first)
2841{
2842 struct sk_buff *skb = first->skb;
2843 u32 vlan_macip_lens = 0;
2844 u32 mss_l4len_idx = 0;
2845 u32 type_tucmd = 0;
2846
2847 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2848 u8 l4_hdr = 0;
2849 switch (skb->protocol) {
2850 case __constant_htons(ETH_P_IP):
2851 vlan_macip_lens |= skb_network_header_len(skb);
2852 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
2853 l4_hdr = ip_hdr(skb)->protocol;
2854 break;
2855 case __constant_htons(ETH_P_IPV6):
2856 vlan_macip_lens |= skb_network_header_len(skb);
2857 l4_hdr = ipv6_hdr(skb)->nexthdr;
2858 break;
2859 default:
2860 if (unlikely(net_ratelimit())) {
2861 dev_warn(tx_ring->dev,
2862 "partial checksum but proto=%x!\n",
2863 first->protocol);
2864 }
2865 break;
2866 }
2867
2868 switch (l4_hdr) {
2869 case IPPROTO_TCP:
2870 type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
2871 mss_l4len_idx = tcp_hdrlen(skb) <<
2872 IXGBE_ADVTXD_L4LEN_SHIFT;
2873 break;
2874 case IPPROTO_SCTP:
2875 type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
2876 mss_l4len_idx = sizeof(struct sctphdr) <<
2877 IXGBE_ADVTXD_L4LEN_SHIFT;
2878 break;
2879 case IPPROTO_UDP:
2880 mss_l4len_idx = sizeof(struct udphdr) <<
2881 IXGBE_ADVTXD_L4LEN_SHIFT;
2882 break;
2883 default:
2884 if (unlikely(net_ratelimit())) {
2885 dev_warn(tx_ring->dev,
2886 "partial checksum but l4 proto=%x!\n",
2887 l4_hdr);
2888 }
2889 break;
2890 }
2891
2892 /* update TX checksum flag */
2893 first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
2894 }
2895
2896 /* vlan_macip_lens: MACLEN, VLAN tag */
2897 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
2898 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
2899
2900 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
2901 type_tucmd, mss_l4len_idx);
2902}
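/* Unlike the TSO path, no MSS is programmed here: for plain checksum
 * offload the context descriptor only needs the L4 header length (and
 * the L4T type bits) so the hardware can locate the checksum field. */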
2903
2904static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
2905 struct ixgbevf_tx_buffer *first)
2906{
2907 struct sk_buff *skb = first->skb;
2908 struct ixgbevf_tx_buffer *tx_buffer_info;
2909 unsigned int len;
2910 unsigned int total = skb->len;
2911 unsigned int offset = 0, size;
2912 int count = 0;
2913 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
2914 unsigned int f;
2915 int i;
2916
2917 i = tx_ring->next_to_use;
2918
2919 len = min(skb_headlen(skb), total);
2920 while (len) {
2921 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2922 size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
2923
2924 tx_buffer_info->length = size;
2925 tx_buffer_info->tx_flags = first->tx_flags;
2926 tx_buffer_info->dma = dma_map_single(tx_ring->dev,
2927 skb->data + offset,
2928 size, DMA_TO_DEVICE);
2929 if (dma_mapping_error(tx_ring->dev, tx_buffer_info->dma))
2930 goto dma_error;
2931
2932 len -= size;
2933 total -= size;
2934 offset += size;
2935 count++;
2936 i++;
2937 if (i == tx_ring->count)
2938 i = 0;
2939 }
2940
2941 for (f = 0; f < nr_frags; f++) {
2942 const struct skb_frag_struct *frag;
2943
2944 frag = &skb_shinfo(skb)->frags[f];
2945 len = min((unsigned int)skb_frag_size(frag), total);
2946 offset = 0;
2947
2948 while (len) {
2949 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2950 size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
2951
2952 tx_buffer_info->length = size;
2953 tx_buffer_info->dma =
2954 skb_frag_dma_map(tx_ring->dev, frag,
2955 offset, size, DMA_TO_DEVICE);
2956 tx_buffer_info->tx_flags |=
2957 IXGBE_TX_FLAGS_MAPPED_AS_PAGE;
2958 if (dma_mapping_error(tx_ring->dev,
2959 tx_buffer_info->dma))
2960 goto dma_error;
2961
2962 len -= size;
2963 total -= size;
2964 offset += size;
2965 count++;
2966 i++;
2967 if (i == tx_ring->count)
2968 i = 0;
2969 }
2970 if (total == 0)
2971 break;
2972 }
2973
2974 if (i == 0)
2975 i = tx_ring->count - 1;
2976 else
2977 i = i - 1;
2978
2979 first->next_to_watch = IXGBEVF_TX_DESC(tx_ring, i);
2980 first->time_stamp = jiffies;
2981
2982 return count;
2983
2984dma_error:
2985 dev_err(tx_ring->dev, "TX DMA map failed\n");
2986
2987 /* clear timestamp and dma mappings for failed tx_buffer_info map */
2988 tx_buffer_info->dma = 0;
2989 count--;
2990
2991 /* clear timestamp and dma mappings for remaining portion of packet */
2992 while (count >= 0) {
2993 count--;
2994 i--;
2995 if (i < 0)
2996 i += tx_ring->count;
2997 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2998 ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
2999 }
3000
3001 return count;
3002}
3003
3004static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring,
3005 struct ixgbevf_tx_buffer *first,
3006 int count, u8 hdr_len)
3007{
3008 union ixgbe_adv_tx_desc *tx_desc = NULL;
3009 struct sk_buff *skb = first->skb;
3010 struct ixgbevf_tx_buffer *tx_buffer_info;
3011 u32 olinfo_status = 0, cmd_type_len = 0;
3012 u32 tx_flags = first->tx_flags;
3013 unsigned int i;
3014
3015 u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;
3016
3017 cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
3018
3019 cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
3020
3021 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
3022 cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
3023
3024 if (tx_flags & IXGBE_TX_FLAGS_CSUM)
3025 olinfo_status |= IXGBE_ADVTXD_POPTS_TXSM;
3026
3027 if (tx_flags & IXGBE_TX_FLAGS_TSO) {
3028 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
3029
92915f71
GR
3030 /* use index 1 context for tso */
3031 olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
3032 if (tx_flags & IXGBE_TX_FLAGS_IPV4)
3033 olinfo_status |= IXGBE_ADVTXD_POPTS_IXSM;
3034 }
3035
3036 /*
3037 * Check Context must be set if Tx switch is enabled, which it
3038 * always is for case where virtual functions are running
3039 */
3040 olinfo_status |= IXGBE_ADVTXD_CC;
3041
3042 olinfo_status |= ((skb->len - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
3043
3044 i = tx_ring->next_to_use;
3045 while (count--) {
3046 tx_buffer_info = &tx_ring->tx_buffer_info[i];
3047 tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
3048 tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
3049 tx_desc->read.cmd_type_len =
3050 cpu_to_le32(cmd_type_len | tx_buffer_info->length);
3051 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
3052 i++;
3053 if (i == tx_ring->count)
3054 i = 0;
3055 }
3056
3057 tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
3058
3059 tx_ring->next_to_use = i;
3060}
3061
3062static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
3063{
3064 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
3065 /* Herbert's original patch had:
3066 * smp_mb__after_netif_stop_queue();
3067 * but since that doesn't exist yet, just open code it. */
3068 smp_mb();
3069
3070 /* We need to check again in a case another CPU has just
3071 * made room available. */
3072 if (likely(ixgbevf_desc_unused(tx_ring) < size))
3073 return -EBUSY;
3074
3075 /* A reprieve! - use start_queue because it doesn't call schedule */
3076 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
3077 ++tx_ring->tx_stats.restart_queue;
3078
3079 return 0;
3080}
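/* The smp_mb() above pairs with the completion path: the queue is
 * stopped first and the free-descriptor count re-checked afterwards, so
 * a cleanup racing on another CPU either sees the stopped queue and
 * restarts it, or this path sees the space it just freed and restarts
 * the queue itself. */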
3081
3082static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
3083{
3084 if (likely(ixgbevf_desc_unused(tx_ring) >= size))
3085 return 0;
3086 return __ixgbevf_maybe_stop_tx(tx_ring, size);
3087}
3088
3089static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3090{
3091 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3092 struct ixgbevf_tx_buffer *first;
3093 struct ixgbevf_ring *tx_ring;
3094 int tso;
3095 u32 tx_flags = 0;
3096 u16 count = TXD_USE_COUNT(skb_headlen(skb));
3097#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
3098 unsigned short f;
3099#endif
3100 u8 hdr_len = 0;
3101 u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL);
3102
3103 if (!dst_mac || is_link_local_ether_addr(dst_mac)) {
3104 dev_kfree_skb(skb);
3105 return NETDEV_TX_OK;
3106 }
3107
3108 tx_ring = adapter->tx_ring[skb->queue_mapping];
3109
3110 /*
3111 * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
3112 * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
3113 * + 2 desc gap to keep tail from touching head,
3114 * + 1 desc for context descriptor,
3115 * otherwise try next time
3116 */
3117#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
3118 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
3119 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
3120#else
3121 count += skb_shinfo(skb)->nr_frags;
3122#endif
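/* Worked example, assuming the head and each frag fit in a single
 * descriptor: a TSO skb with a short linear header and three page frags
 * arrives here with count = 1 + 3, so the check below demands
 * 4 + 3 = 7 free slots -- one extra for the context descriptor and two
 * for the head/tail gap, per the accounting spelled out above. */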
3123 if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
3124 tx_ring->tx_stats.tx_busy++;
3125 return NETDEV_TX_BUSY;
3126 }
3127
3128 /* record the location of the first descriptor for this packet */
3129 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
3130 first->skb = skb;
3131 first->bytecount = skb->len;
3132 first->gso_segs = 1;
3133
3134 if (vlan_tx_tag_present(skb)) {
3135 tx_flags |= vlan_tx_tag_get(skb);
3136 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
3137 tx_flags |= IXGBE_TX_FLAGS_VLAN;
3138 }
3139
3140 /* record initial flags and protocol */
3141 first->tx_flags = tx_flags;
3142 first->protocol = vlan_get_protocol(skb);
3143
3144 tso = ixgbevf_tso(tx_ring, first, &hdr_len);
3145 if (tso < 0)
3146 goto out_drop;
3147 else
3148 ixgbevf_tx_csum(tx_ring, first);
3149
3150 ixgbevf_tx_queue(tx_ring, first,
3151 ixgbevf_tx_map(tx_ring, first), hdr_len);
3152
3153 /* Force memory writes to complete before letting h/w
3154 * know there are new descriptors to fetch. (Only
3155 * applicable for weak-ordered memory model archs,
3156 * such as IA-64).
3157 */
3158 wmb();
3159
3160 writel(tx_ring->next_to_use, tx_ring->tail);
3161 ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);
3162
3163 return NETDEV_TX_OK;
3164
3165out_drop:
3166 dev_kfree_skb_any(first->skb);
3167 first->skb = NULL;
3168
3169 return NETDEV_TX_OK;
3170}
3171
3172/**
3173 * ixgbevf_set_mac - Change the Ethernet Address of the NIC
3174 * @netdev: network interface device structure
3175 * @p: pointer to an address structure
3176 *
3177 * Returns 0 on success, negative on failure
3178 **/
3179static int ixgbevf_set_mac(struct net_device *netdev, void *p)
3180{
3181 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3182 struct ixgbe_hw *hw = &adapter->hw;
3183 struct sockaddr *addr = p;
3184
3185 if (!is_valid_ether_addr(addr->sa_data))
3186 return -EADDRNOTAVAIL;
3187
3188 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
3189 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
3190
3191 spin_lock_bh(&adapter->mbx_lock);
3192
3193 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
3194
3195 spin_unlock_bh(&adapter->mbx_lock);
3196
3197 return 0;
3198}
3199
3200/**
3201 * ixgbevf_change_mtu - Change the Maximum Transfer Unit
3202 * @netdev: network interface device structure
3203 * @new_mtu: new value for maximum frame size
3204 *
3205 * Returns 0 on success, negative on failure
3206 **/
3207static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
3208{
3209 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3210 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
69bfbec4 3211 int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;
69bfbec4 3212
56e94095
AD
3213 switch (adapter->hw.api_version) {
3214 case ixgbe_mbox_api_11:
3215 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
3216 break;
3217 default:
3218 if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
3219 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
3220 break;
3221 }
3222
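/* For a plain 1500-byte MTU this works out to 1500 + 14 (Ethernet
 * header) + 4 (FCS) = 1518 bytes, which fits the VLAN-sized default cap;
 * anything larger requires the jumbo limit enabled above, i.e. mailbox
 * API 1.1 from the PF or an X540-based VF. */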
3223 /* MTU < 68 is an error and causes problems on some kernels */
3224 if ((new_mtu < 68) || (max_frame > max_possible_frame))
3225 return -EINVAL;
3226
3227 hw_dbg(&adapter->hw, "changing MTU from %d to %d\n",
3228 netdev->mtu, new_mtu);
3229 /* must set new MTU before calling down or up */
3230 netdev->mtu = new_mtu;
3231
3232 if (netif_running(netdev))
3233 ixgbevf_reinit_locked(adapter);
3234
3235 return 0;
3236}
3237
3238static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
3239{
3240 struct net_device *netdev = pci_get_drvdata(pdev);
3241 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3242#ifdef CONFIG_PM
3243 int retval = 0;
3244#endif
3245
3246 netif_device_detach(netdev);
3247
3248 if (netif_running(netdev)) {
3249 rtnl_lock();
3250 ixgbevf_down(adapter);
3251 ixgbevf_free_irq(adapter);
3252 ixgbevf_free_all_tx_resources(adapter);
3253 ixgbevf_free_all_rx_resources(adapter);
3254 rtnl_unlock();
3255 }
3256
3257 ixgbevf_clear_interrupt_scheme(adapter);
3258
3259#ifdef CONFIG_PM
3260 retval = pci_save_state(pdev);
3261 if (retval)
3262 return retval;
3263
3264#endif
3265 pci_disable_device(pdev);
3266
3267 return 0;
3268}
3269
3270#ifdef CONFIG_PM
3271static int ixgbevf_resume(struct pci_dev *pdev)
3272{
3273 struct net_device *netdev = pci_get_drvdata(pdev);
3274 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3275 u32 err;
3276
3277 pci_set_power_state(pdev, PCI_D0);
3278 pci_restore_state(pdev);
3279 /*
3280 * pci_restore_state clears dev->state_saved so call
3281 * pci_save_state to restore it.
3282 */
3283 pci_save_state(pdev);
3284
3285 err = pci_enable_device_mem(pdev);
3286 if (err) {
3287 dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
3288 return err;
3289 }
3290 pci_set_master(pdev);
3291
3292 ixgbevf_reset(adapter);
3293
3294 rtnl_lock();
3295 err = ixgbevf_init_interrupt_scheme(adapter);
3296 rtnl_unlock();
3297 if (err) {
3298 dev_err(&pdev->dev, "Cannot initialize interrupts\n");
3299 return err;
3300 }
3301
0ac1e8ce
AD
3302 if (netif_running(netdev)) {
3303 err = ixgbevf_open(netdev);
3304 if (err)
3305 return err;
3306 }
3307
3308 netif_device_attach(netdev);
3309
3310 return err;
3311}
3312
3313#endif /* CONFIG_PM */
3314static void ixgbevf_shutdown(struct pci_dev *pdev)
3315{
3316 ixgbevf_suspend(pdev, PMSG_SUSPEND);
3317}
3318
3319static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
3320 struct rtnl_link_stats64 *stats)
3321{
3322 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3323 unsigned int start;
3324 u64 bytes, packets;
3325 const struct ixgbevf_ring *ring;
3326 int i;
3327
3328 ixgbevf_update_stats(adapter);
3329
3330 stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;
3331
3332 for (i = 0; i < adapter->num_rx_queues; i++) {
3333 ring = adapter->rx_ring[i];
3334 do {
3335 start = u64_stats_fetch_begin_bh(&ring->syncp);
3336 bytes = ring->stats.bytes;
3337 packets = ring->stats.packets;
3338 } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
3339 stats->rx_bytes += bytes;
3340 stats->rx_packets += packets;
3341 }
3342
3343 for (i = 0; i < adapter->num_tx_queues; i++) {
3344 ring = adapter->tx_ring[i];
3345 do {
3346 start = u64_stats_fetch_begin_bh(&ring->syncp);
3347 bytes = ring->stats.bytes;
3348 packets = ring->stats.packets;
3349 } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
3350 stats->tx_bytes += bytes;
3351 stats->tx_packets += packets;
3352 }
3353
3354 return stats;
3355}
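/* u64_stats_fetch_begin_bh()/_retry_bh() form the read side of a
 * seqcount: on 32-bit hosts the 64-bit byte/packet pair cannot be read
 * atomically, so each loop retries whenever a writer bumped the
 * sequence mid-read; on 64-bit builds they are essentially no-ops. */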
3356
3357static const struct net_device_ops ixgbevf_netdev_ops = {
3358 .ndo_open = ixgbevf_open,
3359 .ndo_stop = ixgbevf_close,
3360 .ndo_start_xmit = ixgbevf_xmit_frame,
3361 .ndo_set_rx_mode = ixgbevf_set_rx_mode,
3362 .ndo_get_stats64 = ixgbevf_get_stats,
3363 .ndo_validate_addr = eth_validate_addr,
3364 .ndo_set_mac_address = ixgbevf_set_mac,
3365 .ndo_change_mtu = ixgbevf_change_mtu,
3366 .ndo_tx_timeout = ixgbevf_tx_timeout,
3367 .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid,
3368 .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid,
3369#ifdef CONFIG_NET_RX_BUSY_POLL
3370 .ndo_busy_poll = ixgbevf_busy_poll_recv,
3371#endif
3372};
3373
3374static void ixgbevf_assign_netdev_ops(struct net_device *dev)
3375{
3376 dev->netdev_ops = &ixgbevf_netdev_ops;
3377 ixgbevf_set_ethtool_ops(dev);
3378 dev->watchdog_timeo = 5 * HZ;
3379}
3380
3381/**
3382 * ixgbevf_probe - Device Initialization Routine
3383 * @pdev: PCI device information struct
3384 * @ent: entry in ixgbevf_pci_tbl
3385 *
3386 * Returns 0 on success, negative on failure
3387 *
3388 * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
3389 * The OS initialization, configuring of the adapter private structure,
3390 * and a hardware reset occur.
3391 **/
3392static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3393{
3394 struct net_device *netdev;
3395 struct ixgbevf_adapter *adapter = NULL;
3396 struct ixgbe_hw *hw = NULL;
3397 const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
3398 static int cards_found;
3399 int err, pci_using_dac;
3400
3401 err = pci_enable_device(pdev);
3402 if (err)
3403 return err;
3404
3405 if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
3406 pci_using_dac = 1;
3407 } else {
3408 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
3409 if (err) {
3410 dev_err(&pdev->dev, "No usable DMA "
3411 "configuration, aborting\n");
3412 goto err_dma;
3413 }
3414 pci_using_dac = 0;
3415 }
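/* dma_set_mask_and_coherent() sets the streaming and coherent DMA masks
 * in one call: the driver first tries full 64-bit addressing and only
 * falls back to (and then insists on) a 32-bit mask, recording the
 * outcome in pci_using_dac for the NETIF_F_HIGHDMA decision below. */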
3416
3417 err = pci_request_regions(pdev, ixgbevf_driver_name);
3418 if (err) {
3419 dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
3420 goto err_pci_reg;
3421 }
3422
3423 pci_set_master(pdev);
3424
3425 netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
3426 MAX_TX_QUEUES);
3427 if (!netdev) {
3428 err = -ENOMEM;
3429 goto err_alloc_etherdev;
3430 }
3431
3432 SET_NETDEV_DEV(netdev, &pdev->dev);
3433
3434 pci_set_drvdata(pdev, netdev);
3435 adapter = netdev_priv(netdev);
3436
3437 adapter->netdev = netdev;
3438 adapter->pdev = pdev;
3439 hw = &adapter->hw;
3440 hw->back = adapter;
3441 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
3442
3443 /*
3444 * call save state here in standalone driver because it relies on
3445 * adapter struct to exist, and needs to call netdev_priv
3446 */
3447 pci_save_state(pdev);
3448
3449 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
3450 pci_resource_len(pdev, 0));
3451 if (!hw->hw_addr) {
3452 err = -EIO;
3453 goto err_ioremap;
3454 }
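
	/*
	 * BAR 0 holds the VF's entire register space; every MMIO access in
	 * the driver goes through this mapping.  The register helpers boil
	 * down to something like (a sketch of the idea, not the macros):
	 *
	 *	writel(value, hw->hw_addr + reg);
	 *	value = readl(hw->hw_addr + reg);
	 */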

	ixgbevf_assign_netdev_ops(netdev);

	adapter->bd_number = cards_found;

	/* Setup hw api */
	memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
	hw->mac.type = ii->mac;

	memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
	       sizeof(struct ixgbe_mbx_operations));

	/* setup the private structure */
	err = ixgbevf_sw_init(adapter);
	if (err)
		goto err_sw_init;

	/* The HW MAC address was set and/or determined in sw_init */
	if (!is_valid_ether_addr(netdev->dev_addr)) {
		pr_err("invalid MAC address\n");
		err = -EIO;
		goto err_sw_init;
	}

	netdev->hw_features = NETIF_F_SG |
			      NETIF_F_IP_CSUM |
			      NETIF_F_IPV6_CSUM |
			      NETIF_F_TSO |
			      NETIF_F_TSO6 |
			      NETIF_F_RXCSUM;

	netdev->features = netdev->hw_features |
			   NETIF_F_HW_VLAN_CTAG_TX |
			   NETIF_F_HW_VLAN_CTAG_RX |
			   NETIF_F_HW_VLAN_CTAG_FILTER;
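
	/*
	 * hw_features is the set the user may later toggle with ethtool -K;
	 * features is what is enabled right now.  The VLAN CTAG bits are
	 * added only to features, not to hw_features, so VLAN offload stays
	 * fixed on for the VF rather than being user-switchable.
	 */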

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = ixgbevf_watchdog;
	adapter->watchdog_timer.data = (unsigned long)adapter;

	INIT_WORK(&adapter->reset_task, ixgbevf_reset_task);
	INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task);

	err = ixgbevf_init_interrupt_scheme(adapter);
	if (err)
		goto err_sw_init;

	strcpy(netdev->name, "eth%d");
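
	/*
	 * "eth%d" is a template, not a final name: register_netdev() below
	 * substitutes the first free index (eth0, eth1, ...) before the
	 * interface becomes visible to userspace.
	 */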

	err = register_netdev(netdev);
	if (err)
		goto err_register;

	netif_carrier_off(netdev);

	ixgbevf_init_last_counter_stats(adapter);

	/* print the MAC address */
	hw_dbg(hw, "%pM\n", netdev->dev_addr);

	hw_dbg(hw, "MAC: %d\n", hw->mac.type);

	hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
	cards_found++;
	return 0;

err_register:
	ixgbevf_clear_interrupt_scheme(adapter);
err_sw_init:
	ixgbevf_reset_interrupt_capability(adapter);
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * ixgbevf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbevf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void ixgbevf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	set_bit(__IXGBEVF_DOWN, &adapter->state);
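
	/*
	 * Mark the adapter down before tearing anything else apart so the
	 * watchdog timer/task and reset task see the flag and do not
	 * re-arm themselves while the teardown below is in progress.
	 */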

	del_timer_sync(&adapter->watchdog_timer);

	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);

	if (netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(netdev);

	ixgbevf_clear_interrupt_scheme(adapter);
	ixgbevf_reset_interrupt_capability(adapter);

	iounmap(adapter->hw.hw_addr);
	pci_release_regions(pdev);

	hw_dbg(&adapter->hw, "Remove complete\n");

	free_netdev(netdev);

	pci_disable_device(pdev);
}

/**
 * ixgbevf_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
						  pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		ixgbevf_down(adapter);

	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * ixgbevf_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first half of the ixgbevf_resume routine.
 */
static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);

	ixgbevf_reset(adapter);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * ixgbevf_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it is OK to resume normal operation. Implementation resembles the
 * second half of the ixgbevf_resume routine.
 */
static void ixgbevf_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev))
		ixgbevf_up(adapter);

	netif_device_attach(netdev);
}

/* PCI Error Recovery (ERS) */
static const struct pci_error_handlers ixgbevf_err_handler = {
	.error_detected = ixgbevf_io_error_detected,
	.slot_reset = ixgbevf_io_slot_reset,
	.resume = ixgbevf_io_resume,
};
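
/*
 * AER recovery walks these callbacks in order: .error_detected quiesces
 * the device and requests a slot reset, .slot_reset re-enables and resets
 * the function after the bus reset, and .resume restarts traffic once the
 * PCI core declares recovery complete.
 */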

static struct pci_driver ixgbevf_driver = {
	.name = ixgbevf_driver_name,
	.id_table = ixgbevf_pci_tbl,
	.probe = ixgbevf_probe,
	.remove = ixgbevf_remove,
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend = ixgbevf_suspend,
	.resume = ixgbevf_resume,
#endif
	.shutdown = ixgbevf_shutdown,
	.err_handler = &ixgbevf_err_handler
};

/**
 * ixgbevf_init_module - Driver Registration Routine
 *
 * ixgbevf_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init ixgbevf_init_module(void)
{
	int ret;

	pr_info("%s - version %s\n", ixgbevf_driver_string,
		ixgbevf_driver_version);

	pr_info("%s\n", ixgbevf_copyright);

	ret = pci_register_driver(&ixgbevf_driver);
	return ret;
}

module_init(ixgbevf_init_module);

/**
 * ixgbevf_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbevf_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit ixgbevf_exit_module(void)
{
	pci_unregister_driver(&ixgbevf_driver);
}

#ifdef DEBUG
/**
 * ixgbevf_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
{
	struct ixgbevf_adapter *adapter = hw->back;

	return adapter->netdev->name;
}

#endif
module_exit(ixgbevf_exit_module);

/* ixgbevf_main.c */