ixgbevf: add support for reporting RSS key and hash table for X550
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c

/*******************************************************************************

  Intel 82599 Virtual Function driver
  Copyright(c) 1999 - 2015 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, see <http://www.gnu.org/licenses/>.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

/******************************************************************************
 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
******************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>

#include "ixgbevf.h"

const char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
	"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";

#define DRV_VERSION "2.12.1-k"
const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] =
	"Copyright (c) 2009 - 2012 Intel Corporation.";

static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
	[board_82599_vf] = &ixgbevf_82599_vf_info,
	[board_X540_vf] = &ixgbevf_X540_vf_info,
	[board_X550_vf] = &ixgbevf_X550_vf_info,
	[board_X550EM_x_vf] = &ixgbevf_X550EM_x_vf_info,
};

/* ixgbevf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id ixgbevf_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF), board_X550_vf },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF), board_X550EM_x_vf },
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit Virtual Function Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static void ixgbevf_service_event_schedule(struct ixgbevf_adapter *adapter)
{
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
	    !test_bit(__IXGBEVF_REMOVING, &adapter->state) &&
	    !test_and_set_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state))
		schedule_work(&adapter->service_task);
}

static void ixgbevf_service_event_complete(struct ixgbevf_adapter *adapter)
{
	BUG_ON(!test_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state));

	/* flush memory to make sure state is correct before next watchdog */
	smp_mb__before_atomic();
	clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state);
}

/* forward decls */
static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter);
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);

static void ixgbevf_remove_adapter(struct ixgbe_hw *hw)
{
	struct ixgbevf_adapter *adapter = hw->back;

	if (!hw->hw_addr)
		return;
	hw->hw_addr = NULL;
	dev_err(&adapter->pdev->dev, "Adapter removed\n");
	if (test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state))
		ixgbevf_service_event_schedule(adapter);
}

static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg)
{
	u32 value;

	/* The following check not only optimizes a bit by not
	 * performing a read on the status register when the
	 * register just read was a status register read that
	 * returned IXGBE_FAILED_READ_REG. It also blocks any
	 * potential recursion.
	 */
	if (reg == IXGBE_VFSTATUS) {
		ixgbevf_remove_adapter(hw);
		return;
	}
	value = ixgbevf_read_reg(hw, IXGBE_VFSTATUS);
	if (value == IXGBE_FAILED_READ_REG)
		ixgbevf_remove_adapter(hw);
}

u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg)
{
	u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
	u32 value;

	if (IXGBE_REMOVED(reg_addr))
		return IXGBE_FAILED_READ_REG;
	value = readl(reg_addr + reg);
	if (unlikely(value == IXGBE_FAILED_READ_REG))
		ixgbevf_check_remove(hw, reg);
	return value;
}

/**
 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 **/
static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
			     u8 queue, u8 msix_vector)
{
	u32 ivar, index;
	struct ixgbe_hw *hw = &adapter->hw;

	if (direction == -1) {
		/* other causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
		ivar &= ~0xFF;
		ivar |= msix_vector;
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
	} else {
		/* Tx or Rx causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		index = ((16 * (queue & 1)) + (8 * direction));
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
		ivar &= ~(0xFF << index);
		ivar |= (msix_vector << index);
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
	}
}
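
/* Worked example (illustration added in review, not part of the upstream
 * driver): each VTIVAR register carries the vector assignments for two
 * queues, one byte per cause.  For queue 3, Tx (direction = 1):
 *
 *	index = (16 * (3 & 1)) + (8 * 1) = 24
 *
 * so the allocation-valid bit plus the vector number land in bits 31:24 of
 * VTIVAR(3 >> 1) = VTIVAR(1), while the read-modify-write above leaves the
 * other three cause bytes untouched.
 */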

static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
					       struct ixgbevf_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		dev_kfree_skb_any(tx_buffer->skb);
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_single(tx_ring->dev,
					 dma_unmap_addr(tx_buffer, dma),
					 dma_unmap_len(tx_buffer, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buffer, len)) {
		dma_unmap_page(tx_ring->dev,
			       dma_unmap_addr(tx_buffer, dma),
			       dma_unmap_len(tx_buffer, len),
			       DMA_TO_DEVICE);
	}
	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	dma_unmap_len_set(tx_buffer, len, 0);
	/* tx_buffer must be completely set up in the transmit path */
}

static u64 ixgbevf_get_tx_completed(struct ixgbevf_ring *ring)
{
	return ring->stats.packets;
}

static u32 ixgbevf_get_tx_pending(struct ixgbevf_ring *ring)
{
	struct ixgbevf_adapter *adapter = netdev_priv(ring->netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	u32 head = IXGBE_READ_REG(hw, IXGBE_VFTDH(ring->reg_idx));
	u32 tail = IXGBE_READ_REG(hw, IXGBE_VFTDT(ring->reg_idx));

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);

	return 0;
}
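
/* Worked example (illustration added in review, not part of the upstream
 * driver): on a 512-descriptor ring, head = 500 and tail = 20 means the
 * tail has wrapped, so the pending count is
 * tail + count - head = 20 + 512 - 500 = 32 descriptors; with head = 100
 * and tail = 120 it is simply 120 - 100 = 20.
 */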

static inline bool ixgbevf_check_tx_hang(struct ixgbevf_ring *tx_ring)
{
	u32 tx_done = ixgbevf_get_tx_completed(tx_ring);
	u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
	u32 tx_pending = ixgbevf_get_tx_pending(tx_ring);

	clear_check_for_tx_hang(tx_ring);

	/* Check for a hung queue, but be thorough. This verifies
	 * that a transmit has been completed since the previous
	 * check AND there is at least one packet pending. The
	 * ARMED bit is set to indicate a potential hang.
	 */
	if ((tx_done_old == tx_done) && tx_pending) {
		/* make sure it is true for two checks in a row */
		return test_and_set_bit(__IXGBEVF_HANG_CHECK_ARMED,
					&tx_ring->state);
	}
	/* reset the countdown */
	clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &tx_ring->state);

	/* update completed stats and continue */
	tx_ring->tx_stats.tx_done_old = tx_done;

	return false;
}

static void ixgbevf_tx_timeout_reset(struct ixgbevf_adapter *adapter)
{
	/* Do the reset outside of interrupt context */
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
		adapter->flags |= IXGBEVF_FLAG_RESET_REQUESTED;
		ixgbevf_service_event_schedule(adapter);
	}
}

/**
 * ixgbevf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void ixgbevf_tx_timeout(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	ixgbevf_tx_timeout_reset(adapter);
}

/**
 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: structure containing interrupt and ring information
 * @tx_ring: tx ring to clean
 **/
static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
				 struct ixgbevf_ring *tx_ring)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_tx_buffer *tx_buffer;
	union ixgbe_adv_tx_desc *tx_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int budget = tx_ring->count / 2;
	unsigned int i = tx_ring->next_to_clean;

	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		return true;

	tx_buffer = &tx_ring->tx_buffer_info[i];
	tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	do {
		union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		read_barrier_depends();

		/* if DD is not set pending work has not been completed */
		if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buffer->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buffer->bytecount;
		total_packets += tx_buffer->gso_segs;

		/* free the skb */
		dev_kfree_skb_any(tx_buffer->skb);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		tx_buffer->skb = NULL;
		dma_unmap_len_set(tx_buffer, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buffer = tx_ring->tx_buffer_info;
				tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buffer, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buffer, dma),
					       dma_unmap_len(tx_buffer, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buffer, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buffer = tx_ring->tx_buffer_info;
			tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
		}

		/* issue prefetch for next Tx descriptor */
		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;

	if (check_for_tx_hang(tx_ring) && ixgbevf_check_tx_hang(tx_ring)) {
		struct ixgbe_hw *hw = &adapter->hw;
		union ixgbe_adv_tx_desc *eop_desc;

		eop_desc = tx_ring->tx_buffer_info[i].next_to_watch;

		pr_err("Detected Tx Unit Hang\n"
		       "  Tx Queue             <%d>\n"
		       "  TDH, TDT             <%x>, <%x>\n"
		       "  next_to_use          <%x>\n"
		       "  next_to_clean        <%x>\n"
		       "tx_buffer_info[next_to_clean]\n"
		       "  next_to_watch        <%p>\n"
		       "  eop_desc->wb.status  <%x>\n"
		       "  time_stamp           <%lx>\n"
		       "  jiffies              <%lx>\n",
		       tx_ring->queue_index,
		       IXGBE_READ_REG(hw, IXGBE_VFTDH(tx_ring->reg_idx)),
		       IXGBE_READ_REG(hw, IXGBE_VFTDT(tx_ring->reg_idx)),
		       tx_ring->next_to_use, i,
		       eop_desc, (eop_desc ? eop_desc->wb.status : 0),
		       tx_ring->tx_buffer_info[i].time_stamp, jiffies);

		netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);

		/* schedule immediate reset if we believe we hung */
		ixgbevf_tx_timeout_reset(adapter);

		return true;
	}

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();

		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}

/**
 * ixgbevf_rx_skb - Helper function to determine proper Rx method
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 **/
static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
			   struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	skb_mark_napi_id(skb, &q_vector->napi);

	if (ixgbevf_qv_busy_polling(q_vector)) {
		netif_receive_skb(skb);
		/* exit early if we busy polled */
		return;
	}
#endif /* CONFIG_NET_RX_BUSY_POLL */

	napi_gro_receive(&q_vector->napi, skb);
}

#define IXGBE_RSS_L4_TYPES_MASK \
	((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \
	 (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \
	 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \
	 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP))

static inline void ixgbevf_rx_hash(struct ixgbevf_ring *ring,
				   union ixgbe_adv_rx_desc *rx_desc,
				   struct sk_buff *skb)
{
	u16 rss_type;

	if (!(ring->netdev->features & NETIF_F_RXHASH))
		return;

	rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) &
		   IXGBE_RXDADV_RSSTYPE_MASK;

	if (!rss_type)
		return;

	skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
		     (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
		     PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
}
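
/* Example (illustration added in review, not part of the upstream driver):
 * for a TCP-over-IPv4 frame the descriptor reports
 * rss_type = IXGBE_RXDADV_RSSTYPE_IPV4_TCP, whose bit is set in
 * IXGBE_RSS_L4_TYPES_MASK, so the hash is recorded as PKT_HASH_TYPE_L4;
 * a plain IPv4 type falls through to PKT_HASH_TYPE_L3, telling the stack
 * how much of the header the reported hash covers.
 */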

/**
 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
 * @ring: structure containing ring specific data
 * @rx_desc: current Rx descriptor being processed
 * @skb: skb currently being received and modified
 **/
static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
				       union ixgbe_adv_rx_desc *rx_desc,
				       struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Rx csum disabled */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* if IP and error */
	if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
	    ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
		ring->rx_stats.csum_err++;
		return;
	}

	if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS))
		return;

	if (ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
		ring->rx_stats.csum_err++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
}

/**
 * ixgbevf_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the checksum, VLAN, protocol, and other fields within
 * the skb.
 **/
static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring,
				       union ixgbe_adv_rx_desc *rx_desc,
				       struct sk_buff *skb)
{
	ixgbevf_rx_hash(rx_ring, rx_desc, skb);
	ixgbevf_rx_checksum(rx_ring, rx_desc, skb);

	if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
		u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
		unsigned long *active_vlans = netdev_priv(rx_ring->netdev);

		if (test_bit(vid & VLAN_VID_MASK, active_vlans))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
	}

	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}

/**
 * ixgbevf_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 *
 * This function updates next to clean.  If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 **/
static bool ixgbevf_is_non_eop(struct ixgbevf_ring *rx_ring,
			       union ixgbe_adv_rx_desc *rx_desc)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	prefetch(IXGBEVF_RX_DESC(rx_ring, ntc));

	if (likely(ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
		return false;

	return true;
}

static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring,
				      struct ixgbevf_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma = bi->dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page))
		return true;

	/* alloc new page for storage */
	page = dev_alloc_page();
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_rx_page_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page(rx_ring->dev, page, 0,
			   PAGE_SIZE, DMA_FROM_DEVICE);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_page(page);

		rx_ring->rx_stats.alloc_rx_buff_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = 0;

	return true;
}

/**
 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
 * @rx_ring: rx descriptor ring (for a specific queue) to setup buffers on
 * @cleaned_count: number of buffers to replace
 **/
static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
				     u16 cleaned_count)
{
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbevf_rx_buffer *bi;
	unsigned int i = rx_ring->next_to_use;

	/* nothing to do or no valid netdev defined */
	if (!cleaned_count || !rx_ring->netdev)
		return;

	rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer_info[i];
	i -= rx_ring->count;

	do {
		if (!ixgbevf_alloc_mapped_page(rx_ring, bi))
			break;

		/* Refresh the desc even if pkt_addr didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = IXGBEVF_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer_info;
			i -= rx_ring->count;
		}

		/* clear the hdr_addr for the next_to_use descriptor */
		rx_desc->read.hdr_addr = 0;

		cleaned_count--;
	} while (cleaned_count);

	i += rx_ring->count;

	if (rx_ring->next_to_use != i) {
		/* record the next descriptor to use */
		rx_ring->next_to_use = i;

		/* update next to alloc since we have filled the ring */
		rx_ring->next_to_alloc = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		ixgbevf_write_tail(rx_ring, i);
	}
}

/**
 * ixgbevf_cleanup_headers - Correct corrupted or empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being fixed
 *
 * Check for corrupted packet headers caused by senders on the local L2
 * embedded NIC switch not setting up their Tx Descriptors right.  These
 * should be very rare.
 *
 * Also address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 **/
static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring,
				    union ixgbe_adv_rx_desc *rx_desc,
				    struct sk_buff *skb)
{
	/* verify that the packet does not have any known errors */
	if (unlikely(ixgbevf_test_staterr(rx_desc,
					  IXGBE_RXDADV_ERR_FRAME_ERR_MASK))) {
		struct net_device *netdev = rx_ring->netdev;

		if (!(netdev->features & NETIF_F_RXALL)) {
			dev_kfree_skb_any(skb);
			return true;
		}
	}

	/* if eth_skb_pad returns an error the skb was freed */
	if (eth_skb_pad(skb))
		return true;

	return false;
}

/**
 * ixgbevf_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 **/
static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
				  struct ixgbevf_rx_buffer *old_buff)
{
	struct ixgbevf_rx_buffer *new_buff;
	u16 nta = rx_ring->next_to_alloc;

	new_buff = &rx_ring->rx_buffer_info[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* transfer page from old buffer to new buffer */
	new_buff->page = old_buff->page;
	new_buff->dma = old_buff->dma;
	new_buff->page_offset = old_buff->page_offset;

	/* sync the buffer for use by the device */
	dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
					 new_buff->page_offset,
					 IXGBEVF_RX_BUFSZ,
					 DMA_FROM_DEVICE);
}

static inline bool ixgbevf_page_is_reserved(struct page *page)
{
	return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
}

/**
 * ixgbevf_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @rx_desc: descriptor containing length of buffer written by hardware
 * @skb: sk_buff to place the data into
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 * This is done either through a direct copy if the data in the buffer is
 * less than the skb header size, otherwise it will just attach the page as
 * a frag to the skb.
 *
 * The function will then update the page offset if necessary and return
 * true if the buffer can be reused by the adapter.
 **/
static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
				struct ixgbevf_rx_buffer *rx_buffer,
				union ixgbe_adv_rx_desc *rx_desc,
				struct sk_buff *skb)
{
	struct page *page = rx_buffer->page;
	unsigned char *va = page_address(page) + rx_buffer->page_offset;
	unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
#if (PAGE_SIZE < 8192)
	unsigned int truesize = IXGBEVF_RX_BUFSZ;
#else
	unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
#endif
	unsigned int pull_len;

	if (unlikely(skb_is_nonlinear(skb)))
		goto add_tail_frag;

	if (likely(size <= IXGBEVF_RX_HDR_SIZE)) {
		memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));

		/* page is not reserved, we can reuse buffer as is */
		if (likely(!ixgbevf_page_is_reserved(page)))
			return true;

		/* this page cannot be reused so discard it */
		put_page(page);
		return false;
	}

	/* we need the header to contain the greater of either ETH_HLEN or
	 * 60 bytes if the skb->len is less than 60 for skb_pad.
	 */
	pull_len = eth_get_headlen(va, IXGBEVF_RX_HDR_SIZE);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));

	/* update all of the pointers */
	va += pull_len;
	size -= pull_len;

add_tail_frag:
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
			(unsigned long)va & ~PAGE_MASK, size, truesize);

	/* avoid re-using remote pages */
	if (unlikely(ixgbevf_page_is_reserved(page)))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely(page_count(page) != 1))
		return false;

	/* flip page offset to other buffer */
	rx_buffer->page_offset ^= IXGBEVF_RX_BUFSZ;

#else
	/* move offset up to the next cache line */
	rx_buffer->page_offset += truesize;

	if (rx_buffer->page_offset > (PAGE_SIZE - IXGBEVF_RX_BUFSZ))
		return false;

#endif
	/* Even if we own the page, we are not allowed to use atomic_set()
	 * This would break get_page_unless_zero() users.
	 */
	atomic_inc(&page->_count);

	return true;
}
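
/* Example (illustration added in review, not part of the upstream driver),
 * assuming 4K pages and the usual 2K IXGBEVF_RX_BUFSZ: each page is split
 * into two half-page buffers, and a buffer at page_offset 0 flips to 0x800
 * (and back) via
 *
 *	rx_buffer->page_offset ^= IXGBEVF_RX_BUFSZ;
 *
 * so the hardware can write into one half while the stack still holds the
 * other, and the page is only released once both halves are done.
 */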

static struct sk_buff *ixgbevf_fetch_rx_buffer(struct ixgbevf_ring *rx_ring,
					       union ixgbe_adv_rx_desc *rx_desc,
					       struct sk_buff *skb)
{
	struct ixgbevf_rx_buffer *rx_buffer;
	struct page *page;

	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
	page = rx_buffer->page;
	prefetchw(page);

	if (likely(!skb)) {
		void *page_addr = page_address(page) +
				  rx_buffer->page_offset;

		/* prefetch first cache line of first page */
		prefetch(page_addr);
#if L1_CACHE_BYTES < 128
		prefetch(page_addr + L1_CACHE_BYTES);
#endif

		/* allocate a skb to store the frags */
		skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
						IXGBEVF_RX_HDR_SIZE);
		if (unlikely(!skb)) {
			rx_ring->rx_stats.alloc_rx_buff_failed++;
			return NULL;
		}

		/* we will be copying header into skb->data in
		 * pskb_may_pull so it is in our interest to prefetch
		 * it now to avoid a possible cache miss
		 */
		prefetchw(skb->data);
	}

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev,
				      rx_buffer->dma,
				      rx_buffer->page_offset,
				      IXGBEVF_RX_BUFSZ,
				      DMA_FROM_DEVICE);

	/* pull page into skb */
	if (ixgbevf_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
		/* hand second half of page back to the ring */
		ixgbevf_reuse_rx_page(rx_ring, rx_buffer);
	} else {
		/* we are not reusing the buffer so unmap it */
		dma_unmap_page(rx_ring->dev, rx_buffer->dma,
			       PAGE_SIZE, DMA_FROM_DEVICE);
	}

	/* clear contents of buffer_info */
	rx_buffer->dma = 0;
	rx_buffer->page = NULL;

	return skb;
}

static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
					     u32 qmask)
{
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
}

static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
				struct ixgbevf_ring *rx_ring,
				int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	u16 cleaned_count = ixgbevf_desc_unused(rx_ring);
	struct sk_buff *skb = rx_ring->skb;

	while (likely(total_rx_packets < budget)) {
		union ixgbe_adv_rx_desc *rx_desc;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
			ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = IXGBEVF_RX_DESC(rx_ring, rx_ring->next_to_clean);

		if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_DD))
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * RXD_STAT_DD bit is set
		 */
		rmb();

		/* retrieve a buffer from the ring */
		skb = ixgbevf_fetch_rx_buffer(rx_ring, rx_desc, skb);

		/* exit if we failed to retrieve a buffer */
		if (!skb)
			break;

		cleaned_count++;

		/* fetch next buffer in frame if non-eop */
		if (ixgbevf_is_non_eop(rx_ring, rx_desc))
			continue;

		/* verify the packet layout is correct */
		if (ixgbevf_cleanup_headers(rx_ring, rx_desc, skb)) {
			skb = NULL;
			continue;
		}

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;

		/* Workaround hardware that can't do proper VEPA multicast
		 * source pruning.
		 */
		if ((skb->pkt_type == PACKET_BROADCAST ||
		     skb->pkt_type == PACKET_MULTICAST) &&
		    ether_addr_equal(rx_ring->netdev->dev_addr,
				     eth_hdr(skb)->h_source)) {
			dev_kfree_skb_irq(skb);
			continue;
		}

		/* populate checksum, VLAN, and protocol */
		ixgbevf_process_skb_fields(rx_ring, rx_desc, skb);

		ixgbevf_rx_skb(q_vector, skb);

		/* reset skb pointer */
		skb = NULL;

		/* update budget accounting */
		total_rx_packets++;
	}

	/* place incomplete frames back on ring for completion */
	rx_ring->skb = skb;

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	q_vector->rx.total_packets += total_rx_packets;
	q_vector->rx.total_bytes += total_rx_bytes;

	return total_rx_packets;
}

/**
 * ixgbevf_poll - NAPI polling callback
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean one or more rings associated with a q_vector.
 **/
static int ixgbevf_poll(struct napi_struct *napi, int budget)
{
	struct ixgbevf_q_vector *q_vector =
		container_of(napi, struct ixgbevf_q_vector, napi);
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_ring *ring;
	int per_ring_budget;
	bool clean_complete = true;

	ixgbevf_for_each_ring(ring, q_vector->tx)
		clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);

#ifdef CONFIG_NET_RX_BUSY_POLL
	if (!ixgbevf_qv_lock_napi(q_vector))
		return budget;
#endif

	/* attempt to distribute budget to each queue fairly, but don't allow
	 * the budget to go below 1 because we'll exit polling
	 */
	if (q_vector->rx.count > 1)
		per_ring_budget = max(budget/q_vector->rx.count, 1);
	else
		per_ring_budget = budget;

	ixgbevf_for_each_ring(ring, q_vector->rx)
		clean_complete &= (ixgbevf_clean_rx_irq(q_vector, ring,
							per_ring_budget)
				   < per_ring_budget);

#ifdef CONFIG_NET_RX_BUSY_POLL
	ixgbevf_qv_unlock_napi(q_vector);
#endif

	/* If all work not completed, return budget and keep polling */
	if (!clean_complete)
		return budget;
	/* all work done, exit the polling mode */
	napi_complete(napi);
	if (adapter->rx_itr_setting & 1)
		ixgbevf_set_itr(q_vector);
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
	    !test_bit(__IXGBEVF_REMOVING, &adapter->state))
		ixgbevf_irq_enable_queues(adapter,
					  1 << q_vector->v_idx);

	return 0;
}
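
/* Worked example (illustration added in review, not part of the upstream
 * driver): with a NAPI budget of 64 and three Rx rings on one vector, each
 * ring gets max(64 / 3, 1) = 21 packets per poll.  A ring that consumes its
 * full 21 marks the pass incomplete, so the whole budget is returned and
 * NAPI schedules another poll instead of re-enabling the interrupt.
 */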

/**
 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 **/
void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	int v_idx = q_vector->v_idx;
	u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;

	/* set the WDIS bit to not clear the timer bits and cause an
	 * immediate assertion of the interrupt
	 */
	itr_reg |= IXGBE_EITR_CNT_WDIS;

	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
}

#ifdef CONFIG_NET_RX_BUSY_POLL
/* must be called with local_bh_disable()d */
static int ixgbevf_busy_poll_recv(struct napi_struct *napi)
{
	struct ixgbevf_q_vector *q_vector =
			container_of(napi, struct ixgbevf_q_vector, napi);
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_ring *ring;
	int found = 0;

	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		return LL_FLUSH_FAILED;

	if (!ixgbevf_qv_lock_poll(q_vector))
		return LL_FLUSH_BUSY;

	ixgbevf_for_each_ring(ring, q_vector->rx) {
		found = ixgbevf_clean_rx_irq(q_vector, ring, 4);
#ifdef BP_EXTENDED_STATS
		if (found)
			ring->stats.cleaned += found;
		else
			ring->stats.misses++;
#endif
		if (found)
			break;
	}

	ixgbevf_qv_unlock_poll(q_vector);

	return found;
}
#endif /* CONFIG_NET_RX_BUSY_POLL */

/**
 * ixgbevf_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
{
	struct ixgbevf_q_vector *q_vector;
	int q_vectors, v_idx;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	adapter->eims_enable_mask = 0;

	/* Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		struct ixgbevf_ring *ring;

		q_vector = adapter->q_vector[v_idx];

		ixgbevf_for_each_ring(ring, q_vector->rx)
			ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);

		ixgbevf_for_each_ring(ring, q_vector->tx)
			ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);

		if (q_vector->tx.ring && !q_vector->rx.ring) {
			/* Tx only vector */
			if (adapter->tx_itr_setting == 1)
				q_vector->itr = IXGBE_10K_ITR;
			else
				q_vector->itr = adapter->tx_itr_setting;
		} else {
			/* Rx or Rx/Tx vector */
			if (adapter->rx_itr_setting == 1)
				q_vector->itr = IXGBE_20K_ITR;
			else
				q_vector->itr = adapter->rx_itr_setting;
		}

		/* add q_vector eims value to global eims_enable_mask */
		adapter->eims_enable_mask |= 1 << v_idx;

		ixgbevf_write_eitr(q_vector);
	}

	ixgbevf_set_ivar(adapter, -1, 1, v_idx);
	/* setup eims_other and add value to global eims_enable_mask */
	adapter->eims_other = 1 << v_idx;
	adapter->eims_enable_mask |= adapter->eims_other;
}

enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};

/**
 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @ring_container: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
			       struct ixgbevf_ring_container *ring_container)
{
	int bytes = ring_container->total_bytes;
	int packets = ring_container->total_packets;
	u32 timepassed_us;
	u64 bytes_perint;
	u8 itr_setting = ring_container->itr;

	if (packets == 0)
		return;

	/* simple throttle rate management
	 *    0-20MB/s lowest (100000 ints/s)
	 *   20-100MB/s low   (20000 ints/s)
	 *  100-1249MB/s bulk (8000 ints/s)
	 */
	/* what was last interrupt timeslice? */
	timepassed_us = q_vector->itr >> 2;
	bytes_perint = bytes / timepassed_us; /* bytes/usec */

	switch (itr_setting) {
	case lowest_latency:
		if (bytes_perint > 10)
			itr_setting = low_latency;
		break;
	case low_latency:
		if (bytes_perint > 20)
			itr_setting = bulk_latency;
		else if (bytes_perint <= 10)
			itr_setting = lowest_latency;
		break;
	case bulk_latency:
		if (bytes_perint <= 20)
			itr_setting = low_latency;
		break;
	}

	/* clear work counters since we have the values we need */
	ring_container->total_bytes = 0;
	ring_container->total_packets = 0;

	/* write updated itr to ring container */
	ring_container->itr = itr_setting;
}
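
/* Worked example (illustration added in review, not part of the upstream
 * driver; assumes the 20K ints/s setting programs a ~50us interval, i.e. an
 * itr value of 200 so that timepassed_us = itr >> 2 = 50): receiving 2000
 * bytes in one 50us timeslice gives bytes_perint = 2000 / 50 = 40 > 20,
 * which promotes the container from low_latency to bulk_latency.
 */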

static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
{
	u32 new_itr = q_vector->itr;
	u8 current_itr;

	ixgbevf_update_itr(q_vector, &q_vector->tx);
	ixgbevf_update_itr(q_vector, &q_vector->rx);

	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = IXGBE_100K_ITR;
		break;
	case low_latency:
		new_itr = IXGBE_20K_ITR;
		break;
	case bulk_latency:
	default:
		new_itr = IXGBE_8K_ITR;
		break;
	}

	if (new_itr != q_vector->itr) {
		/* do an exponential smoothing */
		new_itr = (10 * new_itr * q_vector->itr) /
			  ((9 * new_itr) + q_vector->itr);

		/* save the algorithm value here */
		q_vector->itr = new_itr;

		ixgbevf_write_eitr(q_vector);
	}
}
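
/* Worked example (illustration added in review, not part of the upstream
 * driver; assumes IXGBE_20K_ITR = 200 and IXGBE_8K_ITR = 500): stepping
 * from the 20K setting toward the 8K target, the smoothing yields
 *
 *	(10 * 500 * 200) / ((9 * 500) + 200) = 1000000 / 4700 ~= 212
 *
 * so the programmed interval drifts toward its target over several
 * interrupts instead of jumping there at once.
 */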

static irqreturn_t ixgbevf_msix_other(int irq, void *data)
{
	struct ixgbevf_adapter *adapter = data;
	struct ixgbe_hw *hw = &adapter->hw;

	hw->mac.get_link_status = 1;

	ixgbevf_service_event_schedule(adapter);

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);

	return IRQ_HANDLED;
}

/**
 * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues)
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 **/
static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
{
	struct ixgbevf_q_vector *q_vector = data;

	/* EIAM disabled interrupts (on this vector) for us */
	if (q_vector->rx.ring || q_vector->tx.ring)
		napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
				     int r_idx)
{
	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

	a->rx_ring[r_idx]->next = q_vector->rx.ring;
	q_vector->rx.ring = a->rx_ring[r_idx];
	q_vector->rx.count++;
}

static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
				     int t_idx)
{
	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

	a->tx_ring[t_idx]->next = q_vector->tx.ring;
	q_vector->tx.ring = a->tx_ring[t_idx];
	q_vector->tx.count++;
}

/**
 * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible.  You would add new
 * mapping configurations in here.
 **/
static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
{
	int q_vectors;
	int v_start = 0;
	int rxr_idx = 0, txr_idx = 0;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int i, j;
	int rqpv, tqpv;
	int err = 0;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/* The ideal configuration...
	 * We have enough vectors to map one per queue.
	 */
	if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
		for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
			map_vector_to_rxq(adapter, v_start, rxr_idx);

		for (; txr_idx < txr_remaining; v_start++, txr_idx++)
			map_vector_to_txq(adapter, v_start, txr_idx);
		goto out;
	}

	/* If we don't have enough vectors for a 1-to-1
	 * mapping, we'll have to group them so there are
	 * multiple queues per vector.
	 */
	/* Re-adjusting *qpv takes care of the remainder. */
	for (i = v_start; i < q_vectors; i++) {
		rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
		for (j = 0; j < rqpv; j++) {
			map_vector_to_rxq(adapter, i, rxr_idx);
			rxr_idx++;
			rxr_remaining--;
		}
	}
	for (i = v_start; i < q_vectors; i++) {
		tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
		for (j = 0; j < tqpv; j++) {
			map_vector_to_txq(adapter, i, txr_idx);
			txr_idx++;
			txr_remaining--;
		}
	}

out:
	return err;
}
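
/* Worked example (illustration added in review, not part of the upstream
 * driver): with 4 Rx rings and 2 q_vectors, the first pass computes
 * DIV_ROUND_UP(4, 2) = 2 rings for vector 0, then DIV_ROUND_UP(2, 1) = 2
 * for vector 1; the shrinking divisor absorbs any remainder, so every ring
 * always lands on some vector.
 */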
1374
1375/**
1376 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
1377 * @adapter: board private structure
1378 *
1379 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
1380 * interrupts from the kernel.
1381 **/
1382static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
1383{
1384 struct net_device *netdev = adapter->netdev;
fa71ae27
AD
1385 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1386 int vector, err;
92915f71
GR
1387 int ri = 0, ti = 0;
1388
92915f71 1389 for (vector = 0; vector < q_vectors; vector++) {
fa71ae27
AD
1390 struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector];
1391 struct msix_entry *entry = &adapter->msix_entries[vector];
1392
1393 if (q_vector->tx.ring && q_vector->rx.ring) {
1394 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
1395 "%s-%s-%d", netdev->name, "TxRx", ri++);
1396 ti++;
1397 } else if (q_vector->rx.ring) {
1398 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
1399 "%s-%s-%d", netdev->name, "rx", ri++);
1400 } else if (q_vector->tx.ring) {
1401 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
1402 "%s-%s-%d", netdev->name, "tx", ti++);
92915f71
GR
1403 } else {
1404 /* skip this unused q_vector */
1405 continue;
1406 }
fa71ae27
AD
1407 err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0,
1408 q_vector->name, q_vector);
92915f71
GR
1409 if (err) {
1410 hw_dbg(&adapter->hw,
dec0d8e4
JK
1411 "request_irq failed for MSIX interrupt Error: %d\n",
1412 err);
92915f71
GR
1413 goto free_queue_irqs;
1414 }
1415 }
1416
92915f71 1417 err = request_irq(adapter->msix_entries[vector].vector,
4b2cd27f 1418 &ixgbevf_msix_other, 0, netdev->name, adapter);
92915f71 1419 if (err) {
dec0d8e4
JK
1420 hw_dbg(&adapter->hw, "request_irq for msix_other failed: %d\n",
1421 err);
92915f71
GR
1422 goto free_queue_irqs;
1423 }
1424
1425 return 0;
1426
1427free_queue_irqs:
fa71ae27
AD
1428 while (vector) {
1429 vector--;
1430 free_irq(adapter->msix_entries[vector].vector,
1431 adapter->q_vector[vector]);
1432 }
a1f6c6b1 1433 /* This failure is non-recoverable - it indicates the system is
1434 * out of MSIX vector resources and the VF driver cannot run
1435 * without them. Set the number of msix vectors to zero
1436 * indicating that not enough can be allocated. The error
1437 * will be returned to the user indicating device open failed.
1438 * Any further attempts to force the driver to open will also
1439 * fail. The only way to recover is to unload the driver and
1440 * reload it again. If the system has recovered some MSIX
1441 * vectors then it may succeed.
1442 */
1443 adapter->num_msix_vectors = 0;
92915f71
GR
1444 return err;
1445}
1446
1447static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
1448{
1449 int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1450
1451 for (i = 0; i < q_vectors; i++) {
1452 struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
dec0d8e4 1453
6b43c446
AD
1454 q_vector->rx.ring = NULL;
1455 q_vector->tx.ring = NULL;
1456 q_vector->rx.count = 0;
1457 q_vector->tx.count = 0;
92915f71
GR
1458 }
1459}
1460
1461/**
1462 * ixgbevf_request_irq - initialize interrupts
1463 * @adapter: board private structure
1464 *
1465 * Attempts to configure interrupts using the best available
1466 * capabilities of the hardware and kernel.
1467 **/
1468static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
1469{
1470 int err = 0;
1471
1472 err = ixgbevf_request_msix_irqs(adapter);
1473
1474 if (err)
dec0d8e4 1475 hw_dbg(&adapter->hw, "request_irq failed, Error %d\n", err);
92915f71
GR
1476
1477 return err;
1478}
1479
1480static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
1481{
92915f71
GR
1482 int i, q_vectors;
1483
1484 q_vectors = adapter->num_msix_vectors;
92915f71
GR
1485 i = q_vectors - 1;
1486
fa71ae27 1487 free_irq(adapter->msix_entries[i].vector, adapter);
92915f71
GR
1488 i--;
1489
1490 for (; i >= 0; i--) {
fa71ae27
AD
1491 /* free only the irqs that were actually requested */
1492 if (!adapter->q_vector[i]->rx.ring &&
1493 !adapter->q_vector[i]->tx.ring)
1494 continue;
1495
92915f71
GR
1496 free_irq(adapter->msix_entries[i].vector,
1497 adapter->q_vector[i]);
1498 }
1499
1500 ixgbevf_reset_q_vectors(adapter);
1501}
1502
1503/**
1504 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
1505 * @adapter: board private structure
1506 **/
1507static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
1508{
92915f71 1509 struct ixgbe_hw *hw = &adapter->hw;
5f3600eb 1510 int i;
92915f71 1511
5f3600eb 1512 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0);
92915f71 1513 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
5f3600eb 1514 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0);
92915f71
GR
1515
1516 IXGBE_WRITE_FLUSH(hw);
1517
1518 for (i = 0; i < adapter->num_msix_vectors; i++)
1519 synchronize_irq(adapter->msix_entries[i].vector);
1520}
1521
1522/**
1523 * ixgbevf_irq_enable - Enable default interrupt generation settings
1524 * @adapter: board private structure
1525 **/
5f3600eb 1526static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
92915f71
GR
1527{
1528 struct ixgbe_hw *hw = &adapter->hw;
92915f71 1529
5f3600eb
AD
1530 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask);
1531 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask);
1532 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
92915f71
GR
1533}
1534
de02decb
DS
1535/**
1536 * ixgbevf_configure_tx_ring - Configure 82599 VF Tx ring after Reset
1537 * @adapter: board private structure
1538 * @ring: structure containing ring specific data
1539 *
1540 * Configure the Tx descriptor ring after a reset.
1541 **/
1542static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
1543 struct ixgbevf_ring *ring)
1544{
1545 struct ixgbe_hw *hw = &adapter->hw;
1546 u64 tdba = ring->dma;
1547 int wait_loop = 10;
1548 u32 txdctl = IXGBE_TXDCTL_ENABLE;
1549 u8 reg_idx = ring->reg_idx;
1550
1551 /* disable queue to avoid issues while updating state */
1552 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
1553 IXGBE_WRITE_FLUSH(hw);
1554
1555 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(reg_idx), tdba & DMA_BIT_MASK(32));
1556 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(reg_idx), tdba >> 32);
1557 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(reg_idx),
1558 ring->count * sizeof(union ixgbe_adv_tx_desc));
1559
1560 /* disable head writeback */
1561 IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAH(reg_idx), 0);
1562 IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAL(reg_idx), 0);
1563
1564 /* enable relaxed ordering */
1565 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(reg_idx),
1566 (IXGBE_DCA_TXCTRL_DESC_RRO_EN |
1567 IXGBE_DCA_TXCTRL_DATA_RRO_EN));
1568
1569 /* reset head and tail pointers */
1570 IXGBE_WRITE_REG(hw, IXGBE_VFTDH(reg_idx), 0);
1571 IXGBE_WRITE_REG(hw, IXGBE_VFTDT(reg_idx), 0);
dbf8b0d8 1572 ring->tail = adapter->io_addr + IXGBE_VFTDT(reg_idx);
de02decb
DS
1573
1574 /* reset ntu and ntc to place SW in sync with hardwdare */
1575 ring->next_to_clean = 0;
1576 ring->next_to_use = 0;
1577
1578 /* In order to avoid issues WTHRESH + PTHRESH should always be equal
1579 * to or less than the number of on chip descriptors, which is
1580 * currently 40.
1581 */
1582 txdctl |= (8 << 16); /* WTHRESH = 8 */
1583
1584 /* Setting PTHRESH to 32 both improves performance */
1585 txdctl |= (1 << 8) | /* HTHRESH = 1 */
1586 32; /* PTHRESH = 32 */
1587
e08400b7
ET
1588 clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &ring->state);
1589
de02decb
DS
1590 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl);
1591
1592 /* poll to verify queue is enabled */
1593 do {
1594 usleep_range(1000, 2000);
1595 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(reg_idx));
1596 } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
1597 if (!wait_loop)
1598 pr_err("Could not enable Tx Queue %d\n", reg_idx);
1599}
1600
92915f71
GR
1601/**
1602 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
1603 * @adapter: board private structure
1604 *
1605 * Configure the Tx unit of the MAC after a reset.
1606 **/
1607static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
1608{
de02decb 1609 u32 i;
92915f71
GR
1610
1611 /* Setup the HW Tx Head and Tail descriptor pointers */
de02decb
DS
1612 for (i = 0; i < adapter->num_tx_queues; i++)
1613 ixgbevf_configure_tx_ring(adapter, adapter->tx_ring[i]);
92915f71
GR
1614}
1615
1616#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
1617
1618static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
1619{
92915f71
GR
1620 struct ixgbe_hw *hw = &adapter->hw;
1621 u32 srrctl;
1622
92915f71
GR
1623 srrctl = IXGBE_SRRCTL_DROP_EN;
1624
bad17234
ET
1625 srrctl |= IXGBEVF_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
1626 srrctl |= IXGBEVF_RX_BUFSZ >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
77d5dfca 1627 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
92915f71 1628
92915f71
GR
1629 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
1630}
1631
1bb9c639
DS
1632static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter)
1633{
1634 struct ixgbe_hw *hw = &adapter->hw;
1635
1636 /* PSRTYPE must be initialized in 82599 */
1637 u32 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
1638 IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
1639 IXGBE_PSRTYPE_L2HDR;
1640
1641 if (adapter->num_rx_queues > 1)
1642 psrtype |= 1 << 29;
1643
1644 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
1645}
1646
de02decb
DS
1647#define IXGBEVF_MAX_RX_DESC_POLL 10
1648static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
1649 struct ixgbevf_ring *ring)
1650{
1651 struct ixgbe_hw *hw = &adapter->hw;
1652 int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
1653 u32 rxdctl;
1654 u8 reg_idx = ring->reg_idx;
1655
26597802
MR
1656 if (IXGBE_REMOVED(hw->hw_addr))
1657 return;
1658 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1659 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
1660
1661 /* write value back with RXDCTL.ENABLE bit cleared */
1662 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
1663
1664 /* the hardware may take up to 100us to really disable the Rx queue */
1665 do {
1666 udelay(10);
1667 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1668 } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
1669
1670 if (!wait_loop)
1671 pr_err("RXDCTL.ENABLE queue %d not cleared while polling\n",
1672 reg_idx);
1673}
1674
1675static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
1676 struct ixgbevf_ring *ring)
1677{
1678 struct ixgbe_hw *hw = &adapter->hw;
1679 int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
1680 u32 rxdctl;
1681 u8 reg_idx = ring->reg_idx;
1682
1683 if (IXGBE_REMOVED(hw->hw_addr))
1684 return;
1685 do {
1686 usleep_range(1000, 2000);
1687 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1688 } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
1689
1690 if (!wait_loop)
1691 pr_err("RXDCTL.ENABLE queue %d not set while polling\n",
1692 reg_idx);
1693}
1694
1695static void ixgbevf_setup_vfmrqc(struct ixgbevf_adapter *adapter)
1696{
1697 struct ixgbe_hw *hw = &adapter->hw;
1698 u32 vfmrqc = 0, vfreta = 0;
1699 u16 rss_i = adapter->num_rx_queues;
1700 u8 i, j;
1701
1702 /* Fill out hash function seeds */
1703 netdev_rss_key_fill(adapter->rss_key, sizeof(adapter->rss_key));
1704 for (i = 0; i < IXGBEVF_VFRSSRK_REGS; i++)
1705 IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), adapter->rss_key[i]);
1706
1707 for (i = 0, j = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++, j++) {
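 /* restart at queue 0 once every Rx queue has been assigned */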
1708 if (j == rss_i)
1709 j = 0;
1710
1711 adapter->rss_indir_tbl[i] = j;
1712
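 /* each VFRETA register packs four 8-bit entries, so the accumulated
 * value is flushed to hardware after every fourth entry
 */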
1713 vfreta |= j << (i & 0x3) * 8;
1714 if ((i & 3) == 3) {
1715 IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), vfreta);
1716 vfreta = 0;
1717 }
1718 }
1719
1720 /* Perform hash on these packet types */
1721 vfmrqc |= IXGBE_VFMRQC_RSS_FIELD_IPV4 |
1722 IXGBE_VFMRQC_RSS_FIELD_IPV4_TCP |
1723 IXGBE_VFMRQC_RSS_FIELD_IPV6 |
1724 IXGBE_VFMRQC_RSS_FIELD_IPV6_TCP;
1725
1726 vfmrqc |= IXGBE_VFMRQC_RSSEN;
1727
1728 IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, vfmrqc);
1729}
1730
1731static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
1732 struct ixgbevf_ring *ring)
1733{
1734 struct ixgbe_hw *hw = &adapter->hw;
1735 u64 rdba = ring->dma;
1736 u32 rxdctl;
1737 u8 reg_idx = ring->reg_idx;
1738
1739 /* disable queue to avoid issues while updating state */
1740 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1741 ixgbevf_disable_rx_queue(adapter, ring);
1742
1743 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(reg_idx), rdba & DMA_BIT_MASK(32));
1744 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(reg_idx), rdba >> 32);
1745 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(reg_idx),
1746 ring->count * sizeof(union ixgbe_adv_rx_desc));
1747
1748 /* enable relaxed ordering */
1749 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
1750 IXGBE_DCA_RXCTRL_DESC_RRO_EN);
1751
1752 /* reset head and tail pointers */
1753 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0);
1754 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(reg_idx), 0);
1755 ring->tail = adapter->io_addr + IXGBE_VFRDT(reg_idx);
1756
1757 /* reset ntu and ntc to place SW in sync with hardware */
1758 ring->next_to_clean = 0;
1759 ring->next_to_use = 0;
1760 ring->next_to_alloc = 0;
1761
1762 ixgbevf_configure_srrctl(adapter, reg_idx);
1763
1764 /* allow any size packet since we can handle overflow */
1765 rxdctl &= ~IXGBE_RXDCTL_RLPML_EN;
1766
1767 rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
1768 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
1769
1770 ixgbevf_rx_desc_queue_enable(adapter, ring);
1771 ixgbevf_alloc_rx_buffers(ring, ixgbevf_desc_unused(ring));
1772}
1773
1774/**
1775 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
1776 * @adapter: board private structure
1777 *
1778 * Configure the Rx unit of the MAC after a reset.
1779 **/
1780static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
1781{
1782 int i;
1783 struct ixgbe_hw *hw = &adapter->hw;
1784 struct net_device *netdev = adapter->netdev;
1785
1786 ixgbevf_setup_psrtype(adapter);
1787 if (hw->mac.type >= ixgbe_mac_X550_vf)
1788 ixgbevf_setup_vfmrqc(adapter);
1789
1790 /* notify the PF of our intent to use this size of frame */
1791 ixgbevf_rlpml_set_vf(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN);
1792
1793 /* Setup the HW Rx Head and Tail Descriptor Pointers and
1794 * the Base and Length of the Rx Descriptor Ring
1795 */
1796 for (i = 0; i < adapter->num_rx_queues; i++)
1797 ixgbevf_configure_rx_ring(adapter, adapter->rx_ring[i]);
1798}
1799
1800static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
1801 __be16 proto, u16 vid)
1802{
1803 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1804 struct ixgbe_hw *hw = &adapter->hw;
1805 int err;
1806
1807 spin_lock_bh(&adapter->mbx_lock);
1808
1809 /* add VID to filter table */
1810 err = hw->mac.ops.set_vfta(hw, vid, 0, true);
1811
1812 spin_unlock_bh(&adapter->mbx_lock);
1813
1814 /* translate error return types so error makes sense */
1815 if (err == IXGBE_ERR_MBX)
1816 return -EIO;
1817
1818 if (err == IXGBE_ERR_INVALID_ARGUMENT)
1819 return -EACCES;
1820
1821 set_bit(vid, adapter->active_vlans);
1822
1823 return err;
1824}
1825
1826static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev,
1827 __be16 proto, u16 vid)
1828{
1829 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1830 struct ixgbe_hw *hw = &adapter->hw;
1831 int err = -EOPNOTSUPP;
1832
1833 spin_lock_bh(&adapter->mbx_lock);
1834
1835 /* remove VID from filter table */
1836 err = hw->mac.ops.set_vfta(hw, vid, 0, false);
1837
1838 spin_unlock_bh(&adapter->mbx_lock);
1839
1840 clear_bit(vid, adapter->active_vlans);
1841
1842 return err;
1843}
1844
1845static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
1846{
1847 u16 vid;
1848
1849 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
1850 ixgbevf_vlan_rx_add_vid(adapter->netdev,
1851 htons(ETH_P_8021Q), vid);
1852}
1853
1854static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
1855{
1856 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1857 struct ixgbe_hw *hw = &adapter->hw;
1858 int count = 0;
1859
1860 if ((netdev_uc_count(netdev)) > 10) {
1861 pr_err("Too many unicast filters - No Space\n");
1862 return -ENOSPC;
1863 }
1864
1865 if (!netdev_uc_empty(netdev)) {
1866 struct netdev_hw_addr *ha;
1867
1868 netdev_for_each_uc_addr(ha, netdev) {
1869 hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
1870 udelay(200);
1871 }
1872 } else {
1873 /* If the list is empty then send message to PF driver to
1874 * clear all MAC VLANs on this VF.
1875 */
1876 hw->mac.ops.set_uc_addr(hw, 0, NULL);
1877 }
1878
1879 return count;
1880}
1881
1882/**
1883 * ixgbevf_set_rx_mode - Multicast and unicast set
1884 * @netdev: network interface device structure
1885 *
1886 * The set_rx_mode entry point is called whenever the multicast address
1887 * list, unicast address list or the network interface flags are updated.
1888 * This routine is responsible for configuring the hardware for proper
1889 * multicast mode and configuring requested unicast filters.
1890 **/
1891static void ixgbevf_set_rx_mode(struct net_device *netdev)
1892{
1893 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1894 struct ixgbe_hw *hw = &adapter->hw;
1895
1896 spin_lock_bh(&adapter->mbx_lock);
1897
1898 /* reprogram multicast list */
1899 hw->mac.ops.update_mc_addr_list(hw, netdev);
1900
1901 ixgbevf_write_uc_addr_list(netdev);
1902
1903 spin_unlock_bh(&adapter->mbx_lock);
1904}
1905
1906static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
1907{
1908 int q_idx;
1909 struct ixgbevf_q_vector *q_vector;
1910 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1911
1912 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1913 q_vector = adapter->q_vector[q_idx];
1914#ifdef CONFIG_NET_RX_BUSY_POLL
1915 ixgbevf_qv_init_lock(adapter->q_vector[q_idx]);
1916#endif
1917 napi_enable(&q_vector->napi);
1918 }
1919}
1920
1921static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
1922{
1923 int q_idx;
1924 struct ixgbevf_q_vector *q_vector;
1925 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1926
1927 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1928 q_vector = adapter->q_vector[q_idx];
1929 napi_disable(&q_vector->napi);
1930#ifdef CONFIG_NET_RX_BUSY_POLL
1931 while (!ixgbevf_qv_disable(adapter->q_vector[q_idx])) {
1932 pr_info("QV %d locked\n", q_idx);
1933 usleep_range(1000, 20000);
1934 }
1935#endif /* CONFIG_NET_RX_BUSY_POLL */
1936 }
1937}
1938
1939static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
1940{
1941 struct ixgbe_hw *hw = &adapter->hw;
1942 unsigned int def_q = 0;
1943 unsigned int num_tcs = 0;
1944 unsigned int num_rx_queues = adapter->num_rx_queues;
1945 unsigned int num_tx_queues = adapter->num_tx_queues;
1946 int err;
1947
1948 spin_lock_bh(&adapter->mbx_lock);
1949
1950 /* fetch queue configuration from the PF */
1951 err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
1952
1953 spin_unlock_bh(&adapter->mbx_lock);
1954
1955 if (err)
1956 return err;
1957
1958 if (num_tcs > 1) {
1959 /* we need only one Tx queue */
1960 num_tx_queues = 1;
1961
1962 /* update default Tx ring register index */
1963 adapter->tx_ring[0]->reg_idx = def_q;
1964
1965 /* we need as many queues as traffic classes */
1966 num_rx_queues = num_tcs;
1967 }
1968
1969 /* if we have a bad config abort request queue reset */
1970 if ((adapter->num_rx_queues != num_rx_queues) ||
1971 (adapter->num_tx_queues != num_tx_queues)) {
1972 /* force mailbox timeout to prevent further messages */
1973 hw->mbx.timeout = 0;
1974
1975 /* wait for watchdog to come around and bail us out */
1976 adapter->flags |= IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;
1977 }
1978
1979 return 0;
1980}
1981
1982static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
1983{
1984 ixgbevf_configure_dcb(adapter);
1985
1986 ixgbevf_set_rx_mode(adapter->netdev);
1987
1988 ixgbevf_restore_vlan(adapter);
1989
1990 ixgbevf_configure_tx(adapter);
1991 ixgbevf_configure_rx(adapter);
1992}
1993
1994static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
1995{
1996 /* Only save pre-reset stats if there are some */
1997 if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
1998 adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
1999 adapter->stats.base_vfgprc;
2000 adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
2001 adapter->stats.base_vfgptc;
2002 adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
2003 adapter->stats.base_vfgorc;
2004 adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
2005 adapter->stats.base_vfgotc;
2006 adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
2007 adapter->stats.base_vfmprc;
2008 }
2009}
2010
2011static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
2012{
2013 struct ixgbe_hw *hw = &adapter->hw;
2014
2015 adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
2016 adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
2017 adapter->stats.last_vfgorc |=
2018 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
2019 adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
2020 adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
2021 adapter->stats.last_vfgotc |=
2022 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
2023 adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
2024
2025 adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
2026 adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
2027 adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
2028 adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
2029 adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
2030}
2031
2032static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
2033{
2034 struct ixgbe_hw *hw = &adapter->hw;
2035 int api[] = { ixgbe_mbox_api_12,
2036 ixgbe_mbox_api_11,
2037 ixgbe_mbox_api_10,
2038 ixgbe_mbox_api_unknown };
2039 int err = 0, idx = 0;
2040
2041 spin_lock_bh(&adapter->mbx_lock);
2042
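 /* try the newest mailbox API first and fall back until the PF accepts one */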
2043 while (api[idx] != ixgbe_mbox_api_unknown) {
2044 err = ixgbevf_negotiate_api_version(hw, api[idx]);
2045 if (!err)
2046 break;
2047 idx++;
2048 }
2049
2050 spin_unlock_bh(&adapter->mbx_lock);
2051}
2052
2053static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
2054{
2055 struct net_device *netdev = adapter->netdev;
2056 struct ixgbe_hw *hw = &adapter->hw;
2057
2058 ixgbevf_configure_msix(adapter);
2059
2060 spin_lock_bh(&adapter->mbx_lock);
2061
2062 if (is_valid_ether_addr(hw->mac.addr))
2063 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
2064 else
2065 hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
2066
2067 spin_unlock_bh(&adapter->mbx_lock);
2068
2069 smp_mb__before_atomic();
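 /* the barrier makes the configuration writes above visible before
 * other CPUs can observe the DOWN bit clearing
 */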
2070 clear_bit(__IXGBEVF_DOWN, &adapter->state);
2071 ixgbevf_napi_enable_all(adapter);
2072
2073 /* clear any pending interrupts, may auto mask */
2074 IXGBE_READ_REG(hw, IXGBE_VTEICR);
2075 ixgbevf_irq_enable(adapter);
2076
2077 /* enable transmits */
2078 netif_tx_start_all_queues(netdev);
2079
2080 ixgbevf_save_reset_stats(adapter);
2081 ixgbevf_init_last_counter_stats(adapter);
2082
2083 hw->mac.get_link_status = 1;
2084 mod_timer(&adapter->service_timer, jiffies);
2085}
2086
2087void ixgbevf_up(struct ixgbevf_adapter *adapter)
2088{
2089 ixgbevf_configure(adapter);
2090
2091 ixgbevf_up_complete(adapter);
2092}
2093
2094/**
2095 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
2096 * @rx_ring: ring to free buffers from
2097 **/
2098static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
2099{
2100 struct device *dev = rx_ring->dev;
2101 unsigned long size;
2102 unsigned int i;
2103
2104 /* Free Rx ring sk_buff */
2105 if (rx_ring->skb) {
2106 dev_kfree_skb(rx_ring->skb);
2107 rx_ring->skb = NULL;
2108 }
2109
2110 /* ring already cleared, nothing to do */
2111 if (!rx_ring->rx_buffer_info)
2112 return;
2113
2114 /* Free all the Rx ring pages */
2115 for (i = 0; i < rx_ring->count; i++) {
2116 struct ixgbevf_rx_buffer *rx_buffer;
2117
2118 rx_buffer = &rx_ring->rx_buffer_info[i];
2119 if (rx_buffer->dma)
2120 dma_unmap_page(dev, rx_buffer->dma,
2121 PAGE_SIZE, DMA_FROM_DEVICE);
2122 rx_buffer->dma = 0;
2123 if (rx_buffer->page)
2124 __free_page(rx_buffer->page);
2125 rx_buffer->page = NULL;
2126 }
2127
2128 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
2129 memset(rx_ring->rx_buffer_info, 0, size);
2130
2131 /* Zero out the descriptor ring */
2132 memset(rx_ring->desc, 0, rx_ring->size);
2133}
2134
2135/**
2136 * ixgbevf_clean_tx_ring - Free Tx Buffers
2137 * @tx_ring: ring to be cleaned
2138 **/
2139static void ixgbevf_clean_tx_ring(struct ixgbevf_ring *tx_ring)
2140{
2141 struct ixgbevf_tx_buffer *tx_buffer_info;
2142 unsigned long size;
2143 unsigned int i;
2144
2145 if (!tx_ring->tx_buffer_info)
2146 return;
2147
2148 /* Free all the Tx ring sk_buffs */
2149 for (i = 0; i < tx_ring->count; i++) {
2150 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2151 ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
2152 }
2153
2154 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
2155 memset(tx_ring->tx_buffer_info, 0, size);
2156
2157 memset(tx_ring->desc, 0, tx_ring->size);
2158}
2159
2160/**
2161 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
2162 * @adapter: board private structure
2163 **/
2164static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
2165{
2166 int i;
2167
2168 for (i = 0; i < adapter->num_rx_queues; i++)
2169 ixgbevf_clean_rx_ring(adapter->rx_ring[i]);
2170}
2171
2172/**
2173 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
2174 * @adapter: board private structure
2175 **/
2176static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
2177{
2178 int i;
2179
2180 for (i = 0; i < adapter->num_tx_queues; i++)
2181 ixgbevf_clean_tx_ring(adapter->tx_ring[i]);
2182}
2183
2184void ixgbevf_down(struct ixgbevf_adapter *adapter)
2185{
2186 struct net_device *netdev = adapter->netdev;
2187 struct ixgbe_hw *hw = &adapter->hw;
2188 int i;
2189
2190 /* signal that we are down to the interrupt handler */
2191 if (test_and_set_bit(__IXGBEVF_DOWN, &adapter->state))
2192 return; /* do nothing if already down */
2193
2194 /* disable all enabled Rx queues */
2195 for (i = 0; i < adapter->num_rx_queues; i++)
2196 ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]);
2197
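 /* give the now-disabled Rx queues a moment to drain in-flight packets */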
2198 usleep_range(10000, 20000);
2199
2200 netif_tx_stop_all_queues(netdev);
2201
2202 /* call carrier off first to avoid false dev_watchdog timeouts */
2203 netif_carrier_off(netdev);
2204 netif_tx_disable(netdev);
2205
2206 ixgbevf_irq_disable(adapter);
2207
2208 ixgbevf_napi_disable_all(adapter);
2209
2210 del_timer_sync(&adapter->service_timer);
2211
2212 /* disable transmits in the hardware now that interrupts are off */
2213 for (i = 0; i < adapter->num_tx_queues; i++) {
2214 u8 reg_idx = adapter->tx_ring[i]->reg_idx;
2215
2216 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx),
2217 IXGBE_TXDCTL_SWFLSH);
2218 }
2219
2220 if (!pci_channel_offline(adapter->pdev))
2221 ixgbevf_reset(adapter);
2222
2223 ixgbevf_clean_all_tx_rings(adapter);
2224 ixgbevf_clean_all_rx_rings(adapter);
2225}
2226
2227void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
2228{
2229 WARN_ON(in_interrupt());
2230
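 /* wait for any reset already in progress to finish before starting ours */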
2231 while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
2232 msleep(1);
2233
2234 ixgbevf_down(adapter);
2235 ixgbevf_up(adapter);
2236
2237 clear_bit(__IXGBEVF_RESETTING, &adapter->state);
2238}
2239
2240void ixgbevf_reset(struct ixgbevf_adapter *adapter)
2241{
2242 struct ixgbe_hw *hw = &adapter->hw;
2243 struct net_device *netdev = adapter->netdev;
2244
2245 if (hw->mac.ops.reset_hw(hw)) {
2246 hw_dbg(hw, "PF still resetting\n");
2247 } else {
2248 hw->mac.ops.init_hw(hw);
2249 ixgbevf_negotiate_api(adapter);
2250 }
2251
2252 if (is_valid_ether_addr(adapter->hw.mac.addr)) {
2253 memcpy(netdev->dev_addr, adapter->hw.mac.addr,
2254 netdev->addr_len);
2255 memcpy(netdev->perm_addr, adapter->hw.mac.addr,
2256 netdev->addr_len);
2257 }
2258
2259 adapter->last_reset = jiffies;
2260}
2261
2262static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
2263 int vectors)
2264{
2265 int vector_threshold;
2266
2267 /* We'll want at least 2 (vector_threshold):
2268 * 1) TxQ[0] + RxQ[0] handler
2269 * 2) Other (Link Status Change, etc.)
2270 */
2271 vector_threshold = MIN_MSIX_COUNT;
2272
2273 /* The more we get, the more we will assign to Tx/Rx Cleanup
2274 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
2275 * Right now, we simply care about how many we'll get; we'll
2276 * set them up later while requesting irq's.
2277 */
2278 vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
2279 vector_threshold, vectors);
2280
2281 if (vectors < 0) {
2282 dev_err(&adapter->pdev->dev,
2283 "Unable to allocate MSI-X interrupts\n");
2284 kfree(adapter->msix_entries);
2285 adapter->msix_entries = NULL;
2286 return vectors;
2287 }
2288
2289 /* Adjust for only the vectors we'll use, which is minimum
2290 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
2291 * vectors we were allocated.
2292 */
2293 adapter->num_msix_vectors = vectors;
2294
2295 return 0;
2296}
2297
2298/**
2299 * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
2300 * @adapter: board private structure to initialize
2301 *
2302 * This is the top level queue allocation routine. The order here is very
2303 * important, starting with the "most" number of features turned on at once,
2304 * and ending with the smallest set of features. This way large combinations
2305 * can be allocated if they're turned on, and smaller combinations are the
2306 * fallthrough conditions.
2307 *
2308 **/
2309static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
2310{
2311 struct ixgbe_hw *hw = &adapter->hw;
2312 unsigned int def_q = 0;
2313 unsigned int num_tcs = 0;
2314 int err;
2315
2316 /* Start with base case */
2317 adapter->num_rx_queues = 1;
2318 adapter->num_tx_queues = 1;
2319
2320 spin_lock_bh(&adapter->mbx_lock);
2321
2322 /* fetch queue configuration from the PF */
2323 err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
2324
2325 spin_unlock_bh(&adapter->mbx_lock);
2326
2327 if (err)
2328 return;
2329
2330 /* we need as many queues as traffic classes */
2331 if (num_tcs > 1) {
2332 adapter->num_rx_queues = num_tcs;
2333 } else {
2334 u16 rss = min_t(u16, num_online_cpus(), IXGBEVF_MAX_RSS_QUEUES);
2335
2336 switch (hw->api_version) {
2337 case ixgbe_mbox_api_11:
2338 case ixgbe_mbox_api_12:
2339 adapter->num_rx_queues = rss;
2340 adapter->num_tx_queues = rss;
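 /* fall through */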
2341 default:
2342 break;
2343 }
2344 }
2345}
2346
2347/**
2348 * ixgbevf_alloc_queues - Allocate memory for all rings
2349 * @adapter: board private structure to initialize
2350 *
2351 * We allocate one ring per queue at run-time since we don't know the
2353 * number of queues at compile-time.
2354 **/
2355static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
2356{
2357 struct ixgbevf_ring *ring;
2358 int rx = 0, tx = 0;
2359
2360 for (; tx < adapter->num_tx_queues; tx++) {
2361 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
2362 if (!ring)
2363 goto err_allocation;
2364
2365 ring->dev = &adapter->pdev->dev;
2366 ring->netdev = adapter->netdev;
2367 ring->count = adapter->tx_ring_count;
2368 ring->queue_index = tx;
2369 ring->reg_idx = tx;
92915f71 2370
2371 adapter->tx_ring[tx] = ring;
2372 }
2373
2374 for (; rx < adapter->num_rx_queues; rx++) {
2375 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
2376 if (!ring)
2377 goto err_allocation;
2378
2379 ring->dev = &adapter->pdev->dev;
2380 ring->netdev = adapter->netdev;
2381
2382 ring->count = adapter->rx_ring_count;
2383 ring->queue_index = rx;
2384 ring->reg_idx = rx;
2385
2386 adapter->rx_ring[rx] = ring;
2387 }
2388
2389 return 0;
2390
2391err_allocation:
2392 while (tx) {
2393 kfree(adapter->tx_ring[--tx]);
2394 adapter->tx_ring[tx] = NULL;
2395 }
2396
2397 while (rx) {
2398 kfree(adapter->rx_ring[--rx]);
2399 adapter->rx_ring[rx] = NULL;
2400 }
2401 return -ENOMEM;
2402}
2403
2404/**
2405 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
2406 * @adapter: board private structure to initialize
2407 *
2408 * Attempt to configure the interrupts using the best available
2409 * capabilities of the hardware and the kernel.
2410 **/
2411static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
2412{
2413 struct net_device *netdev = adapter->netdev;
2414 int err = 0;
2415 int vector, v_budget;
2416
2417 /* It's easy to be greedy for MSI-X vectors, but it really
2418 * doesn't do us much good if we have a lot more vectors
2419 * than CPUs. So let's be conservative and only ask for
2420 * (roughly) the same number of vectors as there are CPUs.
2421 * The default is to use pairs of vectors.
2422 */
2423 v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
2424 v_budget = min_t(int, v_budget, num_online_cpus());
2425 v_budget += NON_Q_VECTORS;
2426
2427 /* A failure in MSI-X entry allocation isn't fatal, but it does
2428 * mean we disable MSI-X capabilities of the adapter.
2429 */
2430 adapter->msix_entries = kcalloc(v_budget,
2431 sizeof(struct msix_entry), GFP_KERNEL);
2432 if (!adapter->msix_entries) {
2433 err = -ENOMEM;
2434 goto out;
2435 }
2436
2437 for (vector = 0; vector < v_budget; vector++)
2438 adapter->msix_entries[vector].entry = vector;
2439
2440 err = ixgbevf_acquire_msix_vectors(adapter, v_budget);
2441 if (err)
2442 goto out;
2443
2444 err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
2445 if (err)
2446 goto out;
2447
2448 err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
2449
2450out:
2451 return err;
2452}
2453
2454/**
2455 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
2456 * @adapter: board private structure to initialize
2457 *
2458 * We allocate one q_vector per queue interrupt. If allocation fails we
2459 * return -ENOMEM.
2460 **/
2461static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
2462{
2463 int q_idx, num_q_vectors;
2464 struct ixgbevf_q_vector *q_vector;
2465
2466 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2467
2468 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
2469 q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
2470 if (!q_vector)
2471 goto err_out;
2472 q_vector->adapter = adapter;
2473 q_vector->v_idx = q_idx;
2474 netif_napi_add(adapter->netdev, &q_vector->napi,
2475 ixgbevf_poll, 64);
2476#ifdef CONFIG_NET_RX_BUSY_POLL
2477 napi_hash_add(&q_vector->napi);
2478#endif
2479 adapter->q_vector[q_idx] = q_vector;
2480 }
2481
2482 return 0;
2483
2484err_out:
2485 while (q_idx) {
2486 q_idx--;
2487 q_vector = adapter->q_vector[q_idx];
2488#ifdef CONFIG_NET_RX_BUSY_POLL
2489 napi_hash_del(&q_vector->napi);
2490#endif
2491 netif_napi_del(&q_vector->napi);
2492 kfree(q_vector);
2493 adapter->q_vector[q_idx] = NULL;
2494 }
2495 return -ENOMEM;
2496}
2497
2498/**
2499 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
2500 * @adapter: board private structure to initialize
2501 *
2502 * This function frees the memory allocated to the q_vectors. In addition if
2503 * NAPI is enabled it will delete any references to the NAPI struct prior
2504 * to freeing the q_vector.
2505 **/
2506static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
2507{
2508 int q_idx, num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2509
2510 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
2511 struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];
2512
2513 adapter->q_vector[q_idx] = NULL;
2514#ifdef CONFIG_NET_RX_BUSY_POLL
2515 napi_hash_del(&q_vector->napi);
2516#endif
2517 netif_napi_del(&q_vector->napi);
2518 kfree(q_vector);
2519 }
2520}
2521
2522/**
2523 * ixgbevf_reset_interrupt_capability - Reset MSIX setup
2524 * @adapter: board private structure
2525 *
2526 **/
2527static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
2528{
2529 pci_disable_msix(adapter->pdev);
2530 kfree(adapter->msix_entries);
2531 adapter->msix_entries = NULL;
2532}
2533
2534/**
2535 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
2536 * @adapter: board private structure to initialize
2537 *
2538 **/
2539static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
2540{
2541 int err;
2542
2543 /* Number of supported queues */
2544 ixgbevf_set_num_queues(adapter);
2545
2546 err = ixgbevf_set_interrupt_capability(adapter);
2547 if (err) {
2548 hw_dbg(&adapter->hw,
2549 "Unable to setup interrupt capabilities\n");
2550 goto err_set_interrupt;
2551 }
2552
2553 err = ixgbevf_alloc_q_vectors(adapter);
2554 if (err) {
2555 hw_dbg(&adapter->hw, "Unable to allocate memory for queue vectors\n");
2556 goto err_alloc_q_vectors;
2557 }
2558
2559 err = ixgbevf_alloc_queues(adapter);
2560 if (err) {
2561 pr_err("Unable to allocate memory for queues\n");
2562 goto err_alloc_queues;
2563 }
2564
2565 hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",
2566 (adapter->num_rx_queues > 1) ? "Enabled" :
2567 "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
2568
2569 set_bit(__IXGBEVF_DOWN, &adapter->state);
2570
2571 return 0;
2572err_alloc_queues:
2573 ixgbevf_free_q_vectors(adapter);
2574err_alloc_q_vectors:
2575 ixgbevf_reset_interrupt_capability(adapter);
2576err_set_interrupt:
2577 return err;
2578}
2579
2580/**
2581 * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings
2582 * @adapter: board private structure to clear interrupt scheme on
2583 *
2584 * We go through and clear interrupt specific resources and reset the structure
2585 * to pre-load conditions
2586 **/
2587static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
2588{
2589 int i;
2590
2591 for (i = 0; i < adapter->num_tx_queues; i++) {
2592 kfree(adapter->tx_ring[i]);
2593 adapter->tx_ring[i] = NULL;
2594 }
2595 for (i = 0; i < adapter->num_rx_queues; i++) {
2596 kfree(adapter->rx_ring[i]);
2597 adapter->rx_ring[i] = NULL;
2598 }
2599
2600 adapter->num_tx_queues = 0;
2601 adapter->num_rx_queues = 0;
2602
2603 ixgbevf_free_q_vectors(adapter);
2604 ixgbevf_reset_interrupt_capability(adapter);
2605}
2606
2607/**
2608 * ixgbevf_sw_init - Initialize general software structures
2609 * @adapter: board private structure to initialize
2610 *
2611 * ixgbevf_sw_init initializes the Adapter private data structure.
2612 * Fields are initialized based on PCI device information and
2613 * OS network device settings (MTU size).
2614 **/
2615static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
2616{
2617 struct ixgbe_hw *hw = &adapter->hw;
2618 struct pci_dev *pdev = adapter->pdev;
2619 struct net_device *netdev = adapter->netdev;
2620 int err;
2621
2622 /* PCI config space info */
2623 hw->vendor_id = pdev->vendor;
2624 hw->device_id = pdev->device;
2625 hw->revision_id = pdev->revision;
2626 hw->subsystem_vendor_id = pdev->subsystem_vendor;
2627 hw->subsystem_device_id = pdev->subsystem_device;
2628
2629 hw->mbx.ops.init_params(hw);
2630
2631 /* assume legacy case in which PF would only give VF 2 queues */
2632 hw->mac.max_tx_queues = 2;
2633 hw->mac.max_rx_queues = 2;
2634
2635 /* lock to protect mailbox accesses */
2636 spin_lock_init(&adapter->mbx_lock);
2637
2638 err = hw->mac.ops.reset_hw(hw);
2639 if (err) {
2640 dev_info(&pdev->dev,
2641 "PF still in reset state. Is the PF interface up?\n");
2642 } else {
2643 err = hw->mac.ops.init_hw(hw);
2644 if (err) {
2645 pr_err("init_shared_code failed: %d\n", err);
2646 goto out;
2647 }
2648 ixgbevf_negotiate_api(adapter);
2649 err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
2650 if (err)
2651 dev_info(&pdev->dev, "Error reading MAC address\n");
2652 else if (is_zero_ether_addr(adapter->hw.mac.addr))
2653 dev_info(&pdev->dev,
2654 "MAC address not assigned by administrator.\n");
2655 memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
2656 }
2657
2658 if (!is_valid_ether_addr(netdev->dev_addr)) {
2659 dev_info(&pdev->dev, "Assigning random MAC address\n");
2660 eth_hw_addr_random(netdev);
2661 memcpy(hw->mac.addr, netdev->dev_addr, netdev->addr_len);
2662 }
2663
2664 /* Enable dynamic interrupt throttling rates */
2665 adapter->rx_itr_setting = 1;
2666 adapter->tx_itr_setting = 1;
2667
2668 /* set default ring sizes */
2669 adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
2670 adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;
2671
2672 set_bit(__IXGBEVF_DOWN, &adapter->state);
2673 return 0;
2674
2675out:
2676 return err;
2677}
2678
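/* accumulate the free-running 32/36 bit hardware counters into 64 bit
 * software counters; a reading below the previous one means the hardware
 * counter wrapped, so credit one full counter period
 */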
2679#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \
2680 { \
2681 u32 current_counter = IXGBE_READ_REG(hw, reg); \
2682 if (current_counter < last_counter) \
2683 counter += 0x100000000LL; \
2684 last_counter = current_counter; \
2685 counter &= 0xFFFFFFFF00000000LL; \
2686 counter |= current_counter; \
2687 }
2688
2689#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
2690 { \
2691 u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \
2692 u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \
2693 u64 current_counter = (current_counter_msb << 32) | \
2694 current_counter_lsb; \
2695 if (current_counter < last_counter) \
2696 counter += 0x1000000000LL; \
2697 last_counter = current_counter; \
2698 counter &= 0xFFFFFFF000000000LL; \
2699 counter |= current_counter; \
2700 }
2701/**
2702 * ixgbevf_update_stats - Update the board statistics counters.
2703 * @adapter: board private structure
2704 **/
2705void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
2706{
2707 struct ixgbe_hw *hw = &adapter->hw;
2708 int i;
2709
2710 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
2711 test_bit(__IXGBEVF_RESETTING, &adapter->state))
2712 return;
2713
2714 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
2715 adapter->stats.vfgprc);
2716 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
2717 adapter->stats.vfgptc);
2718 UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
2719 adapter->stats.last_vfgorc,
2720 adapter->stats.vfgorc);
2721 UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
2722 adapter->stats.last_vfgotc,
2723 adapter->stats.vfgotc);
2724 UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
2725 adapter->stats.vfmprc);
2726
2727 for (i = 0; i < adapter->num_rx_queues; i++) {
2728 adapter->hw_csum_rx_error +=
2729 adapter->rx_ring[i]->hw_csum_rx_error;
2730 adapter->rx_ring[i]->hw_csum_rx_error = 0;
2731 }
2732}
2733
2734/**
2735 * ixgbevf_service_timer - Timer Call-back
2736 * @data: pointer to adapter cast into an unsigned long
2737 **/
2738static void ixgbevf_service_timer(unsigned long data)
2739{
2740 struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
2741
2742 /* Reset the timer */
2743 mod_timer(&adapter->service_timer, (HZ * 2) + jiffies);
2744
2745 ixgbevf_service_event_schedule(adapter);
2746}
2747
2748static void ixgbevf_reset_subtask(struct ixgbevf_adapter *adapter)
2749{
2750 if (!(adapter->flags & IXGBEVF_FLAG_RESET_REQUESTED))
2751 return;
2752
2753 adapter->flags &= ~IXGBEVF_FLAG_RESET_REQUESTED;
2754
2755 /* If we're already down or resetting, just bail */
2756 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
2757 test_bit(__IXGBEVF_RESETTING, &adapter->state))
2758 return;
2759
2760 adapter->tx_timeout_count++;
2761
2762 ixgbevf_reinit_locked(adapter);
2763}
2764
2765/**
2766 * ixgbevf_check_hang_subtask - check for hung queues and dropped interrupts
2767 * @adapter: pointer to the device adapter structure
2768 *
2769 * This function serves two purposes. First it strobes the interrupt lines
2770 * in order to make certain interrupts are occurring. Secondly it sets the
2771 * bits needed to check for TX hangs. As a result we should immediately
2772 * determine if a hang has occurred.
2773 **/
2774static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter)
2775{
2776 struct ixgbe_hw *hw = &adapter->hw;
2777 u32 eics = 0;
2778 int i;
2779
2780 /* If we're down or resetting, just bail */
2781 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
2782 test_bit(__IXGBEVF_RESETTING, &adapter->state))
2783 return;
2784
2785 /* Force detection of hung controller */
2786 if (netif_carrier_ok(adapter->netdev)) {
2787 for (i = 0; i < adapter->num_tx_queues; i++)
2788 set_check_for_tx_hang(adapter->tx_ring[i]);
2789 }
2790
2791 /* get one bit for every active Tx/Rx interrupt vector */
2792 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
2793 struct ixgbevf_q_vector *qv = adapter->q_vector[i];
2794
2795 if (qv->rx.ring || qv->tx.ring)
2796 eics |= 1 << i;
2797 }
2798
2799 /* Cause software interrupt to ensure rings are cleaned */
2800 IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics);
2801}
2802
2803/**
2804 * ixgbevf_watchdog_update_link - update the link status
2805 * @adapter: pointer to the device adapter structure
2806 **/
2807static void ixgbevf_watchdog_update_link(struct ixgbevf_adapter *adapter)
2808{
2809 struct ixgbe_hw *hw = &adapter->hw;
2810 u32 link_speed = adapter->link_speed;
2811 bool link_up = adapter->link_up;
2812 s32 err;
2813
2814 spin_lock_bh(&adapter->mbx_lock);
2815
2816 err = hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
2817
2818 spin_unlock_bh(&adapter->mbx_lock);
2819
2820 /* if check for link returns error we will need to reset */
2821 if (err && time_after(jiffies, adapter->last_reset + (10 * HZ))) {
2822 adapter->flags |= IXGBEVF_FLAG_RESET_REQUESTED;
2823 link_up = false;
2824 }
2825
2826 adapter->link_up = link_up;
2827 adapter->link_speed = link_speed;
2828}
2829
2830/**
2831 * ixgbevf_watchdog_link_is_up - update netif_carrier status and
2832 * print link up message
2833 * @adapter: pointer to the device adapter structure
2834 **/
2835static void ixgbevf_watchdog_link_is_up(struct ixgbevf_adapter *adapter)
2836{
2837 struct net_device *netdev = adapter->netdev;
2838
2839 /* only continue if link was previously down */
2840 if (netif_carrier_ok(netdev))
2841 return;
2842
2843 dev_info(&adapter->pdev->dev, "NIC Link is Up %s\n",
2844 (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
2845 "10 Gbps" :
2846 (adapter->link_speed == IXGBE_LINK_SPEED_1GB_FULL) ?
2847 "1 Gbps" :
2848 (adapter->link_speed == IXGBE_LINK_SPEED_100_FULL) ?
2849 "100 Mbps" :
2850 "unknown speed");
2851
2852 netif_carrier_on(netdev);
2853}
2854
2855/**
2856 * ixgbevf_watchdog_link_is_down - update netif_carrier status and
2857 * print link down message
2858 * @adapter: pointer to the adapter structure
2859 **/
2860static void ixgbevf_watchdog_link_is_down(struct ixgbevf_adapter *adapter)
2861{
2862 struct net_device *netdev = adapter->netdev;
2863
2864 adapter->link_speed = 0;
2865
2866 /* only continue if link was up previously */
2867 if (!netif_carrier_ok(netdev))
2868 return;
2869
2870 dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
2871
2872 netif_carrier_off(netdev);
2873}
2874
2875/**
2876 * ixgbevf_watchdog_subtask - worker thread to bring link up
2877 * @adapter: board private structure
2878 **/
2879static void ixgbevf_watchdog_subtask(struct ixgbevf_adapter *adapter)
2880{
2881 /* if interface is down do nothing */
2882 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
2883 test_bit(__IXGBEVF_RESETTING, &adapter->state))
2884 return;
2885
2886 ixgbevf_watchdog_update_link(adapter);
2887
2888 if (adapter->link_up)
2889 ixgbevf_watchdog_link_is_up(adapter);
2890 else
2891 ixgbevf_watchdog_link_is_down(adapter);
2892
2893 ixgbevf_update_stats(adapter);
2894}
2895
2896/**
2897 * ixgbevf_service_task - manages and runs subtasks
2898 * @work: pointer to work_struct containing our data
2899 **/
2900static void ixgbevf_service_task(struct work_struct *work)
2901{
2902 struct ixgbevf_adapter *adapter = container_of(work,
2903 struct ixgbevf_adapter,
2904 service_task);
2905 struct ixgbe_hw *hw = &adapter->hw;
2906
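 /* if the adapter has been removed (e.g. surprise hot-unplug),
 * just bring the interface down and bail
 */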
2907 if (IXGBE_REMOVED(hw->hw_addr)) {
2908 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
2909 rtnl_lock();
2910 ixgbevf_down(adapter);
2911 rtnl_unlock();
2912 }
2913 return;
2914 }
2915
2916 ixgbevf_queue_reset_subtask(adapter);
2917 ixgbevf_reset_subtask(adapter);
2918 ixgbevf_watchdog_subtask(adapter);
2919 ixgbevf_check_hang_subtask(adapter);
2920
2921 ixgbevf_service_event_complete(adapter);
2922}
2923
2924/**
2925 * ixgbevf_free_tx_resources - Free Tx Resources per Queue
2926 * @tx_ring: Tx descriptor ring for a specific queue
2927 *
2928 * Free all transmit software resources
2929 **/
2930void ixgbevf_free_tx_resources(struct ixgbevf_ring *tx_ring)
2931{
2932 ixgbevf_clean_tx_ring(tx_ring);
2933
2934 vfree(tx_ring->tx_buffer_info);
2935 tx_ring->tx_buffer_info = NULL;
2936
2937 /* if not set, then don't free */
2938 if (!tx_ring->desc)
2939 return;
2940
2941 dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc,
2942 tx_ring->dma);
2943
2944 tx_ring->desc = NULL;
2945}
2946
2947/**
2948 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
2949 * @adapter: board private structure
2950 *
2951 * Free all transmit software resources
2952 **/
2953static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
2954{
2955 int i;
2956
2957 for (i = 0; i < adapter->num_tx_queues; i++)
2958 if (adapter->tx_ring[i]->desc)
2959 ixgbevf_free_tx_resources(adapter->tx_ring[i]);
2960}
2961
2962/**
2963 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
2964 * @tx_ring: Tx descriptor ring (for a specific queue) to setup
2965 *
2966 * Return 0 on success, negative on failure
2967 **/
2968int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring)
2969{
2970 int size;
2971
2972 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
2973 tx_ring->tx_buffer_info = vzalloc(size);
2974 if (!tx_ring->tx_buffer_info)
2975 goto err;
2976
2977 /* round up to nearest 4K */
2978 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
2979 tx_ring->size = ALIGN(tx_ring->size, 4096);
2980
2981 tx_ring->desc = dma_alloc_coherent(tx_ring->dev, tx_ring->size,
2982 &tx_ring->dma, GFP_KERNEL);
2983 if (!tx_ring->desc)
2984 goto err;
2985
2986 return 0;
2987
2988err:
2989 vfree(tx_ring->tx_buffer_info);
2990 tx_ring->tx_buffer_info = NULL;
2991 dev_err(tx_ring->dev, "Unable to allocate memory for the transmit descriptor ring\n");
2992 return -ENOMEM;
2993}
2994
2995/**
2996 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
2997 * @adapter: board private structure
2998 *
2999 * If this function returns with an error, then it's possible one or
3000 * more of the rings is populated (while the rest are not). It is the
3001 * caller's duty to clean those orphaned rings.
3002 *
3003 * Return 0 on success, negative on failure
3004 **/
3005static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
3006{
3007 int i, err = 0;
3008
3009 for (i = 0; i < adapter->num_tx_queues; i++) {
3010 err = ixgbevf_setup_tx_resources(adapter->tx_ring[i]);
3011 if (!err)
3012 continue;
3013 hw_dbg(&adapter->hw, "Allocation for Tx Queue %u failed\n", i);
3014 break;
3015 }
3016
3017 return err;
3018}
3019
3020/**
3021 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
3022 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
3023 *
3024 * Returns 0 on success, negative on failure
3025 **/
3026int ixgbevf_setup_rx_resources(struct ixgbevf_ring *rx_ring)
3027{
3028 int size;
3029
3030 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
3031 rx_ring->rx_buffer_info = vzalloc(size);
3032 if (!rx_ring->rx_buffer_info)
3033 goto err;
3034
3035 /* Round up to nearest 4K */
3036 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
3037 rx_ring->size = ALIGN(rx_ring->size, 4096);
3038
3039 rx_ring->desc = dma_alloc_coherent(rx_ring->dev, rx_ring->size,
3040 &rx_ring->dma, GFP_KERNEL);
3041
3042 if (!rx_ring->desc)
3043 goto err;
3044
3045 return 0;
3046err:
3047 vfree(rx_ring->rx_buffer_info);
3048 rx_ring->rx_buffer_info = NULL;
3049 dev_err(rx_ring->dev, "Unable to allocate memory for the Rx descriptor ring\n");
3050 return -ENOMEM;
3051}
3052
3053/**
3054 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
3055 * @adapter: board private structure
3056 *
3057 * If this function returns with an error, then it's possible one or
3058 * more of the rings is populated (while the rest are not). It is the
3059 * caller's duty to clean those orphaned rings.
3060 *
3061 * Return 0 on success, negative on failure
3062 **/
3063static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
3064{
3065 int i, err = 0;
3066
3067 for (i = 0; i < adapter->num_rx_queues; i++) {
3068 err = ixgbevf_setup_rx_resources(adapter->rx_ring[i]);
3069 if (!err)
3070 continue;
3071 hw_dbg(&adapter->hw, "Allocation for Rx Queue %u failed\n", i);
3072 break;
3073 }
3074 return err;
3075}
3076
3077/**
3078 * ixgbevf_free_rx_resources - Free Rx Resources
3079 * @rx_ring: ring to clean the resources from
3080 *
3081 * Free all receive software resources
3082 **/
3083void ixgbevf_free_rx_resources(struct ixgbevf_ring *rx_ring)
3084{
3085 ixgbevf_clean_rx_ring(rx_ring);
3086
3087 vfree(rx_ring->rx_buffer_info);
3088 rx_ring->rx_buffer_info = NULL;
3089
3090 dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc,
3091 rx_ring->dma);
3092
3093 rx_ring->desc = NULL;
3094}
3095
3096/**
3097 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
3098 * @adapter: board private structure
3099 *
3100 * Free all receive software resources
3101 **/
3102static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
3103{
3104 int i;
3105
3106 for (i = 0; i < adapter->num_rx_queues; i++)
3107 if (adapter->rx_ring[i]->desc)
3108 ixgbevf_free_rx_resources(adapter->rx_ring[i]);
3109}
3110
3111/**
3112 * ixgbevf_open - Called when a network interface is made active
3113 * @netdev: network interface device structure
3114 *
3115 * Returns 0 on success, negative value on failure
3116 *
3117 * The open entry point is called when a network interface is made
3118 * active by the system (IFF_UP). At this point all resources needed
3119 * for transmit and receive operations are allocated, the interrupt
3120 * handler is registered with the OS, the watchdog timer is started,
3121 * and the stack is notified that the interface is ready.
3122 **/
3123static int ixgbevf_open(struct net_device *netdev)
3124{
3125 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3126 struct ixgbe_hw *hw = &adapter->hw;
3127 int err;
3128
3129 /* A previous failure to open the device because of a lack of
3130 * available MSIX vector resources may have reset the number
3131 * of msix vectors variable to zero. The only way to recover
3132 * is to unload/reload the driver and hope that the system has
3133 * been able to recover some MSIX vector resources.
3134 */
3135 if (!adapter->num_msix_vectors)
3136 return -ENOMEM;
3137
3138 if (hw->adapter_stopped) {
3139 ixgbevf_reset(adapter);
3140 /* if adapter is still stopped then PF isn't up and
3141 * the VF can't start.
3142 */
3143 if (hw->adapter_stopped) {
3144 err = IXGBE_ERR_MBX;
3145 pr_err("Unable to start - perhaps the PF Driver isn't up yet\n");
3146 goto err_setup_reset;
3147 }
3148 }
3149
3150 /* disallow open during test */
3151 if (test_bit(__IXGBEVF_TESTING, &adapter->state))
3152 return -EBUSY;
3153
3154 netif_carrier_off(netdev);
3155
3156 /* allocate transmit descriptors */
3157 err = ixgbevf_setup_all_tx_resources(adapter);
3158 if (err)
3159 goto err_setup_tx;
3160
3161 /* allocate receive descriptors */
3162 err = ixgbevf_setup_all_rx_resources(adapter);
3163 if (err)
3164 goto err_setup_rx;
3165
3166 ixgbevf_configure(adapter);
3167
3168 /* Map the Tx/Rx rings to the vectors we were allotted.
3169 * if request_irq will be called in this function map_rings
3170 * must be called *before* up_complete
3171 */
3172 ixgbevf_map_rings_to_vectors(adapter);
3173
3174 err = ixgbevf_request_irq(adapter);
3175 if (err)
3176 goto err_req_irq;
3177
3178 ixgbevf_up_complete(adapter);
3179
3180 return 0;
3181
3182err_req_irq:
3183 ixgbevf_down(adapter);
3184err_setup_rx:
3185 ixgbevf_free_all_rx_resources(adapter);
3186err_setup_tx:
3187 ixgbevf_free_all_tx_resources(adapter);
3188 ixgbevf_reset(adapter);
3189
3190err_setup_reset:
3191
3192 return err;
3193}
3194
3195/**
3196 * ixgbevf_close - Disables a network interface
3197 * @netdev: network interface device structure
3198 *
3199 * Returns 0, this is not allowed to fail
3200 *
3201 * The close entry point is called when an interface is de-activated
3202 * by the OS. The hardware is still under the drivers control, but
3203 * needs to be disabled. A global MAC reset is issued to stop the
3204 * hardware, and all transmit and receive resources are freed.
3205 **/
3206static int ixgbevf_close(struct net_device *netdev)
3207{
3208 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3209
3210 ixgbevf_down(adapter);
3211 ixgbevf_free_irq(adapter);
3212
3213 ixgbevf_free_all_tx_resources(adapter);
3214 ixgbevf_free_all_rx_resources(adapter);
3215
3216 return 0;
3217}
3218
3219static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter)
3220{
3221 struct net_device *dev = adapter->netdev;
3222
3223 if (!(adapter->flags & IXGBEVF_FLAG_QUEUE_RESET_REQUESTED))
3224 return;
3225
3226 adapter->flags &= ~IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;
3227
3228 /* if interface is down do nothing */
3229 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
3230 test_bit(__IXGBEVF_RESETTING, &adapter->state))
3231 return;
3232
3233 /* Hardware has to reinitialize queues and interrupts to
3234 * match packet buffer alignment. Unfortunately, the
3235 * hardware is not flexible enough to do this dynamically.
3236 */
3237 if (netif_running(dev))
3238 ixgbevf_close(dev);
3239
3240 ixgbevf_clear_interrupt_scheme(adapter);
3241 ixgbevf_init_interrupt_scheme(adapter);
3242
3243 if (netif_running(dev))
3244 ixgbevf_open(dev);
3245}
3246
3247static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
3248 u32 vlan_macip_lens, u32 type_tucmd,
3249 u32 mss_l4len_idx)
3250{
3251 struct ixgbe_adv_tx_context_desc *context_desc;
3252 u16 i = tx_ring->next_to_use;
3253
3254 context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);
3255
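 /* a context descriptor consumes one ring entry; advance and wrap next_to_use */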
3256 i++;
3257 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
3258
3259 /* set bits to identify this as an advanced context descriptor */
3260 type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
3261
3262 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
3263 context_desc->seqnum_seed = 0;
3264 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
3265 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
3266}
3267
3268static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
3269 struct ixgbevf_tx_buffer *first,
3270 u8 *hdr_len)
3271{
3272 struct sk_buff *skb = first->skb;
3273 u32 vlan_macip_lens, type_tucmd;
3274 u32 mss_l4len_idx, l4len;
3275 int err;
3276
3277 if (skb->ip_summed != CHECKSUM_PARTIAL)
3278 return 0;
3279
3280 if (!skb_is_gso(skb))
3281 return 0;
3282
3283 err = skb_cow_head(skb, 0);
3284 if (err < 0)
3285 return err;
3286
3287 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
3288 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
3289
3290 if (first->protocol == htons(ETH_P_IP)) {
3291 struct iphdr *iph = ip_hdr(skb);
3292
3293 iph->tot_len = 0;
3294 iph->check = 0;
3295 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
3296 iph->daddr, 0,
3297 IPPROTO_TCP,
3298 0);
3299 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
3300 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
3301 IXGBE_TX_FLAGS_CSUM |
3302 IXGBE_TX_FLAGS_IPV4;
3303 } else if (skb_is_gso_v6(skb)) {
3304 ipv6_hdr(skb)->payload_len = 0;
3305 tcp_hdr(skb)->check =
3306 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3307 &ipv6_hdr(skb)->daddr,
3308 0, IPPROTO_TCP, 0);
3309 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
3310 IXGBE_TX_FLAGS_CSUM;
3311 }
3312
3313 /* compute header lengths */
3314 l4len = tcp_hdrlen(skb);
3316 *hdr_len = skb_transport_offset(skb) + l4len;
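 /* the full header runs through the end of the TCP header: the L2 and
 * L3 headers (skb_transport_offset) plus l4len
 */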
3317
3318 /* update GSO size and bytecount with header size */
3319 first->gso_segs = skb_shinfo(skb)->gso_segs;
3320 first->bytecount += (first->gso_segs - 1) * *hdr_len;
3321
3322 /* mss_l4len_id: use 1 as index for TSO */
3323 mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
3324 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
3325 mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
3326
3327 /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
3328 vlan_macip_lens = skb_network_header_len(skb);
3329 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
3330 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
3331
3332 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
3333 type_tucmd, mss_l4len_idx);
3334
3335 return 1;
3336}
3337
3338static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
3339 struct ixgbevf_tx_buffer *first)
3340{
3341 struct sk_buff *skb = first->skb;
3342 u32 vlan_macip_lens = 0;
3343 u32 mss_l4len_idx = 0;
3344 u32 type_tucmd = 0;
3345
3346 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3347 u8 l4_hdr = 0;
3348
3349 switch (first->protocol) {
3350 case htons(ETH_P_IP):
3351 vlan_macip_lens |= skb_network_header_len(skb);
3352 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
3353 l4_hdr = ip_hdr(skb)->protocol;
3354 break;
0933ce4a 3355 case htons(ETH_P_IPV6):
70a10e25
AD
3356 vlan_macip_lens |= skb_network_header_len(skb);
3357 l4_hdr = ipv6_hdr(skb)->nexthdr;
3358 break;
3359 default:
3360 if (unlikely(net_ratelimit())) {
3361 dev_warn(tx_ring->dev,
dec0d8e4
JK
3362 "partial checksum but proto=%x!\n",
3363 first->protocol);
70a10e25
AD
3364 }
3365 break;
3366 }
92915f71 3367
70a10e25
AD
3368 switch (l4_hdr) {
3369 case IPPROTO_TCP:
3370 type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
3371 mss_l4len_idx = tcp_hdrlen(skb) <<
3372 IXGBE_ADVTXD_L4LEN_SHIFT;
3373 break;
3374 case IPPROTO_SCTP:
3375 type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
3376 mss_l4len_idx = sizeof(struct sctphdr) <<
3377 IXGBE_ADVTXD_L4LEN_SHIFT;
3378 break;
3379 case IPPROTO_UDP:
3380 mss_l4len_idx = sizeof(struct udphdr) <<
3381 IXGBE_ADVTXD_L4LEN_SHIFT;
3382 break;
3383 default:
3384 if (unlikely(net_ratelimit())) {
3385 dev_warn(tx_ring->dev,
dec0d8e4
JK
3386 "partial checksum but l4 proto=%x!\n",
3387 l4_hdr);
70a10e25
AD
3388 }
3389 break;
3390 }
7ad1a093
ET
3391
3392 /* update TX checksum flag */
3393 first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
92915f71
GR
3394 }
3395
70a10e25
AD
3396 /* vlan_macip_lens: MACLEN, VLAN tag */
3397 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
7ad1a093 3398 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
70a10e25
AD
3399
3400 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
3401 type_tucmd, mss_l4len_idx);
92915f71
GR
3402}
3403
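/* translate tx_flags into the cmd_type_len bits of an advanced Tx data
 * descriptor: data type + DEXT + FCS insertion, plus the VLAN insert
 * and TSO segmentation enables when the corresponding flags are set
 */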
static __le32 ixgbevf_tx_cmd_type(u32 tx_flags)
{
	/* set type for advanced descriptor with frame checksum insertion */
	__le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
				      IXGBE_ADVTXD_DCMD_IFCS |
				      IXGBE_ADVTXD_DCMD_DEXT);

	/* set HW VLAN bit if VLAN is present */
	if (tx_flags & IXGBE_TX_FLAGS_VLAN)
		cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);

	/* set segmentation enable bits for TSO/FSO */
	if (tx_flags & IXGBE_TX_FLAGS_TSO)
		cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);

	return cmd_type;
}

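/* fill in the olinfo_status field of the first data descriptor: the
 * payload length plus the checksum, IPv4 and context-index option bits
 */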
static void ixgbevf_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
				     u32 tx_flags, unsigned int paylen)
{
	__le32 olinfo_status = cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT);

	/* enable L4 checksum for TSO and TX checksum offload */
	if (tx_flags & IXGBE_TX_FLAGS_CSUM)
		olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM);

	/* enable IPv4 checksum for TSO */
	if (tx_flags & IXGBE_TX_FLAGS_IPV4)
		olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);

	/* use index 1 context for TSO/FSO/FCOE */
	if (tx_flags & IXGBE_TX_FLAGS_TSO)
		olinfo_status |= cpu_to_le32(1 << IXGBE_ADVTXD_IDX_SHIFT);

	/* Check Context must be set if Tx switch is enabled, which it
	 * always is when virtual functions are running
	 */
	olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC);

	tx_desc->read.olinfo_status = olinfo_status;
}

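/**
 * ixgbevf_tx_map - map the skb and post data descriptors to the ring
 * @tx_ring: Tx ring the packet will be sent on
 * @first: first tx_buffer of the packet; skb and flags were recorded here
 * @hdr_len: header length accounted by TSO, used to compute the payload
 *
 * DMA-maps the linear head and each page fragment, splitting buffers
 * larger than IXGBE_MAX_DATA_PER_TXD across multiple descriptors, writes
 * RS and EOP on the last descriptor and bumps the tail register.  On a
 * DMA mapping error, all mappings made for this packet are unwound.
 **/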
static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
			   struct ixgbevf_tx_buffer *first,
			   const u8 hdr_len)
{
	dma_addr_t dma;
	struct sk_buff *skb = first->skb;
	struct ixgbevf_tx_buffer *tx_buffer;
	union ixgbe_adv_tx_desc *tx_desc;
	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
	unsigned int data_len = skb->data_len;
	unsigned int size = skb_headlen(skb);
	unsigned int paylen = skb->len - hdr_len;
	u32 tx_flags = first->tx_flags;
	__le32 cmd_type;
	u16 i = tx_ring->next_to_use;

	tx_desc = IXGBEVF_TX_DESC(tx_ring, i);

	ixgbevf_tx_olinfo_status(tx_desc, tx_flags, paylen);
	cmd_type = ixgbevf_tx_cmd_type(tx_flags);

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(tx_ring->dev, dma))
		goto dma_error;

	/* record length, and DMA address */
	dma_unmap_len_set(first, len, size);
	dma_unmap_addr_set(first, dma, dma);

	tx_desc->read.buffer_addr = cpu_to_le64(dma);

	for (;;) {
		while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
			tx_desc->read.cmd_type_len =
				cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD);

			i++;
			tx_desc++;
			if (i == tx_ring->count) {
				tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
				i = 0;
			}

			dma += IXGBE_MAX_DATA_PER_TXD;
			size -= IXGBE_MAX_DATA_PER_TXD;

			tx_desc->read.buffer_addr = cpu_to_le64(dma);
			tx_desc->read.olinfo_status = 0;
		}

		if (likely(!data_len))
			break;

		tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);

		i++;
		tx_desc++;
		if (i == tx_ring->count) {
			tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
			i = 0;
		}

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		tx_buffer = &tx_ring->tx_buffer_info[i];
		dma_unmap_len_set(tx_buffer, len, size);
		dma_unmap_addr_set(tx_buffer, dma, dma);

		tx_desc->read.buffer_addr = cpu_to_le64(dma);
		tx_desc->read.olinfo_status = 0;

		frag++;
	}

	/* write last descriptor with RS and EOP bits */
	cmd_type |= cpu_to_le32(size) | cpu_to_le32(IXGBE_TXD_CMD);
	tx_desc->read.cmd_type_len = cmd_type;

	/* set the timestamp */
	first->time_stamp = jiffies;

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.  (Only applicable for weak-ordered
	 * memory model archs, such as IA-64).
	 *
	 * We also need this memory barrier (wmb) to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	/* notify HW of packet */
	ixgbevf_write_tail(tx_ring, i);

	return;
dma_error:
	dev_err(tx_ring->dev, "TX DMA map failed\n");

	/* clear dma mappings for failed tx_buffer_info map */
	for (;;) {
		tx_buffer = &tx_ring->tx_buffer_info[i];
		ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer);
		if (tx_buffer == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;
}

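/* stop the queue, then re-check in case a racing completion on another
 * CPU freed enough descriptors to let this frame through after all
 */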
static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
	/* Herbert's original patch had:
	 * smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it.
	 */
	smp_mb();

	/* We need to check again in case another CPU has just
	 * made room available.
	 */
	if (likely(ixgbevf_desc_unused(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
	++tx_ring->tx_stats.restart_queue;

	return 0;
}

static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
{
	if (likely(ixgbevf_desc_unused(tx_ring) >= size))
		return 0;
	return __ixgbevf_maybe_stop_tx(tx_ring, size);
}

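/**
 * ixgbevf_xmit_frame - transmit entry point
 * @skb: socket buffer to send
 * @netdev: network interface device structure
 *
 * Reserves ring space before touching the descriptors: one descriptor
 * per IXGBE_MAX_DATA_PER_TXD chunk of the head and each fragment, plus
 * a context descriptor and a two-descriptor tail gap.  For example,
 * assuming 4 KB pages, a frame with a 4 KB head and two page fragments
 * needs count = 1 + 2, and the check below reserves count + 3.
 **/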
static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbevf_tx_buffer *first;
	struct ixgbevf_ring *tx_ring;
	int tso;
	u32 tx_flags = 0;
	u16 count = TXD_USE_COUNT(skb_headlen(skb));
#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
	unsigned short f;
#endif
	u8 hdr_len = 0;
	u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL);

	if (!dst_mac || is_link_local_ether_addr(dst_mac)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	tx_ring = adapter->tx_ring[skb->queue_mapping];

	/* need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
	 *       + 2 desc gap to keep tail from touching head,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
#else
	count += skb_shinfo(skb)->nr_frags;
#endif
	if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = skb->len;
	first->gso_segs = 1;

	if (skb_vlan_tag_present(skb)) {
		tx_flags |= skb_vlan_tag_get(skb);
		tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= IXGBE_TX_FLAGS_VLAN;
	}

	/* record initial flags and protocol */
	first->tx_flags = tx_flags;
	first->protocol = vlan_get_protocol(skb);

	tso = ixgbevf_tso(tx_ring, first, &hdr_len);
	if (tso < 0)
		goto out_drop;
	else if (!tso)
		ixgbevf_tx_csum(tx_ring, first);

	ixgbevf_tx_map(tx_ring, first, hdr_len);

	ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);

	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(first->skb);
	first->skb = NULL;

	return NETDEV_TX_OK;
}

/**
 * ixgbevf_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbevf_set_mac(struct net_device *netdev, void *p)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	spin_lock_bh(&adapter->mbx_lock);

	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);

	spin_unlock_bh(&adapter->mbx_lock);

	return 0;
}

/**
 * ixgbevf_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
	int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;

	switch (adapter->hw.api_version) {
	case ixgbe_mbox_api_11:
	case ixgbe_mbox_api_12:
		max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
		break;
	default:
		if (adapter->hw.mac.type != ixgbe_mac_82599_vf)
			max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
		break;
	}

	/* MTU < 68 is an error and causes problems on some kernels */
	if ((new_mtu < 68) || (max_frame > max_possible_frame))
		return -EINVAL;

	hw_dbg(hw, "changing MTU from %d to %d\n",
	       netdev->mtu, new_mtu);
	/* must set new MTU before calling down or up */
	netdev->mtu = new_mtu;

	/* notify the PF of our intent to use this size of frame */
	ixgbevf_rlpml_set_vf(hw, max_frame);

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void ixgbevf_netpoll(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	int i;

	/* if interface is down do nothing */
	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		return;
	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbevf_msix_clean_rings(0, adapter->q_vector[i]);
}
#endif /* CONFIG_NET_POLL_CONTROLLER */

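/* detach the interface, quiesce the rings and save PCI state; shared by
 * the power-management suspend path and ixgbevf_shutdown()
 */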
static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		ixgbevf_down(adapter);
		ixgbevf_free_irq(adapter);
		ixgbevf_free_all_tx_resources(adapter);
		ixgbevf_free_all_rx_resources(adapter);
		rtnl_unlock();
	}

	ixgbevf_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;

#endif
	if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
		pci_disable_device(pdev);

	return 0;
}

#ifdef CONFIG_PM
static int ixgbevf_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	u32 err;

	pci_restore_state(pdev);
	/* pci_restore_state clears dev->state_saved so call
	 * pci_save_state to restore it.
	 */
	pci_save_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
		return err;
	}
	smp_mb__before_atomic();
	clear_bit(__IXGBEVF_DISABLED, &adapter->state);
	pci_set_master(pdev);

	ixgbevf_reset(adapter);

	rtnl_lock();
	err = ixgbevf_init_interrupt_scheme(adapter);
	rtnl_unlock();
	if (err) {
		dev_err(&pdev->dev, "Cannot initialize interrupts\n");
		return err;
	}

	if (netif_running(netdev)) {
		err = ixgbevf_open(netdev);
		if (err)
			return err;
	}

	netif_device_attach(netdev);

	return err;
}

#endif /* CONFIG_PM */
static void ixgbevf_shutdown(struct pci_dev *pdev)
{
	ixgbevf_suspend(pdev, PMSG_SUSPEND);
}

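/* fold the per-ring byte/packet counters into rtnl_link_stats64, using
 * u64_stats syncp retry loops so the 64-bit counters read consistently
 * on 32-bit hosts
 */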
static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
						   struct rtnl_link_stats64 *stats)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	unsigned int start;
	u64 bytes, packets;
	const struct ixgbevf_ring *ring;
	int i;

	ixgbevf_update_stats(adapter);

	stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		ring = adapter->rx_ring[i];
		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			bytes = ring->stats.bytes;
			packets = ring->stats.packets;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		stats->rx_bytes += bytes;
		stats->rx_packets += packets;
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		ring = adapter->tx_ring[i];
		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			bytes = ring->stats.bytes;
			packets = ring->stats.packets;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		stats->tx_bytes += bytes;
		stats->tx_packets += packets;
	}

	return stats;
}

static const struct net_device_ops ixgbevf_netdev_ops = {
	.ndo_open		= ixgbevf_open,
	.ndo_stop		= ixgbevf_close,
	.ndo_start_xmit		= ixgbevf_xmit_frame,
	.ndo_set_rx_mode	= ixgbevf_set_rx_mode,
	.ndo_get_stats64	= ixgbevf_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= ixgbevf_set_mac,
	.ndo_change_mtu		= ixgbevf_change_mtu,
	.ndo_tx_timeout		= ixgbevf_tx_timeout,
	.ndo_vlan_rx_add_vid	= ixgbevf_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ixgbevf_vlan_rx_kill_vid,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= ixgbevf_busy_poll_recv,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ixgbevf_netpoll,
#endif
};

static void ixgbevf_assign_netdev_ops(struct net_device *dev)
{
	dev->netdev_ops = &ixgbevf_netdev_ops;
	ixgbevf_set_ethtool_ops(dev);
	dev->watchdog_timeo = 5 * HZ;
}

/**
 * ixgbevf_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgbevf_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct ixgbevf_adapter *adapter = NULL;
	struct ixgbe_hw *hw = NULL;
	const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
	int err, pci_using_dac;
	bool disable_dev = false;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
	} else {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
			goto err_dma;
		}
		pci_using_dac = 0;
	}

	err = pci_request_regions(pdev, ixgbevf_driver_name);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
		goto err_pci_reg;
	}

	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
				   MAX_TX_QUEUES);
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	adapter = netdev_priv(netdev);

	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

	/* call save state here in standalone driver because it relies on
	 * adapter struct to exist, and needs to call netdev_priv
	 */
	pci_save_state(pdev);

	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
			      pci_resource_len(pdev, 0));
	adapter->io_addr = hw->hw_addr;
	if (!hw->hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	ixgbevf_assign_netdev_ops(netdev);

	/* Setup HW API */
	memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
	hw->mac.type = ii->mac;

	memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
	       sizeof(struct ixgbe_mbx_operations));

	/* setup the private structure */
	err = ixgbevf_sw_init(adapter);
	if (err)
		goto err_sw_init;

	/* The HW MAC address was set and/or determined in sw_init */
	if (!is_valid_ether_addr(netdev->dev_addr)) {
		pr_err("invalid MAC address\n");
		err = -EIO;
		goto err_sw_init;
	}

	netdev->hw_features = NETIF_F_SG |
			      NETIF_F_IP_CSUM |
			      NETIF_F_IPV6_CSUM |
			      NETIF_F_TSO |
			      NETIF_F_TSO6 |
			      NETIF_F_RXCSUM;

	netdev->features = netdev->hw_features |
			   NETIF_F_HW_VLAN_CTAG_TX |
			   NETIF_F_HW_VLAN_CTAG_RX |
			   NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_TSO |
				 NETIF_F_TSO6 |
				 NETIF_F_IP_CSUM |
				 NETIF_F_IPV6_CSUM |
				 NETIF_F_SG;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	if (IXGBE_REMOVED(hw->hw_addr)) {
		err = -EIO;
		goto err_sw_init;
	}

	setup_timer(&adapter->service_timer, &ixgbevf_service_timer,
		    (unsigned long)adapter);

	INIT_WORK(&adapter->service_task, ixgbevf_service_task);
	set_bit(__IXGBEVF_SERVICE_INITED, &adapter->state);
	clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state);

	err = ixgbevf_init_interrupt_scheme(adapter);
	if (err)
		goto err_sw_init;

	strcpy(netdev->name, "eth%d");

	err = register_netdev(netdev);
	if (err)
		goto err_register;

	pci_set_drvdata(pdev, netdev);
	netif_carrier_off(netdev);

	ixgbevf_init_last_counter_stats(adapter);

	/* print the VF info */
	dev_info(&pdev->dev, "%pM\n", netdev->dev_addr);
	dev_info(&pdev->dev, "MAC: %d\n", hw->mac.type);

	switch (hw->mac.type) {
	case ixgbe_mac_X550_vf:
		dev_info(&pdev->dev, "Intel(R) X550 Virtual Function\n");
		break;
	case ixgbe_mac_X540_vf:
		dev_info(&pdev->dev, "Intel(R) X540 Virtual Function\n");
		break;
	case ixgbe_mac_82599_vf:
	default:
		dev_info(&pdev->dev, "Intel(R) 82599 Virtual Function\n");
		break;
	}

	return 0;

err_register:
	ixgbevf_clear_interrupt_scheme(adapter);
err_sw_init:
	ixgbevf_reset_interrupt_capability(adapter);
	iounmap(adapter->io_addr);
err_ioremap:
	disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state);
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	if (!adapter || disable_dev)
		pci_disable_device(pdev);
	return err;
}

/**
 * ixgbevf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbevf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void ixgbevf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter;
	bool disable_dev;

	if (!netdev)
		return;

	adapter = netdev_priv(netdev);

	set_bit(__IXGBEVF_REMOVING, &adapter->state);
	cancel_work_sync(&adapter->service_task);

	if (netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(netdev);

	ixgbevf_clear_interrupt_scheme(adapter);
	ixgbevf_reset_interrupt_capability(adapter);

	iounmap(adapter->io_addr);
	pci_release_regions(pdev);

	hw_dbg(&adapter->hw, "Remove complete\n");

	disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state);
	free_netdev(netdev);

	if (disable_dev)
		pci_disable_device(pdev);
}

/**
 * ixgbevf_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 **/
static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
						  pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (!test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state))
		return PCI_ERS_RESULT_DISCONNECT;

	rtnl_lock();
	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(netdev))
		ixgbevf_down(adapter);

	if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
		pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * ixgbevf_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first half of the ixgbevf_resume routine.
 **/
static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	smp_mb__before_atomic();
	clear_bit(__IXGBEVF_DISABLED, &adapter->state);
	pci_set_master(pdev);

	ixgbevf_reset(adapter);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * ixgbevf_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second half of the ixgbevf_resume routine.
 **/
static void ixgbevf_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev))
		ixgbevf_up(adapter);

	netif_device_attach(netdev);
}

/* PCI Error Recovery (ERS) */
static const struct pci_error_handlers ixgbevf_err_handler = {
	.error_detected = ixgbevf_io_error_detected,
	.slot_reset = ixgbevf_io_slot_reset,
	.resume = ixgbevf_io_resume,
};

static struct pci_driver ixgbevf_driver = {
	.name		= ixgbevf_driver_name,
	.id_table	= ixgbevf_pci_tbl,
	.probe		= ixgbevf_probe,
	.remove		= ixgbevf_remove,
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend	= ixgbevf_suspend,
	.resume		= ixgbevf_resume,
#endif
	.shutdown	= ixgbevf_shutdown,
	.err_handler	= &ixgbevf_err_handler
};

/**
 * ixgbevf_init_module - Driver Registration Routine
 *
 * ixgbevf_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init ixgbevf_init_module(void)
{
	int ret;

	pr_info("%s - version %s\n", ixgbevf_driver_string,
		ixgbevf_driver_version);

	pr_info("%s\n", ixgbevf_copyright);

	ret = pci_register_driver(&ixgbevf_driver);
	return ret;
}

module_init(ixgbevf_init_module);

/**
 * ixgbevf_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbevf_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit ixgbevf_exit_module(void)
{
	pci_unregister_driver(&ixgbevf_driver);
}

#ifdef DEBUG
/**
 * ixgbevf_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
{
	struct ixgbevf_adapter *adapter = hw->back;

	return adapter->netdev->name;
}

#endif
module_exit(ixgbevf_exit_module);

/* ixgbevf_main.c */