ixgbevf: move ring specific stats into ring specific structure
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
1 /*******************************************************************************
2
3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2012 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26 *******************************************************************************/
27
28
29 /******************************************************************************
30 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
31 ******************************************************************************/
32
33 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
34
35 #include <linux/types.h>
36 #include <linux/bitops.h>
37 #include <linux/module.h>
38 #include <linux/pci.h>
39 #include <linux/netdevice.h>
40 #include <linux/vmalloc.h>
41 #include <linux/string.h>
42 #include <linux/in.h>
43 #include <linux/ip.h>
44 #include <linux/tcp.h>
45 #include <linux/sctp.h>
46 #include <linux/ipv6.h>
47 #include <linux/slab.h>
48 #include <net/checksum.h>
49 #include <net/ip6_checksum.h>
50 #include <linux/ethtool.h>
51 #include <linux/if.h>
52 #include <linux/if_vlan.h>
53 #include <linux/prefetch.h>
54
55 #include "ixgbevf.h"
56
57 const char ixgbevf_driver_name[] = "ixgbevf";
58 static const char ixgbevf_driver_string[] =
59 "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
60
61 #define DRV_VERSION "2.12.1-k"
62 const char ixgbevf_driver_version[] = DRV_VERSION;
63 static char ixgbevf_copyright[] =
64 "Copyright (c) 2009 - 2012 Intel Corporation.";
65
66 static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
67 [board_82599_vf] = &ixgbevf_82599_vf_info,
68 [board_X540_vf] = &ixgbevf_X540_vf_info,
69 };
70
71 /* ixgbevf_pci_tbl - PCI Device ID Table
72 *
73 * Wildcard entries (PCI_ANY_ID) should come last
74 * Last entry must be all 0s
75 *
76 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
77 * Class, Class Mask, private data (not used) }
78 */
79 static DEFINE_PCI_DEVICE_TABLE(ixgbevf_pci_tbl) = {
80 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf },
81 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf },
82 /* required last entry */
83 {0, }
84 };
85 MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);
86
87 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
88 MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver");
89 MODULE_LICENSE("GPL");
90 MODULE_VERSION(DRV_VERSION);
91
92 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
93 static int debug = -1;
94 module_param(debug, int, 0);
95 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
96
97 /* forward decls */
98 static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter);
99 static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
100 static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);
101
102 static inline void ixgbevf_release_rx_desc(struct ixgbevf_ring *rx_ring,
103 u32 val)
104 {
105 rx_ring->next_to_use = val;
106
107 /*
108 * Force memory writes to complete before letting h/w
109 * know there are new descriptors to fetch. (Only
110 * applicable for weak-ordered memory model archs,
111 * such as IA-64).
112 */
113 wmb();
114 writel(val, rx_ring->tail);
115 }
116
117 /**
118 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
119 * @adapter: pointer to adapter struct
120 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
121 * @queue: queue to map the corresponding interrupt to
122 * @msix_vector: the vector to map to the corresponding queue
123 */
124 static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
125 u8 queue, u8 msix_vector)
126 {
127 u32 ivar, index;
128 struct ixgbe_hw *hw = &adapter->hw;
129 if (direction == -1) {
130 /* other causes */
131 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
132 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
133 ivar &= ~0xFF;
134 ivar |= msix_vector;
135 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
136 } else {
137 /* tx or rx causes */
138 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
139 index = ((16 * (queue & 1)) + (8 * direction));
140 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
141 ivar &= ~(0xFF << index);
142 ivar |= (msix_vector << index);
143 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
144 }
145 }
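
/* Illustrative sketch, not part of the driver: how ixgbevf_set_ivar() above
 * picks a byte lane inside a VTIVAR register.  Each 32-bit VTIVAR holds four
 * 8-bit entries (two queues x Rx/Tx); queue >> 1 selects the register and
 * (16 * (queue & 1)) + (8 * direction) selects the byte.  Standalone
 * user-space C; 0x80 is only a stand-in for IXGBE_IVAR_ALLOC_VAL.
 */
#include <stdint.h>
#include <stdio.h>

static void demo_set_ivar(uint32_t *ivar_regs, int direction,
			  unsigned int queue, uint8_t msix_vector)
{
	unsigned int reg = queue >> 1;                             /* which VTIVAR register */
	unsigned int index = (16 * (queue & 1)) + (8 * direction); /* byte lane inside it   */

	msix_vector |= 0x80;                                       /* "allocated" marker bit */
	ivar_regs[reg] &= ~(0xFFu << index);
	ivar_regs[reg] |= (uint32_t)msix_vector << index;
}

int main(void)
{
	uint32_t ivar[2] = { 0, 0 };

	demo_set_ivar(ivar, 0, 0, 0);	/* Rx queue 0 -> vector 0 */
	demo_set_ivar(ivar, 1, 0, 0);	/* Tx queue 0 -> vector 0 */
	demo_set_ivar(ivar, 0, 1, 1);	/* Rx queue 1 -> vector 1 */

	printf("VTIVAR[0] = 0x%08x\n", (unsigned int)ivar[0]);	/* 0x00818080 */
	return 0;
}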
146
147 static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
148 struct ixgbevf_tx_buffer
149 *tx_buffer_info)
150 {
151 if (tx_buffer_info->dma) {
152 if (tx_buffer_info->mapped_as_page)
153 dma_unmap_page(tx_ring->dev,
154 tx_buffer_info->dma,
155 tx_buffer_info->length,
156 DMA_TO_DEVICE);
157 else
158 dma_unmap_single(tx_ring->dev,
159 tx_buffer_info->dma,
160 tx_buffer_info->length,
161 DMA_TO_DEVICE);
162 tx_buffer_info->dma = 0;
163 }
164 if (tx_buffer_info->skb) {
165 dev_kfree_skb_any(tx_buffer_info->skb);
166 tx_buffer_info->skb = NULL;
167 }
168 tx_buffer_info->time_stamp = 0;
169 /* tx_buffer_info must be completely set up in the transmit path */
170 }
171
172 #define IXGBE_MAX_TXD_PWR 14
173 #define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
174
175 /* Tx Descriptors needed, worst case */
176 #define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
177 #define DESC_NEEDED (MAX_SKB_FRAGS + 4)
178
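
/* Illustrative sketch, not part of the driver: the descriptor-budget math
 * behind TXD_USE_COUNT() and DESC_NEEDED above.  One Tx descriptor can carry
 * at most 1 << IXGBE_MAX_TXD_PWR = 16384 bytes, so a buffer of S bytes needs
 * ceil(S / 16384) descriptors.  Standalone user-space C.
 */
#include <stdio.h>

#define DEMO_MAX_DATA_PER_TXD (1 << 14)
#define DEMO_TXD_USE_COUNT(S) \
	(((S) + DEMO_MAX_DATA_PER_TXD - 1) / DEMO_MAX_DATA_PER_TXD)

int main(void)
{
	printf("1500 byte buffer  -> %d descriptor(s)\n", DEMO_TXD_USE_COUNT(1500));  /* 1 */
	printf("9000 byte buffer  -> %d descriptor(s)\n", DEMO_TXD_USE_COUNT(9000));  /* 1 */
	printf("65536 byte buffer -> %d descriptor(s)\n", DEMO_TXD_USE_COUNT(65536)); /* 4 */
	return 0;
}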
179 static void ixgbevf_tx_timeout(struct net_device *netdev);
180
181 /**
182 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
183 * @q_vector: structure containing interrupt and ring information
184 * @tx_ring: tx ring to clean
185 **/
186 static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
187 struct ixgbevf_ring *tx_ring)
188 {
189 struct ixgbevf_adapter *adapter = q_vector->adapter;
190 union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
191 struct ixgbevf_tx_buffer *tx_buffer_info;
192 unsigned int i, count = 0;
193 unsigned int total_bytes = 0, total_packets = 0;
194
195 if (test_bit(__IXGBEVF_DOWN, &adapter->state))
196 return true;
197
198 i = tx_ring->next_to_clean;
199 tx_buffer_info = &tx_ring->tx_buffer_info[i];
200 eop_desc = tx_buffer_info->next_to_watch;
201
202 do {
203 bool cleaned = false;
204
205 /* if next_to_watch is not set then there is no work pending */
206 if (!eop_desc)
207 break;
208
209 /* prevent any other reads prior to eop_desc */
210 read_barrier_depends();
211
212 /* if DD is not set pending work has not been completed */
213 if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
214 break;
215
216 /* clear next_to_watch to prevent false hangs */
217 tx_buffer_info->next_to_watch = NULL;
218
219 for ( ; !cleaned; count++) {
220 struct sk_buff *skb;
221 tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
222 cleaned = (tx_desc == eop_desc);
223 skb = tx_buffer_info->skb;
224
225 if (cleaned && skb) {
226 unsigned int segs, bytecount;
227
228 /* gso_segs is currently only valid for tcp */
229 segs = skb_shinfo(skb)->gso_segs ?: 1;
230 /* multiply data chunks by size of headers */
231 bytecount = ((segs - 1) * skb_headlen(skb)) +
232 skb->len;
233 total_packets += segs;
234 total_bytes += bytecount;
235 }
236
237 ixgbevf_unmap_and_free_tx_resource(tx_ring,
238 tx_buffer_info);
239
240 tx_desc->wb.status = 0;
241
242 i++;
243 if (i == tx_ring->count)
244 i = 0;
245
246 tx_buffer_info = &tx_ring->tx_buffer_info[i];
247 }
248
249 eop_desc = tx_buffer_info->next_to_watch;
250 } while (count < tx_ring->count);
251
252 tx_ring->next_to_clean = i;
253
254 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
255 if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
256 (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
257 /* Make sure that anybody stopping the queue after this
258 * sees the new next_to_clean.
259 */
260 smp_mb();
261 if (__netif_subqueue_stopped(tx_ring->netdev,
262 tx_ring->queue_index) &&
263 !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
264 netif_wake_subqueue(tx_ring->netdev,
265 tx_ring->queue_index);
266 ++tx_ring->tx_stats.restart_queue;
267 }
268 }
269
270 u64_stats_update_begin(&tx_ring->syncp);
271 tx_ring->stats.bytes += total_bytes;
272 tx_ring->stats.packets += total_packets;
273 u64_stats_update_end(&tx_ring->syncp);
274 q_vector->tx.total_bytes += total_bytes;
275 q_vector->tx.total_packets += total_packets;
276
277 return count < tx_ring->count;
278 }
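
/* Illustrative sketch, not part of the driver: the byte/packet accounting in
 * ixgbevf_clean_tx_irq() above for a TSO skb.  Hardware replicates the
 * protocol headers for every segment after the first, so the stats add
 * (segs - 1) header copies on top of skb->len.  Standalone user-space C with
 * made-up example numbers.
 */
#include <stdio.h>

int main(void)
{
	unsigned int skb_len = 65226;	/* headers + TSO payload (example)  */
	unsigned int headlen = 66;	/* skb_headlen(): eth + ip + tcp    */
	unsigned int gso_segs = 45;	/* segments the hardware will emit  */

	unsigned int bytecount = (gso_segs - 1) * headlen + skb_len;

	printf("total_packets += %u\n", gso_segs);	/* 45    */
	printf("total_bytes   += %u\n", bytecount);	/* 68130 */
	return 0;
}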
279
280 /**
281 * ixgbevf_receive_skb - Send a completed packet up the stack
282 * @q_vector: structure containing interrupt and ring information
283 * @skb: packet to send up
284 * @status: hardware indication of status of receive
285 * @rx_desc: rx descriptor
286 **/
287 static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
288 struct sk_buff *skb, u8 status,
289 union ixgbe_adv_rx_desc *rx_desc)
290 {
291 struct ixgbevf_adapter *adapter = q_vector->adapter;
292 bool is_vlan = (status & IXGBE_RXD_STAT_VP);
293 u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
294
295 if (is_vlan && test_bit(tag & VLAN_VID_MASK, adapter->active_vlans))
296 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
297
298 if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
299 napi_gro_receive(&q_vector->napi, skb);
300 else
301 netif_rx(skb);
302 }
303
304 /**
305 * ixgbevf_rx_skb - Helper function to determine proper Rx method
306 * @q_vector: structure containing interrupt and ring information
307 * @skb: packet to send up
308 * @status: hardware indication of status of receive
309 * @rx_desc: rx descriptor
310 **/
311 static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
312 struct sk_buff *skb, u8 status,
313 union ixgbe_adv_rx_desc *rx_desc)
314 {
315 #ifdef CONFIG_NET_RX_BUSY_POLL
316 skb_mark_napi_id(skb, &q_vector->napi);
317
318 if (ixgbevf_qv_busy_polling(q_vector)) {
319 netif_receive_skb(skb);
320 /* exit early if we busy polled */
321 return;
322 }
323 #endif /* CONFIG_NET_RX_BUSY_POLL */
324
325 ixgbevf_receive_skb(q_vector, skb, status, rx_desc);
326 }
327
328 /**
329 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
330 * @ring: pointer to Rx descriptor ring structure
331 * @status_err: hardware indication of status of receive
332 * @skb: skb currently being received and modified
333 **/
334 static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
335 u32 status_err, struct sk_buff *skb)
336 {
337 skb_checksum_none_assert(skb);
338
339 /* Rx csum disabled */
340 if (!(ring->netdev->features & NETIF_F_RXCSUM))
341 return;
342
343 /* if IP and error */
344 if ((status_err & IXGBE_RXD_STAT_IPCS) &&
345 (status_err & IXGBE_RXDADV_ERR_IPE)) {
346 ring->rx_stats.csum_err++;
347 return;
348 }
349
350 if (!(status_err & IXGBE_RXD_STAT_L4CS))
351 return;
352
353 if (status_err & IXGBE_RXDADV_ERR_TCPE) {
354 ring->rx_stats.csum_err++;
355 return;
356 }
357
358 /* It must be a TCP or UDP packet with a valid checksum */
359 skb->ip_summed = CHECKSUM_UNNECESSARY;
360 ring->hw_csum_rx_good++;
361 }
362
363 /**
364 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
365 * @rx_ring: rx descriptor ring (for a specific queue) to setup buffers on
366 **/
367 static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
368 int cleaned_count)
369 {
370 union ixgbe_adv_rx_desc *rx_desc;
371 struct ixgbevf_rx_buffer *bi;
372 unsigned int i = rx_ring->next_to_use;
373
374 while (cleaned_count--) {
375 rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
376 bi = &rx_ring->rx_buffer_info[i];
377
378 if (!bi->skb) {
379 struct sk_buff *skb;
380
381 skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
382 rx_ring->rx_buf_len);
383 if (!skb) {
384 rx_ring->rx_stats.alloc_rx_buff_failed++;
    goto no_buffers;
    }
385
386 bi->skb = skb;
387
388 bi->dma = dma_map_single(rx_ring->dev, skb->data,
389 rx_ring->rx_buf_len,
390 DMA_FROM_DEVICE);
391 if (dma_mapping_error(rx_ring->dev, bi->dma)) {
392 dev_kfree_skb(skb);
393 bi->skb = NULL;
394 dev_err(rx_ring->dev, "Rx DMA map failed\n");
395 break;
396 }
397 }
398 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
399
400 i++;
401 if (i == rx_ring->count)
402 i = 0;
403 }
404
405 no_buffers:
407 if (rx_ring->next_to_use != i)
408 ixgbevf_release_rx_desc(rx_ring, i);
409 }
410
411 static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
412 u32 qmask)
413 {
414 struct ixgbe_hw *hw = &adapter->hw;
415
416 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
417 }
418
419 static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
420 struct ixgbevf_ring *rx_ring,
421 int budget)
422 {
423 union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
424 struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer;
425 struct sk_buff *skb;
426 unsigned int i;
427 u32 len, staterr;
428 int cleaned_count = 0;
429 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
430
431 i = rx_ring->next_to_clean;
432 rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
433 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
434 rx_buffer_info = &rx_ring->rx_buffer_info[i];
435
436 while (staterr & IXGBE_RXD_STAT_DD) {
437 if (!budget)
438 break;
439 budget--;
440
441 rmb(); /* read descriptor and rx_buffer_info after status DD */
442 len = le16_to_cpu(rx_desc->wb.upper.length);
443 skb = rx_buffer_info->skb;
444 prefetch(skb->data - NET_IP_ALIGN);
445 rx_buffer_info->skb = NULL;
446
447 if (rx_buffer_info->dma) {
448 dma_unmap_single(rx_ring->dev, rx_buffer_info->dma,
449 rx_ring->rx_buf_len,
450 DMA_FROM_DEVICE);
451 rx_buffer_info->dma = 0;
452 skb_put(skb, len);
453 }
454
455 i++;
456 if (i == rx_ring->count)
457 i = 0;
458
459 next_rxd = IXGBEVF_RX_DESC(rx_ring, i);
460 prefetch(next_rxd);
461 cleaned_count++;
462
463 next_buffer = &rx_ring->rx_buffer_info[i];
464
465 if (!(staterr & IXGBE_RXD_STAT_EOP)) {
466 skb->next = next_buffer->skb;
467 IXGBE_CB(skb->next)->prev = skb;
468 rx_ring->rx_stats.non_eop_descs++;
469 goto next_desc;
470 }
471
472 /* we should not be chaining buffers; if we did, drop the skb */
473 if (IXGBE_CB(skb)->prev) {
474 do {
475 struct sk_buff *this = skb;
476 skb = IXGBE_CB(skb)->prev;
477 dev_kfree_skb(this);
478 } while (skb);
479 goto next_desc;
480 }
481
482 /* ERR_MASK will only have valid bits if EOP set */
483 if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
484 dev_kfree_skb_irq(skb);
485 goto next_desc;
486 }
487
488 ixgbevf_rx_checksum(rx_ring, staterr, skb);
489
490 /* probably a little skewed due to removing CRC */
491 total_rx_bytes += skb->len;
492 total_rx_packets++;
493
494 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
495
496 /* Workaround hardware that can't do proper VEPA multicast
497 * source pruning.
498 */
499 if ((skb->pkt_type & (PACKET_BROADCAST | PACKET_MULTICAST)) &&
500 ether_addr_equal(rx_ring->netdev->dev_addr,
501 eth_hdr(skb)->h_source)) {
502 dev_kfree_skb_irq(skb);
503 goto next_desc;
504 }
505
506 ixgbevf_rx_skb(q_vector, skb, staterr, rx_desc);
507
508 next_desc:
509 rx_desc->wb.upper.status_error = 0;
510
511 /* return some buffers to hardware, one at a time is too slow */
512 if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
513 ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
514 cleaned_count = 0;
515 }
516
517 /* use prefetched values */
518 rx_desc = next_rxd;
519 rx_buffer_info = &rx_ring->rx_buffer_info[i];
520
521 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
522 }
523
524 rx_ring->next_to_clean = i;
525 cleaned_count = ixgbevf_desc_unused(rx_ring);
526
527 if (cleaned_count)
528 ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
529
530 u64_stats_update_begin(&rx_ring->syncp);
531 rx_ring->stats.packets += total_rx_packets;
532 rx_ring->stats.bytes += total_rx_bytes;
533 u64_stats_update_end(&rx_ring->syncp);
534 q_vector->rx.total_packets += total_rx_packets;
535 q_vector->rx.total_bytes += total_rx_bytes;
536
537 return total_rx_packets;
538 }
539
540 /**
541 * ixgbevf_poll - NAPI polling callback
542 * @napi: napi struct with our devices info in it
543 * @budget: amount of work driver is allowed to do this pass, in packets
544 *
545 * This function will clean one or more rings associated with a
546 * q_vector.
547 **/
548 static int ixgbevf_poll(struct napi_struct *napi, int budget)
549 {
550 struct ixgbevf_q_vector *q_vector =
551 container_of(napi, struct ixgbevf_q_vector, napi);
552 struct ixgbevf_adapter *adapter = q_vector->adapter;
553 struct ixgbevf_ring *ring;
554 int per_ring_budget;
555 bool clean_complete = true;
556
557 ixgbevf_for_each_ring(ring, q_vector->tx)
558 clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);
559
560 #ifdef CONFIG_NET_RX_BUSY_POLL
561 if (!ixgbevf_qv_lock_napi(q_vector))
562 return budget;
563 #endif
564
565 /* attempt to distribute budget to each queue fairly, but don't allow
566 * the budget to go below 1 because we'll exit polling */
567 if (q_vector->rx.count > 1)
568 per_ring_budget = max(budget/q_vector->rx.count, 1);
569 else
570 per_ring_budget = budget;
571
572 adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
573 ixgbevf_for_each_ring(ring, q_vector->rx)
574 clean_complete &= (ixgbevf_clean_rx_irq(q_vector, ring,
575 per_ring_budget)
576 < per_ring_budget);
577 adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
578
579 #ifdef CONFIG_NET_RX_BUSY_POLL
580 ixgbevf_qv_unlock_napi(q_vector);
581 #endif
582
583 /* If all work not completed, return budget and keep polling */
584 if (!clean_complete)
585 return budget;
586 /* all work done, exit the polling mode */
587 napi_complete(napi);
588 if (adapter->rx_itr_setting & 1)
589 ixgbevf_set_itr(q_vector);
590 if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
591 ixgbevf_irq_enable_queues(adapter,
592 1 << q_vector->v_idx);
593
594 return 0;
595 }
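
/* Illustrative sketch, not part of the driver: the per-ring budget split in
 * ixgbevf_poll() above.  The NAPI budget is divided across the Rx rings of
 * the vector, but never drops below 1 so every ring makes some progress.
 * Standalone user-space C.
 */
#include <stdio.h>

static int demo_per_ring_budget(int budget, int rx_count)
{
	if (rx_count > 1) {
		int share = budget / rx_count;

		return share > 1 ? share : 1;
	}
	return budget;
}

int main(void)
{
	printf("budget 64, 1 ring  -> %d\n", demo_per_ring_budget(64, 1)); /* 64 */
	printf("budget 64, 3 rings -> %d\n", demo_per_ring_budget(64, 3)); /* 21 */
	printf("budget 2, 4 rings  -> %d\n", demo_per_ring_budget(2, 4));  /* 1  */
	return 0;
}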
596
597 /**
598 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
599 * @q_vector: structure containing interrupt and ring information
600 */
601 void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
602 {
603 struct ixgbevf_adapter *adapter = q_vector->adapter;
604 struct ixgbe_hw *hw = &adapter->hw;
605 int v_idx = q_vector->v_idx;
606 u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;
607
608 /*
609 * set the WDIS bit to not clear the timer bits and cause an
610 * immediate assertion of the interrupt
611 */
612 itr_reg |= IXGBE_EITR_CNT_WDIS;
613
614 IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
615 }
616
617 #ifdef CONFIG_NET_RX_BUSY_POLL
618 /* must be called with local_bh_disable()d */
619 static int ixgbevf_busy_poll_recv(struct napi_struct *napi)
620 {
621 struct ixgbevf_q_vector *q_vector =
622 container_of(napi, struct ixgbevf_q_vector, napi);
623 struct ixgbevf_adapter *adapter = q_vector->adapter;
624 struct ixgbevf_ring *ring;
625 int found = 0;
626
627 if (test_bit(__IXGBEVF_DOWN, &adapter->state))
628 return LL_FLUSH_FAILED;
629
630 if (!ixgbevf_qv_lock_poll(q_vector))
631 return LL_FLUSH_BUSY;
632
633 ixgbevf_for_each_ring(ring, q_vector->rx) {
634 found = ixgbevf_clean_rx_irq(q_vector, ring, 4);
635 #ifdef BP_EXTENDED_STATS
636 if (found)
637 ring->stats.cleaned += found;
638 else
639 ring->stats.misses++;
640 #endif
641 if (found)
642 break;
643 }
644
645 ixgbevf_qv_unlock_poll(q_vector);
646
647 return found;
648 }
649 #endif /* CONFIG_NET_RX_BUSY_POLL */
650
651 /**
652 * ixgbevf_configure_msix - Configure MSI-X hardware
653 * @adapter: board private structure
654 *
655 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
656 * interrupts.
657 **/
658 static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
659 {
660 struct ixgbevf_q_vector *q_vector;
661 int q_vectors, v_idx;
662
663 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
664 adapter->eims_enable_mask = 0;
665
666 /*
667 * Populate the IVAR table and set the ITR values to the
668 * corresponding register.
669 */
670 for (v_idx = 0; v_idx < q_vectors; v_idx++) {
671 struct ixgbevf_ring *ring;
672 q_vector = adapter->q_vector[v_idx];
673
674 ixgbevf_for_each_ring(ring, q_vector->rx)
675 ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);
676
677 ixgbevf_for_each_ring(ring, q_vector->tx)
678 ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);
679
680 if (q_vector->tx.ring && !q_vector->rx.ring) {
681 /* tx only vector */
682 if (adapter->tx_itr_setting == 1)
683 q_vector->itr = IXGBE_10K_ITR;
684 else
685 q_vector->itr = adapter->tx_itr_setting;
686 } else {
687 /* rx or rx/tx vector */
688 if (adapter->rx_itr_setting == 1)
689 q_vector->itr = IXGBE_20K_ITR;
690 else
691 q_vector->itr = adapter->rx_itr_setting;
692 }
693
694 /* add q_vector eims value to global eims_enable_mask */
695 adapter->eims_enable_mask |= 1 << v_idx;
696
697 ixgbevf_write_eitr(q_vector);
698 }
699
700 ixgbevf_set_ivar(adapter, -1, 1, v_idx);
701 /* setup eims_other and add value to global eims_enable_mask */
702 adapter->eims_other = 1 << v_idx;
703 adapter->eims_enable_mask |= adapter->eims_other;
704 }
705
706 enum latency_range {
707 lowest_latency = 0,
708 low_latency = 1,
709 bulk_latency = 2,
710 latency_invalid = 255
711 };
712
713 /**
714 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
715 * @q_vector: structure containing interrupt and ring information
716 * @ring_container: structure containing ring performance data
717 *
718 * Stores a new ITR value based on packets and byte
719 * counts during the last interrupt. The advantage of per interrupt
720 * computation is faster updates and more accurate ITR for the current
721 * traffic pattern. Constants in this function were computed
722 * based on theoretical maximum wire speed and thresholds were set based
723 * on testing data as well as attempting to minimize response time
724 * while increasing bulk throughput.
725 **/
726 static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
727 struct ixgbevf_ring_container *ring_container)
728 {
729 int bytes = ring_container->total_bytes;
730 int packets = ring_container->total_packets;
731 u32 timepassed_us;
732 u64 bytes_perint;
733 u8 itr_setting = ring_container->itr;
734
735 if (packets == 0)
736 return;
737
738 /* simple throttlerate management
739 * 0-20MB/s lowest (100000 ints/s)
740 * 20-100MB/s low (20000 ints/s)
741 * 100-1249MB/s bulk (8000 ints/s)
742 */
743 /* what was last interrupt timeslice? */
744 timepassed_us = q_vector->itr >> 2;
745 bytes_perint = bytes / timepassed_us; /* bytes/usec */
746
747 switch (itr_setting) {
748 case lowest_latency:
749 if (bytes_perint > 10)
750 itr_setting = low_latency;
751 break;
752 case low_latency:
753 if (bytes_perint > 20)
754 itr_setting = bulk_latency;
755 else if (bytes_perint <= 10)
756 itr_setting = lowest_latency;
757 break;
758 case bulk_latency:
759 if (bytes_perint <= 20)
760 itr_setting = low_latency;
761 break;
762 }
763
764 /* clear work counters since we have the values we need */
765 ring_container->total_bytes = 0;
766 ring_container->total_packets = 0;
767
768 /* write updated itr to ring container */
769 ring_container->itr = itr_setting;
770 }
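
/* Illustrative sketch, not part of the driver: the throughput estimate used
 * by ixgbevf_update_itr() above.  q_vector->itr >> 2 approximates the last
 * interrupt interval in microseconds, and bytes / usec is compared against
 * the 10 and 20 thresholds (roughly 10 MB/s and 20 MB/s) to pick a latency
 * bucket.  Standalone user-space C.
 */
#include <stdio.h>

int main(void)
{
	unsigned int itr = 200;			/* IXGBE_20K_ITR             */
	unsigned int timepassed_us = itr >> 2;	/* ~50 us between interrupts */
	unsigned int bytes = 3000;		/* ~two full-size frames     */
	unsigned int bytes_perint = bytes / timepassed_us;

	printf("bytes/usec = %u -> %s\n", bytes_perint,
	       bytes_perint > 20 ? "bulk_latency" :
	       bytes_perint > 10 ? "low_latency" : "lowest_latency");
	return 0;
}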
771
772 static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
773 {
774 u32 new_itr = q_vector->itr;
775 u8 current_itr;
776
777 ixgbevf_update_itr(q_vector, &q_vector->tx);
778 ixgbevf_update_itr(q_vector, &q_vector->rx);
779
780 current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
781
782 switch (current_itr) {
783 /* counts and packets in update_itr are dependent on these numbers */
784 case lowest_latency:
785 new_itr = IXGBE_100K_ITR;
786 break;
787 case low_latency:
788 new_itr = IXGBE_20K_ITR;
789 break;
790 case bulk_latency:
791 default:
792 new_itr = IXGBE_8K_ITR;
793 break;
794 }
795
796 if (new_itr != q_vector->itr) {
797 /* do an exponential smoothing */
798 new_itr = (10 * new_itr * q_vector->itr) /
799 ((9 * new_itr) + q_vector->itr);
800
801 /* save the algorithm value here */
802 q_vector->itr = new_itr;
803
804 ixgbevf_write_eitr(q_vector);
805 }
806 }
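
/* Illustrative sketch, not part of the driver: the smoothing step at the end
 * of ixgbevf_set_itr() above.  Rather than jumping straight to the new ITR
 * target, the driver blends it with the current value using
 * itr = (10 * new * old) / (9 * new + old).  Standalone user-space C;
 * 200 and 40 stand in for IXGBE_20K_ITR and IXGBE_100K_ITR.
 */
#include <stdio.h>

static unsigned int demo_smooth_itr(unsigned int new_itr, unsigned int old_itr)
{
	return (10 * new_itr * old_itr) / ((9 * new_itr) + old_itr);
}

int main(void)
{
	/* current 20K-ints/s setting trending toward the 100K target */
	printf("step 1: %u\n", demo_smooth_itr(40, 200)); /* 142 */
	printf("step 2: %u\n", demo_smooth_itr(40, 142)); /* 113 */
	return 0;
}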
807
808 static irqreturn_t ixgbevf_msix_other(int irq, void *data)
809 {
810 struct ixgbevf_adapter *adapter = data;
811 struct ixgbe_hw *hw = &adapter->hw;
812
813 hw->mac.get_link_status = 1;
814
815 if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
816 mod_timer(&adapter->watchdog_timer, jiffies);
817
818 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);
819
820 return IRQ_HANDLED;
821 }
822
823 /**
824 * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues)
825 * @irq: unused
826 * @data: pointer to our q_vector struct for this interrupt vector
827 **/
828 static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
829 {
830 struct ixgbevf_q_vector *q_vector = data;
831
832 /* EIAM disabled interrupts (on this vector) for us */
833 if (q_vector->rx.ring || q_vector->tx.ring)
834 napi_schedule(&q_vector->napi);
835
836 return IRQ_HANDLED;
837 }
838
839 static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
840 int r_idx)
841 {
842 struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
843
844 a->rx_ring[r_idx]->next = q_vector->rx.ring;
845 q_vector->rx.ring = a->rx_ring[r_idx];
846 q_vector->rx.count++;
847 }
848
849 static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
850 int t_idx)
851 {
852 struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
853
854 a->tx_ring[t_idx]->next = q_vector->tx.ring;
855 q_vector->tx.ring = a->tx_ring[t_idx];
856 q_vector->tx.count++;
857 }
858
859 /**
860 * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
861 * @adapter: board private structure to initialize
862 *
863 * This function maps descriptor rings to the queue-specific vectors
864 * we were allotted through the MSI-X enabling code. Ideally, we'd have
865 * one vector per ring/queue, but on a constrained vector budget, we
866 * group the rings as "efficiently" as possible. You would add new
867 * mapping configurations in here.
868 **/
869 static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
870 {
871 int q_vectors;
872 int v_start = 0;
873 int rxr_idx = 0, txr_idx = 0;
874 int rxr_remaining = adapter->num_rx_queues;
875 int txr_remaining = adapter->num_tx_queues;
876 int i, j;
877 int rqpv, tqpv;
878 int err = 0;
879
880 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
881
882 /*
883 * The ideal configuration...
884 * We have enough vectors to map one per queue.
885 */
886 if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
887 for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
888 map_vector_to_rxq(adapter, v_start, rxr_idx);
889
890 for (; txr_idx < txr_remaining; v_start++, txr_idx++)
891 map_vector_to_txq(adapter, v_start, txr_idx);
892 goto out;
893 }
894
895 /*
896 * If we don't have enough vectors for a 1-to-1
897 * mapping, we'll have to group them so there are
898 * multiple queues per vector.
899 */
900 /* Re-adjusting *qpv takes care of the remainder. */
901 for (i = v_start; i < q_vectors; i++) {
902 rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
903 for (j = 0; j < rqpv; j++) {
904 map_vector_to_rxq(adapter, i, rxr_idx);
905 rxr_idx++;
906 rxr_remaining--;
907 }
908 }
909 for (i = v_start; i < q_vectors; i++) {
910 tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
911 for (j = 0; j < tqpv; j++) {
912 map_vector_to_txq(adapter, i, txr_idx);
913 txr_idx++;
914 txr_remaining--;
915 }
916 }
917
918 out:
919 return err;
920 }
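
/* Illustrative sketch, not part of the driver: the DIV_ROUND_UP spreading
 * used by ixgbevf_map_rings_to_vectors() above when there are fewer vectors
 * than rings.  Re-computing the quotient against the vectors still left
 * hands out the remainder one extra ring at a time.  Standalone user-space C.
 */
#include <stdio.h>

#define DEMO_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int q_vectors = 3, remaining = 4;	/* e.g. 4 Rx rings, 3 vectors */
	int i;

	for (i = 0; i < q_vectors; i++) {
		int per_vector = DEMO_DIV_ROUND_UP(remaining, q_vectors - i);

		printf("vector %d gets %d ring(s)\n", i, per_vector);
		remaining -= per_vector;
	}
	/* prints 2, 1, 1 */
	return 0;
}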
921
922 /**
923 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
924 * @adapter: board private structure
925 *
926 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
927 * interrupts from the kernel.
928 **/
929 static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
930 {
931 struct net_device *netdev = adapter->netdev;
932 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
933 int vector, err;
934 int ri = 0, ti = 0;
935
936 for (vector = 0; vector < q_vectors; vector++) {
937 struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector];
938 struct msix_entry *entry = &adapter->msix_entries[vector];
939
940 if (q_vector->tx.ring && q_vector->rx.ring) {
941 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
942 "%s-%s-%d", netdev->name, "TxRx", ri++);
943 ti++;
944 } else if (q_vector->rx.ring) {
945 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
946 "%s-%s-%d", netdev->name, "rx", ri++);
947 } else if (q_vector->tx.ring) {
948 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
949 "%s-%s-%d", netdev->name, "tx", ti++);
950 } else {
951 /* skip this unused q_vector */
952 continue;
953 }
954 err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0,
955 q_vector->name, q_vector);
956 if (err) {
957 hw_dbg(&adapter->hw,
958 "request_irq failed for MSIX interrupt "
959 "Error: %d\n", err);
960 goto free_queue_irqs;
961 }
962 }
963
964 err = request_irq(adapter->msix_entries[vector].vector,
965 &ixgbevf_msix_other, 0, netdev->name, adapter);
966 if (err) {
967 hw_dbg(&adapter->hw,
968 "request_irq for msix_other failed: %d\n", err);
969 goto free_queue_irqs;
970 }
971
972 return 0;
973
974 free_queue_irqs:
975 while (vector) {
976 vector--;
977 free_irq(adapter->msix_entries[vector].vector,
978 adapter->q_vector[vector]);
979 }
980 /* This failure is non-recoverable - it indicates the system is
981 * out of MSIX vector resources and the VF driver cannot run
982 * without them. Set the number of msix vectors to zero
983 * indicating that not enough can be allocated. The error
984 * will be returned to the user indicating device open failed.
985 * Any further attempts to force the driver to open will also
986 * fail. The only way to recover is to unload the driver and
987 * reload it again. If the system has recovered some MSIX
988 * vectors then it may succeed.
989 */
990 adapter->num_msix_vectors = 0;
991 return err;
992 }
993
994 static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
995 {
996 int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
997
998 for (i = 0; i < q_vectors; i++) {
999 struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
1000 q_vector->rx.ring = NULL;
1001 q_vector->tx.ring = NULL;
1002 q_vector->rx.count = 0;
1003 q_vector->tx.count = 0;
1004 }
1005 }
1006
1007 /**
1008 * ixgbevf_request_irq - initialize interrupts
1009 * @adapter: board private structure
1010 *
1011 * Attempts to configure interrupts using the best available
1012 * capabilities of the hardware and kernel.
1013 **/
1014 static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
1015 {
1016 int err = 0;
1017
1018 err = ixgbevf_request_msix_irqs(adapter);
1019
1020 if (err)
1021 hw_dbg(&adapter->hw,
1022 "request_irq failed, Error %d\n", err);
1023
1024 return err;
1025 }
1026
1027 static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
1028 {
1029 int i, q_vectors;
1030
1031 q_vectors = adapter->num_msix_vectors;
1032 i = q_vectors - 1;
1033
1034 free_irq(adapter->msix_entries[i].vector, adapter);
1035 i--;
1036
1037 for (; i >= 0; i--) {
1038 /* free only the irqs that were actually requested */
1039 if (!adapter->q_vector[i]->rx.ring &&
1040 !adapter->q_vector[i]->tx.ring)
1041 continue;
1042
1043 free_irq(adapter->msix_entries[i].vector,
1044 adapter->q_vector[i]);
1045 }
1046
1047 ixgbevf_reset_q_vectors(adapter);
1048 }
1049
1050 /**
1051 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
1052 * @adapter: board private structure
1053 **/
1054 static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
1055 {
1056 struct ixgbe_hw *hw = &adapter->hw;
1057 int i;
1058
1059 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0);
1060 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
1061 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0);
1062
1063 IXGBE_WRITE_FLUSH(hw);
1064
1065 for (i = 0; i < adapter->num_msix_vectors; i++)
1066 synchronize_irq(adapter->msix_entries[i].vector);
1067 }
1068
1069 /**
1070 * ixgbevf_irq_enable - Enable default interrupt generation settings
1071 * @adapter: board private structure
1072 **/
1073 static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
1074 {
1075 struct ixgbe_hw *hw = &adapter->hw;
1076
1077 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask);
1078 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask);
1079 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
1080 }
1081
1082 /**
1083 * ixgbevf_configure_tx_ring - Configure 82599 VF Tx ring after Reset
1084 * @adapter: board private structure
1085 * @ring: structure containing ring specific data
1086 *
1087 * Configure the Tx descriptor ring after a reset.
1088 **/
1089 static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
1090 struct ixgbevf_ring *ring)
1091 {
1092 struct ixgbe_hw *hw = &adapter->hw;
1093 u64 tdba = ring->dma;
1094 int wait_loop = 10;
1095 u32 txdctl = IXGBE_TXDCTL_ENABLE;
1096 u8 reg_idx = ring->reg_idx;
1097
1098 /* disable queue to avoid issues while updating state */
1099 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
1100 IXGBE_WRITE_FLUSH(hw);
1101
1102 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(reg_idx), tdba & DMA_BIT_MASK(32));
1103 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(reg_idx), tdba >> 32);
1104 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(reg_idx),
1105 ring->count * sizeof(union ixgbe_adv_tx_desc));
1106
1107 /* disable head writeback */
1108 IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAH(reg_idx), 0);
1109 IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAL(reg_idx), 0);
1110
1111 /* enable relaxed ordering */
1112 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(reg_idx),
1113 (IXGBE_DCA_TXCTRL_DESC_RRO_EN |
1114 IXGBE_DCA_TXCTRL_DATA_RRO_EN));
1115
1116 /* reset head and tail pointers */
1117 IXGBE_WRITE_REG(hw, IXGBE_VFTDH(reg_idx), 0);
1118 IXGBE_WRITE_REG(hw, IXGBE_VFTDT(reg_idx), 0);
1119 ring->tail = hw->hw_addr + IXGBE_VFTDT(reg_idx);
1120
1121 /* reset ntu and ntc to place SW in sync with hardware */
1122 ring->next_to_clean = 0;
1123 ring->next_to_use = 0;
1124
1125 /* In order to avoid issues WTHRESH + PTHRESH should always be equal
1126 * to or less than the number of on chip descriptors, which is
1127 * currently 40.
1128 */
1129 txdctl |= (8 << 16); /* WTHRESH = 8 */
1130
1131 /* Setting PTHRESH to 32 both improves performance and avoids a TX hang with DFP enabled */
1132 txdctl |= (1 << 8) | /* HTHRESH = 1 */
1133 32; /* PTHRESH = 32 */
1134
1135 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl);
1136
1137 /* poll to verify queue is enabled */
1138 do {
1139 usleep_range(1000, 2000);
1140 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(reg_idx));
1141 } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
1142 if (!wait_loop)
1143 pr_err("Could not enable Tx Queue %d\n", reg_idx);
1144 }
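
/* Illustrative sketch, not part of the driver: the TXDCTL threshold packing
 * done in ixgbevf_configure_tx_ring() above.  WTHRESH sits at bits 16+,
 * HTHRESH at bits 8+ and PTHRESH in the low byte, so the register value is
 * ENABLE | (8 << 16) | (1 << 8) | 32.  Standalone user-space C; the enable
 * bit used below is only a placeholder for IXGBE_TXDCTL_ENABLE.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t enable = 1u << 25;	/* placeholder for IXGBE_TXDCTL_ENABLE */
	uint32_t txdctl = enable;

	txdctl |= 8u << 16;	/* WTHRESH = 8  */
	txdctl |= 1u << 8;	/* HTHRESH = 1  */
	txdctl |= 32u;		/* PTHRESH = 32 */

	printf("TXDCTL = 0x%08x\n", (unsigned int)txdctl);	/* 0x02080120 */
	return 0;
}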
1145
1146 /**
1147 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
1148 * @adapter: board private structure
1149 *
1150 * Configure the Tx unit of the MAC after a reset.
1151 **/
1152 static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
1153 {
1154 u32 i;
1155
1156 /* Setup the HW Tx Head and Tail descriptor pointers */
1157 for (i = 0; i < adapter->num_tx_queues; i++)
1158 ixgbevf_configure_tx_ring(adapter, adapter->tx_ring[i]);
1159 }
1160
1161 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
1162
1163 static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
1164 {
1165 struct ixgbevf_ring *rx_ring;
1166 struct ixgbe_hw *hw = &adapter->hw;
1167 u32 srrctl;
1168
1169 rx_ring = adapter->rx_ring[index];
1170
1171 srrctl = IXGBE_SRRCTL_DROP_EN;
1172
1173 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1174
1175 srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
1176 IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1177
1178 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
1179 }
1180
1181 static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter)
1182 {
1183 struct ixgbe_hw *hw = &adapter->hw;
1184
1185 /* PSRTYPE must be initialized in 82599 */
1186 u32 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
1187 IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
1188 IXGBE_PSRTYPE_L2HDR;
1189
1190 if (adapter->num_rx_queues > 1)
1191 psrtype |= 1 << 29;
1192
1193 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
1194 }
1195
1196 static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
1197 {
1198 struct ixgbe_hw *hw = &adapter->hw;
1199 struct net_device *netdev = adapter->netdev;
1200 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1201 int i;
1202 u16 rx_buf_len;
1203
1204 /* notify the PF of our intent to use this size of frame */
1205 ixgbevf_rlpml_set_vf(hw, max_frame);
1206
1207 /* PF will allow an extra 4 bytes past for vlan tagged frames */
1208 max_frame += VLAN_HLEN;
1209
1210 /*
1211 * Allocate buffer sizes that fit well into 32K and
1212 * take into account max frame size of 9.5K
1213 */
1214 if ((hw->mac.type == ixgbe_mac_X540_vf) &&
1215 (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE))
1216 rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1217 else if (max_frame <= IXGBEVF_RXBUFFER_2K)
1218 rx_buf_len = IXGBEVF_RXBUFFER_2K;
1219 else if (max_frame <= IXGBEVF_RXBUFFER_4K)
1220 rx_buf_len = IXGBEVF_RXBUFFER_4K;
1221 else if (max_frame <= IXGBEVF_RXBUFFER_8K)
1222 rx_buf_len = IXGBEVF_RXBUFFER_8K;
1223 else
1224 rx_buf_len = IXGBEVF_RXBUFFER_10K;
1225
1226 for (i = 0; i < adapter->num_rx_queues; i++)
1227 adapter->rx_ring[i]->rx_buf_len = rx_buf_len;
1228 }
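
/* Illustrative sketch, not part of the driver: the buffer-size ladder in
 * ixgbevf_set_rx_buffer_len() above, with the X540 small-buffer special case
 * omitted.  The numeric sizes are assumptions standing in for the
 * IXGBEVF_RXBUFFER_* defines; they only show how a max frame size maps onto
 * one of the fixed Rx buffer lengths.  Standalone user-space C.
 */
#include <stdio.h>

static int demo_pick_rx_buf_len(int max_frame)
{
	if (max_frame <= 2048)
		return 2048;		/* assumed IXGBEVF_RXBUFFER_2K  */
	if (max_frame <= 4096)
		return 4096;		/* assumed IXGBEVF_RXBUFFER_4K  */
	if (max_frame <= 8192)
		return 8192;		/* assumed IXGBEVF_RXBUFFER_8K  */
	return 10240;			/* assumed IXGBEVF_RXBUFFER_10K */
}

int main(void)
{
	/* max_frame = MTU + Ethernet header + FCS + VLAN allowance */
	printf("MTU 1500 -> %d byte buffers\n", demo_pick_rx_buf_len(1500 + 14 + 4 + 4));
	printf("MTU 9000 -> %d byte buffers\n", demo_pick_rx_buf_len(9000 + 14 + 4 + 4));
	return 0;
}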
1229
1230 #define IXGBEVF_MAX_RX_DESC_POLL 10
1231 static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
1232 struct ixgbevf_ring *ring)
1233 {
1234 struct ixgbe_hw *hw = &adapter->hw;
1235 int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
1236 u32 rxdctl;
1237 u8 reg_idx = ring->reg_idx;
1238
1239 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1240 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
1241
1242 /* write value back with RXDCTL.ENABLE bit cleared */
1243 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
1244
1245 /* the hardware may take up to 100us to really disable the rx queue */
1246 do {
1247 udelay(10);
1248 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1249 } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
1250
1251 if (!wait_loop)
1252 pr_err("RXDCTL.ENABLE queue %d not cleared while polling\n",
1253 reg_idx);
1254 }
1255
1256 static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
1257 struct ixgbevf_ring *ring)
1258 {
1259 struct ixgbe_hw *hw = &adapter->hw;
1260 int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
1261 u32 rxdctl;
1262 u8 reg_idx = ring->reg_idx;
1263
1264 do {
1265 usleep_range(1000, 2000);
1266 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1267 } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
1268
1269 if (!wait_loop)
1270 pr_err("RXDCTL.ENABLE queue %d not set while polling\n",
1271 reg_idx);
1272 }
1273
1274 static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
1275 struct ixgbevf_ring *ring)
1276 {
1277 struct ixgbe_hw *hw = &adapter->hw;
1278 u64 rdba = ring->dma;
1279 u32 rxdctl;
1280 u8 reg_idx = ring->reg_idx;
1281
1282 /* disable queue to avoid issues while updating state */
1283 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1284 ixgbevf_disable_rx_queue(adapter, ring);
1285
1286 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(reg_idx), rdba & DMA_BIT_MASK(32));
1287 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(reg_idx), rdba >> 32);
1288 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(reg_idx),
1289 ring->count * sizeof(union ixgbe_adv_rx_desc));
1290
1291 /* enable relaxed ordering */
1292 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
1293 IXGBE_DCA_RXCTRL_DESC_RRO_EN);
1294
1295 /* reset head and tail pointers */
1296 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0);
1297 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(reg_idx), 0);
1298 ring->tail = hw->hw_addr + IXGBE_VFRDT(reg_idx);
1299
1300 /* reset ntu and ntc to place SW in sync with hardware */
1301 ring->next_to_clean = 0;
1302 ring->next_to_use = 0;
1303
1304 ixgbevf_configure_srrctl(adapter, reg_idx);
1305
1306 /* prevent DMA from exceeding buffer space available */
1307 rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
1308 rxdctl |= ring->rx_buf_len | IXGBE_RXDCTL_RLPML_EN;
1309 rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
1310 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
1311
1312 ixgbevf_rx_desc_queue_enable(adapter, ring);
1313 ixgbevf_alloc_rx_buffers(ring, ixgbevf_desc_unused(ring));
1314 }
1315
1316 /**
1317 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
1318 * @adapter: board private structure
1319 *
1320 * Configure the Rx unit of the MAC after a reset.
1321 **/
1322 static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
1323 {
1324 int i;
1325
1326 ixgbevf_setup_psrtype(adapter);
1327
1328 /* set_rx_buffer_len must be called before ring initialization */
1329 ixgbevf_set_rx_buffer_len(adapter);
1330
1331 /* Setup the HW Rx Head and Tail Descriptor Pointers and
1332 * the Base and Length of the Rx Descriptor Ring */
1333 for (i = 0; i < adapter->num_rx_queues; i++)
1334 ixgbevf_configure_rx_ring(adapter, adapter->rx_ring[i]);
1335 }
1336
1337 static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
1338 __be16 proto, u16 vid)
1339 {
1340 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1341 struct ixgbe_hw *hw = &adapter->hw;
1342 int err;
1343
1344 spin_lock_bh(&adapter->mbx_lock);
1345
1346 /* add VID to filter table */
1347 err = hw->mac.ops.set_vfta(hw, vid, 0, true);
1348
1349 spin_unlock_bh(&adapter->mbx_lock);
1350
1351 /* translate error return types so error makes sense */
1352 if (err == IXGBE_ERR_MBX)
1353 return -EIO;
1354
1355 if (err == IXGBE_ERR_INVALID_ARGUMENT)
1356 return -EACCES;
1357
1358 set_bit(vid, adapter->active_vlans);
1359
1360 return err;
1361 }
1362
1363 static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev,
1364 __be16 proto, u16 vid)
1365 {
1366 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1367 struct ixgbe_hw *hw = &adapter->hw;
1368 int err = -EOPNOTSUPP;
1369
1370 spin_lock_bh(&adapter->mbx_lock);
1371
1372 /* remove VID from filter table */
1373 err = hw->mac.ops.set_vfta(hw, vid, 0, false);
1374
1375 spin_unlock_bh(&adapter->mbx_lock);
1376
1377 clear_bit(vid, adapter->active_vlans);
1378
1379 return err;
1380 }
1381
1382 static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
1383 {
1384 u16 vid;
1385
1386 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
1387 ixgbevf_vlan_rx_add_vid(adapter->netdev,
1388 htons(ETH_P_8021Q), vid);
1389 }
1390
1391 static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
1392 {
1393 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1394 struct ixgbe_hw *hw = &adapter->hw;
1395 int count = 0;
1396
1397 if ((netdev_uc_count(netdev)) > 10) {
1398 pr_err("Too many unicast filters - No Space\n");
1399 return -ENOSPC;
1400 }
1401
1402 if (!netdev_uc_empty(netdev)) {
1403 struct netdev_hw_addr *ha;
1404 netdev_for_each_uc_addr(ha, netdev) {
1405 hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
1406 udelay(200);
1407 }
1408 } else {
1409 /*
1410 * If the list is empty then send message to PF driver to
1411 * clear all macvlans on this VF.
1412 */
1413 hw->mac.ops.set_uc_addr(hw, 0, NULL);
1414 }
1415
1416 return count;
1417 }
1418
1419 /**
1420 * ixgbevf_set_rx_mode - Multicast and unicast set
1421 * @netdev: network interface device structure
1422 *
1423 * The set_rx_mode entry point is called whenever the multicast address
1424 * list, unicast address list or the network interface flags are updated.
1425 * This routine is responsible for configuring the hardware for proper
1426 * multicast mode and configuring requested unicast filters.
1427 **/
1428 static void ixgbevf_set_rx_mode(struct net_device *netdev)
1429 {
1430 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1431 struct ixgbe_hw *hw = &adapter->hw;
1432
1433 spin_lock_bh(&adapter->mbx_lock);
1434
1435 /* reprogram multicast list */
1436 hw->mac.ops.update_mc_addr_list(hw, netdev);
1437
1438 ixgbevf_write_uc_addr_list(netdev);
1439
1440 spin_unlock_bh(&adapter->mbx_lock);
1441 }
1442
1443 static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
1444 {
1445 int q_idx;
1446 struct ixgbevf_q_vector *q_vector;
1447 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1448
1449 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1450 q_vector = adapter->q_vector[q_idx];
1451 #ifdef CONFIG_NET_RX_BUSY_POLL
1452 ixgbevf_qv_init_lock(adapter->q_vector[q_idx]);
1453 #endif
1454 napi_enable(&q_vector->napi);
1455 }
1456 }
1457
1458 static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
1459 {
1460 int q_idx;
1461 struct ixgbevf_q_vector *q_vector;
1462 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1463
1464 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1465 q_vector = adapter->q_vector[q_idx];
1466 napi_disable(&q_vector->napi);
1467 #ifdef CONFIG_NET_RX_BUSY_POLL
1468 while (!ixgbevf_qv_disable(adapter->q_vector[q_idx])) {
1469 pr_info("QV %d locked\n", q_idx);
1470 usleep_range(1000, 20000);
1471 }
1472 #endif /* CONFIG_NET_RX_BUSY_POLL */
1473 }
1474 }
1475
1476 static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
1477 {
1478 struct ixgbe_hw *hw = &adapter->hw;
1479 unsigned int def_q = 0;
1480 unsigned int num_tcs = 0;
1481 unsigned int num_rx_queues = 1;
1482 int err;
1483
1484 spin_lock_bh(&adapter->mbx_lock);
1485
1486 /* fetch queue configuration from the PF */
1487 err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
1488
1489 spin_unlock_bh(&adapter->mbx_lock);
1490
1491 if (err)
1492 return err;
1493
1494 if (num_tcs > 1) {
1495 /* update default Tx ring register index */
1496 adapter->tx_ring[0]->reg_idx = def_q;
1497
1498 /* we need as many queues as traffic classes */
1499 num_rx_queues = num_tcs;
1500 }
1501
1502 /* if we have a bad config, abort and request a queue reset */
1503 if (adapter->num_rx_queues != num_rx_queues) {
1504 /* force mailbox timeout to prevent further messages */
1505 hw->mbx.timeout = 0;
1506
1507 /* wait for watchdog to come around and bail us out */
1508 adapter->flags |= IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;
1509 }
1510
1511 return 0;
1512 }
1513
1514 static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
1515 {
1516 ixgbevf_configure_dcb(adapter);
1517
1518 ixgbevf_set_rx_mode(adapter->netdev);
1519
1520 ixgbevf_restore_vlan(adapter);
1521
1522 ixgbevf_configure_tx(adapter);
1523 ixgbevf_configure_rx(adapter);
1524 }
1525
1526 static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
1527 {
1528 /* Only save pre-reset stats if there are some */
1529 if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
1530 adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
1531 adapter->stats.base_vfgprc;
1532 adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
1533 adapter->stats.base_vfgptc;
1534 adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
1535 adapter->stats.base_vfgorc;
1536 adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
1537 adapter->stats.base_vfgotc;
1538 adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
1539 adapter->stats.base_vfmprc;
1540 }
1541 }
1542
1543 static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
1544 {
1545 struct ixgbe_hw *hw = &adapter->hw;
1546
1547 adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
1548 adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
1549 adapter->stats.last_vfgorc |=
1550 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
1551 adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
1552 adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
1553 adapter->stats.last_vfgotc |=
1554 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
1555 adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
1556
1557 adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
1558 adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
1559 adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
1560 adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
1561 adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
1562 }
1563
1564 static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
1565 {
1566 struct ixgbe_hw *hw = &adapter->hw;
1567 int api[] = { ixgbe_mbox_api_11,
1568 ixgbe_mbox_api_10,
1569 ixgbe_mbox_api_unknown };
1570 int err = 0, idx = 0;
1571
1572 spin_lock_bh(&adapter->mbx_lock);
1573
1574 while (api[idx] != ixgbe_mbox_api_unknown) {
1575 err = ixgbevf_negotiate_api_version(hw, api[idx]);
1576 if (!err)
1577 break;
1578 idx++;
1579 }
1580
1581 spin_unlock_bh(&adapter->mbx_lock);
1582 }
1583
1584 static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
1585 {
1586 struct net_device *netdev = adapter->netdev;
1587 struct ixgbe_hw *hw = &adapter->hw;
1588
1589 ixgbevf_configure_msix(adapter);
1590
1591 spin_lock_bh(&adapter->mbx_lock);
1592
1593 if (is_valid_ether_addr(hw->mac.addr))
1594 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
1595 else
1596 hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
1597
1598 spin_unlock_bh(&adapter->mbx_lock);
1599
1600 clear_bit(__IXGBEVF_DOWN, &adapter->state);
1601 ixgbevf_napi_enable_all(adapter);
1602
1603 /* enable transmits */
1604 netif_tx_start_all_queues(netdev);
1605
1606 ixgbevf_save_reset_stats(adapter);
1607 ixgbevf_init_last_counter_stats(adapter);
1608
1609 hw->mac.get_link_status = 1;
1610 mod_timer(&adapter->watchdog_timer, jiffies);
1611 }
1612
1613 void ixgbevf_up(struct ixgbevf_adapter *adapter)
1614 {
1615 struct ixgbe_hw *hw = &adapter->hw;
1616
1617 ixgbevf_configure(adapter);
1618
1619 ixgbevf_up_complete(adapter);
1620
1621 /* clear any pending interrupts, may auto mask */
1622 IXGBE_READ_REG(hw, IXGBE_VTEICR);
1623
1624 ixgbevf_irq_enable(adapter);
1625 }
1626
1627 /**
1628 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
1629 * @rx_ring: ring to free buffers from
1630 **/
1631 static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
1632 {
1633 unsigned long size;
1634 unsigned int i;
1635
1636 if (!rx_ring->rx_buffer_info)
1637 return;
1638
1639 /* Free all the Rx ring sk_buffs */
1640 for (i = 0; i < rx_ring->count; i++) {
1641 struct ixgbevf_rx_buffer *rx_buffer_info;
1642
1643 rx_buffer_info = &rx_ring->rx_buffer_info[i];
1644 if (rx_buffer_info->dma) {
1645 dma_unmap_single(rx_ring->dev, rx_buffer_info->dma,
1646 rx_ring->rx_buf_len,
1647 DMA_FROM_DEVICE);
1648 rx_buffer_info->dma = 0;
1649 }
1650 if (rx_buffer_info->skb) {
1651 struct sk_buff *skb = rx_buffer_info->skb;
1652 rx_buffer_info->skb = NULL;
1653 do {
1654 struct sk_buff *this = skb;
1655 skb = IXGBE_CB(skb)->prev;
1656 dev_kfree_skb(this);
1657 } while (skb);
1658 }
1659 }
1660
1661 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
1662 memset(rx_ring->rx_buffer_info, 0, size);
1663
1664 /* Zero out the descriptor ring */
1665 memset(rx_ring->desc, 0, rx_ring->size);
1666 }
1667
1668 /**
1669 * ixgbevf_clean_tx_ring - Free Tx Buffers
1670 * @tx_ring: ring to be cleaned
1671 **/
1672 static void ixgbevf_clean_tx_ring(struct ixgbevf_ring *tx_ring)
1673 {
1674 struct ixgbevf_tx_buffer *tx_buffer_info;
1675 unsigned long size;
1676 unsigned int i;
1677
1678 if (!tx_ring->tx_buffer_info)
1679 return;
1680
1681 /* Free all the Tx ring sk_buffs */
1682 for (i = 0; i < tx_ring->count; i++) {
1683 tx_buffer_info = &tx_ring->tx_buffer_info[i];
1684 ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
1685 }
1686
1687 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
1688 memset(tx_ring->tx_buffer_info, 0, size);
1689
1690 memset(tx_ring->desc, 0, tx_ring->size);
1691 }
1692
1693 /**
1694 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
1695 * @adapter: board private structure
1696 **/
1697 static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
1698 {
1699 int i;
1700
1701 for (i = 0; i < adapter->num_rx_queues; i++)
1702 ixgbevf_clean_rx_ring(adapter->rx_ring[i]);
1703 }
1704
1705 /**
1706 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
1707 * @adapter: board private structure
1708 **/
1709 static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
1710 {
1711 int i;
1712
1713 for (i = 0; i < adapter->num_tx_queues; i++)
1714 ixgbevf_clean_tx_ring(adapter->tx_ring[i]);
1715 }
1716
1717 void ixgbevf_down(struct ixgbevf_adapter *adapter)
1718 {
1719 struct net_device *netdev = adapter->netdev;
1720 struct ixgbe_hw *hw = &adapter->hw;
1721 int i;
1722
1723 /* signal that we are down to the interrupt handler */
1724 set_bit(__IXGBEVF_DOWN, &adapter->state);
1725
1726 /* disable all enabled rx queues */
1727 for (i = 0; i < adapter->num_rx_queues; i++)
1728 ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]);
1729
1730 netif_tx_disable(netdev);
1731
1732 msleep(10);
1733
1734 netif_tx_stop_all_queues(netdev);
1735
1736 ixgbevf_irq_disable(adapter);
1737
1738 ixgbevf_napi_disable_all(adapter);
1739
1740 del_timer_sync(&adapter->watchdog_timer);
1741 /* can't call flush scheduled work here because it can deadlock
1742 * if linkwatch_event tries to acquire the rtnl_lock which we are
1743 * holding */
1744 while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK)
1745 msleep(1);
1746
1747 /* disable transmits in the hardware now that interrupts are off */
1748 for (i = 0; i < adapter->num_tx_queues; i++) {
1749 u8 reg_idx = adapter->tx_ring[i]->reg_idx;
1750
1751 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx),
1752 IXGBE_TXDCTL_SWFLSH);
1753 }
1754
1755 netif_carrier_off(netdev);
1756
1757 if (!pci_channel_offline(adapter->pdev))
1758 ixgbevf_reset(adapter);
1759
1760 ixgbevf_clean_all_tx_rings(adapter);
1761 ixgbevf_clean_all_rx_rings(adapter);
1762 }
1763
1764 void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
1765 {
1766 WARN_ON(in_interrupt());
1767
1768 while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
1769 msleep(1);
1770
1771 ixgbevf_down(adapter);
1772 ixgbevf_up(adapter);
1773
1774 clear_bit(__IXGBEVF_RESETTING, &adapter->state);
1775 }
1776
1777 void ixgbevf_reset(struct ixgbevf_adapter *adapter)
1778 {
1779 struct ixgbe_hw *hw = &adapter->hw;
1780 struct net_device *netdev = adapter->netdev;
1781
1782 if (hw->mac.ops.reset_hw(hw)) {
1783 hw_dbg(hw, "PF still resetting\n");
1784 } else {
1785 hw->mac.ops.init_hw(hw);
1786 ixgbevf_negotiate_api(adapter);
1787 }
1788
1789 if (is_valid_ether_addr(adapter->hw.mac.addr)) {
1790 memcpy(netdev->dev_addr, adapter->hw.mac.addr,
1791 netdev->addr_len);
1792 memcpy(netdev->perm_addr, adapter->hw.mac.addr,
1793 netdev->addr_len);
1794 }
1795 }
1796
1797 static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
1798 int vectors)
1799 {
1800 int err = 0;
1801 int vector_threshold;
1802
1803 /* We'll want at least 2 (vector_threshold):
1804 * 1) TxQ[0] + RxQ[0] handler
1805 * 2) Other (Link Status Change, etc.)
1806 */
1807 vector_threshold = MIN_MSIX_COUNT;
1808
1809 /* The more we get, the more we will assign to Tx/Rx Cleanup
1810 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
1811 * Right now, we simply care about how many we'll get; we'll
1812 * set them up later while requesting irq's.
1813 */
1814 while (vectors >= vector_threshold) {
1815 err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
1816 vectors);
1817 if (!err || err < 0) /* Success or a nasty failure. */
1818 break;
1819 else /* err == number of vectors we should try again with */
1820 vectors = err;
1821 }
1822
1823 if (vectors < vector_threshold)
1824 err = -ENOMEM;
1825
1826 if (err) {
1827 dev_err(&adapter->pdev->dev,
1828 "Unable to allocate MSI-X interrupts\n");
1829 kfree(adapter->msix_entries);
1830 adapter->msix_entries = NULL;
1831 } else {
1832 /*
1833 * Adjust for only the vectors we'll use, which is minimum
1834 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
1835 * vectors we were allocated.
1836 */
1837 adapter->num_msix_vectors = vectors;
1838 }
1839
1840 return err;
1841 }
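
/* Illustrative sketch, not part of the driver: the retry-loop shape used by
 * ixgbevf_acquire_msix_vectors() above.  A positive return from the (mocked)
 * allocator means "only this many vectors are available, try again", zero is
 * success and a negative value is a hard failure.  Standalone user-space C;
 * fake_enable_msix() is purely hypothetical.
 */
#include <stdio.h>

static int fake_enable_msix(int requested)
{
	return requested > 3 ? 3 : 0;	/* pretend only 3 vectors exist */
}

int main(void)
{
	int vectors = 5, threshold = 2, err = -1;

	while (vectors >= threshold) {
		err = fake_enable_msix(vectors);
		if (err <= 0)		/* success or a hard failure */
			break;
		vectors = err;		/* retry with what is available */
	}

	printf("err = %d, vectors = %d\n", err, vectors);	/* err = 0, vectors = 3 */
	return 0;
}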
1842
1843 /**
1844 * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
1845 * @adapter: board private structure to initialize
1846 *
1847 * This is the top level queue allocation routine. The order here is very
1848 * important, starting with the "most" number of features turned on at once,
1849 * and ending with the smallest set of features. This way large combinations
1850 * can be allocated if they're turned on, and smaller combinations are the
1851 * fallthrough conditions.
1852 *
1853 **/
1854 static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
1855 {
1856 struct ixgbe_hw *hw = &adapter->hw;
1857 unsigned int def_q = 0;
1858 unsigned int num_tcs = 0;
1859 int err;
1860
1861 /* Start with base case */
1862 adapter->num_rx_queues = 1;
1863 adapter->num_tx_queues = 1;
1864
1865 spin_lock_bh(&adapter->mbx_lock);
1866
1867 /* fetch queue configuration from the PF */
1868 err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
1869
1870 spin_unlock_bh(&adapter->mbx_lock);
1871
1872 if (err)
1873 return;
1874
1875 /* we need as many queues as traffic classes */
1876 if (num_tcs > 1)
1877 adapter->num_rx_queues = num_tcs;
1878 }
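/* Editorial note (not part of the original source): when the PF reports more
 * than one traffic class (DCB), the VF allocates one Rx queue per traffic
 * class, while the number of Tx queues stays at the base value of one set
 * above.
 */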
1879
1880 /**
1881 * ixgbevf_alloc_queues - Allocate memory for all rings
1882 * @adapter: board private structure to initialize
1883 *
1884  * We allocate one ring per queue at run-time since we don't know the
1885  * number of queues at compile-time.
1887 **/
1888 static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
1889 {
1890 struct ixgbevf_ring *ring;
1891 int rx = 0, tx = 0;
1892
1893 for (; tx < adapter->num_tx_queues; tx++) {
1894 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
1895 if (!ring)
1896 goto err_allocation;
1897
1898 ring->dev = &adapter->pdev->dev;
1899 ring->netdev = adapter->netdev;
1900 ring->count = adapter->tx_ring_count;
1901 ring->queue_index = tx;
1902 ring->reg_idx = tx;
1903
1904 adapter->tx_ring[tx] = ring;
1905 }
1906
1907 for (; rx < adapter->num_rx_queues; rx++) {
1908 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
1909 if (!ring)
1910 goto err_allocation;
1911
1912 ring->dev = &adapter->pdev->dev;
1913 ring->netdev = adapter->netdev;
1914
1915 ring->count = adapter->rx_ring_count;
1916 ring->queue_index = rx;
1917 ring->reg_idx = rx;
1918
1919 adapter->rx_ring[rx] = ring;
1920 }
1921
1922 return 0;
1923
1924 err_allocation:
1925 while (tx) {
1926 kfree(adapter->tx_ring[--tx]);
1927 adapter->tx_ring[tx] = NULL;
1928 }
1929
1930 while (rx) {
1931 kfree(adapter->rx_ring[--rx]);
1932 adapter->rx_ring[rx] = NULL;
1933 }
1934 return -ENOMEM;
1935 }
1936
1937 /**
1938 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
1939 * @adapter: board private structure to initialize
1940 *
1941 * Attempt to configure the interrupts using the best available
1942 * capabilities of the hardware and the kernel.
1943 **/
1944 static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
1945 {
1946 struct net_device *netdev = adapter->netdev;
1947 int err = 0;
1948 int vector, v_budget;
1949
1950 /*
1951 * It's easy to be greedy for MSI-X vectors, but it really
1952 * doesn't do us much good if we have a lot more vectors
1953 * than CPU's. So let's be conservative and only ask for
1954  * than CPUs. So let's be conservative and only ask for
1955  * (roughly) the same number of vectors as there are CPUs.
1956 */
1957 v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
1958 v_budget = min_t(int, v_budget, num_online_cpus());
1959 v_budget += NON_Q_VECTORS;
1960
1961 	/* A failure in MSI-X entry allocation is fatal for the VF, since
1962 	 * the device has no fallback to legacy or MSI interrupts. */
1963 adapter->msix_entries = kcalloc(v_budget,
1964 sizeof(struct msix_entry), GFP_KERNEL);
1965 if (!adapter->msix_entries) {
1966 err = -ENOMEM;
1967 goto out;
1968 }
1969
1970 for (vector = 0; vector < v_budget; vector++)
1971 adapter->msix_entries[vector].entry = vector;
1972
1973 err = ixgbevf_acquire_msix_vectors(adapter, v_budget);
1974 if (err)
1975 goto out;
1976
1977 err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
1978 if (err)
1979 goto out;
1980
1981 err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
1982
1983 out:
1984 return err;
1985 }
1986
1987 /**
1988 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
1989 * @adapter: board private structure to initialize
1990 *
1991 * We allocate one q_vector per queue interrupt. If allocation fails we
1992 * return -ENOMEM.
1993 **/
1994 static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
1995 {
1996 int q_idx, num_q_vectors;
1997 struct ixgbevf_q_vector *q_vector;
1998
1999 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2000
2001 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
2002 q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
2003 if (!q_vector)
2004 goto err_out;
2005 q_vector->adapter = adapter;
2006 q_vector->v_idx = q_idx;
2007 netif_napi_add(adapter->netdev, &q_vector->napi,
2008 ixgbevf_poll, 64);
2009 #ifdef CONFIG_NET_RX_BUSY_POLL
2010 napi_hash_add(&q_vector->napi);
2011 #endif
2012 adapter->q_vector[q_idx] = q_vector;
2013 }
2014
2015 return 0;
2016
2017 err_out:
2018 while (q_idx) {
2019 q_idx--;
2020 q_vector = adapter->q_vector[q_idx];
2021 #ifdef CONFIG_NET_RX_BUSY_POLL
2022 napi_hash_del(&q_vector->napi);
2023 #endif
2024 netif_napi_del(&q_vector->napi);
2025 kfree(q_vector);
2026 adapter->q_vector[q_idx] = NULL;
2027 }
2028 return -ENOMEM;
2029 }
2030
2031 /**
2032 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
2033 * @adapter: board private structure to initialize
2034 *
2035 * This function frees the memory allocated to the q_vectors. In addition if
2036 * NAPI is enabled it will delete any references to the NAPI struct prior
2037 * to freeing the q_vector.
2038 **/
2039 static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
2040 {
2041 int q_idx, num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2042
2043 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
2044 struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];
2045
2046 adapter->q_vector[q_idx] = NULL;
2047 #ifdef CONFIG_NET_RX_BUSY_POLL
2048 napi_hash_del(&q_vector->napi);
2049 #endif
2050 netif_napi_del(&q_vector->napi);
2051 kfree(q_vector);
2052 }
2053 }
2054
2055 /**
2056 * ixgbevf_reset_interrupt_capability - Reset MSIX setup
2057 * @adapter: board private structure
2058 *
2059 **/
2060 static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
2061 {
2062 pci_disable_msix(adapter->pdev);
2063 kfree(adapter->msix_entries);
2064 adapter->msix_entries = NULL;
2065 }
2066
2067 /**
2068 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
2069 * @adapter: board private structure to initialize
2070 *
2071 **/
2072 static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
2073 {
2074 int err;
2075
2076 /* Number of supported queues */
2077 ixgbevf_set_num_queues(adapter);
2078
2079 err = ixgbevf_set_interrupt_capability(adapter);
2080 if (err) {
2081 hw_dbg(&adapter->hw,
2082 "Unable to setup interrupt capabilities\n");
2083 goto err_set_interrupt;
2084 }
2085
2086 err = ixgbevf_alloc_q_vectors(adapter);
2087 if (err) {
2088 		hw_dbg(&adapter->hw,
2089 		       "Unable to allocate memory for queue vectors\n");
2090 goto err_alloc_q_vectors;
2091 }
2092
2093 err = ixgbevf_alloc_queues(adapter);
2094 if (err) {
2095 pr_err("Unable to allocate memory for queues\n");
2096 goto err_alloc_queues;
2097 }
2098
2099 	hw_dbg(&adapter->hw,
2100 	       "Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",
2101 	       (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
2102 	       adapter->num_rx_queues, adapter->num_tx_queues);
2103
2104 set_bit(__IXGBEVF_DOWN, &adapter->state);
2105
2106 return 0;
2107 err_alloc_queues:
2108 ixgbevf_free_q_vectors(adapter);
2109 err_alloc_q_vectors:
2110 ixgbevf_reset_interrupt_capability(adapter);
2111 err_set_interrupt:
2112 return err;
2113 }
2114
2115 /**
2116 * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings
2117 * @adapter: board private structure to clear interrupt scheme on
2118 *
2119 * We go through and clear interrupt specific resources and reset the structure
2120 * to pre-load conditions
2121 **/
2122 static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
2123 {
2124 int i;
2125
2126 for (i = 0; i < adapter->num_tx_queues; i++) {
2127 kfree(adapter->tx_ring[i]);
2128 adapter->tx_ring[i] = NULL;
2129 }
2130 for (i = 0; i < adapter->num_rx_queues; i++) {
2131 kfree(adapter->rx_ring[i]);
2132 adapter->rx_ring[i] = NULL;
2133 }
2134
2135 adapter->num_tx_queues = 0;
2136 adapter->num_rx_queues = 0;
2137
2138 ixgbevf_free_q_vectors(adapter);
2139 ixgbevf_reset_interrupt_capability(adapter);
2140 }
2141
2142 /**
2143 * ixgbevf_sw_init - Initialize general software structures
2144 * (struct ixgbevf_adapter)
2145 * @adapter: board private structure to initialize
2146 *
2147 * ixgbevf_sw_init initializes the Adapter private data structure.
2148 * Fields are initialized based on PCI device information and
2149 * OS network device settings (MTU size).
2150 **/
2151 static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
2152 {
2153 struct ixgbe_hw *hw = &adapter->hw;
2154 struct pci_dev *pdev = adapter->pdev;
2155 struct net_device *netdev = adapter->netdev;
2156 int err;
2157
2158 /* PCI config space info */
2159
2160 hw->vendor_id = pdev->vendor;
2161 hw->device_id = pdev->device;
2162 hw->revision_id = pdev->revision;
2163 hw->subsystem_vendor_id = pdev->subsystem_vendor;
2164 hw->subsystem_device_id = pdev->subsystem_device;
2165
2166 hw->mbx.ops.init_params(hw);
2167
2168 /* assume legacy case in which PF would only give VF 2 queues */
2169 hw->mac.max_tx_queues = 2;
2170 hw->mac.max_rx_queues = 2;
2171
2172 /* lock to protect mailbox accesses */
2173 spin_lock_init(&adapter->mbx_lock);
2174
2175 err = hw->mac.ops.reset_hw(hw);
2176 if (err) {
2177 dev_info(&pdev->dev,
2178 "PF still in reset state. Is the PF interface up?\n");
2179 } else {
2180 err = hw->mac.ops.init_hw(hw);
2181 if (err) {
2182 			pr_err("init_hw failed: %d\n", err);
2183 goto out;
2184 }
2185 ixgbevf_negotiate_api(adapter);
2186 err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
2187 if (err)
2188 dev_info(&pdev->dev, "Error reading MAC address\n");
2189 else if (is_zero_ether_addr(adapter->hw.mac.addr))
2190 dev_info(&pdev->dev,
2191 "MAC address not assigned by administrator.\n");
2192 memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
2193 }
2194
2195 if (!is_valid_ether_addr(netdev->dev_addr)) {
2196 dev_info(&pdev->dev, "Assigning random MAC address\n");
2197 eth_hw_addr_random(netdev);
2198 memcpy(hw->mac.addr, netdev->dev_addr, netdev->addr_len);
2199 }
2200
2201 /* Enable dynamic interrupt throttling rates */
2202 adapter->rx_itr_setting = 1;
2203 adapter->tx_itr_setting = 1;
2204
2205 /* set default ring sizes */
2206 adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
2207 adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;
2208
2209 set_bit(__IXGBEVF_DOWN, &adapter->state);
2210 return 0;
2211
2212 out:
2213 return err;
2214 }
2215
2216 #define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \
2217 { \
2218 u32 current_counter = IXGBE_READ_REG(hw, reg); \
2219 if (current_counter < last_counter) \
2220 counter += 0x100000000LL; \
2221 last_counter = current_counter; \
2222 counter &= 0xFFFFFFFF00000000LL; \
2223 counter |= current_counter; \
2224 }
2225
2226 #define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
2227 { \
2228 u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \
2229 u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \
2230 u64 current_counter = (current_counter_msb << 32) | \
2231 current_counter_lsb; \
2232 if (current_counter < last_counter) \
2233 counter += 0x1000000000LL; \
2234 last_counter = current_counter; \
2235 counter &= 0xFFFFFFF000000000LL; \
2236 counter |= current_counter; \
2237 }
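/* Editorial note (not part of the original source): the VF statistics
 * registers are only 32 or 36 bits wide and wrap silently, so the macros
 * above maintain a 64-bit software counter.  On a wrap (current < last)
 * the high-order bits are bumped by 2^32 or 2^36 before the fresh
 * low-order value is spliced back in.  A worked 32-bit example:
 *
 *	counter = 0x1FFFFFFF0, last_counter = 0xFFFFFFF0
 *	register now reads current_counter = 0x10  (wrapped)
 *	counter += 0x100000000          -> 0x2FFFFFFF0
 *	counter &= 0xFFFFFFFF00000000   -> 0x200000000
 *	counter |= current_counter      -> 0x200000010
 *
 * i.e. the 0x20 frames counted across the wrap are preserved.
 */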
2238 /**
2239 * ixgbevf_update_stats - Update the board statistics counters.
2240 * @adapter: board private structure
2241 **/
2242 void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
2243 {
2244 struct ixgbe_hw *hw = &adapter->hw;
2245 int i;
2246
2247 if (!adapter->link_up)
2248 return;
2249
2250 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
2251 adapter->stats.vfgprc);
2252 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
2253 adapter->stats.vfgptc);
2254 UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
2255 adapter->stats.last_vfgorc,
2256 adapter->stats.vfgorc);
2257 UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
2258 adapter->stats.last_vfgotc,
2259 adapter->stats.vfgotc);
2260 UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
2261 adapter->stats.vfmprc);
2262
2263 for (i = 0; i < adapter->num_rx_queues; i++) {
2264 adapter->hw_csum_rx_error +=
2265 adapter->rx_ring[i]->hw_csum_rx_error;
2266 adapter->hw_csum_rx_good +=
2267 adapter->rx_ring[i]->hw_csum_rx_good;
2268 adapter->rx_ring[i]->hw_csum_rx_error = 0;
2269 adapter->rx_ring[i]->hw_csum_rx_good = 0;
2270 }
2271 }
2272
2273 /**
2274 * ixgbevf_watchdog - Timer Call-back
2275 * @data: pointer to adapter cast into an unsigned long
2276 **/
2277 static void ixgbevf_watchdog(unsigned long data)
2278 {
2279 struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
2280 struct ixgbe_hw *hw = &adapter->hw;
2281 u32 eics = 0;
2282 int i;
2283
2284 /*
2285 * Do the watchdog outside of interrupt context due to the lovely
2286 * delays that some of the newer hardware requires
2287 */
2288
2289 if (test_bit(__IXGBEVF_DOWN, &adapter->state))
2290 goto watchdog_short_circuit;
2291
2292 /* get one bit for every active tx/rx interrupt vector */
2293 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
2294 struct ixgbevf_q_vector *qv = adapter->q_vector[i];
2295 if (qv->rx.ring || qv->tx.ring)
2296 eics |= 1 << i;
2297 }
2298
2299 IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics);
2300
2301 watchdog_short_circuit:
2302 schedule_work(&adapter->watchdog_task);
2303 }
2304
2305 /**
2306 * ixgbevf_tx_timeout - Respond to a Tx Hang
2307 * @netdev: network interface device structure
2308 **/
2309 static void ixgbevf_tx_timeout(struct net_device *netdev)
2310 {
2311 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2312
2313 /* Do the reset outside of interrupt context */
2314 schedule_work(&adapter->reset_task);
2315 }
2316
2317 static void ixgbevf_reset_task(struct work_struct *work)
2318 {
2319 struct ixgbevf_adapter *adapter;
2320 adapter = container_of(work, struct ixgbevf_adapter, reset_task);
2321
2322 /* If we're already down or resetting, just bail */
2323 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
2324 test_bit(__IXGBEVF_RESETTING, &adapter->state))
2325 return;
2326
2327 adapter->tx_timeout_count++;
2328
2329 ixgbevf_reinit_locked(adapter);
2330 }
2331
2332 /**
2333 * ixgbevf_watchdog_task - worker thread to bring link up
2334 * @work: pointer to work_struct containing our data
2335 **/
2336 static void ixgbevf_watchdog_task(struct work_struct *work)
2337 {
2338 struct ixgbevf_adapter *adapter = container_of(work,
2339 struct ixgbevf_adapter,
2340 watchdog_task);
2341 struct net_device *netdev = adapter->netdev;
2342 struct ixgbe_hw *hw = &adapter->hw;
2343 u32 link_speed = adapter->link_speed;
2344 bool link_up = adapter->link_up;
2345 s32 need_reset;
2346
2347 ixgbevf_queue_reset_subtask(adapter);
2348
2349 adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
2350
2351 /*
2352 * Always check the link on the watchdog because we have
2353 * no LSC interrupt
2354 */
2355 spin_lock_bh(&adapter->mbx_lock);
2356
2357 need_reset = hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
2358
2359 spin_unlock_bh(&adapter->mbx_lock);
2360
2361 if (need_reset) {
2362 adapter->link_up = link_up;
2363 adapter->link_speed = link_speed;
2364 netif_carrier_off(netdev);
2365 netif_tx_stop_all_queues(netdev);
2366 schedule_work(&adapter->reset_task);
2367 goto pf_has_reset;
2368 }
2369 adapter->link_up = link_up;
2370 adapter->link_speed = link_speed;
2371
2372 if (link_up) {
2373 if (!netif_carrier_ok(netdev)) {
2374 			const char *link_speed_string;
2375 switch (link_speed) {
2376 case IXGBE_LINK_SPEED_10GB_FULL:
2377 link_speed_string = "10 Gbps";
2378 break;
2379 case IXGBE_LINK_SPEED_1GB_FULL:
2380 link_speed_string = "1 Gbps";
2381 break;
2382 case IXGBE_LINK_SPEED_100_FULL:
2383 link_speed_string = "100 Mbps";
2384 break;
2385 default:
2386 link_speed_string = "unknown speed";
2387 break;
2388 }
2389 dev_info(&adapter->pdev->dev,
2390 "NIC Link is Up, %s\n", link_speed_string);
2391 netif_carrier_on(netdev);
2392 netif_tx_wake_all_queues(netdev);
2393 }
2394 } else {
2395 adapter->link_up = false;
2396 adapter->link_speed = 0;
2397 if (netif_carrier_ok(netdev)) {
2398 dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
2399 netif_carrier_off(netdev);
2400 netif_tx_stop_all_queues(netdev);
2401 }
2402 }
2403
2404 ixgbevf_update_stats(adapter);
2405
2406 pf_has_reset:
2407 /* Reset the timer */
2408 if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
2409 mod_timer(&adapter->watchdog_timer,
2410 round_jiffies(jiffies + (2 * HZ)));
2411
2412 adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
2413 }
2414
2415 /**
2416 * ixgbevf_free_tx_resources - Free Tx Resources per Queue
2417 * @tx_ring: Tx descriptor ring for a specific queue
2418 *
2419 * Free all transmit software resources
2420 **/
2421 void ixgbevf_free_tx_resources(struct ixgbevf_ring *tx_ring)
2422 {
2423 ixgbevf_clean_tx_ring(tx_ring);
2424
2425 vfree(tx_ring->tx_buffer_info);
2426 tx_ring->tx_buffer_info = NULL;
2427
2428 /* if not set, then don't free */
2429 if (!tx_ring->desc)
2430 return;
2431
2432 dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc,
2433 tx_ring->dma);
2434
2435 tx_ring->desc = NULL;
2436 }
2437
2438 /**
2439 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
2440 * @adapter: board private structure
2441 *
2442 * Free all transmit software resources
2443 **/
2444 static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
2445 {
2446 int i;
2447
2448 for (i = 0; i < adapter->num_tx_queues; i++)
2449 if (adapter->tx_ring[i]->desc)
2450 ixgbevf_free_tx_resources(adapter->tx_ring[i]);
2451 }
2452
2453 /**
2454 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
2455 * @tx_ring: tx descriptor ring (for a specific queue) to setup
2456 *
2457 * Return 0 on success, negative on failure
2458 **/
2459 int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring)
2460 {
2461 int size;
2462
2463 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
2464 tx_ring->tx_buffer_info = vzalloc(size);
2465 if (!tx_ring->tx_buffer_info)
2466 goto err;
2467
2468 /* round up to nearest 4K */
2469 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
2470 tx_ring->size = ALIGN(tx_ring->size, 4096);
2471
2472 tx_ring->desc = dma_alloc_coherent(tx_ring->dev, tx_ring->size,
2473 &tx_ring->dma, GFP_KERNEL);
2474 if (!tx_ring->desc)
2475 goto err;
2476
2477 return 0;
2478
2479 err:
2480 vfree(tx_ring->tx_buffer_info);
2481 tx_ring->tx_buffer_info = NULL;
2482 	dev_err(tx_ring->dev,
2483 		"Unable to allocate memory for the Tx descriptor ring\n");
2484 return -ENOMEM;
2485 }
2486
2487 /**
2488 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
2489 * @adapter: board private structure
2490 *
2491 * If this function returns with an error, then it's possible one or
2492 * more of the rings is populated (while the rest are not). It is the
2493  * caller's duty to clean those orphaned rings.
2494 *
2495 * Return 0 on success, negative on failure
2496 **/
2497 static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
2498 {
2499 int i, err = 0;
2500
2501 for (i = 0; i < adapter->num_tx_queues; i++) {
2502 err = ixgbevf_setup_tx_resources(adapter->tx_ring[i]);
2503 if (!err)
2504 continue;
2505 hw_dbg(&adapter->hw,
2506 "Allocation for Tx Queue %u failed\n", i);
2507 break;
2508 }
2509
2510 return err;
2511 }
2512
2513 /**
2514 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
2515 * @rx_ring: rx descriptor ring (for a specific queue) to setup
2516 *
2517 * Returns 0 on success, negative on failure
2518 **/
2519 int ixgbevf_setup_rx_resources(struct ixgbevf_ring *rx_ring)
2520 {
2521 int size;
2522
2523 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
2524 rx_ring->rx_buffer_info = vzalloc(size);
2525 if (!rx_ring->rx_buffer_info)
2526 goto err;
2527
2528 /* Round up to nearest 4K */
2529 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
2530 rx_ring->size = ALIGN(rx_ring->size, 4096);
2531
2532 rx_ring->desc = dma_alloc_coherent(rx_ring->dev, rx_ring->size,
2533 &rx_ring->dma, GFP_KERNEL);
2534
2535 if (!rx_ring->desc)
2536 goto err;
2537
2538 return 0;
2539 err:
2540 vfree(rx_ring->rx_buffer_info);
2541 rx_ring->rx_buffer_info = NULL;
2542 dev_err(rx_ring->dev, "Unable to allocate memory for the Rx descriptor ring\n");
2543 return -ENOMEM;
2544 }
2545
2546 /**
2547 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
2548 * @adapter: board private structure
2549 *
2550 * If this function returns with an error, then it's possible one or
2551 * more of the rings is populated (while the rest are not). It is the
2552  * caller's duty to clean those orphaned rings.
2553 *
2554 * Return 0 on success, negative on failure
2555 **/
2556 static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
2557 {
2558 int i, err = 0;
2559
2560 for (i = 0; i < adapter->num_rx_queues; i++) {
2561 err = ixgbevf_setup_rx_resources(adapter->rx_ring[i]);
2562 if (!err)
2563 continue;
2564 hw_dbg(&adapter->hw,
2565 "Allocation for Rx Queue %u failed\n", i);
2566 break;
2567 }
2568 return err;
2569 }
2570
2571 /**
2572 * ixgbevf_free_rx_resources - Free Rx Resources
2573 * @rx_ring: ring to clean the resources from
2574 *
2575 * Free all receive software resources
2576 **/
2577 void ixgbevf_free_rx_resources(struct ixgbevf_ring *rx_ring)
2578 {
2579 ixgbevf_clean_rx_ring(rx_ring);
2580
2581 vfree(rx_ring->rx_buffer_info);
2582 rx_ring->rx_buffer_info = NULL;
2583
2584 dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc,
2585 rx_ring->dma);
2586
2587 rx_ring->desc = NULL;
2588 }
2589
2590 /**
2591 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
2592 * @adapter: board private structure
2593 *
2594 * Free all receive software resources
2595 **/
2596 static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
2597 {
2598 int i;
2599
2600 for (i = 0; i < adapter->num_rx_queues; i++)
2601 if (adapter->rx_ring[i]->desc)
2602 ixgbevf_free_rx_resources(adapter->rx_ring[i]);
2603 }
2604
2605 /**
2606 * ixgbevf_open - Called when a network interface is made active
2607 * @netdev: network interface device structure
2608 *
2609 * Returns 0 on success, negative value on failure
2610 *
2611 * The open entry point is called when a network interface is made
2612 * active by the system (IFF_UP). At this point all resources needed
2613 * for transmit and receive operations are allocated, the interrupt
2614 * handler is registered with the OS, the watchdog timer is started,
2615 * and the stack is notified that the interface is ready.
2616 **/
2617 static int ixgbevf_open(struct net_device *netdev)
2618 {
2619 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2620 struct ixgbe_hw *hw = &adapter->hw;
2621 int err;
2622
2623 /* A previous failure to open the device because of a lack of
2624 * available MSIX vector resources may have reset the number
2625 * of msix vectors variable to zero. The only way to recover
2626 * is to unload/reload the driver and hope that the system has
2627 * been able to recover some MSIX vector resources.
2628 */
2629 if (!adapter->num_msix_vectors)
2630 return -ENOMEM;
2631
2632 /* disallow open during test */
2633 if (test_bit(__IXGBEVF_TESTING, &adapter->state))
2634 return -EBUSY;
2635
2636 if (hw->adapter_stopped) {
2637 ixgbevf_reset(adapter);
2638 /* if adapter is still stopped then PF isn't up and
2639 * the vf can't start. */
2640 if (hw->adapter_stopped) {
2641 err = IXGBE_ERR_MBX;
2642 			pr_err("Unable to start - perhaps the PF Driver isn't up yet\n");
2644 goto err_setup_reset;
2645 }
2646 }
2647
2648 /* allocate transmit descriptors */
2649 err = ixgbevf_setup_all_tx_resources(adapter);
2650 if (err)
2651 goto err_setup_tx;
2652
2653 /* allocate receive descriptors */
2654 err = ixgbevf_setup_all_rx_resources(adapter);
2655 if (err)
2656 goto err_setup_rx;
2657
2658 ixgbevf_configure(adapter);
2659
2660 /*
2661 * Map the Tx/Rx rings to the vectors we were allotted.
2662 	 * Since request_irq() is called from this function, the rings must
2663 	 * be mapped to vectors *before* up_complete.
2664 */
2665 ixgbevf_map_rings_to_vectors(adapter);
2666
2667 ixgbevf_up_complete(adapter);
2668
2669 /* clear any pending interrupts, may auto mask */
2670 IXGBE_READ_REG(hw, IXGBE_VTEICR);
2671 err = ixgbevf_request_irq(adapter);
2672 if (err)
2673 goto err_req_irq;
2674
2675 ixgbevf_irq_enable(adapter);
2676
2677 return 0;
2678
2679 err_req_irq:
2680 ixgbevf_down(adapter);
2681 err_setup_rx:
2682 ixgbevf_free_all_rx_resources(adapter);
2683 err_setup_tx:
2684 ixgbevf_free_all_tx_resources(adapter);
2685 ixgbevf_reset(adapter);
2686
2687 err_setup_reset:
2688
2689 return err;
2690 }
2691
2692 /**
2693 * ixgbevf_close - Disables a network interface
2694 * @netdev: network interface device structure
2695 *
2696 * Returns 0, this is not allowed to fail
2697 *
2698 * The close entry point is called when an interface is de-activated
2699 * by the OS. The hardware is still under the drivers control, but
2700 * needs to be disabled. A global MAC reset is issued to stop the
2701 * hardware, and all transmit and receive resources are freed.
2702 **/
2703 static int ixgbevf_close(struct net_device *netdev)
2704 {
2705 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2706
2707 ixgbevf_down(adapter);
2708 ixgbevf_free_irq(adapter);
2709
2710 ixgbevf_free_all_tx_resources(adapter);
2711 ixgbevf_free_all_rx_resources(adapter);
2712
2713 return 0;
2714 }
2715
2716 static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter)
2717 {
2718 struct net_device *dev = adapter->netdev;
2719
2720 if (!(adapter->flags & IXGBEVF_FLAG_QUEUE_RESET_REQUESTED))
2721 return;
2722
2723 adapter->flags &= ~IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;
2724
2725 /* if interface is down do nothing */
2726 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
2727 test_bit(__IXGBEVF_RESETTING, &adapter->state))
2728 return;
2729
2730 /* Hardware has to reinitialize queues and interrupts to
2731 * match packet buffer alignment. Unfortunately, the
2732 * hardware is not flexible enough to do this dynamically.
2733 */
2734 if (netif_running(dev))
2735 ixgbevf_close(dev);
2736
2737 ixgbevf_clear_interrupt_scheme(adapter);
2738 ixgbevf_init_interrupt_scheme(adapter);
2739
2740 if (netif_running(dev))
2741 ixgbevf_open(dev);
2742 }
2743
2744 static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
2745 u32 vlan_macip_lens, u32 type_tucmd,
2746 u32 mss_l4len_idx)
2747 {
2748 struct ixgbe_adv_tx_context_desc *context_desc;
2749 u16 i = tx_ring->next_to_use;
2750
2751 context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);
2752
2753 i++;
2754 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2755
2756 /* set bits to identify this as an advanced context descriptor */
2757 type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
2758
2759 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
2760 context_desc->seqnum_seed = 0;
2761 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
2762 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
2763 }
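/* Editorial note: an advanced context descriptor written above consumes a
 * regular slot in the Tx ring (next_to_use is advanced) but carries no
 * packet data -- only offload metadata such as the MAC/IP header lengths,
 * VLAN tag and TSO/checksum parameters.  This is why the transmit path
 * later budgets "+ 1 desc for context descriptor" when deciding whether
 * the ring has room for a frame.
 */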
2764
2765 static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
2766 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
2767 {
2768 u32 vlan_macip_lens, type_tucmd;
2769 u32 mss_l4len_idx, l4len;
2770
2771 if (!skb_is_gso(skb))
2772 return 0;
2773
2774 if (skb_header_cloned(skb)) {
2775 int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2776 if (err)
2777 return err;
2778 }
2779
2780 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
2781 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
2782
2783 if (skb->protocol == htons(ETH_P_IP)) {
2784 struct iphdr *iph = ip_hdr(skb);
2785 iph->tot_len = 0;
2786 iph->check = 0;
2787 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2788 iph->daddr, 0,
2789 IPPROTO_TCP,
2790 0);
2791 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
2792 } else if (skb_is_gso_v6(skb)) {
2793 ipv6_hdr(skb)->payload_len = 0;
2794 tcp_hdr(skb)->check =
2795 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2796 &ipv6_hdr(skb)->daddr,
2797 0, IPPROTO_TCP, 0);
2798 }
2799
2800 /* compute header lengths */
2801 l4len = tcp_hdrlen(skb);
2803 	*hdr_len = skb_transport_offset(skb) + l4len;
2804
2805 /* mss_l4len_id: use 1 as index for TSO */
2806 mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
2807 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
2808 mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
2809
2810 /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
2811 vlan_macip_lens = skb_network_header_len(skb);
2812 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
2813 vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
2814
2815 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
2816 type_tucmd, mss_l4len_idx);
2817
2818 return 1;
2819 }
2820
2821 static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
2822 struct sk_buff *skb, u32 tx_flags)
2823 {
2824 u32 vlan_macip_lens = 0;
2825 u32 mss_l4len_idx = 0;
2826 u32 type_tucmd = 0;
2827
2828 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2829 u8 l4_hdr = 0;
2830 switch (skb->protocol) {
2831 case __constant_htons(ETH_P_IP):
2832 vlan_macip_lens |= skb_network_header_len(skb);
2833 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
2834 l4_hdr = ip_hdr(skb)->protocol;
2835 break;
2836 case __constant_htons(ETH_P_IPV6):
2837 vlan_macip_lens |= skb_network_header_len(skb);
2838 l4_hdr = ipv6_hdr(skb)->nexthdr;
2839 break;
2840 default:
2841 if (unlikely(net_ratelimit())) {
2842 dev_warn(tx_ring->dev,
2843 "partial checksum but proto=%x!\n",
2844 skb->protocol);
2845 }
2846 break;
2847 }
2848
2849 switch (l4_hdr) {
2850 case IPPROTO_TCP:
2851 type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
2852 mss_l4len_idx = tcp_hdrlen(skb) <<
2853 IXGBE_ADVTXD_L4LEN_SHIFT;
2854 break;
2855 case IPPROTO_SCTP:
2856 type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
2857 mss_l4len_idx = sizeof(struct sctphdr) <<
2858 IXGBE_ADVTXD_L4LEN_SHIFT;
2859 break;
2860 case IPPROTO_UDP:
2861 mss_l4len_idx = sizeof(struct udphdr) <<
2862 IXGBE_ADVTXD_L4LEN_SHIFT;
2863 break;
2864 default:
2865 if (unlikely(net_ratelimit())) {
2866 dev_warn(tx_ring->dev,
2867 "partial checksum but l4 proto=%x!\n",
2868 l4_hdr);
2869 }
2870 break;
2871 }
2872 }
2873
2874 /* vlan_macip_lens: MACLEN, VLAN tag */
2875 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
2876 vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
2877
2878 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
2879 type_tucmd, mss_l4len_idx);
2880
2881 return (skb->ip_summed == CHECKSUM_PARTIAL);
2882 }
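/* Editorial note: ixgbevf_tx_csum() writes a context descriptor even when
 * no checksum offload was requested (the call above is outside the
 * CHECKSUM_PARTIAL branch).  ixgbevf_tx_queue() below always sets
 * IXGBE_ADVTXD_CC because, as its comment notes, the check context is
 * required whenever the Tx switch is enabled -- which is always the case
 * for traffic coming from a virtual function.
 */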
2883
2884 static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
2885 struct sk_buff *skb, u32 tx_flags)
2886 {
2887 struct ixgbevf_tx_buffer *tx_buffer_info;
2888 unsigned int len;
2889 unsigned int total = skb->len;
2890 unsigned int offset = 0, size;
2891 int count = 0;
2892 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
2893 unsigned int f;
2894 int i;
2895
2896 i = tx_ring->next_to_use;
2897
2898 len = min(skb_headlen(skb), total);
2899 while (len) {
2900 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2901 size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
2902
2903 tx_buffer_info->length = size;
2904 tx_buffer_info->mapped_as_page = false;
2905 tx_buffer_info->dma = dma_map_single(tx_ring->dev,
2906 skb->data + offset,
2907 size, DMA_TO_DEVICE);
2908 if (dma_mapping_error(tx_ring->dev, tx_buffer_info->dma))
2909 goto dma_error;
2910
2911 len -= size;
2912 total -= size;
2913 offset += size;
2914 count++;
2915 i++;
2916 if (i == tx_ring->count)
2917 i = 0;
2918 }
2919
2920 for (f = 0; f < nr_frags; f++) {
2921 const struct skb_frag_struct *frag;
2922
2923 frag = &skb_shinfo(skb)->frags[f];
2924 len = min((unsigned int)skb_frag_size(frag), total);
2925 offset = 0;
2926
2927 while (len) {
2928 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2929 size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
2930
2931 tx_buffer_info->length = size;
2932 tx_buffer_info->dma =
2933 skb_frag_dma_map(tx_ring->dev, frag,
2934 offset, size, DMA_TO_DEVICE);
2935 if (dma_mapping_error(tx_ring->dev,
2936 tx_buffer_info->dma))
2937 goto dma_error;
2938 tx_buffer_info->mapped_as_page = true;
2939
2940 len -= size;
2941 total -= size;
2942 offset += size;
2943 count++;
2944 i++;
2945 if (i == tx_ring->count)
2946 i = 0;
2947 }
2948 if (total == 0)
2949 break;
2950 }
2951
2952 if (i == 0)
2953 i = tx_ring->count - 1;
2954 else
2955 i = i - 1;
2956 tx_ring->tx_buffer_info[i].skb = skb;
2957
2958 return count;
2959
2960 dma_error:
2961 dev_err(tx_ring->dev, "TX DMA map failed\n");
2962
2963 /* clear timestamp and dma mappings for failed tx_buffer_info map */
2964 tx_buffer_info->dma = 0;
2965 count--;
2966
2967 /* clear timestamp and dma mappings for remaining portion of packet */
2968 while (count >= 0) {
2969 count--;
2970 i--;
2971 if (i < 0)
2972 i += tx_ring->count;
2973 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2974 ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
2975 }
2976
2977 return count;
2978 }
2979
2980 static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags,
2981 int count, unsigned int first, u32 paylen,
2982 u8 hdr_len)
2983 {
2984 union ixgbe_adv_tx_desc *tx_desc = NULL;
2985 struct ixgbevf_tx_buffer *tx_buffer_info;
2986 u32 olinfo_status = 0, cmd_type_len = 0;
2987 unsigned int i;
2988
2989 u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;
2990
2991 cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
2992
2993 cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
2994
2995 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
2996 cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
2997
2998 if (tx_flags & IXGBE_TX_FLAGS_CSUM)
2999 olinfo_status |= IXGBE_ADVTXD_POPTS_TXSM;
3000
3001 if (tx_flags & IXGBE_TX_FLAGS_TSO) {
3002 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
3003
3004 /* use index 1 context for tso */
3005 olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
3006 if (tx_flags & IXGBE_TX_FLAGS_IPV4)
3007 olinfo_status |= IXGBE_ADVTXD_POPTS_IXSM;
3008 }
3009
3010 /*
3011 * Check Context must be set if Tx switch is enabled, which it
3012 * always is for case where virtual functions are running
3013 */
3014 olinfo_status |= IXGBE_ADVTXD_CC;
3015
3016 olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
3017
3018 i = tx_ring->next_to_use;
3019 while (count--) {
3020 tx_buffer_info = &tx_ring->tx_buffer_info[i];
3021 tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
3022 tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
3023 tx_desc->read.cmd_type_len =
3024 cpu_to_le32(cmd_type_len | tx_buffer_info->length);
3025 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
3026 i++;
3027 if (i == tx_ring->count)
3028 i = 0;
3029 }
3030
3031 tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
3032
3033 tx_ring->tx_buffer_info[first].time_stamp = jiffies;
3034
3035 /* Force memory writes to complete before letting h/w
3036 * know there are new descriptors to fetch. (Only
3037 * applicable for weak-ordered memory model archs,
3038 * such as IA-64).
3039 */
3040 wmb();
3041
3042 tx_ring->tx_buffer_info[first].next_to_watch = tx_desc;
3043 tx_ring->next_to_use = i;
3044 }
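/* Editorial note: the wmb() above makes sure all descriptor writes are
 * globally visible before next_to_use is updated and the caller rings the
 * doorbell with writel(tx_ring->next_to_use, tx_ring->tail); otherwise a
 * weakly ordered CPU could let the device fetch descriptors that have not
 * been completely written yet.
 */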
3045
3046 static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
3047 {
3048 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
3049 /* Herbert's original patch had:
3050 * smp_mb__after_netif_stop_queue();
3051 * but since that doesn't exist yet, just open code it. */
3052 smp_mb();
3053
3054 /* We need to check again in a case another CPU has just
3055 * made room available. */
3056 if (likely(ixgbevf_desc_unused(tx_ring) < size))
3057 return -EBUSY;
3058
3059 /* A reprieve! - use start_queue because it doesn't call schedule */
3060 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
3061 ++tx_ring->tx_stats.restart_queue;
3062
3063 return 0;
3064 }
3065
3066 static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
3067 {
3068 if (likely(ixgbevf_desc_unused(tx_ring) >= size))
3069 return 0;
3070 return __ixgbevf_maybe_stop_tx(tx_ring, size);
3071 }
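/* Editorial note: the stop-then-recheck sequence in __ixgbevf_maybe_stop_tx()
 * closes a race with the Tx cleanup path running on another CPU: the queue
 * is stopped first, a full memory barrier is issued, and only then is the
 * free-descriptor count re-read.  If cleanup freed descriptors in the
 * meantime, the queue is woken immediately (counted in restart_queue)
 * instead of waiting for the next Tx completion interrupt.
 */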
3072
3073 static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3074 {
3075 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3076 struct ixgbevf_ring *tx_ring;
3077 unsigned int first;
3078 unsigned int tx_flags = 0;
3079 u8 hdr_len = 0;
3080 int r_idx = 0, tso;
3081 u16 count = TXD_USE_COUNT(skb_headlen(skb));
3082 #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
3083 unsigned short f;
3084 #endif
3085 u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL);
3086 if (!dst_mac || is_link_local_ether_addr(dst_mac)) {
3087 dev_kfree_skb(skb);
3088 return NETDEV_TX_OK;
3089 }
3090
3091 tx_ring = adapter->tx_ring[r_idx];
3092
3093 /*
3094 * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
3095 * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
3096 * + 2 desc gap to keep tail from touching head,
3097 * + 1 desc for context descriptor,
3098 * otherwise try next time
3099 */
3100 #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
3101 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
3102 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
3103 #else
3104 count += skb_shinfo(skb)->nr_frags;
3105 #endif
3106 if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
3107 tx_ring->tx_stats.tx_busy++;
3108 return NETDEV_TX_BUSY;
3109 }
3110
3111 if (vlan_tx_tag_present(skb)) {
3112 tx_flags |= vlan_tx_tag_get(skb);
3113 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
3114 tx_flags |= IXGBE_TX_FLAGS_VLAN;
3115 }
3116
3117 first = tx_ring->next_to_use;
3118
3119 if (skb->protocol == htons(ETH_P_IP))
3120 tx_flags |= IXGBE_TX_FLAGS_IPV4;
3121 tso = ixgbevf_tso(tx_ring, skb, tx_flags, &hdr_len);
3122 if (tso < 0) {
3123 dev_kfree_skb_any(skb);
3124 return NETDEV_TX_OK;
3125 }
3126
3127 if (tso)
3128 tx_flags |= IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_CSUM;
3129 else if (ixgbevf_tx_csum(tx_ring, skb, tx_flags))
3130 tx_flags |= IXGBE_TX_FLAGS_CSUM;
3131
3132 ixgbevf_tx_queue(tx_ring, tx_flags,
3133 ixgbevf_tx_map(tx_ring, skb, tx_flags),
3134 first, skb->len, hdr_len);
3135
3136 writel(tx_ring->next_to_use, tx_ring->tail);
3137
3138 ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);
3139
3140 return NETDEV_TX_OK;
3141 }
3142
3143 /**
3144 * ixgbevf_set_mac - Change the Ethernet Address of the NIC
3145 * @netdev: network interface device structure
3146 * @p: pointer to an address structure
3147 *
3148 * Returns 0 on success, negative on failure
3149 **/
3150 static int ixgbevf_set_mac(struct net_device *netdev, void *p)
3151 {
3152 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3153 struct ixgbe_hw *hw = &adapter->hw;
3154 struct sockaddr *addr = p;
3155
3156 if (!is_valid_ether_addr(addr->sa_data))
3157 return -EADDRNOTAVAIL;
3158
3159 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
3160 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
3161
3162 spin_lock_bh(&adapter->mbx_lock);
3163
3164 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
3165
3166 spin_unlock_bh(&adapter->mbx_lock);
3167
3168 return 0;
3169 }
3170
3171 /**
3172 * ixgbevf_change_mtu - Change the Maximum Transfer Unit
3173 * @netdev: network interface device structure
3174 * @new_mtu: new value for maximum frame size
3175 *
3176 * Returns 0 on success, negative on failure
3177 **/
3178 static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
3179 {
3180 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3181 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
3182 int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;
3183
3184 switch (adapter->hw.api_version) {
3185 case ixgbe_mbox_api_11:
3186 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
3187 break;
3188 default:
3189 if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
3190 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
3191 break;
3192 }
3193
3194 /* MTU < 68 is an error and causes problems on some kernels */
3195 if ((new_mtu < 68) || (max_frame > max_possible_frame))
3196 return -EINVAL;
3197
3198 hw_dbg(&adapter->hw, "changing MTU from %d to %d\n",
3199 netdev->mtu, new_mtu);
3200 /* must set new MTU before calling down or up */
3201 netdev->mtu = new_mtu;
3202
3203 if (netif_running(netdev))
3204 ixgbevf_reinit_locked(adapter);
3205
3206 return 0;
3207 }
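/* Editorial note: a quick worked example of the MTU check above, assuming
 * the usual definitions (ETH_HLEN = 14, ETH_FCS_LEN = 4, and
 * MAXIMUM_ETHERNET_VLAN_SIZE covering a VLAN-tagged frame plus FCS):
 * the default MTU of 1500 gives max_frame = 1500 + 14 + 4 = 1518, which
 * always fits.  Jumbo MTUs are only accepted when the PF negotiated
 * mailbox API 1.1 or the VF is an X540, in which case the limit is
 * IXGBE_MAX_JUMBO_FRAME_SIZE instead.
 */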
3208
3209 static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
3210 {
3211 struct net_device *netdev = pci_get_drvdata(pdev);
3212 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3213 #ifdef CONFIG_PM
3214 int retval = 0;
3215 #endif
3216
3217 netif_device_detach(netdev);
3218
3219 if (netif_running(netdev)) {
3220 rtnl_lock();
3221 ixgbevf_down(adapter);
3222 ixgbevf_free_irq(adapter);
3223 ixgbevf_free_all_tx_resources(adapter);
3224 ixgbevf_free_all_rx_resources(adapter);
3225 rtnl_unlock();
3226 }
3227
3228 ixgbevf_clear_interrupt_scheme(adapter);
3229
3230 #ifdef CONFIG_PM
3231 retval = pci_save_state(pdev);
3232 if (retval)
3233 return retval;
3234
3235 #endif
3236 pci_disable_device(pdev);
3237
3238 return 0;
3239 }
3240
3241 #ifdef CONFIG_PM
3242 static int ixgbevf_resume(struct pci_dev *pdev)
3243 {
3244 struct net_device *netdev = pci_get_drvdata(pdev);
3245 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3246 	int err;
3247
3248 pci_set_power_state(pdev, PCI_D0);
3249 pci_restore_state(pdev);
3250 /*
3251 * pci_restore_state clears dev->state_saved so call
3252 * pci_save_state to restore it.
3253 */
3254 pci_save_state(pdev);
3255
3256 err = pci_enable_device_mem(pdev);
3257 if (err) {
3258 dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
3259 return err;
3260 }
3261 pci_set_master(pdev);
3262
3263 ixgbevf_reset(adapter);
3264
3265 rtnl_lock();
3266 err = ixgbevf_init_interrupt_scheme(adapter);
3267 rtnl_unlock();
3268 if (err) {
3269 dev_err(&pdev->dev, "Cannot initialize interrupts\n");
3270 return err;
3271 }
3272
3273 if (netif_running(netdev)) {
3274 err = ixgbevf_open(netdev);
3275 if (err)
3276 return err;
3277 }
3278
3279 netif_device_attach(netdev);
3280
3281 return err;
3282 }
3283
3284 #endif /* CONFIG_PM */
3285 static void ixgbevf_shutdown(struct pci_dev *pdev)
3286 {
3287 ixgbevf_suspend(pdev, PMSG_SUSPEND);
3288 }
3289
3290 static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
3291 struct rtnl_link_stats64 *stats)
3292 {
3293 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3294 unsigned int start;
3295 u64 bytes, packets;
3296 const struct ixgbevf_ring *ring;
3297 int i;
3298
3299 ixgbevf_update_stats(adapter);
3300
3301 stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;
3302
3303 for (i = 0; i < adapter->num_rx_queues; i++) {
3304 ring = adapter->rx_ring[i];
3305 do {
3306 start = u64_stats_fetch_begin_bh(&ring->syncp);
3307 bytes = ring->stats.bytes;
3308 packets = ring->stats.packets;
3309 } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
3310 stats->rx_bytes += bytes;
3311 stats->rx_packets += packets;
3312 }
3313
3314 for (i = 0; i < adapter->num_tx_queues; i++) {
3315 ring = adapter->tx_ring[i];
3316 do {
3317 start = u64_stats_fetch_begin_bh(&ring->syncp);
3318 bytes = ring->stats.bytes;
3319 packets = ring->stats.packets;
3320 } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
3321 stats->tx_bytes += bytes;
3322 stats->tx_packets += packets;
3323 }
3324
3325 return stats;
3326 }
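/* Editorial note: the u64_stats_fetch_begin_bh()/_retry_bh() loops above
 * read each ring's 64-bit byte/packet counters without taking a lock.  The
 * datapath publishes updates through the ring's u64_stats_sync (a seqcount
 * on kernels where a 64-bit load is not atomic); if an update raced with
 * the copy, the reader retries, so bytes and packets always come from a
 * consistent snapshot.
 */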
3327
3328 static const struct net_device_ops ixgbevf_netdev_ops = {
3329 .ndo_open = ixgbevf_open,
3330 .ndo_stop = ixgbevf_close,
3331 .ndo_start_xmit = ixgbevf_xmit_frame,
3332 .ndo_set_rx_mode = ixgbevf_set_rx_mode,
3333 .ndo_get_stats64 = ixgbevf_get_stats,
3334 .ndo_validate_addr = eth_validate_addr,
3335 .ndo_set_mac_address = ixgbevf_set_mac,
3336 .ndo_change_mtu = ixgbevf_change_mtu,
3337 .ndo_tx_timeout = ixgbevf_tx_timeout,
3338 .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid,
3339 .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid,
3340 #ifdef CONFIG_NET_RX_BUSY_POLL
3341 .ndo_busy_poll = ixgbevf_busy_poll_recv,
3342 #endif
3343 };
3344
3345 static void ixgbevf_assign_netdev_ops(struct net_device *dev)
3346 {
3347 dev->netdev_ops = &ixgbevf_netdev_ops;
3348 ixgbevf_set_ethtool_ops(dev);
3349 dev->watchdog_timeo = 5 * HZ;
3350 }
3351
3352 /**
3353 * ixgbevf_probe - Device Initialization Routine
3354 * @pdev: PCI device information struct
3355 * @ent: entry in ixgbevf_pci_tbl
3356 *
3357 * Returns 0 on success, negative on failure
3358 *
3359 * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
3360 * The OS initialization, configuring of the adapter private structure,
3361 * and a hardware reset occur.
3362 **/
3363 static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3364 {
3365 struct net_device *netdev;
3366 struct ixgbevf_adapter *adapter = NULL;
3367 struct ixgbe_hw *hw = NULL;
3368 const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
3369 static int cards_found;
3370 int err, pci_using_dac;
3371
3372 err = pci_enable_device(pdev);
3373 if (err)
3374 return err;
3375
3376 if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
3377 pci_using_dac = 1;
3378 } else {
3379 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
3380 if (err) {
3381 			dev_err(&pdev->dev,
3382 				"No usable DMA configuration, aborting\n");
3383 goto err_dma;
3384 }
3385 pci_using_dac = 0;
3386 }
3387
3388 err = pci_request_regions(pdev, ixgbevf_driver_name);
3389 if (err) {
3390 dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
3391 goto err_pci_reg;
3392 }
3393
3394 pci_set_master(pdev);
3395
3396 netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
3397 MAX_TX_QUEUES);
3398 if (!netdev) {
3399 err = -ENOMEM;
3400 goto err_alloc_etherdev;
3401 }
3402
3403 SET_NETDEV_DEV(netdev, &pdev->dev);
3404
3405 pci_set_drvdata(pdev, netdev);
3406 adapter = netdev_priv(netdev);
3407
3408 adapter->netdev = netdev;
3409 adapter->pdev = pdev;
3410 hw = &adapter->hw;
3411 hw->back = adapter;
3412 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
3413
3414 /*
3415 * call save state here in standalone driver because it relies on
3416 * adapter struct to exist, and needs to call netdev_priv
3417 */
3418 pci_save_state(pdev);
3419
3420 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
3421 pci_resource_len(pdev, 0));
3422 if (!hw->hw_addr) {
3423 err = -EIO;
3424 goto err_ioremap;
3425 }
3426
3427 ixgbevf_assign_netdev_ops(netdev);
3428
3429 adapter->bd_number = cards_found;
3430
3431 /* Setup hw api */
3432 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
3433 hw->mac.type = ii->mac;
3434
3435 memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
3436 sizeof(struct ixgbe_mbx_operations));
3437
3438 /* setup the private structure */
3439 err = ixgbevf_sw_init(adapter);
3440 if (err)
3441 goto err_sw_init;
3442
3443 /* The HW MAC address was set and/or determined in sw_init */
3444 if (!is_valid_ether_addr(netdev->dev_addr)) {
3445 pr_err("invalid MAC address\n");
3446 err = -EIO;
3447 goto err_sw_init;
3448 }
3449
3450 netdev->hw_features = NETIF_F_SG |
3451 NETIF_F_IP_CSUM |
3452 NETIF_F_IPV6_CSUM |
3453 NETIF_F_TSO |
3454 NETIF_F_TSO6 |
3455 NETIF_F_RXCSUM;
3456
3457 netdev->features = netdev->hw_features |
3458 NETIF_F_HW_VLAN_CTAG_TX |
3459 NETIF_F_HW_VLAN_CTAG_RX |
3460 NETIF_F_HW_VLAN_CTAG_FILTER;
3461
3462 netdev->vlan_features |= NETIF_F_TSO;
3463 netdev->vlan_features |= NETIF_F_TSO6;
3464 netdev->vlan_features |= NETIF_F_IP_CSUM;
3465 netdev->vlan_features |= NETIF_F_IPV6_CSUM;
3466 netdev->vlan_features |= NETIF_F_SG;
3467
3468 if (pci_using_dac)
3469 netdev->features |= NETIF_F_HIGHDMA;
3470
3471 netdev->priv_flags |= IFF_UNICAST_FLT;
3472
3473 init_timer(&adapter->watchdog_timer);
3474 adapter->watchdog_timer.function = ixgbevf_watchdog;
3475 adapter->watchdog_timer.data = (unsigned long)adapter;
3476
3477 INIT_WORK(&adapter->reset_task, ixgbevf_reset_task);
3478 INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task);
3479
3480 err = ixgbevf_init_interrupt_scheme(adapter);
3481 if (err)
3482 goto err_sw_init;
3483
3484 strcpy(netdev->name, "eth%d");
3485
3486 err = register_netdev(netdev);
3487 if (err)
3488 goto err_register;
3489
3490 netif_carrier_off(netdev);
3491
3492 ixgbevf_init_last_counter_stats(adapter);
3493
3494 /* print the MAC address */
3495 hw_dbg(hw, "%pM\n", netdev->dev_addr);
3496
3497 hw_dbg(hw, "MAC: %d\n", hw->mac.type);
3498
3499 hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
3500 cards_found++;
3501 return 0;
3502
3503 err_register:
3504 ixgbevf_clear_interrupt_scheme(adapter);
3505 err_sw_init:
3506 ixgbevf_reset_interrupt_capability(adapter);
3507 iounmap(hw->hw_addr);
3508 err_ioremap:
3509 free_netdev(netdev);
3510 err_alloc_etherdev:
3511 pci_release_regions(pdev);
3512 err_pci_reg:
3513 err_dma:
3514 pci_disable_device(pdev);
3515 return err;
3516 }
3517
3518 /**
3519 * ixgbevf_remove - Device Removal Routine
3520 * @pdev: PCI device information struct
3521 *
3522 * ixgbevf_remove is called by the PCI subsystem to alert the driver
3523 * that it should release a PCI device. The could be caused by a
3524  * that it should release a PCI device. This could be caused by a
3525 * memory.
3526 **/
3527 static void ixgbevf_remove(struct pci_dev *pdev)
3528 {
3529 struct net_device *netdev = pci_get_drvdata(pdev);
3530 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3531
3532 set_bit(__IXGBEVF_DOWN, &adapter->state);
3533
3534 del_timer_sync(&adapter->watchdog_timer);
3535
3536 cancel_work_sync(&adapter->reset_task);
3537 cancel_work_sync(&adapter->watchdog_task);
3538
3539 if (netdev->reg_state == NETREG_REGISTERED)
3540 unregister_netdev(netdev);
3541
3542 ixgbevf_clear_interrupt_scheme(adapter);
3543 ixgbevf_reset_interrupt_capability(adapter);
3544
3545 iounmap(adapter->hw.hw_addr);
3546 pci_release_regions(pdev);
3547
3548 hw_dbg(&adapter->hw, "Remove complete\n");
3549
3550 free_netdev(netdev);
3551
3552 pci_disable_device(pdev);
3553 }
3554
3555 /**
3556 * ixgbevf_io_error_detected - called when PCI error is detected
3557 * @pdev: Pointer to PCI device
3558 * @state: The current pci connection state
3559 *
3560 * This function is called after a PCI bus error affecting
3561 * this device has been detected.
3562 */
3563 static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
3564 pci_channel_state_t state)
3565 {
3566 struct net_device *netdev = pci_get_drvdata(pdev);
3567 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3568
3569 netif_device_detach(netdev);
3570
3571 if (state == pci_channel_io_perm_failure)
3572 return PCI_ERS_RESULT_DISCONNECT;
3573
3574 if (netif_running(netdev))
3575 ixgbevf_down(adapter);
3576
3577 pci_disable_device(pdev);
3578
3579 	/* Request a slot reset. */
3580 return PCI_ERS_RESULT_NEED_RESET;
3581 }
3582
3583 /**
3584 * ixgbevf_io_slot_reset - called after the pci bus has been reset.
3585 * @pdev: Pointer to PCI device
3586 *
3587 * Restart the card from scratch, as if from a cold-boot. Implementation
3588 * resembles the first-half of the ixgbevf_resume routine.
3589 */
3590 static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
3591 {
3592 struct net_device *netdev = pci_get_drvdata(pdev);
3593 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3594
3595 if (pci_enable_device_mem(pdev)) {
3596 dev_err(&pdev->dev,
3597 "Cannot re-enable PCI device after reset.\n");
3598 return PCI_ERS_RESULT_DISCONNECT;
3599 }
3600
3601 pci_set_master(pdev);
3602
3603 ixgbevf_reset(adapter);
3604
3605 return PCI_ERS_RESULT_RECOVERED;
3606 }
3607
3608 /**
3609 * ixgbevf_io_resume - called when traffic can start flowing again.
3610 * @pdev: Pointer to PCI device
3611 *
3612 * This callback is called when the error recovery driver tells us that
3613  * it's OK to resume normal operation. Implementation resembles the
3614 * second-half of the ixgbevf_resume routine.
3615 */
3616 static void ixgbevf_io_resume(struct pci_dev *pdev)
3617 {
3618 struct net_device *netdev = pci_get_drvdata(pdev);
3619 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3620
3621 if (netif_running(netdev))
3622 ixgbevf_up(adapter);
3623
3624 netif_device_attach(netdev);
3625 }
3626
3627 /* PCI Error Recovery (ERS) */
3628 static const struct pci_error_handlers ixgbevf_err_handler = {
3629 .error_detected = ixgbevf_io_error_detected,
3630 .slot_reset = ixgbevf_io_slot_reset,
3631 .resume = ixgbevf_io_resume,
3632 };
3633
3634 static struct pci_driver ixgbevf_driver = {
3635 .name = ixgbevf_driver_name,
3636 .id_table = ixgbevf_pci_tbl,
3637 .probe = ixgbevf_probe,
3638 .remove = ixgbevf_remove,
3639 #ifdef CONFIG_PM
3640 /* Power Management Hooks */
3641 .suspend = ixgbevf_suspend,
3642 .resume = ixgbevf_resume,
3643 #endif
3644 .shutdown = ixgbevf_shutdown,
3645 .err_handler = &ixgbevf_err_handler
3646 };
3647
3648 /**
3649 * ixgbevf_init_module - Driver Registration Routine
3650 *
3651 * ixgbevf_init_module is the first routine called when the driver is
3652 * loaded. All it does is register with the PCI subsystem.
3653 **/
3654 static int __init ixgbevf_init_module(void)
3655 {
3656 int ret;
3657 pr_info("%s - version %s\n", ixgbevf_driver_string,
3658 ixgbevf_driver_version);
3659
3660 pr_info("%s\n", ixgbevf_copyright);
3661
3662 ret = pci_register_driver(&ixgbevf_driver);
3663 return ret;
3664 }
3665
3666 module_init(ixgbevf_init_module);
3667
3668 /**
3669 * ixgbevf_exit_module - Driver Exit Cleanup Routine
3670 *
3671 * ixgbevf_exit_module is called just before the driver is removed
3672 * from memory.
3673 **/
3674 static void __exit ixgbevf_exit_module(void)
3675 {
3676 pci_unregister_driver(&ixgbevf_driver);
3677 }
3678
3679 #ifdef DEBUG
3680 /**
3681 * ixgbevf_get_hw_dev_name - return device name string
3682 * used by hardware layer to print debugging information
3683 **/
3684 char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
3685 {
3686 struct ixgbevf_adapter *adapter = hw->back;
3687 return adapter->netdev->name;
3688 }
3689
3690 #endif
3691 module_exit(ixgbevf_exit_module);
3692
3693 /* ixgbevf_main.c */