ixgbe: move device pointer into the ring structure
drivers/net/ixgbe/ixgbe_main.c
/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2010 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/pkt_sched.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <scsi/fc/fc_fcoe.h>

#include "ixgbe.h"
#include "ixgbe_common.h"
#include "ixgbe_dcb_82599.h"
#include "ixgbe_sriov.h"

char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
    "Intel(R) 10 Gigabit PCI Express Network Driver";

#define DRV_VERSION "2.0.84-k2"
const char ixgbe_driver_version[] = DRV_VERSION;
static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation.";

static const struct ixgbe_info *ixgbe_info_tbl[] = {
    [board_82598] = &ixgbe_82598_info,
    [board_82599] = &ixgbe_82599_info,
};

/* ixgbe_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
    {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598),
     board_82598 },
    {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT),
     board_82598 },
    {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT),
     board_82598 },
    {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT),
     board_82598 },
    {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2),
     board_82598 },
    {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4),
     board_82598 },
    {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT),
     board_82598 },
    {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT),
     board_82598 },
    {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM),
     board_82598 },
    {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR),
     board_82598 },
    {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM),
     board_82598 },
    {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX),
     board_82598 },
    {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4),
     board_82599 },
    {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM),
     board_82599 },
    {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR),
     board_82599 },
    {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP),
     board_82599 },
    {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM),
     board_82599 },
    {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ),
     board_82599 },
    {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4),
     board_82599 },
    {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM),
     board_82599 },
    {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE),
     board_82599 },

    /* required last entry */
    {0, }
};
MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);

#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
                            void *p);
static struct notifier_block dca_notifier = {
    .notifier_call = ixgbe_notify_dca,
    .next = NULL,
    .priority = 0
};
#endif

#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs,
                 "Maximum number of virtual functions to allocate per physical function");
#endif /* CONFIG_PCI_IOV */

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_DEBUG_LEVEL_SHIFT 3

static inline void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
{
    struct ixgbe_hw *hw = &adapter->hw;
    u32 gcr;
    u32 gpie;
    u32 vmdctl;

#ifdef CONFIG_PCI_IOV
    /* disable iov and allow time for transactions to clear */
    pci_disable_sriov(adapter->pdev);
#endif

    /* turn off device IOV mode */
    gcr = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
    gcr &= ~(IXGBE_GCR_EXT_SRIOV);
    IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr);
    gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
    gpie &= ~IXGBE_GPIE_VTMODE_MASK;
    IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

    /* set default pool back to 0 */
    vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
    vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
    IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);

    /* take a breather then clean up driver data */
    msleep(100);

    kfree(adapter->vfinfo);
    adapter->vfinfo = NULL;

    adapter->num_vfs = 0;
    adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
}

struct ixgbe_reg_info {
    u32 ofs;
    char *name;
};

static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = {

    /* General Registers */
    {IXGBE_CTRL, "CTRL"},
    {IXGBE_STATUS, "STATUS"},
    {IXGBE_CTRL_EXT, "CTRL_EXT"},

    /* Interrupt Registers */
    {IXGBE_EICR, "EICR"},

    /* RX Registers */
    {IXGBE_SRRCTL(0), "SRRCTL"},
    {IXGBE_DCA_RXCTRL(0), "DRXCTL"},
    {IXGBE_RDLEN(0), "RDLEN"},
    {IXGBE_RDH(0), "RDH"},
    {IXGBE_RDT(0), "RDT"},
    {IXGBE_RXDCTL(0), "RXDCTL"},
    {IXGBE_RDBAL(0), "RDBAL"},
    {IXGBE_RDBAH(0), "RDBAH"},

    /* TX Registers */
    {IXGBE_TDBAL(0), "TDBAL"},
    {IXGBE_TDBAH(0), "TDBAH"},
    {IXGBE_TDLEN(0), "TDLEN"},
    {IXGBE_TDH(0), "TDH"},
    {IXGBE_TDT(0), "TDT"},
    {IXGBE_TXDCTL(0), "TXDCTL"},

    /* List Terminator */
    {}
};


/*
 * ixgbe_regdump - register printout routine
 */
static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo)
{
    int i = 0, j = 0;
    char rname[16];
    u32 regs[64];

    switch (reginfo->ofs) {
    case IXGBE_SRRCTL(0):
        for (i = 0; i < 64; i++)
            regs[i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
        break;
    case IXGBE_DCA_RXCTRL(0):
        for (i = 0; i < 64; i++)
            regs[i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
        break;
    case IXGBE_RDLEN(0):
        for (i = 0; i < 64; i++)
            regs[i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
        break;
    case IXGBE_RDH(0):
        for (i = 0; i < 64; i++)
            regs[i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
        break;
    case IXGBE_RDT(0):
        for (i = 0; i < 64; i++)
            regs[i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
        break;
    case IXGBE_RXDCTL(0):
        for (i = 0; i < 64; i++)
            regs[i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
        break;
    case IXGBE_RDBAL(0):
        for (i = 0; i < 64; i++)
            regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
        break;
    case IXGBE_RDBAH(0):
        for (i = 0; i < 64; i++)
            regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
        break;
    case IXGBE_TDBAL(0):
        for (i = 0; i < 64; i++)
            regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
        break;
    case IXGBE_TDBAH(0):
        for (i = 0; i < 64; i++)
            regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
        break;
    case IXGBE_TDLEN(0):
        for (i = 0; i < 64; i++)
            regs[i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
        break;
    case IXGBE_TDH(0):
        for (i = 0; i < 64; i++)
            regs[i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
        break;
    case IXGBE_TDT(0):
        for (i = 0; i < 64; i++)
            regs[i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
        break;
    case IXGBE_TXDCTL(0):
        for (i = 0; i < 64; i++)
            regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
        break;
    default:
        pr_info("%-15s %08x\n", reginfo->name,
                IXGBE_READ_REG(hw, reginfo->ofs));
        return;
    }

    for (i = 0; i < 8; i++) {
        snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i*8, i*8+7);
        pr_err("%-15s", rname);
        for (j = 0; j < 8; j++)
            pr_cont(" %08x", regs[i*8+j]);
        pr_cont("\n");
    }

}

/*
 * ixgbe_dump - Print registers, tx-rings and rx-rings
 */
static void ixgbe_dump(struct ixgbe_adapter *adapter)
{
    struct net_device *netdev = adapter->netdev;
    struct ixgbe_hw *hw = &adapter->hw;
    struct ixgbe_reg_info *reginfo;
    int n = 0;
    struct ixgbe_ring *tx_ring;
    struct ixgbe_tx_buffer *tx_buffer_info;
    union ixgbe_adv_tx_desc *tx_desc;
    struct my_u0 { u64 a; u64 b; } *u0;
    struct ixgbe_ring *rx_ring;
    union ixgbe_adv_rx_desc *rx_desc;
    struct ixgbe_rx_buffer *rx_buffer_info;
    u32 staterr;
    int i = 0;

    if (!netif_msg_hw(adapter))
        return;

    /* Print netdevice Info */
    if (netdev) {
        dev_info(&adapter->pdev->dev, "Net device Info\n");
        pr_info("Device Name state "
                "trans_start last_rx\n");
        pr_info("%-15s %016lX %016lX %016lX\n",
                netdev->name,
                netdev->state,
                netdev->trans_start,
                netdev->last_rx);
    }

    /* Print Registers */
    dev_info(&adapter->pdev->dev, "Register Dump\n");
    pr_info(" Register Name Value\n");
    for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl;
         reginfo->name; reginfo++) {
        ixgbe_regdump(hw, reginfo);
    }

    /* Print TX Ring Summary */
    if (!netdev || !netif_running(netdev))
        goto exit;

    dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
    pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n");
    for (n = 0; n < adapter->num_tx_queues; n++) {
        tx_ring = adapter->tx_ring[n];
        tx_buffer_info =
            &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
        pr_info(" %5d %5X %5X %016llX %04X %3X %016llX\n",
                n, tx_ring->next_to_use, tx_ring->next_to_clean,
                (u64)tx_buffer_info->dma,
                tx_buffer_info->length,
                tx_buffer_info->next_to_watch,
                (u64)tx_buffer_info->time_stamp);
    }

    /* Print TX Rings */
    if (!netif_msg_tx_done(adapter))
        goto rx_ring_summary;

    dev_info(&adapter->pdev->dev, "TX Rings Dump\n");

    /* Transmit Descriptor Formats
     *
     * Advanced Transmit Descriptor
     *   +--------------------------------------------------------------+
     * 0 |         Buffer Address [63:0]                                |
     *   +--------------------------------------------------------------+
     * 8 | PAYLEN | PORTS | IDX | STA | DCMD |DTYP | RSV | DTALEN       |
     *   +--------------------------------------------------------------+
     *   63     46 45   40 39 36 35 32 31  24 23 20 19                 0
     */

    for (n = 0; n < adapter->num_tx_queues; n++) {
        tx_ring = adapter->tx_ring[n];
        pr_info("------------------------------------\n");
        pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
        pr_info("------------------------------------\n");
        pr_info("T [desc] [address 63:0 ] "
                "[PlPOIdStDDt Ln] [bi->dma ] "
                "leng ntw timestamp bi->skb\n");

        for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
            tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
            tx_buffer_info = &tx_ring->tx_buffer_info[i];
            u0 = (struct my_u0 *)tx_desc;
            pr_info("T [0x%03X] %016llX %016llX %016llX"
                    " %04X %3X %016llX %p", i,
                    le64_to_cpu(u0->a),
                    le64_to_cpu(u0->b),
                    (u64)tx_buffer_info->dma,
                    tx_buffer_info->length,
                    tx_buffer_info->next_to_watch,
                    (u64)tx_buffer_info->time_stamp,
                    tx_buffer_info->skb);
            if (i == tx_ring->next_to_use &&
                i == tx_ring->next_to_clean)
                pr_cont(" NTC/U\n");
            else if (i == tx_ring->next_to_use)
                pr_cont(" NTU\n");
            else if (i == tx_ring->next_to_clean)
                pr_cont(" NTC\n");
            else
                pr_cont("\n");

            if (netif_msg_pktdata(adapter) &&
                tx_buffer_info->dma != 0)
                print_hex_dump(KERN_INFO, "",
                               DUMP_PREFIX_ADDRESS, 16, 1,
                               phys_to_virt(tx_buffer_info->dma),
                               tx_buffer_info->length, true);
        }
    }

    /* Print RX Rings Summary */
rx_ring_summary:
    dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
    pr_info("Queue [NTU] [NTC]\n");
    for (n = 0; n < adapter->num_rx_queues; n++) {
        rx_ring = adapter->rx_ring[n];
        pr_info("%5d %5X %5X\n",
                n, rx_ring->next_to_use, rx_ring->next_to_clean);
    }

    /* Print RX Rings */
    if (!netif_msg_rx_status(adapter))
        goto exit;

    dev_info(&adapter->pdev->dev, "RX Rings Dump\n");

    /* Advanced Receive Descriptor (Read) Format
     *    63                                           1        0
     *    +-----------------------------------------------------+
     *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
     *    +----------------------------------------------+------+
     *  8 |       Header Buffer Address [63:1]           |  DD  |
     *    +-----------------------------------------------------+
     *
     *
     * Advanced Receive Descriptor (Write-Back) Format
     *
     *   63     48 47   32 31 30     21 20 16 15    4 3     0
     *   +------------------------------------------------------+
     * 0 | Packet   IP     |SPH| HDR_LEN  | RSV|Packet| RSS      |
     *   | Checksum Ident  |   |          |    | Type | Type     |
     *   +------------------------------------------------------+
     * 8 | VLAN Tag | Length | Extended Error | Extended Status  |
     *   +------------------------------------------------------+
     *   63     48 47     32 31           20 19                 0
     */
    for (n = 0; n < adapter->num_rx_queues; n++) {
        rx_ring = adapter->rx_ring[n];
        pr_info("------------------------------------\n");
        pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
        pr_info("------------------------------------\n");
        pr_info("R [desc] [ PktBuf A0] "
                "[ HeadBuf DD] [bi->dma ] [bi->skb] "
                "<-- Adv Rx Read format\n");
        pr_info("RWB[desc] [PcsmIpSHl PtRs] "
                "[vl er S cks ln] ---------------- [bi->skb] "
                "<-- Adv Rx Write-Back format\n");

        for (i = 0; i < rx_ring->count; i++) {
            rx_buffer_info = &rx_ring->rx_buffer_info[i];
            rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
            u0 = (struct my_u0 *)rx_desc;
            staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
            if (staterr & IXGBE_RXD_STAT_DD) {
                /* Descriptor Done */
                pr_info("RWB[0x%03X] %016llX "
                        "%016llX ---------------- %p", i,
                        le64_to_cpu(u0->a),
                        le64_to_cpu(u0->b),
                        rx_buffer_info->skb);
            } else {
                pr_info("R [0x%03X] %016llX "
                        "%016llX %016llX %p", i,
                        le64_to_cpu(u0->a),
                        le64_to_cpu(u0->b),
                        (u64)rx_buffer_info->dma,
                        rx_buffer_info->skb);

                if (netif_msg_pktdata(adapter)) {
                    print_hex_dump(KERN_INFO, "",
                                   DUMP_PREFIX_ADDRESS, 16, 1,
                                   phys_to_virt(rx_buffer_info->dma),
                                   rx_ring->rx_buf_len, true);

                    if (rx_ring->rx_buf_len
                        < IXGBE_RXBUFFER_2048)
                        print_hex_dump(KERN_INFO, "",
                                       DUMP_PREFIX_ADDRESS, 16, 1,
                                       phys_to_virt(
                                           rx_buffer_info->page_dma +
                                           rx_buffer_info->page_offset
                                       ),
                                       PAGE_SIZE/2, true);
                }
            }

            if (i == rx_ring->next_to_use)
                pr_cont(" NTU\n");
            else if (i == rx_ring->next_to_clean)
                pr_cont(" NTC\n");
            else
                pr_cont("\n");

        }
    }

exit:
    return;
}

static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
{
    u32 ctrl_ext;

    /* Let firmware take over control of h/w */
    ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
    IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
                    ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
}

static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
{
    u32 ctrl_ext;

    /* Let firmware know the driver has taken over */
    ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
    IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
                    ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
}

/*
 * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 *
 */
static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
                           u8 queue, u8 msix_vector)
{
    u32 ivar, index;
    struct ixgbe_hw *hw = &adapter->hw;
    switch (hw->mac.type) {
    case ixgbe_mac_82598EB:
        msix_vector |= IXGBE_IVAR_ALLOC_VAL;
        if (direction == -1)
            direction = 0;
        index = (((direction * 64) + queue) >> 2) & 0x1F;
        ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
        ivar &= ~(0xFF << (8 * (queue & 0x3)));
        ivar |= (msix_vector << (8 * (queue & 0x3)));
        IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
        break;
    case ixgbe_mac_82599EB:
        if (direction == -1) {
            /* other causes */
            msix_vector |= IXGBE_IVAR_ALLOC_VAL;
            index = ((queue & 1) * 8);
            ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC);
            ivar &= ~(0xFF << index);
            ivar |= (msix_vector << index);
            IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar);
            break;
        } else {
            /* tx or rx causes */
            msix_vector |= IXGBE_IVAR_ALLOC_VAL;
            index = ((16 * (queue & 1)) + (8 * direction));
            ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
            ivar &= ~(0xFF << index);
            ivar |= (msix_vector << index);
            IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar);
            break;
        }
    default:
        break;
    }
}
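
/*
 * Worked example (illustrative, derived from the arithmetic above, not from
 * the datasheet): on 82598 each 32-bit IVAR register packs four 8-bit cause
 * entries, so mapping Rx queue 5 (direction 0) gives
 * index = ((0 * 64) + 5) >> 2 = 1 and byte slot 5 & 0x3 = 1, i.e. bits 15:8
 * of IVAR(1).  On 82599, Tx queue 5 (direction 1) lands in IVAR(5 >> 1) =
 * IVAR(2) at bit offset (16 * (5 & 1)) + (8 * 1) = 24.
 */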

static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
                                          u64 qmask)
{
    u32 mask;

    if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
        mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
    } else {
        mask = (qmask & 0xFFFFFFFF);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
        mask = (qmask >> 32);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
    }
}
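
/*
 * Example (illustrative): the 64-bit qmask is split across two 32-bit
 * registers, so setting bit 40 in qmask raises EICS_EX(1) bit 8 (40 - 32),
 * while bits 0-31 go to EICS_EX(0) unchanged; 82598 only honours the low
 * RTX_QUEUE bits in the single EICS register.
 */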

void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *tx_ring,
                                      struct ixgbe_tx_buffer *tx_buffer_info)
{
    if (tx_buffer_info->dma) {
        if (tx_buffer_info->mapped_as_page)
            dma_unmap_page(tx_ring->dev,
                           tx_buffer_info->dma,
                           tx_buffer_info->length,
                           DMA_TO_DEVICE);
        else
            dma_unmap_single(tx_ring->dev,
                             tx_buffer_info->dma,
                             tx_buffer_info->length,
                             DMA_TO_DEVICE);
        tx_buffer_info->dma = 0;
    }
    if (tx_buffer_info->skb) {
        dev_kfree_skb_any(tx_buffer_info->skb);
        tx_buffer_info->skb = NULL;
    }
    tx_buffer_info->time_stamp = 0;
    /* tx_buffer_info must be completely set up in the transmit path */
}

/**
 * ixgbe_tx_xon_state - check the tx ring xon state
 * @adapter: the ixgbe adapter
 * @tx_ring: the corresponding tx_ring
 *
 * If not in DCB mode, checks TFCS.TXOFF; otherwise, determines the TC
 * corresponding to this tx_ring and checks that TC's bit in TFCS.
 *
 * Returns: true if in xon state (currently not paused)
 */
static inline bool ixgbe_tx_xon_state(struct ixgbe_adapter *adapter,
                                      struct ixgbe_ring *tx_ring)
{
    u32 txoff = IXGBE_TFCS_TXOFF;

#ifdef CONFIG_IXGBE_DCB
    if (adapter->dcb_cfg.pfc_mode_enable) {
        int tc;
        int reg_idx = tx_ring->reg_idx;
        int dcb_i = adapter->ring_feature[RING_F_DCB].indices;

        switch (adapter->hw.mac.type) {
        case ixgbe_mac_82598EB:
            tc = reg_idx >> 2;
            txoff = IXGBE_TFCS_TXOFF0;
            break;
        case ixgbe_mac_82599EB:
            tc = 0;
            txoff = IXGBE_TFCS_TXOFF;
            if (dcb_i == 8) {
                /* TC0, TC1 */
                tc = reg_idx >> 5;
                if (tc == 2) /* TC2, TC3 */
                    tc += (reg_idx - 64) >> 4;
                else if (tc == 3) /* TC4, TC5, TC6, TC7 */
                    tc += 1 + ((reg_idx - 96) >> 3);
            } else if (dcb_i == 4) {
                /* TC0, TC1 */
                tc = reg_idx >> 6;
                if (tc == 1) {
                    tc += (reg_idx - 64) >> 5;
                    if (tc == 2) /* TC2, TC3 */
                        tc += (reg_idx - 96) >> 4;
                }
            }
            break;
        default:
            tc = 0;
        }
        txoff <<= tc;
    }
#endif
    return IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & txoff;
}

static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
                                       struct ixgbe_ring *tx_ring,
                                       unsigned int eop)
{
    struct ixgbe_hw *hw = &adapter->hw;

    /* Detect a transmit hang in hardware; this serializes the
     * check with the clearing of time_stamp and movement of eop */
    adapter->detect_tx_hung = false;
    if (tx_ring->tx_buffer_info[eop].time_stamp &&
        time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
        ixgbe_tx_xon_state(adapter, tx_ring)) {
        /* detected Tx unit hang */
        union ixgbe_adv_tx_desc *tx_desc;
        tx_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
        e_err(drv, "Detected Tx Unit Hang\n"
              "  Tx Queue             <%d>\n"
              "  TDH, TDT             <%x>, <%x>\n"
              "  next_to_use          <%x>\n"
              "  next_to_clean        <%x>\n"
              "tx_buffer_info[next_to_clean]\n"
              "  time_stamp           <%lx>\n"
              "  jiffies              <%lx>\n",
              tx_ring->queue_index,
              IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
              IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
              tx_ring->next_to_use, eop,
              tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
        return true;
    }

    return false;
}

#define IXGBE_MAX_TXD_PWR 14
#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
                          (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
                     MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
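
/*
 * Worked example (illustrative): with IXGBE_MAX_TXD_PWR = 14 each descriptor
 * covers at most 16384 bytes, so TXD_USE_COUNT(65536) = 4 while
 * TXD_USE_COUNT(65537) = 5 because the remainder adds one more descriptor.
 * DESC_NEEDED is therefore the worst case of one maximally sized skb->data
 * chunk, plus one descriptor per page-sized fragment, plus one spare for the
 * context descriptor.
 */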

static void ixgbe_tx_timeout(struct net_device *netdev);

/**
 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: structure containing interrupt and ring information
 * @tx_ring: tx ring to clean
 **/
static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
                               struct ixgbe_ring *tx_ring)
{
    struct ixgbe_adapter *adapter = q_vector->adapter;
    struct net_device *netdev = adapter->netdev;
    union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
    struct ixgbe_tx_buffer *tx_buffer_info;
    unsigned int i, eop, count = 0;
    unsigned int total_bytes = 0, total_packets = 0;

    i = tx_ring->next_to_clean;
    eop = tx_ring->tx_buffer_info[i].next_to_watch;
    eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);

    while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
           (count < tx_ring->work_limit)) {
        bool cleaned = false;
        rmb(); /* read buffer_info after eop_desc */
        for ( ; !cleaned; count++) {
            tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
            tx_buffer_info = &tx_ring->tx_buffer_info[i];

            tx_desc->wb.status = 0;
            cleaned = (i == eop);

            i++;
            if (i == tx_ring->count)
                i = 0;

            if (cleaned && tx_buffer_info->skb) {
                total_bytes += tx_buffer_info->bytecount;
                total_packets += tx_buffer_info->gso_segs;
            }

            ixgbe_unmap_and_free_tx_resource(tx_ring,
                                             tx_buffer_info);
        }

        eop = tx_ring->tx_buffer_info[i].next_to_watch;
        eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
    }

    tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
    if (unlikely(count && netif_carrier_ok(netdev) &&
                 (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
        /* Make sure that anybody stopping the queue after this
         * sees the new next_to_clean.
         */
        smp_mb();
        if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
            !test_bit(__IXGBE_DOWN, &adapter->state)) {
            netif_wake_subqueue(netdev, tx_ring->queue_index);
            ++tx_ring->restart_queue;
        }
    }

    if (adapter->detect_tx_hung) {
        if (ixgbe_check_tx_hang(adapter, tx_ring, i)) {
            /* schedule immediate reset if we believe we hung */
            e_info(probe, "tx hang %d detected, resetting "
                   "adapter\n", adapter->tx_timeout_count + 1);
            ixgbe_tx_timeout(adapter->netdev);
        }
    }

    /* re-arm the interrupt */
    if (count >= tx_ring->work_limit)
        ixgbe_irq_rearm_queues(adapter, ((u64)1 << q_vector->v_idx));

    tx_ring->total_bytes += total_bytes;
    tx_ring->total_packets += total_packets;
    u64_stats_update_begin(&tx_ring->syncp);
    tx_ring->stats.packets += total_packets;
    tx_ring->stats.bytes += total_bytes;
    u64_stats_update_end(&tx_ring->syncp);
    return count < tx_ring->work_limit;
}

#ifdef CONFIG_IXGBE_DCA
static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
                                struct ixgbe_ring *rx_ring)
{
    u32 rxctrl;
    int cpu = get_cpu();
    int q = rx_ring->reg_idx;

    if (rx_ring->cpu != cpu) {
        rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q));
        if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
            rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
            rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
        } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
            rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599;
            rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
                       IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599);
        }
        rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
        rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
        rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN);
        rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
                    IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl);
        rx_ring->cpu = cpu;
    }
    put_cpu();
}

static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
                                struct ixgbe_ring *tx_ring)
{
    u32 txctrl;
    int cpu = get_cpu();
    int q = tx_ring->reg_idx;
    struct ixgbe_hw *hw = &adapter->hw;

    if (tx_ring->cpu != cpu) {
        if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
            txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(q));
            txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
            txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
            txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
            IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(q), txctrl);
        } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
            txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(q));
            txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599;
            txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
                       IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
            txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
            IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(q), txctrl);
        }
        tx_ring->cpu = cpu;
    }
    put_cpu();
}

static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
{
    int i;

    if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
        return;

    /* always use CB2 mode, difference is masked in the CB driver */
    IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);

    for (i = 0; i < adapter->num_tx_queues; i++) {
        adapter->tx_ring[i]->cpu = -1;
        ixgbe_update_tx_dca(adapter, adapter->tx_ring[i]);
    }
    for (i = 0; i < adapter->num_rx_queues; i++) {
        adapter->rx_ring[i]->cpu = -1;
        ixgbe_update_rx_dca(adapter, adapter->rx_ring[i]);
    }
}

static int __ixgbe_notify_dca(struct device *dev, void *data)
{
    struct net_device *netdev = dev_get_drvdata(dev);
    struct ixgbe_adapter *adapter = netdev_priv(netdev);
    unsigned long event = *(unsigned long *)data;

    switch (event) {
    case DCA_PROVIDER_ADD:
        /* if we're already enabled, don't do it again */
        if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
            break;
        if (dca_add_requester(dev) == 0) {
            adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
            ixgbe_setup_dca(adapter);
            break;
        }
        /* Fall Through since DCA is disabled. */
    case DCA_PROVIDER_REMOVE:
        if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
            dca_remove_requester(dev);
            adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
            IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
        }
        break;
    }

    return 0;
}

#endif /* CONFIG_IXGBE_DCA */
/**
 * ixgbe_receive_skb - Send a completed packet up the stack
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 * @status: hardware indication of status of receive
 * @ring: rx descriptor ring (for a specific queue) the packet came from
 * @rx_desc: rx descriptor
 **/
static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector,
                              struct sk_buff *skb, u8 status,
                              struct ixgbe_ring *ring,
                              union ixgbe_adv_rx_desc *rx_desc)
{
    struct ixgbe_adapter *adapter = q_vector->adapter;
    struct napi_struct *napi = &q_vector->napi;
    bool is_vlan = (status & IXGBE_RXD_STAT_VP);
    u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);

    if (is_vlan && (tag & VLAN_VID_MASK))
        __vlan_hwaccel_put_tag(skb, tag);

    if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
        napi_gro_receive(napi, skb);
    else
        netif_rx(skb);
}

/**
 * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
 * @adapter: address of board private structure
 * @rx_desc: current Rx descriptor being processed
 * @skb: skb currently being received and modified
 **/
static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
                                     union ixgbe_adv_rx_desc *rx_desc,
                                     struct sk_buff *skb)
{
    u32 status_err = le32_to_cpu(rx_desc->wb.upper.status_error);

    skb_checksum_none_assert(skb);

    /* Rx csum disabled */
    if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
        return;

    /* if IP and error */
    if ((status_err & IXGBE_RXD_STAT_IPCS) &&
        (status_err & IXGBE_RXDADV_ERR_IPE)) {
        adapter->hw_csum_rx_error++;
        return;
    }

    if (!(status_err & IXGBE_RXD_STAT_L4CS))
        return;

    if (status_err & IXGBE_RXDADV_ERR_TCPE) {
        u16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;

        /*
         * 82599 errata, UDP frames with a 0 checksum can be marked as
         * checksum errors.
         */
        if ((pkt_info & IXGBE_RXDADV_PKTTYPE_UDP) &&
            (adapter->hw.mac.type == ixgbe_mac_82599EB))
            return;

        adapter->hw_csum_rx_error++;
        return;
    }

    /* It must be a TCP or UDP packet with a valid checksum */
    skb->ip_summed = CHECKSUM_UNNECESSARY;
}

static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val)
{
    /*
     * Force memory writes to complete before letting h/w
     * know there are new descriptors to fetch. (Only
     * applicable for weak-ordered memory model archs,
     * such as IA-64).
     */
    wmb();
    writel(val, rx_ring->tail);
}

/**
 * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 **/
void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
                            struct ixgbe_ring *rx_ring,
                            u16 cleaned_count)
{
    union ixgbe_adv_rx_desc *rx_desc;
    struct ixgbe_rx_buffer *bi;
    struct sk_buff *skb;
    u16 i = rx_ring->next_to_use;

    while (cleaned_count--) {
        rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
        bi = &rx_ring->rx_buffer_info[i];
        skb = bi->skb;

        if (!skb) {
            skb = netdev_alloc_skb_ip_align(adapter->netdev,
                                            rx_ring->rx_buf_len);
            if (!skb) {
                adapter->alloc_rx_buff_failed++;
                goto no_buffers;
            }
            /* initialize queue mapping */
            skb_record_rx_queue(skb, rx_ring->queue_index);
            bi->skb = skb;
        }

        if (!bi->dma) {
            bi->dma = dma_map_single(rx_ring->dev,
                                     skb->data,
                                     rx_ring->rx_buf_len,
                                     DMA_FROM_DEVICE);
            if (dma_mapping_error(rx_ring->dev, bi->dma)) {
                adapter->alloc_rx_buff_failed++;
                bi->dma = 0;
                goto no_buffers;
            }
        }

        if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
            if (!bi->page) {
                bi->page = netdev_alloc_page(adapter->netdev);
                if (!bi->page) {
                    adapter->alloc_rx_page_failed++;
                    goto no_buffers;
                }
            }

            if (!bi->page_dma) {
                /* use a half page if we're re-using */
                bi->page_offset ^= PAGE_SIZE / 2;
                bi->page_dma = dma_map_page(rx_ring->dev,
                                            bi->page,
                                            bi->page_offset,
                                            PAGE_SIZE / 2,
                                            DMA_FROM_DEVICE);
                if (dma_mapping_error(rx_ring->dev,
                                      bi->page_dma)) {
                    adapter->alloc_rx_page_failed++;
                    bi->page_dma = 0;
                    goto no_buffers;
                }
            }

            /* Refresh the desc even if buffer_addrs didn't change
             * because each write-back erases this info. */
            rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
            rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
        } else {
            rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
            rx_desc->read.hdr_addr = 0;
        }

        i++;
        if (i == rx_ring->count)
            i = 0;
    }

no_buffers:
    if (rx_ring->next_to_use != i) {
        rx_ring->next_to_use = i;
        ixgbe_release_rx_desc(rx_ring, i);
    }
}

static inline u16 ixgbe_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
{
    return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
}

static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
{
    return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
}

static inline u32 ixgbe_get_rsc_count(union ixgbe_adv_rx_desc *rx_desc)
{
    return (le32_to_cpu(rx_desc->wb.lower.lo_dword.data) &
            IXGBE_RXDADV_RSCCNT_MASK) >>
           IXGBE_RXDADV_RSCCNT_SHIFT;
}

/**
 * ixgbe_transform_rsc_queue - change rsc queue into a full packet
 * @skb: pointer to the last skb in the rsc queue
 * @count: pointer to number of packets coalesced in this context
 *
 * This function changes a queue full of hw rsc buffers into a completed
 * packet. It uses the ->prev pointers to find the first packet and then
 * turns it into the frag list owner.
 **/
static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb,
                                                        u64 *count)
{
    unsigned int frag_list_size = 0;

    while (skb->prev) {
        struct sk_buff *prev = skb->prev;
        frag_list_size += skb->len;
        skb->prev = NULL;
        skb = prev;
        *count += 1;
    }

    skb_shinfo(skb)->frag_list = skb->next;
    skb->next = NULL;
    skb->len += frag_list_size;
    skb->data_len += frag_list_size;
    skb->truesize += frag_list_size;
    return skb;
}
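
/*
 * Sketch (illustrative): hardware RSC chains arrive as skbs linked through
 * ->prev, newest first.  Walking ->prev rewinds to the first skb of the
 * chain, which then adopts the rest of the chain as its frag_list via
 * ->next, and len/data_len/truesize are grown by the bytes the trailing
 * skbs contributed, so the stack sees one coalesced packet.
 */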

struct ixgbe_rsc_cb {
    dma_addr_t dma;
    bool delay_unmap;
};

#define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb)

static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                               struct ixgbe_ring *rx_ring,
                               int *work_done, int work_to_do)
{
    struct ixgbe_adapter *adapter = q_vector->adapter;
    union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
    struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
    struct sk_buff *skb;
    unsigned int i, rsc_count = 0;
    u32 len, staterr;
    u16 hdr_info;
    bool cleaned = false;
    int cleaned_count = 0;
    unsigned int total_rx_bytes = 0, total_rx_packets = 0;
#ifdef IXGBE_FCOE
    int ddp_bytes = 0;
#endif /* IXGBE_FCOE */

    i = rx_ring->next_to_clean;
    rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
    staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
    rx_buffer_info = &rx_ring->rx_buffer_info[i];

    while (staterr & IXGBE_RXD_STAT_DD) {
        u32 upper_len = 0;
        if (*work_done >= work_to_do)
            break;
        (*work_done)++;

        rmb(); /* read descriptor and rx_buffer_info after status DD */
        if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
            hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
            len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
                  IXGBE_RXDADV_HDRBUFLEN_SHIFT;
            upper_len = le16_to_cpu(rx_desc->wb.upper.length);
            if ((len > IXGBE_RX_HDR_SIZE) ||
                (upper_len && !(hdr_info & IXGBE_RXDADV_SPH)))
                len = IXGBE_RX_HDR_SIZE;
        } else {
            len = le16_to_cpu(rx_desc->wb.upper.length);
        }

        cleaned = true;
        skb = rx_buffer_info->skb;
        prefetch(skb->data);
        rx_buffer_info->skb = NULL;

        if (rx_buffer_info->dma) {
            if ((adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
                (!(staterr & IXGBE_RXD_STAT_EOP)) &&
                (!(skb->prev))) {
                /*
                 * When HWRSC is enabled, delay unmapping
                 * of the first packet. It carries the
                 * header information, HW may still
                 * access the header after the writeback.
                 * Only unmap it when EOP is reached
                 */
                IXGBE_RSC_CB(skb)->delay_unmap = true;
                IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma;
            } else {
                dma_unmap_single(rx_ring->dev,
                                 rx_buffer_info->dma,
                                 rx_ring->rx_buf_len,
                                 DMA_FROM_DEVICE);
            }
            rx_buffer_info->dma = 0;
            skb_put(skb, len);
        }

        if (upper_len) {
            dma_unmap_page(rx_ring->dev,
                           rx_buffer_info->page_dma,
                           PAGE_SIZE / 2,
                           DMA_FROM_DEVICE);
            rx_buffer_info->page_dma = 0;
            skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
                               rx_buffer_info->page,
                               rx_buffer_info->page_offset,
                               upper_len);

            if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
                (page_count(rx_buffer_info->page) != 1))
                rx_buffer_info->page = NULL;
            else
                get_page(rx_buffer_info->page);

            skb->len += upper_len;
            skb->data_len += upper_len;
            skb->truesize += upper_len;
        }

        i++;
        if (i == rx_ring->count)
            i = 0;

        next_rxd = IXGBE_RX_DESC_ADV(rx_ring, i);
        prefetch(next_rxd);
        cleaned_count++;

        if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
            rsc_count = ixgbe_get_rsc_count(rx_desc);

        if (rsc_count) {
            u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
                        IXGBE_RXDADV_NEXTP_SHIFT;
            next_buffer = &rx_ring->rx_buffer_info[nextp];
        } else {
            next_buffer = &rx_ring->rx_buffer_info[i];
        }

        if (staterr & IXGBE_RXD_STAT_EOP) {
            if (skb->prev)
                skb = ixgbe_transform_rsc_queue(skb,
                                                &(rx_ring->rsc_count));
            if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
                if (IXGBE_RSC_CB(skb)->delay_unmap) {
                    dma_unmap_single(rx_ring->dev,
                                     IXGBE_RSC_CB(skb)->dma,
                                     rx_ring->rx_buf_len,
                                     DMA_FROM_DEVICE);
                    IXGBE_RSC_CB(skb)->dma = 0;
                    IXGBE_RSC_CB(skb)->delay_unmap = false;
                }
                if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)
                    rx_ring->rsc_count +=
                        skb_shinfo(skb)->nr_frags;
                else
                    rx_ring->rsc_count++;
                rx_ring->rsc_flush++;
            }
            u64_stats_update_begin(&rx_ring->syncp);
            rx_ring->stats.packets++;
            rx_ring->stats.bytes += skb->len;
            u64_stats_update_end(&rx_ring->syncp);
        } else {
            if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
                rx_buffer_info->skb = next_buffer->skb;
                rx_buffer_info->dma = next_buffer->dma;
                next_buffer->skb = skb;
                next_buffer->dma = 0;
            } else {
                skb->next = next_buffer->skb;
                skb->next->prev = skb;
            }
            rx_ring->non_eop_descs++;
            goto next_desc;
        }

        if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) {
            dev_kfree_skb_irq(skb);
            goto next_desc;
        }

        ixgbe_rx_checksum(adapter, rx_desc, skb);

        /* probably a little skewed due to removing CRC */
        total_rx_bytes += skb->len;
        total_rx_packets++;

        skb->protocol = eth_type_trans(skb, adapter->netdev);
#ifdef IXGBE_FCOE
        /* if ddp, not passing to ULD unless for FCP_RSP or error */
        if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
            ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
            if (!ddp_bytes)
                goto next_desc;
        }
#endif /* IXGBE_FCOE */
        ixgbe_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);

next_desc:
        rx_desc->wb.upper.status_error = 0;

        /* return some buffers to hardware, one at a time is too slow */
        if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
            ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
            cleaned_count = 0;
        }

        /* use prefetched values */
        rx_desc = next_rxd;
        rx_buffer_info = &rx_ring->rx_buffer_info[i];

        staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
    }

    rx_ring->next_to_clean = i;
    cleaned_count = IXGBE_DESC_UNUSED(rx_ring);

    if (cleaned_count)
        ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);

#ifdef IXGBE_FCOE
    /* include DDPed FCoE data */
    if (ddp_bytes > 0) {
        unsigned int mss;

        mss = adapter->netdev->mtu - sizeof(struct fcoe_hdr) -
              sizeof(struct fc_frame_header) -
              sizeof(struct fcoe_crc_eof);
        if (mss > 512)
            mss &= ~511;
        total_rx_bytes += ddp_bytes;
        total_rx_packets += DIV_ROUND_UP(ddp_bytes, mss);
    }
#endif /* IXGBE_FCOE */

    rx_ring->total_packets += total_rx_packets;
    rx_ring->total_bytes += total_rx_bytes;

    return cleaned;
}

static int ixgbe_clean_rxonly(struct napi_struct *, int);
/**
 * ixgbe_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbe_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
{
    struct ixgbe_q_vector *q_vector;
    int i, j, q_vectors, v_idx, r_idx;
    u32 mask;

    q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

    /*
     * Populate the IVAR table and set the ITR values to the
     * corresponding register.
     */
    for (v_idx = 0; v_idx < q_vectors; v_idx++) {
        q_vector = adapter->q_vector[v_idx];
        /* XXX for_each_set_bit(...) */
        r_idx = find_first_bit(q_vector->rxr_idx,
                               adapter->num_rx_queues);

        for (i = 0; i < q_vector->rxr_count; i++) {
            j = adapter->rx_ring[r_idx]->reg_idx;
            ixgbe_set_ivar(adapter, 0, j, v_idx);
            r_idx = find_next_bit(q_vector->rxr_idx,
                                  adapter->num_rx_queues,
                                  r_idx + 1);
        }
        r_idx = find_first_bit(q_vector->txr_idx,
                               adapter->num_tx_queues);

        for (i = 0; i < q_vector->txr_count; i++) {
            j = adapter->tx_ring[r_idx]->reg_idx;
            ixgbe_set_ivar(adapter, 1, j, v_idx);
            r_idx = find_next_bit(q_vector->txr_idx,
                                  adapter->num_tx_queues,
                                  r_idx + 1);
        }

        if (q_vector->txr_count && !q_vector->rxr_count)
            /* tx only */
            q_vector->eitr = adapter->tx_eitr_param;
        else if (q_vector->rxr_count)
            /* rx or mixed */
            q_vector->eitr = adapter->rx_eitr_param;

        ixgbe_write_eitr(q_vector);
        /* If Flow Director is enabled, set interrupt affinity */
        if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
            (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) {
            /*
             * Allocate the affinity_hint cpumask, assign the mask
             * for this vector, and set our affinity_hint for
             * this irq.
             */
            if (!alloc_cpumask_var(&q_vector->affinity_mask,
                                   GFP_KERNEL))
                return;
            cpumask_set_cpu(v_idx, q_vector->affinity_mask);
            irq_set_affinity_hint(adapter->msix_entries[v_idx].vector,
                                  q_vector->affinity_mask);
        }
    }

    if (adapter->hw.mac.type == ixgbe_mac_82598EB)
        ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
                       v_idx);
    else if (adapter->hw.mac.type == ixgbe_mac_82599EB)
        ixgbe_set_ivar(adapter, -1, 1, v_idx);
    IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);

    /* set up to autoclear timer, and the vectors */
    mask = IXGBE_EIMS_ENABLE_MASK;
    if (adapter->num_vfs)
        mask &= ~(IXGBE_EIMS_OTHER |
                  IXGBE_EIMS_MAILBOX |
                  IXGBE_EIMS_LSC);
    else
        mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
    IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
}

enum latency_range {
    lowest_latency = 0,
    low_latency = 1,
    bulk_latency = 2,
    latency_invalid = 255
};

/**
 * ixgbe_update_itr - update the dynamic ITR value based on statistics
 * @adapter: pointer to adapter
 * @eitr: eitr setting (ints per sec) used for the last timeslice
 * @itr_setting: current ITR latency class for this queue set
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 *
 * Returns a new ITR class based on packet and byte
 * counts during the last interrupt. The advantage of per-interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern. Constants in this function were computed
 * based on theoretical maximum wire speed, and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 * This functionality is controlled by the InterruptThrottleRate module
 * parameter (see ixgbe_param.c).
 **/
static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter,
                           u32 eitr, u8 itr_setting,
                           int packets, int bytes)
{
    unsigned int retval = itr_setting;
    u32 timepassed_us;
    u64 bytes_perint;

    if (packets == 0)
        goto update_itr_done;


    /* simple throttlerate management
     *    0-20MB/s lowest (100000 ints/s)
     *   20-100MB/s low   (20000 ints/s)
     *  100-1249MB/s bulk (8000 ints/s)
     */
    /* what was last interrupt timeslice? */
    timepassed_us = 1000000/eitr;
    bytes_perint = bytes / timepassed_us; /* bytes/usec */

    switch (itr_setting) {
    case lowest_latency:
        if (bytes_perint > adapter->eitr_low)
            retval = low_latency;
        break;
    case low_latency:
        if (bytes_perint > adapter->eitr_high)
            retval = bulk_latency;
        else if (bytes_perint <= adapter->eitr_low)
            retval = lowest_latency;
        break;
    case bulk_latency:
        if (bytes_perint <= adapter->eitr_high)
            retval = low_latency;
        break;
    }

update_itr_done:
    return retval;
}
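
/*
 * Worked example (illustrative; eitr_low/eitr_high are adapter-tuned
 * thresholds): at eitr = 20000 ints/s the timeslice is 1000000/20000 = 50 us,
 * so 500000 bytes received in that interval gives bytes_perint = 10000
 * bytes/usec.  If that exceeds adapter->eitr_high while in low_latency, the
 * vector is promoted to bulk_latency (8000 ints/s) on the next EITR write.
 */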

/**
 * ixgbe_write_eitr - write EITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 *
 * This function is made to be called by ethtool and by the driver
 * when it needs to update EITR registers at runtime. Hardware
 * specific quirks/differences are taken care of here.
 */
void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
{
    struct ixgbe_adapter *adapter = q_vector->adapter;
    struct ixgbe_hw *hw = &adapter->hw;
    int v_idx = q_vector->v_idx;
    u32 itr_reg = EITR_INTS_PER_SEC_TO_REG(q_vector->eitr);

    if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
        /* must write high and low 16 bits to reset counter */
        itr_reg |= (itr_reg << 16);
    } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
        /*
         * 82599 can support a value of zero, so allow it for
         * max interrupt rate, but there is an erratum where it
         * cannot be zero with RSC
         */
        if (itr_reg == 8 &&
            !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
            itr_reg = 0;

        /*
         * set the WDIS bit to not clear the timer bits and cause an
         * immediate assertion of the interrupt
         */
        itr_reg |= IXGBE_EITR_CNT_WDIS;
    }
    IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
}

static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
{
    struct ixgbe_adapter *adapter = q_vector->adapter;
    u32 new_itr;
    u8 current_itr, ret_itr;
    int i, r_idx;
    struct ixgbe_ring *rx_ring, *tx_ring;

    r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
    for (i = 0; i < q_vector->txr_count; i++) {
        tx_ring = adapter->tx_ring[r_idx];
        ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
                                   q_vector->tx_itr,
                                   tx_ring->total_packets,
                                   tx_ring->total_bytes);
        /* if the result for this queue would decrease interrupt
         * rate for this vector then use that result */
        q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
                            q_vector->tx_itr - 1 : ret_itr);
        r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
                              r_idx + 1);
    }

    r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
    for (i = 0; i < q_vector->rxr_count; i++) {
        rx_ring = adapter->rx_ring[r_idx];
        ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
                                   q_vector->rx_itr,
                                   rx_ring->total_packets,
                                   rx_ring->total_bytes);
        /* if the result for this queue would decrease interrupt
         * rate for this vector then use that result */
        q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
                            q_vector->rx_itr - 1 : ret_itr);
        r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
                              r_idx + 1);
    }

    current_itr = max(q_vector->rx_itr, q_vector->tx_itr);

    switch (current_itr) {
    /* counts and packets in update_itr are dependent on these numbers */
    case lowest_latency:
        new_itr = 100000;
        break;
    case low_latency:
        new_itr = 20000; /* aka hwitr = ~200 */
        break;
    case bulk_latency:
    default:
        new_itr = 8000;
        break;
    }

    if (new_itr != q_vector->eitr) {
        /* do an exponential smoothing */
        new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);

        /* save the algorithm value here, not the smoothed one */
        q_vector->eitr = new_itr;

        ixgbe_write_eitr(q_vector);
    }
}
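
/*
 * Example (illustrative): moving from bulk (eitr = 8000) to low latency
 * (new_itr = 20000) is smoothed as (8000 * 90)/100 + (20000 * 10)/100 = 9200
 * ints/s, so the rate ramps toward the target over several intervals instead
 * of jumping in one step.
 */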

/**
 * ixgbe_check_overtemp_task - worker thread to check for over-temperature
 * @work: pointer to work_struct containing our data
 **/
static void ixgbe_check_overtemp_task(struct work_struct *work)
{
    struct ixgbe_adapter *adapter = container_of(work,
                                                 struct ixgbe_adapter,
                                                 check_overtemp_task);
    struct ixgbe_hw *hw = &adapter->hw;
    u32 eicr = adapter->interrupt_event;

    if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE))
        return;

    switch (hw->device_id) {
    case IXGBE_DEV_ID_82599_T3_LOM: {
        u32 autoneg;
        bool link_up = false;

        if (hw->mac.ops.check_link)
            hw->mac.ops.check_link(hw, &autoneg, &link_up, false);

        if (((eicr & IXGBE_EICR_GPI_SDP0) && (!link_up)) ||
            (eicr & IXGBE_EICR_LSC))
            /* Check if this is due to overtemp */
            if (hw->phy.ops.check_overtemp(hw) == IXGBE_ERR_OVERTEMP)
                break;
        return;
    }
    default:
        if (!(eicr & IXGBE_EICR_GPI_SDP0))
            return;
        break;
    }
    e_crit(drv,
           "Network adapter has been stopped because it has overheated. "
           "Restart the computer. If the problem persists, "
           "power off the system and replace the adapter\n");
    /* write to clear the interrupt */
    IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0);
}

static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
{
    struct ixgbe_hw *hw = &adapter->hw;

    if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
        (eicr & IXGBE_EICR_GPI_SDP1)) {
        e_crit(probe, "Fan has stopped, replace the adapter\n");
        /* write to clear the interrupt */
        IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
    }
}

static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
{
    struct ixgbe_hw *hw = &adapter->hw;

    if (eicr & IXGBE_EICR_GPI_SDP1) {
        /* Clear the interrupt */
        IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
        schedule_work(&adapter->multispeed_fiber_task);
    } else if (eicr & IXGBE_EICR_GPI_SDP2) {
        /* Clear the interrupt */
        IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
        schedule_work(&adapter->sfp_config_module_task);
    } else {
        /* Interrupt isn't for us... */
        return;
    }
}

static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
{
    struct ixgbe_hw *hw = &adapter->hw;

    adapter->lsc_int++;
    adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
    adapter->link_check_timeout = jiffies;
    if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
        IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
        IXGBE_WRITE_FLUSH(hw);
        schedule_work(&adapter->watchdog_task);
    }
}

static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
{
    struct net_device *netdev = data;
    struct ixgbe_adapter *adapter = netdev_priv(netdev);
    struct ixgbe_hw *hw = &adapter->hw;
    u32 eicr;

    /*
     * Workaround for silicon errata: use clear-by-write instead
     * of clear-by-read. Reading with EICS returns the interrupt
     * causes without clearing them; the clearing is done later
     * by the write to EICR.
     */
    eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
    IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);

    if (eicr & IXGBE_EICR_LSC)
        ixgbe_check_lsc(adapter);

    if (eicr & IXGBE_EICR_MAILBOX)
        ixgbe_msg_task(adapter);

    if (hw->mac.type == ixgbe_mac_82598EB)
        ixgbe_check_fan_failure(adapter, eicr);

    if (hw->mac.type == ixgbe_mac_82599EB) {
        ixgbe_check_sfp_event(adapter, eicr);
        adapter->interrupt_event = eicr;
        if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
            ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC)))
            schedule_work(&adapter->check_overtemp_task);

        /* Handle Flow Director Full threshold interrupt */
        if (eicr & IXGBE_EICR_FLOW_DIR) {
            int i;
            IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_FLOW_DIR);
            /* Disable transmits before FDIR Re-initialization */
            netif_tx_stop_all_queues(netdev);
            for (i = 0; i < adapter->num_tx_queues; i++) {
                struct ixgbe_ring *tx_ring =
                    adapter->tx_ring[i];
                if (test_and_clear_bit(__IXGBE_FDIR_INIT_DONE,
                                       &tx_ring->reinit_state))
                    schedule_work(&adapter->fdir_reinit_task);
            }
        }
    }
    if (!test_bit(__IXGBE_DOWN, &adapter->state))
        IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);

    return IRQ_HANDLED;
}

static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
                                           u64 qmask)
{
    u32 mask;

    if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
        mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
    } else {
        mask = (qmask & 0xFFFFFFFF);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(0), mask);
        mask = (qmask >> 32);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(1), mask);
    }
    /* skip the flush */
}

static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
                                            u64 qmask)
{
    u32 mask;

    if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
        mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, mask);
    } else {
        mask = (qmask & 0xFFFFFFFF);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), mask);
        mask = (qmask >> 32);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), mask);
    }
    /* skip the flush */
}

static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
{
    struct ixgbe_q_vector *q_vector = data;
    struct ixgbe_adapter *adapter = q_vector->adapter;
    struct ixgbe_ring *tx_ring;
    int i, r_idx;

    if (!q_vector->txr_count)
        return IRQ_HANDLED;

    r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
    for (i = 0; i < q_vector->txr_count; i++) {
        tx_ring = adapter->tx_ring[r_idx];
        tx_ring->total_bytes = 0;
        tx_ring->total_packets = 0;
        r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
                              r_idx + 1);
    }

    /* EIAM disabled interrupts (on this vector) for us */
    napi_schedule(&q_vector->napi);

    return IRQ_HANDLED;
}

/**
 * ixgbe_msix_clean_rx - single unshared vector rx clean (all queues)
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 **/
static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
{
    struct ixgbe_q_vector *q_vector = data;
    struct ixgbe_adapter *adapter = q_vector->adapter;
    struct ixgbe_ring *rx_ring;
    int r_idx;
    int i;

    r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
    for (i = 0; i < q_vector->rxr_count; i++) {
        rx_ring = adapter->rx_ring[r_idx];
        rx_ring->total_bytes = 0;
        rx_ring->total_packets = 0;
        r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
                              r_idx + 1);
    }

    if (!q_vector->rxr_count)
        return IRQ_HANDLED;

1841 /* disable interrupts on this vector only */
1842 /* EIAM disabled interrupts (on this vector) for us */
1843 napi_schedule(&q_vector->napi);
1844
1845 return IRQ_HANDLED;
1846 }
1847
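/**
 * ixgbe_msix_clean_many - vector with both tx and rx queues attached
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 *
 * Resets the byte/packet counters on every ring this vector owns, then
 * hands the actual cleanup to the vector's NAPI poll routine.
 **/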
1848 static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
1849 {
1850 struct ixgbe_q_vector *q_vector = data;
1851 struct ixgbe_adapter *adapter = q_vector->adapter;
1852 struct ixgbe_ring *ring;
1853 int r_idx;
1854 int i;
1855
1856 if (!q_vector->txr_count && !q_vector->rxr_count)
1857 return IRQ_HANDLED;
1858
1859 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
1860 for (i = 0; i < q_vector->txr_count; i++) {
1861 ring = adapter->tx_ring[r_idx];
1862 ring->total_bytes = 0;
1863 ring->total_packets = 0;
1864 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
1865 r_idx + 1);
1866 }
1867
1868 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1869 for (i = 0; i < q_vector->rxr_count; i++) {
1870 ring = adapter->rx_ring[r_idx];
1871 ring->total_bytes = 0;
1872 ring->total_packets = 0;
1873 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
1874 r_idx + 1);
1875 }
1876
1877 /* EIAM disabled interrupts (on this vector) for us */
1878 napi_schedule(&q_vector->napi);
1879
1880 return IRQ_HANDLED;
1881 }
1882
1883 /**
1884 * ixgbe_clean_rxonly - msix (aka one shot) rx clean routine
1885 * @napi: napi struct with our devices info in it
1886 * @budget: amount of work driver is allowed to do this pass, in packets
1887 *
1888 * This function is optimized for cleaning one queue only on a single
1889 * q_vector.
1890 **/
1891 static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
1892 {
1893 struct ixgbe_q_vector *q_vector =
1894 container_of(napi, struct ixgbe_q_vector, napi);
1895 struct ixgbe_adapter *adapter = q_vector->adapter;
1896 struct ixgbe_ring *rx_ring = NULL;
1897 int work_done = 0;
1898 long r_idx;
1899
1900 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1901 rx_ring = adapter->rx_ring[r_idx];
1902 #ifdef CONFIG_IXGBE_DCA
1903 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1904 ixgbe_update_rx_dca(adapter, rx_ring);
1905 #endif
1906
1907 ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
1908
1909 /* If all Rx work done, exit the polling mode */
1910 if (work_done < budget) {
1911 napi_complete(napi);
1912 if (adapter->rx_itr_setting & 1)
1913 ixgbe_set_itr_msix(q_vector);
1914 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1915 ixgbe_irq_enable_queues(adapter,
1916 ((u64)1 << q_vector->v_idx));
1917 }
1918
1919 return work_done;
1920 }
1921
1922 /**
1923 * ixgbe_clean_rxtx_many - msix (aka one shot) tx/rx clean routine
1924 * @napi: napi struct with our devices info in it
1925 * @budget: amount of work driver is allowed to do this pass, in packets
1926 *
1927 * This function will clean more than one tx and/or rx queue associated
1928 * with a q_vector.
1929 **/
1930 static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
1931 {
1932 struct ixgbe_q_vector *q_vector =
1933 container_of(napi, struct ixgbe_q_vector, napi);
1934 struct ixgbe_adapter *adapter = q_vector->adapter;
1935 struct ixgbe_ring *ring = NULL;
1936 int work_done = 0, i;
1937 long r_idx;
1938 bool tx_clean_complete = true;
1939
1940 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
1941 for (i = 0; i < q_vector->txr_count; i++) {
1942 ring = adapter->tx_ring[r_idx];
1943 #ifdef CONFIG_IXGBE_DCA
1944 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1945 ixgbe_update_tx_dca(adapter, ring);
1946 #endif
1947 tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring);
1948 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
1949 r_idx + 1);
1950 }
1951
1952 /* attempt to distribute budget to each queue fairly, but don't allow
1953 * the budget to go below 1 because we'll exit polling */
1954 budget /= (q_vector->rxr_count ?: 1);
1955 budget = max(budget, 1);
1956 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1957 for (i = 0; i < q_vector->rxr_count; i++) {
1958 ring = adapter->rx_ring[r_idx];
1959 #ifdef CONFIG_IXGBE_DCA
1960 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1961 ixgbe_update_rx_dca(adapter, ring);
1962 #endif
1963 ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget);
1964 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
1965 r_idx + 1);
1966 }
1967
1968 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1969 ring = adapter->rx_ring[r_idx];
1970 /* If all Rx work done, exit the polling mode */
1971 if (work_done < budget) {
1972 napi_complete(napi);
1973 if (adapter->rx_itr_setting & 1)
1974 ixgbe_set_itr_msix(q_vector);
1975 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1976 ixgbe_irq_enable_queues(adapter,
1977 ((u64)1 << q_vector->v_idx));
1978 return 0;
1979 }
1980
1981 return work_done;
1982 }
1983
1984 /**
1985 * ixgbe_clean_txonly - msix (aka one shot) tx clean routine
1986 * @napi: napi struct with our devices info in it
1987 * @budget: amount of work driver is allowed to do this pass, in packets
1988 *
1989 * This function is optimized for cleaning one queue only on a single
1990 * q_vector.
1991 **/
1992 static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
1993 {
1994 struct ixgbe_q_vector *q_vector =
1995 container_of(napi, struct ixgbe_q_vector, napi);
1996 struct ixgbe_adapter *adapter = q_vector->adapter;
1997 struct ixgbe_ring *tx_ring = NULL;
1998 int work_done = 0;
1999 long r_idx;
2000
2001 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
2002 tx_ring = adapter->tx_ring[r_idx];
2003 #ifdef CONFIG_IXGBE_DCA
2004 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
2005 ixgbe_update_tx_dca(adapter, tx_ring);
2006 #endif
2007
2008 if (!ixgbe_clean_tx_irq(q_vector, tx_ring))
2009 work_done = budget;
2010
2011 /* If all Tx work done, exit the polling mode */
2012 if (work_done < budget) {
2013 napi_complete(napi);
2014 if (adapter->tx_itr_setting & 1)
2015 ixgbe_set_itr_msix(q_vector);
2016 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2017 ixgbe_irq_enable_queues(adapter,
2018 ((u64)1 << q_vector->v_idx));
2019 }
2020
2021 return work_done;
2022 }
2023
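/*
 * The map_vector_to_{rxq,txq} helpers record ring ownership in the
 * q_vector: a bit in the rxr_idx/txr_idx bitmap names the ring and the
 * matching count tracks how many rings the vector services.
 */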
2024 static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
2025 int r_idx)
2026 {
2027 struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
2028
2029 set_bit(r_idx, q_vector->rxr_idx);
2030 q_vector->rxr_count++;
2031 }
2032
2033 static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
2034 int t_idx)
2035 {
2036 struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
2037
2038 set_bit(t_idx, q_vector->txr_idx);
2039 q_vector->txr_count++;
2040 }
2041
2042 /**
2043 * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors
2044 * @adapter: board private structure to initialize
2045 * @vectors: allotted vector count for descriptor rings
2046 *
2047 * This function maps descriptor rings to the queue-specific vectors
2048 * we were allotted through the MSI-X enabling code. Ideally, we'd have
2049 * one vector per ring/queue, but on a constrained vector budget, we
2050 * group the rings as "efficiently" as possible. You would add new
2051 * mapping configurations in here.
2052 **/
2053 static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
2054 int vectors)
2055 {
2056 int v_start = 0;
2057 int rxr_idx = 0, txr_idx = 0;
2058 int rxr_remaining = adapter->num_rx_queues;
2059 int txr_remaining = adapter->num_tx_queues;
2060 int i, j;
2061 int rqpv, tqpv;
2062 int err = 0;
2063
2064 /* No mapping required if MSI-X is disabled. */
2065 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
2066 goto out;
2067
2068 /*
2069 * The ideal configuration...
2070 * We have enough vectors to map one per queue.
2071 */
2072 if (vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
2073 for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
2074 map_vector_to_rxq(adapter, v_start, rxr_idx);
2075
2076 for (; txr_idx < txr_remaining; v_start++, txr_idx++)
2077 map_vector_to_txq(adapter, v_start, txr_idx);
2078
2079 goto out;
2080 }
2081
2082 /*
2083 * If we don't have enough vectors for a 1-to-1
2084 * mapping, we'll have to group them so there are
2085 * multiple queues per vector.
2086 */
2087 /* Re-adjusting *qpv takes care of the remainder. */
2088 for (i = v_start; i < vectors; i++) {
2089 rqpv = DIV_ROUND_UP(rxr_remaining, vectors - i);
2090 for (j = 0; j < rqpv; j++) {
2091 map_vector_to_rxq(adapter, i, rxr_idx);
2092 rxr_idx++;
2093 rxr_remaining--;
2094 }
2095 }
2096 for (i = v_start; i < vectors; i++) {
2097 tqpv = DIV_ROUND_UP(txr_remaining, vectors - i);
2098 for (j = 0; j < tqpv; j++) {
2099 map_vector_to_txq(adapter, i, txr_idx);
2100 txr_idx++;
2101 txr_remaining--;
2102 }
2103 }
2104
2105 out:
2106 return err;
2107 }
2108
2109 /**
2110 * ixgbe_request_msix_irqs - Initialize MSI-X interrupts
2111 * @adapter: board private structure
2112 *
2113 * ixgbe_request_msix_irqs allocates MSI-X vectors and requests
2114 * interrupts from the kernel.
2115 **/
2116 static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
2117 {
2118 struct net_device *netdev = adapter->netdev;
2119 irqreturn_t (*handler)(int, void *);
2120 int i, vector, q_vectors, err;
2121 int ri = 0, ti = 0;
2122
2123 /* Decrement for Other and TCP Timer vectors */
2124 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2125
2126 /* Map the Tx/Rx rings to the vectors we were allotted. */
2127 err = ixgbe_map_rings_to_vectors(adapter, q_vectors);
2128 if (err)
2129 goto out;
2130
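/*
 * Pick the handler that matches what the vector services: tx only,
 * rx only, or both, based on the counts just filled in by
 * ixgbe_map_rings_to_vectors above.
 */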
2131 #define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \
2132 (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \
2133 &ixgbe_msix_clean_many)
2134 for (vector = 0; vector < q_vectors; vector++) {
2135 handler = SET_HANDLER(adapter->q_vector[vector]);
2136
2137 if (handler == &ixgbe_msix_clean_rx) {
2138 sprintf(adapter->name[vector], "%s-%s-%d",
2139 netdev->name, "rx", ri++);
2140 } else if (handler == &ixgbe_msix_clean_tx) {
2141 sprintf(adapter->name[vector], "%s-%s-%d",
2142 netdev->name, "tx", ti++);
2143 } else {
2144 sprintf(adapter->name[vector], "%s-%s-%d",
2145 netdev->name, "TxRx", vector);
}
2146
2147 err = request_irq(adapter->msix_entries[vector].vector,
2148 handler, 0, adapter->name[vector],
2149 adapter->q_vector[vector]);
2150 if (err) {
2151 e_err(probe, "request_irq failed for MSIX interrupt, "
2152 "Error: %d\n", err);
2153 goto free_queue_irqs;
2154 }
2155 }
2156
2157 sprintf(adapter->name[vector], "%s:lsc", netdev->name);
2158 err = request_irq(adapter->msix_entries[vector].vector,
2159 ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
2160 if (err) {
2161 e_err(probe, "request_irq for msix_lsc failed: %d\n", err);
2162 goto free_queue_irqs;
2163 }
2164
2165 return 0;
2166
2167 free_queue_irqs:
2168 for (i = vector - 1; i >= 0; i--)
2169 free_irq(adapter->msix_entries[i].vector,
2170 adapter->q_vector[i]);
2171 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
2172 pci_disable_msix(adapter->pdev);
2173 kfree(adapter->msix_entries);
2174 adapter->msix_entries = NULL;
2175 out:
2176 return err;
2177 }
2178
2179 static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
2180 {
2181 struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
2182 u8 current_itr;
2183 u32 new_itr = q_vector->eitr;
2184 struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
2185 struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
2186
2187 q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr,
2188 q_vector->tx_itr,
2189 tx_ring->total_packets,
2190 tx_ring->total_bytes);
2191 q_vector->rx_itr = ixgbe_update_itr(adapter, new_itr,
2192 q_vector->rx_itr,
2193 rx_ring->total_packets,
2194 rx_ring->total_bytes);
2195
2196 current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
2197
2198 switch (current_itr) {
2199 /* counts and packets in update_itr are dependent on these numbers */
2200 case lowest_latency:
2201 new_itr = 100000;
2202 break;
2203 case low_latency:
2204 new_itr = 20000; /* aka hwitr = ~200 */
2205 break;
2206 case bulk_latency:
2207 new_itr = 8000;
2208 break;
2209 default:
2210 break;
2211 }
2212
2213 if (new_itr != q_vector->eitr) {
2214 /* do an exponential smoothing */
2215 new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
2216
2217 /* save the algorithm value here, not the smoothed one */
2218 q_vector->eitr = new_itr;
2219
2220 ixgbe_write_eitr(q_vector);
2221 }
2222 }
2223
2224 /**
2225 * ixgbe_irq_enable - Enable default interrupt generation settings
2226 * @adapter: board private structure
* @queues: also enable the per-queue interrupt sources
* @flush: flush the register writes when done
2227 **/
2228 static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
2229 bool flush)
2230 {
2231 u32 mask;
2232
2233 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
2234 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
2235 mask |= IXGBE_EIMS_GPI_SDP0;
2236 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
2237 mask |= IXGBE_EIMS_GPI_SDP1;
2238 if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
2239 mask |= IXGBE_EIMS_ECC;
2240 mask |= IXGBE_EIMS_GPI_SDP1;
2241 mask |= IXGBE_EIMS_GPI_SDP2;
2242 if (adapter->num_vfs)
2243 mask |= IXGBE_EIMS_MAILBOX;
2244 }
2245 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
2246 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
2247 mask |= IXGBE_EIMS_FLOW_DIR;
2248
2249 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
2250 if (queues)
2251 ixgbe_irq_enable_queues(adapter, ~0);
2252 if (flush)
2253 IXGBE_WRITE_FLUSH(&adapter->hw);
2254
2255 if (adapter->num_vfs > 32) {
2256 u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
2257 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
2258 }
2259 }
2260
2261 /**
2262 * ixgbe_intr - legacy mode Interrupt Handler
2263 * @irq: interrupt number
2264 * @data: pointer to a network interface device structure
2265 **/
2266 static irqreturn_t ixgbe_intr(int irq, void *data)
2267 {
2268 struct net_device *netdev = data;
2269 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2270 struct ixgbe_hw *hw = &adapter->hw;
2271 struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
2272 u32 eicr;
2273
2274 /*
2275 * Workaround for silicon errata on 82598. Mask the interrupts
2276 * before the read of EICR.
2277 */
2278 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
2279
2280 /* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read
2281 * therefore no explicit interrupt disable is necessary */
2282 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
2283 if (!eicr) {
2284 /*
2285 * shared interrupt alert!
2286 * make sure interrupts are enabled because the read will
2287 * have disabled interrupts due to EIAM
2288 * finish the workaround of silicon errata on 82598. Unmask
2289 * the interrupt that we masked before the EICR read.
2290 */
2291 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2292 ixgbe_irq_enable(adapter, true, true);
2293 return IRQ_NONE; /* Not our interrupt */
2294 }
2295
2296 if (eicr & IXGBE_EICR_LSC)
2297 ixgbe_check_lsc(adapter);
2298
2299 if (hw->mac.type == ixgbe_mac_82599EB)
2300 ixgbe_check_sfp_event(adapter, eicr);
2301
2302 ixgbe_check_fan_failure(adapter, eicr);
2303 if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
2304 ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC)))
2305 schedule_work(&adapter->check_overtemp_task);
2306
2307 if (napi_schedule_prep(&(q_vector->napi))) {
2308 adapter->tx_ring[0]->total_packets = 0;
2309 adapter->tx_ring[0]->total_bytes = 0;
2310 adapter->rx_ring[0]->total_packets = 0;
2311 adapter->rx_ring[0]->total_bytes = 0;
2312 /* would disable interrupts here but EIAM disabled it */
2313 __napi_schedule(&(q_vector->napi));
2314 }
2315
2316 /*
2317 * re-enable link (maybe) and non-queue interrupts, no flush.
2318 * ixgbe_poll will re-enable the queue interrupts
2319 */
2320
2321 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2322 ixgbe_irq_enable(adapter, false, false);
2323
2324 return IRQ_HANDLED;
2325 }
2326
2327 static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter)
2328 {
2329 int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2330
2331 for (i = 0; i < q_vectors; i++) {
2332 struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
2333 bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
2334 bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
2335 q_vector->rxr_count = 0;
2336 q_vector->txr_count = 0;
2337 }
2338 }
2339
2340 /**
2341 * ixgbe_request_irq - initialize interrupts
2342 * @adapter: board private structure
2343 *
2344 * Attempts to configure interrupts using the best available
2345 * capabilities of the hardware and kernel.
2346 **/
2347 static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
2348 {
2349 struct net_device *netdev = adapter->netdev;
2350 int err;
2351
2352 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
2353 err = ixgbe_request_msix_irqs(adapter);
2354 } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
2355 err = request_irq(adapter->pdev->irq, ixgbe_intr, 0,
2356 netdev->name, netdev);
2357 } else {
2358 err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
2359 netdev->name, netdev);
2360 }
2361
2362 if (err)
2363 e_err(probe, "request_irq failed, Error %d\n", err);
2364
2365 return err;
2366 }
2367
2368 static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
2369 {
2370 struct net_device *netdev = adapter->netdev;
2371
2372 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
2373 int i, q_vectors;
2374
2375 q_vectors = adapter->num_msix_vectors;
2376
2377 i = q_vectors - 1;
2378 free_irq(adapter->msix_entries[i].vector, netdev);
2379
2380 i--;
2381 for (; i >= 0; i--) {
2382 free_irq(adapter->msix_entries[i].vector,
2383 adapter->q_vector[i]);
2384 }
2385
2386 ixgbe_reset_q_vectors(adapter);
2387 } else {
2388 free_irq(adapter->pdev->irq, netdev);
2389 }
2390 }
2391
2392 /**
2393 * ixgbe_irq_disable - Mask off interrupt generation on the NIC
2394 * @adapter: board private structure
2395 **/
2396 static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
2397 {
2398 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
2399 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
2400 } else {
2401 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
2402 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
2403 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
2404 if (adapter->num_vfs > 32)
2405 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
2406 }
2407 IXGBE_WRITE_FLUSH(&adapter->hw);
2408 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
2409 int i;
2410 for (i = 0; i < adapter->num_msix_vectors; i++)
2411 synchronize_irq(adapter->msix_entries[i].vector);
2412 } else {
2413 synchronize_irq(adapter->pdev->irq);
2414 }
2415 }
2416
2417 /**
2418 * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
2419 * @adapter: board private structure
2420 **/
2421 static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
2422 {
2423 struct ixgbe_hw *hw = &adapter->hw;
2424
2425 IXGBE_WRITE_REG(hw, IXGBE_EITR(0),
2426 EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr_param));
2427
2428 ixgbe_set_ivar(adapter, 0, 0, 0);
2429 ixgbe_set_ivar(adapter, 1, 0, 0);
2430
2431 map_vector_to_rxq(adapter, 0, 0);
2432 map_vector_to_txq(adapter, 0, 0);
2433
2434 e_info(hw, "Legacy interrupt IVAR setup done\n");
2435 }
2436
2437 /**
2438 * ixgbe_configure_tx_ring - Configure 8259x Tx ring after Reset
2439 * @adapter: board private structure
2440 * @ring: structure containing ring specific data
2441 *
2442 * Configure the Tx descriptor ring after a reset.
2443 **/
2444 void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
2445 struct ixgbe_ring *ring)
2446 {
2447 struct ixgbe_hw *hw = &adapter->hw;
2448 u64 tdba = ring->dma;
2449 int wait_loop = 10;
2450 u32 txdctl;
2451 u16 reg_idx = ring->reg_idx;
2452
2453 /* disable queue to avoid issues while updating state */
2454 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
2455 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx),
2456 txdctl & ~IXGBE_TXDCTL_ENABLE);
2457 IXGBE_WRITE_FLUSH(hw);
2458
2459 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx),
2460 (tdba & DMA_BIT_MASK(32)));
2461 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), (tdba >> 32));
2462 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(reg_idx),
2463 ring->count * sizeof(union ixgbe_adv_tx_desc));
2464 IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0);
2465 IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0);
2466 ring->tail = hw->hw_addr + IXGBE_TDT(reg_idx);
2467
2468 /* configure fetching thresholds */
2469 if (adapter->rx_itr_setting == 0) {
2470 /* cannot set wthresh when itr==0 */
2471 txdctl &= ~0x007F0000;
2472 } else {
2473 /* enable WTHRESH=8 descriptors, to encourage burst writeback */
2474 txdctl |= (8 << 16);
2475 }
2476 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
2477 /* PThresh workaround for Tx hang with DFP enabled. */
2478 txdctl |= 32;
2479 }
2480
2481 /* reinitialize flowdirector state */
2482 set_bit(__IXGBE_FDIR_INIT_DONE, &ring->reinit_state);
2483
2484 /* enable queue */
2485 txdctl |= IXGBE_TXDCTL_ENABLE;
2486 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl);
2487
2488 /* TXDCTL.EN will return 0 on 82598 if link is down, so skip it */
2489 if (hw->mac.type == ixgbe_mac_82598EB &&
2490 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
2491 return;
2492
2493 /* poll to verify queue is enabled */
2494 do {
2495 msleep(1);
2496 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
2497 } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
2498 if (!wait_loop)
2499 e_err(drv, "Could not enable Tx Queue %d\n", reg_idx);
2500 }
2501
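/**
 * ixgbe_setup_mtqc - program the transmit pool layout (MTQC)
 * @adapter: board private structure
 *
 * Selects the VMDq, DCB, or plain 64-queue/1-PB layout based on the
 * SR-IOV/DCB feature flags. 82598 has no MTQC, so it is skipped.
 **/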
2502 static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
2503 {
2504 struct ixgbe_hw *hw = &adapter->hw;
2505 u32 rttdcs;
2506 u32 mask;
2507
2508 if (hw->mac.type == ixgbe_mac_82598EB)
2509 return;
2510
2511 /* disable the arbiter while setting MTQC */
2512 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
2513 rttdcs |= IXGBE_RTTDCS_ARBDIS;
2514 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2515
2516 /* set transmit pool layout */
2517 mask = (IXGBE_FLAG_SRIOV_ENABLED | IXGBE_FLAG_DCB_ENABLED);
2518 switch (adapter->flags & mask) {
2519
2520 case (IXGBE_FLAG_SRIOV_ENABLED):
2521 IXGBE_WRITE_REG(hw, IXGBE_MTQC,
2522 (IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF));
2523 break;
2524
2525 case (IXGBE_FLAG_DCB_ENABLED):
2526 /* We enable 8 traffic classes, DCB only */
2527 IXGBE_WRITE_REG(hw, IXGBE_MTQC,
2528 (IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ));
2529 break;
2530
2531 default:
2532 IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
2533 break;
2534 }
2535
2536 /* re-enable the arbiter */
2537 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
2538 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2539 }
2540
2541 /**
2542 * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset
2543 * @adapter: board private structure
2544 *
2545 * Configure the Tx unit of the MAC after a reset.
2546 **/
2547 static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
2548 {
2549 struct ixgbe_hw *hw = &adapter->hw;
2550 u32 dmatxctl;
2551 u32 i;
2552
2553 ixgbe_setup_mtqc(adapter);
2554
2555 if (hw->mac.type != ixgbe_mac_82598EB) {
2556 /* DMATXCTL.EN must be before Tx queues are enabled */
2557 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2558 dmatxctl |= IXGBE_DMATXCTL_TE;
2559 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
2560 }
2561
2562 /* Setup the HW Tx Head and Tail descriptor pointers */
2563 for (i = 0; i < adapter->num_tx_queues; i++)
2564 ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]);
2565 }
2566
2567 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
2568
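/**
 * ixgbe_configure_srrctl - configure the split/replication control register
 * @adapter: board private structure
 * @rx_ring: ring to program SRRCTL for
 *
 * Sets the header and packet buffer sizes and selects header-split or
 * one-buffer descriptor format based on the ring's PS flag.
 **/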
2569 static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
2570 struct ixgbe_ring *rx_ring)
2571 {
2572 u32 srrctl;
2573 int index;
2574 struct ixgbe_ring_feature *feature = adapter->ring_feature;
2575
2576 index = rx_ring->reg_idx;
2577 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
2578 unsigned long mask;
2579 mask = (unsigned long) feature[RING_F_RSS].mask;
2580 index = index & mask;
2581 }
2582 srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(index));
2583
2584 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
2585 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
2586 if (adapter->num_vfs)
2587 srrctl |= IXGBE_SRRCTL_DROP_EN;
2588
2589 srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
2590 IXGBE_SRRCTL_BSIZEHDR_MASK;
2591
2592 if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
2593 #if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER
2594 srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
2595 #else
2596 srrctl |= (PAGE_SIZE / 2) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
2597 #endif
2598 srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
2599 } else {
2600 srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
2601 IXGBE_SRRCTL_BSIZEPKT_SHIFT;
2602 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
2603 }
2604
2605 IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl);
2606 }
2607
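/**
 * ixgbe_setup_mrqc - program multiple receive queues control (MRQC)
 * @adapter: board private structure
 *
 * Seeds the RSS hash key and redirection table, then selects the
 * RSS/VMDq/DCB queue distribution mode from the feature flags.
 **/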
2608 static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
2609 {
2610 struct ixgbe_hw *hw = &adapter->hw;
2611 static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D,
2612 0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE,
2613 0x6A3E67EA, 0x14364D17, 0x3BED200D};
2614 u32 mrqc = 0, reta = 0;
2615 u32 rxcsum;
2616 int i, j;
2617 int mask;
2618
2619 /* Fill out hash function seeds */
2620 for (i = 0; i < 10; i++)
2621 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]);
2622
2623 /* Fill out redirection table */
2624 for (i = 0, j = 0; i < 128; i++, j++) {
2625 if (j == adapter->ring_feature[RING_F_RSS].indices)
2626 j = 0;
2627 /* reta = 4-byte sliding window of
2628 * 0x00..(indices-1)(indices-1)00..etc. */
2629 reta = (reta << 8) | (j * 0x11);
2630 if ((i & 3) == 3)
2631 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
2632 }
2633
2634 /* Disable indicating checksum in descriptor, enables RSS hash */
2635 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2636 rxcsum |= IXGBE_RXCSUM_PCSD;
2637 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
2638
2639 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
2640 mask = adapter->flags & IXGBE_FLAG_RSS_ENABLED;
2641 else
2642 mask = adapter->flags & (IXGBE_FLAG_RSS_ENABLED
2643 #ifdef CONFIG_IXGBE_DCB
2644 | IXGBE_FLAG_DCB_ENABLED
2645 #endif
2646 | IXGBE_FLAG_SRIOV_ENABLED
2647 );
2648
2649 switch (mask) {
2650 case (IXGBE_FLAG_RSS_ENABLED):
2651 mrqc = IXGBE_MRQC_RSSEN;
2652 break;
2653 case (IXGBE_FLAG_SRIOV_ENABLED):
2654 mrqc = IXGBE_MRQC_VMDQEN;
2655 break;
2656 #ifdef CONFIG_IXGBE_DCB
2657 case (IXGBE_FLAG_DCB_ENABLED):
2658 mrqc = IXGBE_MRQC_RT8TCEN;
2659 break;
2660 #endif /* CONFIG_IXGBE_DCB */
2661 default:
2662 break;
2663 }
2664
2665 /* Perform hash on these packet types */
2666 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
2667 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
2668 | IXGBE_MRQC_RSS_FIELD_IPV6
2669 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
2670
2671 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2672 }
2673
2674 /**
2675 * ixgbe_configure_rscctl - enable RSC for the indicated ring
2676 * @adapter: address of board private structure
2677 * @index: index of ring to set
2678 **/
2679 static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
2680 struct ixgbe_ring *ring)
2681 {
2682 struct ixgbe_hw *hw = &adapter->hw;
2683 u32 rscctrl;
2684 int rx_buf_len;
2685 u16 reg_idx = ring->reg_idx;
2686
2687 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
2688 return;
2689
2690 rx_buf_len = ring->rx_buf_len;
2691 rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx));
2692 rscctrl |= IXGBE_RSCCTL_RSCEN;
2693 /*
2694 * we must limit the number of descriptors so that the
2695 * total size of max desc * buf_len is not greater
2696 * than 65535
2697 */
2698 if (ring->flags & IXGBE_RING_RX_PS_ENABLED) {
2699 #if (MAX_SKB_FRAGS > 16)
2700 rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
2701 #elif (MAX_SKB_FRAGS > 8)
2702 rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
2703 #elif (MAX_SKB_FRAGS > 4)
2704 rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
2705 #else
2706 rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
2707 #endif
2708 } else {
2709 if (rx_buf_len < IXGBE_RXBUFFER_4096)
2710 rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
2711 else if (rx_buf_len < IXGBE_RXBUFFER_8192)
2712 rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
2713 else
2714 rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
2715 }
2716 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
2717 }
2718
2719 /**
2720 * ixgbe_set_uta - Set unicast filter table address
2721 * @adapter: board private structure
2722 *
2723 * The unicast table address is a register array of 32-bit registers.
2724 * The table is meant to be used in a way similar to how the MTA is used;
2725 * however, due to certain limitations in the hardware it is necessary to
2726 * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
2727 * enable bit to allow VLAN tag stripping when promiscuous mode is enabled
2728 **/
2729 static void ixgbe_set_uta(struct ixgbe_adapter *adapter)
2730 {
2731 struct ixgbe_hw *hw = &adapter->hw;
2732 int i;
2733
2734 /* The UTA table only exists on 82599 hardware and newer */
2735 if (hw->mac.type < ixgbe_mac_82599EB)
2736 return;
2737
2738 /* we only need to do this if VMDq is enabled */
2739 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
2740 return;
2741
2742 for (i = 0; i < 128; i++)
2743 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0);
2744 }
2745
2746 #define IXGBE_MAX_RX_DESC_POLL 10
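/**
 * ixgbe_rx_desc_queue_enable - poll for RXDCTL.ENABLE to be set
 * @adapter: board private structure
 * @ring: ring to watch
 **/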
2747 static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
2748 struct ixgbe_ring *ring)
2749 {
2750 struct ixgbe_hw *hw = &adapter->hw;
2751 int reg_idx = ring->reg_idx;
2752 int wait_loop = IXGBE_MAX_RX_DESC_POLL;
2753 u32 rxdctl;
2754
2755 /* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */
2756 if (hw->mac.type == ixgbe_mac_82598EB &&
2757 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
2758 return;
2759
2760 do {
2761 msleep(1);
2762 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
2763 } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
2764
2765 if (!wait_loop) {
2766 e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within "
2767 "the polling period\n", reg_idx);
2768 }
2769 }
2770
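/**
 * ixgbe_configure_rx_ring - Configure 8259x Rx ring after Reset
 * @adapter: board private structure
 * @ring: structure containing ring specific data
 *
 * Configure the Rx descriptor ring after a reset.
 **/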
2771 void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
2772 struct ixgbe_ring *ring)
2773 {
2774 struct ixgbe_hw *hw = &adapter->hw;
2775 u64 rdba = ring->dma;
2776 u32 rxdctl;
2777 u16 reg_idx = ring->reg_idx;
2778
2779 /* disable queue to avoid issues while updating state */
2780 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
2781 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx),
2782 rxdctl & ~IXGBE_RXDCTL_ENABLE);
2783 IXGBE_WRITE_FLUSH(hw);
2784
2785 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32)));
2786 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32));
2787 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx),
2788 ring->count * sizeof(union ixgbe_adv_rx_desc));
2789 IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0);
2790 IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0);
2791 ring->tail = hw->hw_addr + IXGBE_RDT(reg_idx);
2792
2793 ixgbe_configure_srrctl(adapter, ring);
2794 ixgbe_configure_rscctl(adapter, ring);
2795
2796 if (hw->mac.type == ixgbe_mac_82598EB) {
2797 /*
2798 * enable cache line friendly hardware writes:
2799 * PTHRESH=32 descriptors (half the internal cache),
2800 * this also removes ugly rx_no_buffer_count increment
2801 * HTHRESH=4 descriptors (to minimize latency on fetch)
2802 * WTHRESH=8 burst writeback up to two cache lines
2803 */
2804 rxdctl &= ~0x3FFFFF;
2805 rxdctl |= 0x080420;
2806 }
2807
2808 /* enable receive descriptor ring */
2809 rxdctl |= IXGBE_RXDCTL_ENABLE;
2810 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
2811
2812 ixgbe_rx_desc_queue_enable(adapter, ring);
2813 ixgbe_alloc_rx_buffers(adapter, ring, IXGBE_DESC_UNUSED(ring));
2814 }
2815
2816 static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
2817 {
2818 struct ixgbe_hw *hw = &adapter->hw;
2819 int p;
2820
2821 /* PSRTYPE must be initialized on all non-82598 adapters */
2822 u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
2823 IXGBE_PSRTYPE_UDPHDR |
2824 IXGBE_PSRTYPE_IPV4HDR |
2825 IXGBE_PSRTYPE_L2HDR |
2826 IXGBE_PSRTYPE_IPV6HDR;
2827
2828 if (hw->mac.type == ixgbe_mac_82598EB)
2829 return;
2830
2831 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED)
2832 psrtype |= (adapter->num_rx_queues_per_pool << 29);
2833
2834 for (p = 0; p < adapter->num_rx_pools; p++)
2835 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(adapter->num_vfs + p),
2836 psrtype);
2837 }
2838
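/**
 * ixgbe_configure_virtualization - set up the VT registers for SR-IOV
 * @adapter: board private structure
 *
 * Enables VMDq with replication, restricts Tx/Rx to the PF's own pool,
 * maps RAR entry 0 to the pool following the VFs, and enables Tx
 * loopback for PF/VF communication. No-op when SR-IOV is disabled.
 **/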
2839 static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
2840 {
2841 struct ixgbe_hw *hw = &adapter->hw;
2842 u32 gcr_ext;
2843 u32 vt_reg_bits;
2844 u32 reg_offset, vf_shift;
2845 u32 vmdctl;
2846
2847 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
2848 return;
2849
2850 vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
2851 vt_reg_bits = IXGBE_VMD_CTL_VMDQ_EN | IXGBE_VT_CTL_REPLEN;
2852 vt_reg_bits |= (adapter->num_vfs << IXGBE_VT_CTL_POOL_SHIFT);
2853 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl | vt_reg_bits);
2854
2855 vf_shift = adapter->num_vfs % 32;
2856 reg_offset = (adapter->num_vfs >= 32) ? 1 : 0;
2857
2858 /* Enable only the PF's pool for Tx/Rx */
2859 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift));
2860 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), 0);
2861 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift));
2862 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), 0);
2863 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
2864
2865 /* Map PF MAC address in RAR Entry 0 to first pool following VFs */
2866 hw->mac.ops.set_vmdq(hw, 0, adapter->num_vfs);
2867
2868 /*
2869 * Set up VF register offsets for selected VT Mode,
2870 * i.e. 32 or 64 VFs for SR-IOV
2871 */
2872 gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
2873 gcr_ext |= IXGBE_GCR_EXT_MSIX_EN;
2874 gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
2875 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
2876
2877 /* enable Tx loopback for VF/PF communication */
2878 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
2879 }
2880
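/**
 * ixgbe_set_rx_buffer_len - set per-ring Rx buffer length and max frame
 * @adapter: board private structure
 *
 * Chooses packet-split or one-buffer mode, sizes the Rx buffers to the
 * MTU (or the FCoE jumbo size where needed), and programs
 * MHADD.MFS/HLREG0 to match.
 **/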
2881 static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
2882 {
2883 struct ixgbe_hw *hw = &adapter->hw;
2884 struct net_device *netdev = adapter->netdev;
2885 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
2886 int rx_buf_len;
2887 struct ixgbe_ring *rx_ring;
2888 int i;
2889 u32 mhadd, hlreg0;
2890
2891 /* Decide whether to use packet split mode or not */
2892 /* Do not use packet split if we're in SR-IOV Mode */
2893 if (!adapter->num_vfs)
2894 adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
2895
2896 /* Set the RX buffer length according to the mode */
2897 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
2898 rx_buf_len = IXGBE_RX_HDR_SIZE;
2899 } else {
2900 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
2901 (netdev->mtu <= ETH_DATA_LEN))
2902 rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
2903 else
2904 rx_buf_len = ALIGN(max_frame + VLAN_HLEN, 1024);
2905 }
2906
2907 #ifdef IXGBE_FCOE
2908 /* adjust max frame to be able to do baby jumbo for FCoE */
2909 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
2910 (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE))
2911 max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;
2912
2913 #endif /* IXGBE_FCOE */
2914 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
2915 if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
2916 mhadd &= ~IXGBE_MHADD_MFS_MASK;
2917 mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
2918
2919 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
2920 }
2921
2922 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2923 /* set jumbo enable since MHADD.MFS is keeping size locked at max_frame */
2924 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
2925 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
2926
2927 /*
2928 * Setup the HW Rx Head and Tail Descriptor Pointers and
2929 * the Base and Length of the Rx Descriptor Ring
2930 */
2931 for (i = 0; i < adapter->num_rx_queues; i++) {
2932 rx_ring = adapter->rx_ring[i];
2933 rx_ring->rx_buf_len = rx_buf_len;
2934
2935 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)
2936 rx_ring->flags |= IXGBE_RING_RX_PS_ENABLED;
2937 else
2938 rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED;
2939
2940 #ifdef IXGBE_FCOE
2941 if (netdev->features & NETIF_F_FCOE_MTU) {
2942 struct ixgbe_ring_feature *f;
2943 f = &adapter->ring_feature[RING_F_FCOE];
2944 if ((i >= f->mask) && (i < f->mask + f->indices)) {
2945 rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED;
2946 if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE)
2947 rx_ring->rx_buf_len =
2948 IXGBE_FCOE_JUMBO_FRAME_SIZE;
2949 }
2950 }
2951 #endif /* IXGBE_FCOE */
2952 }
2953
2954 }
2955
2956 static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
2957 {
2958 struct ixgbe_hw *hw = &adapter->hw;
2959 u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2960
2961 switch (hw->mac.type) {
2962 case ixgbe_mac_82598EB:
2963 /*
2964 * For VMDq support of different descriptor types or
2965 * buffer sizes through the use of multiple SRRCTL
2966 * registers, RDRXCTL.MVMEN must be set to 1
2967 *
2968 * also, the manual doesn't mention it clearly but DCA hints
2969 * will only use queue 0's tags unless this bit is set. Side
2970 * effects of setting this bit are only that SRRCTL must be
2971 * fully programmed [0..15]
2972 */
2973 rdrxctl |= IXGBE_RDRXCTL_MVMEN;
2974 break;
2975 case ixgbe_mac_82599EB:
2976 /* Disable RSC for ACK packets */
2977 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
2978 (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
2979 rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
2980 /* hardware requires some bits to be set by default */
2981 rdrxctl |= (IXGBE_RDRXCTL_RSCACKC | IXGBE_RDRXCTL_FCOE_WRFIX);
2982 rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
2983 break;
2984 default:
2985 /* We should do nothing since we don't know this hardware */
2986 return;
2987 }
2988
2989 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
2990 }
2991
2992 /**
2993 * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
2994 * @adapter: board private structure
2995 *
2996 * Configure the Rx unit of the MAC after a reset.
2997 **/
2998 static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
2999 {
3000 struct ixgbe_hw *hw = &adapter->hw;
3001 int i;
3002 u32 rxctrl;
3003
3004 /* disable receives while setting up the descriptors */
3005 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3006 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
3007
3008 ixgbe_setup_psrtype(adapter);
3009 ixgbe_setup_rdrxctl(adapter);
3010
3011 /* Program registers for the distribution of queues */
3012 ixgbe_setup_mrqc(adapter);
3013
3014 ixgbe_set_uta(adapter);
3015
3016 /* set_rx_buffer_len must be called before ring initialization */
3017 ixgbe_set_rx_buffer_len(adapter);
3018
3019 /*
3020 * Setup the HW Rx Head and Tail Descriptor Pointers and
3021 * the Base and Length of the Rx Descriptor Ring
3022 */
3023 for (i = 0; i < adapter->num_rx_queues; i++)
3024 ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]);
3025
3026 /* disable drop enable for 82598 parts */
3027 if (hw->mac.type == ixgbe_mac_82598EB)
3028 rxctrl |= IXGBE_RXCTRL_DMBYPS;
3029
3030 /* enable all receives */
3031 rxctrl |= IXGBE_RXCTRL_RXEN;
3032 hw->mac.ops.enable_rx_dma(hw, rxctrl);
3033 }
3034
3035 static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
3036 {
3037 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3038 struct ixgbe_hw *hw = &adapter->hw;
3039 int pool_ndx = adapter->num_vfs;
3040
3041 /* add VID to filter table */
3042 hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, true);
3043 set_bit(vid, adapter->active_vlans);
3044 }
3045
3046 static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
3047 {
3048 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3049 struct ixgbe_hw *hw = &adapter->hw;
3050 int pool_ndx = adapter->num_vfs;
3051
3052 /* remove VID from filter table */
3053 hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false);
3054 clear_bit(vid, adapter->active_vlans);
3055 }
3056
3057 /**
3058 * ixgbe_vlan_filter_disable - helper to disable hw vlan filtering
3059 * @adapter: driver data
3060 */
3061 static void ixgbe_vlan_filter_disable(struct ixgbe_adapter *adapter)
3062 {
3063 struct ixgbe_hw *hw = &adapter->hw;
3064 u32 vlnctrl;
3065
3066 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3067 vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
3068 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3069 }
3070
3071 /**
3072 * ixgbe_vlan_filter_enable - helper to enable hw vlan filtering
3073 * @adapter: driver data
3074 */
3075 static void ixgbe_vlan_filter_enable(struct ixgbe_adapter *adapter)
3076 {
3077 struct ixgbe_hw *hw = &adapter->hw;
3078 u32 vlnctrl;
3079
3080 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3081 vlnctrl |= IXGBE_VLNCTRL_VFE;
3082 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
3083 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3084 }
3085
3086 /**
3087 * ixgbe_vlan_strip_disable - helper to disable hw vlan stripping
3088 * @adapter: driver data
3089 */
3090 static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
3091 {
3092 struct ixgbe_hw *hw = &adapter->hw;
3093 u32 vlnctrl;
3094 int i, j;
3095
3096 switch (hw->mac.type) {
3097 case ixgbe_mac_82598EB:
3098 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3099 vlnctrl &= ~IXGBE_VLNCTRL_VME;
3100 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3101 break;
3102 case ixgbe_mac_82599EB:
3103 for (i = 0; i < adapter->num_rx_queues; i++) {
3104 j = adapter->rx_ring[i]->reg_idx;
3105 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
3106 vlnctrl &= ~IXGBE_RXDCTL_VME;
3107 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
3108 }
3109 break;
3110 default:
3111 break;
3112 }
3113 }
3114
3115 /**
3116 * ixgbe_vlan_strip_enable - helper to enable hw vlan stripping
3117 * @adapter: driver data
3118 */
3119 static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
3120 {
3121 struct ixgbe_hw *hw = &adapter->hw;
3122 u32 vlnctrl;
3123 int i, j;
3124
3125 switch (hw->mac.type) {
3126 case ixgbe_mac_82598EB:
3127 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3128 vlnctrl |= IXGBE_VLNCTRL_VME;
3129 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3130 break;
3131 case ixgbe_mac_82599EB:
3132 for (i = 0; i < adapter->num_rx_queues; i++) {
3133 j = adapter->rx_ring[i]->reg_idx;
3134 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
3135 vlnctrl |= IXGBE_RXDCTL_VME;
3136 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
3137 }
3138 break;
3139 default:
3140 break;
3141 }
3142 }
3143
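/*
 * Repopulate the hardware VLAN filter table from the active_vlans
 * bitmap after a reset; VLAN 0 is always restored first.
 */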
3144 static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
3145 {
3146 u16 vid;
3147
3148 ixgbe_vlan_rx_add_vid(adapter->netdev, 0);
3149
3150 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
3151 ixgbe_vlan_rx_add_vid(adapter->netdev, vid);
3152 }
3153
3154 /**
3155 * ixgbe_write_uc_addr_list - write unicast addresses to RAR table
3156 * @netdev: network interface device structure
3157 *
3158 * Writes unicast address list to the RAR table.
3159 * Returns: -ENOMEM on failure/insufficient address space
3160 * 0 on no addresses written
3161 * X on writing X addresses to the RAR table
3162 **/
3163 static int ixgbe_write_uc_addr_list(struct net_device *netdev)
3164 {
3165 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3166 struct ixgbe_hw *hw = &adapter->hw;
3167 unsigned int vfn = adapter->num_vfs;
3168 unsigned int rar_entries = hw->mac.num_rar_entries - (vfn + 1);
3169 int count = 0;
3170
3171 /* return ENOMEM indicating insufficient memory for addresses */
3172 if (netdev_uc_count(netdev) > rar_entries)
3173 return -ENOMEM;
3174
3175 if (!netdev_uc_empty(netdev) && rar_entries) {
3176 struct netdev_hw_addr *ha;
3177 /* return error if we do not support writing to RAR table */
3178 if (!hw->mac.ops.set_rar)
3179 return -ENOMEM;
3180
3181 netdev_for_each_uc_addr(ha, netdev) {
3182 if (!rar_entries)
3183 break;
3184 hw->mac.ops.set_rar(hw, rar_entries--, ha->addr,
3185 vfn, IXGBE_RAH_AV);
3186 count++;
3187 }
3188 }
3189 /* write the addresses in reverse order to avoid write combining */
3190 for (; rar_entries > 0 ; rar_entries--)
3191 hw->mac.ops.clear_rar(hw, rar_entries);
3192
3193 return count;
3194 }
3195
3196 /**
3197 * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
3198 * @netdev: network interface device structure
3199 *
3200 * The set_rx_mode entry point is called whenever the unicast/multicast
3201 * address list or the network interface flags are updated. This routine is
3202 * responsible for configuring the hardware for proper unicast, multicast and
3203 * promiscuous mode.
3204 **/
3205 void ixgbe_set_rx_mode(struct net_device *netdev)
3206 {
3207 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3208 struct ixgbe_hw *hw = &adapter->hw;
3209 u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
3210 int count;
3211
3212 /* Check for Promiscuous and All Multicast modes */
3213
3214 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3215
3216 /* set all bits that we expect to always be set */
3217 fctrl |= IXGBE_FCTRL_BAM;
3218 fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */
3219 fctrl |= IXGBE_FCTRL_PMCF;
3220
3221 /* clear the bits we are changing the status of */
3222 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3223
3224 if (netdev->flags & IFF_PROMISC) {
3225 hw->addr_ctrl.user_set_promisc = true;
3226 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3227 vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE);
3228 /* don't hardware filter vlans in promisc mode */
3229 ixgbe_vlan_filter_disable(adapter);
3230 } else {
3231 if (netdev->flags & IFF_ALLMULTI) {
3232 fctrl |= IXGBE_FCTRL_MPE;
3233 vmolr |= IXGBE_VMOLR_MPE;
3234 } else {
3235 /*
3236 * Write addresses to the MTA; if the attempt fails
3237 * then we should just turn on promiscuous mode so
3238 * that we can at least receive multicast traffic
3239 */
3240 hw->mac.ops.update_mc_addr_list(hw, netdev);
3241 vmolr |= IXGBE_VMOLR_ROMPE;
3242 }
3243 ixgbe_vlan_filter_enable(adapter);
3244 hw->addr_ctrl.user_set_promisc = false;
3245 /*
3246 * Write addresses to the available RAR registers; if there is not
3247 * sufficient space to store all the addresses then enable
3248 * unicast promiscuous mode
3249 */
3250 count = ixgbe_write_uc_addr_list(netdev);
3251 if (count < 0) {
3252 fctrl |= IXGBE_FCTRL_UPE;
3253 vmolr |= IXGBE_VMOLR_ROPE;
3254 }
3255 }
3256
3257 if (adapter->num_vfs) {
3258 ixgbe_restore_vf_multicasts(adapter);
3259 vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(adapter->num_vfs)) &
3260 ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE |
3261 IXGBE_VMOLR_ROPE);
3262 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(adapter->num_vfs), vmolr);
3263 }
3264
3265 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
3266
3267 if (netdev->features & NETIF_F_HW_VLAN_RX)
3268 ixgbe_vlan_strip_enable(adapter);
3269 else
3270 ixgbe_vlan_strip_disable(adapter);
3271 }
3272
3273 static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
3274 {
3275 int q_idx;
3276 struct ixgbe_q_vector *q_vector;
3277 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
3278
3279 /* legacy and MSI only use one vector */
3280 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
3281 q_vectors = 1;
3282
3283 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
3284 struct napi_struct *napi;
3285 q_vector = adapter->q_vector[q_idx];
3286 napi = &q_vector->napi;
3287 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
3288 if (!q_vector->rxr_count || !q_vector->txr_count) {
3289 if (q_vector->txr_count == 1)
3290 napi->poll = &ixgbe_clean_txonly;
3291 else if (q_vector->rxr_count == 1)
3292 napi->poll = &ixgbe_clean_rxonly;
3293 }
3294 }
3295
3296 napi_enable(napi);
3297 }
3298 }
3299
3300 static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
3301 {
3302 int q_idx;
3303 struct ixgbe_q_vector *q_vector;
3304 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
3305
3306 /* legacy and MSI only use one vector */
3307 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
3308 q_vectors = 1;
3309
3310 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
3311 q_vector = adapter->q_vector[q_idx];
3312 napi_disable(&q_vector->napi);
3313 }
3314 }
3315
3316 #ifdef CONFIG_IXGBE_DCB
3317 /*
3318 * ixgbe_configure_dcb - Configure DCB hardware
3319 * @adapter: ixgbe adapter struct
3320 *
3321 * This is called by the driver on open to configure the DCB hardware.
3322 * This is also called by the gennetlink interface when reconfiguring
3323 * the DCB state.
3324 */
3325 static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
3326 {
3327 struct ixgbe_hw *hw = &adapter->hw;
3328 int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
3329 u32 txdctl;
3330 int i, j;
3331
3332 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) {
3333 if (hw->mac.type == ixgbe_mac_82598EB)
3334 netif_set_gso_max_size(adapter->netdev, 65536);
3335 return;
3336 }
3337
3338 if (hw->mac.type == ixgbe_mac_82598EB)
3339 netif_set_gso_max_size(adapter->netdev, 32768);
3340
3341 #ifdef IXGBE_FCOE
3342 if (adapter->netdev->features & NETIF_F_FCOE_MTU)
3343 max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
3344 #endif
3345
3346 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
3347 DCB_TX_CONFIG);
3348 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
3349 DCB_RX_CONFIG);
3350
3351 /* reconfigure the hardware */
3352 ixgbe_dcb_hw_config(&adapter->hw, &adapter->dcb_cfg);
3353
3354 for (i = 0; i < adapter->num_tx_queues; i++) {
3355 j = adapter->tx_ring[i]->reg_idx;
3356 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
3357 /* PThresh workaround for Tx hang with DFP enabled. */
3358 txdctl |= 32;
3359 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
3360 }
3361 /* Enable VLAN tag insert/strip */
3362 adapter->netdev->features |= NETIF_F_HW_VLAN_RX;
3363
3364 hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
3365 }
3366
3367 #endif
3368 static void ixgbe_configure(struct ixgbe_adapter *adapter)
3369 {
3370 struct net_device *netdev = adapter->netdev;
3371 struct ixgbe_hw *hw = &adapter->hw;
3372 int i;
3373
3374 #ifdef CONFIG_IXGBE_DCB
3375 ixgbe_configure_dcb(adapter);
3376 #endif
3377
3378 ixgbe_set_rx_mode(netdev);
3379 ixgbe_restore_vlan(adapter);
3380
3381 #ifdef IXGBE_FCOE
3382 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
3383 ixgbe_configure_fcoe(adapter);
3384
3385 #endif /* IXGBE_FCOE */
3386 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
3387 for (i = 0; i < adapter->num_tx_queues; i++)
3388 adapter->tx_ring[i]->atr_sample_rate =
3389 adapter->atr_sample_rate;
3390 ixgbe_init_fdir_signature_82599(hw, adapter->fdir_pballoc);
3391 } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
3392 ixgbe_init_fdir_perfect_82599(hw, adapter->fdir_pballoc);
3393 }
3394 ixgbe_configure_virtualization(adapter);
3395
3396 ixgbe_configure_tx(adapter);
3397 ixgbe_configure_rx(adapter);
3398 }
3399
3400 static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
3401 {
3402 switch (hw->phy.type) {
3403 case ixgbe_phy_sfp_avago:
3404 case ixgbe_phy_sfp_ftl:
3405 case ixgbe_phy_sfp_intel:
3406 case ixgbe_phy_sfp_unknown:
3407 case ixgbe_phy_sfp_passive_tyco:
3408 case ixgbe_phy_sfp_passive_unknown:
3409 case ixgbe_phy_sfp_active_unknown:
3410 case ixgbe_phy_sfp_ftl_active:
3411 return true;
3412 default:
3413 return false;
3414 }
3415 }
3416
3417 /**
3418 * ixgbe_sfp_link_config - set up SFP+ link
3419 * @adapter: pointer to private adapter struct
3420 **/
3421 static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
3422 {
3423 struct ixgbe_hw *hw = &adapter->hw;
3424
3425 if (hw->phy.multispeed_fiber) {
3426 /*
3427 * In multispeed fiber setups, the device may not have
3428 * had a physical connection when the driver loaded.
3429 * If that's the case, the initial link configuration
3430 * couldn't get the MAC into 10G or 1G mode, so we'll
3431 * never have a link status change interrupt fire.
3432 * We need to try and force an autonegotiation
3433 * session, then bring up link.
3434 */
3435 hw->mac.ops.setup_sfp(hw);
3436 if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
3437 schedule_work(&adapter->multispeed_fiber_task);
3438 } else {
3439 /*
3440 * Direct Attach Cu and non-multispeed fiber modules
3441 * still need to be configured properly prior to
3442 * attempting link.
3443 */
3444 if (!(adapter->flags & IXGBE_FLAG_IN_SFP_MOD_TASK))
3445 schedule_work(&adapter->sfp_config_module_task);
3446 }
3447 }
3448
3449 /**
3450 * ixgbe_non_sfp_link_config - set up non-SFP+ link
3451 * @hw: pointer to private hardware struct
3452 *
3453 * Returns 0 on success, negative on failure
3454 **/
3455 static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
3456 {
3457 u32 autoneg;
3458 bool negotiation, link_up = false;
3459 u32 ret = IXGBE_ERR_LINK_SETUP;
3460
3461 if (hw->mac.ops.check_link)
3462 ret = hw->mac.ops.check_link(hw, &autoneg, &link_up, false);
3463
3464 if (ret)
3465 goto link_cfg_out;
3466
3467 if (hw->mac.ops.get_link_capabilities)
3468 ret = hw->mac.ops.get_link_capabilities(hw, &autoneg,
3469 &negotiation);
3470 if (ret)
3471 goto link_cfg_out;
3472
3473 if (hw->mac.ops.setup_link)
3474 ret = hw->mac.ops.setup_link(hw, autoneg, negotiation, link_up);
3475 link_cfg_out:
3476 return ret;
3477 }
3478
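/**
 * ixgbe_setup_gpie - configure general purpose interrupt enable (GPIE)
 * @adapter: board private structure
 *
 * Sets MSI-X mode and EIAM auto-masking when MSI-X is in use, selects
 * 64-pool VT mode for SR-IOV, and unmasks the SDP causes used for fan
 * failure and, on 82599, SFP+ module events.
 **/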
3479 static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
3480 {
3481 struct ixgbe_hw *hw = &adapter->hw;
3482 u32 gpie = 0;
3483
3484 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
3485 gpie = IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
3486 IXGBE_GPIE_OCD;
3487 gpie |= IXGBE_GPIE_EIAME;
3488 /*
3489 * use EIAM to auto-mask when MSI-X interrupt is asserted
3490 * this saves a register write for every interrupt
3491 */
3492 switch (hw->mac.type) {
3493 case ixgbe_mac_82598EB:
3494 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3495 break;
3496 default:
3497 case ixgbe_mac_82599EB:
3498 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
3499 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
3500 break;
3501 }
3502 } else {
3503 /* legacy interrupts, use EIAM to auto-mask when reading EICR,
3504 * specifically only auto mask tx and rx interrupts */
3505 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3506 }
3507
3508 /* XXX: to interrupt immediately for EICS writes, enable this */
3509 /* gpie |= IXGBE_GPIE_EIMEN; */
3510
3511 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
3512 gpie &= ~IXGBE_GPIE_VTMODE_MASK;
3513 gpie |= IXGBE_GPIE_VTMODE_64;
3514 }
3515
3516 /* Enable fan failure interrupt */
3517 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
3518 gpie |= IXGBE_SDP1_GPIEN;
3519
3520 if (hw->mac.type == ixgbe_mac_82599EB) {
3521 gpie |= IXGBE_SDP1_GPIEN;
3522 gpie |= IXGBE_SDP2_GPIEN;
}
3523
3524 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3525 }
3526
3527 static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
3528 {
3529 struct ixgbe_hw *hw = &adapter->hw;
3530 int err;
3531 u32 ctrl_ext;
3532
3533 ixgbe_get_hw_control(adapter);
3534 ixgbe_setup_gpie(adapter);
3535
3536 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
3537 ixgbe_configure_msix(adapter);
3538 else
3539 ixgbe_configure_msi_and_legacy(adapter);
3540
3541 /* enable the optics */
3542 if (hw->phy.multispeed_fiber)
3543 hw->mac.ops.enable_tx_laser(hw);
3544
3545 clear_bit(__IXGBE_DOWN, &adapter->state);
3546 ixgbe_napi_enable_all(adapter);
3547
3548 /* clear any pending interrupts, may auto mask */
3549 IXGBE_READ_REG(hw, IXGBE_EICR);
3550 ixgbe_irq_enable(adapter, true, true);
3551
3552 /*
3553 * If this adapter has a fan, check to see if we had a failure
3554 * before we enabled the interrupt.
3555 */
3556 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
3557 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
3558 if (esdp & IXGBE_ESDP_SDP1)
3559 e_crit(drv, "Fan has stopped, replace the adapter\n");
3560 }
3561
3562 /*
3563 * For hot-pluggable SFP+ devices, a new SFP+ module may have
3564 * arrived before interrupts were enabled but after probe. Such
3565 * devices wouldn't have their type identified yet. We need to
3566 * kick off the SFP+ module setup first, then try to bring up link.
3567 * If we're not hot-pluggable SFP+, we just need to configure link
3568 * and bring it up.
3569 */
3570 if (hw->phy.type == ixgbe_phy_unknown) {
3571 err = hw->phy.ops.identify(hw);
3572 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3573 /*
3574 * Take the device down and schedule the sfp config
3575 * work item, which will unregister_netdev and log it.
3576 */
3577 ixgbe_down(adapter);
3578 schedule_work(&adapter->sfp_config_module_task);
3579 return err;
3580 }
3581 }
3582
3583 if (ixgbe_is_sfp(hw)) {
3584 ixgbe_sfp_link_config(adapter);
3585 } else {
3586 err = ixgbe_non_sfp_link_config(hw);
3587 if (err)
3588 e_err(probe, "link_config FAILED %d\n", err);
3589 }
3590
3591 /* enable transmits */
3592 netif_tx_start_all_queues(adapter->netdev);
3593
3594 /* bring the link up in the watchdog; this could race with our first
3595 * link up interrupt but shouldn't be a problem */
3596 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
3597 adapter->link_check_timeout = jiffies;
3598 mod_timer(&adapter->watchdog_timer, jiffies);
3599
3600 /* Set PF Reset Done bit so PF/VF Mail Ops can work */
3601 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
3602 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
3603 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
3604
3605 return 0;
3606 }
3607
3608 void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
3609 {
3610 WARN_ON(in_interrupt());
3611 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
3612 msleep(1);
3613 ixgbe_down(adapter);
3614 /*
3615 * If SR-IOV enabled then wait a bit before bringing the adapter
3616 * back up to give the VFs time to respond to the reset. The
3617 * two second wait is based upon the watchdog timer cycle in
3618 * the VF driver.
3619 */
3620 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
3621 msleep(2000);
3622 ixgbe_up(adapter);
3623 clear_bit(__IXGBE_RESETTING, &adapter->state);
3624 }
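
/*
 * Editor's sketch (illustrative addition, compiled out): the
 * __IXGBE_RESETTING dance above is a common idiom for serializing
 * slow, sleepable reconfiguration paths without holding a mutex.
 * A minimal version of the pattern, assuming a hypothetical
 * reconfigure() callback:
 */
#if 0
static void example_serialized_reconfig(struct ixgbe_adapter *adapter)
{
	/* atomically claim the bit; sleep briefly and retry if held */
	while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
		msleep(1);

	reconfigure(adapter);		/* hypothetical slow path */

	/* release the bit so other callers may proceed */
	clear_bit(__IXGBE_RESETTING, &adapter->state);
}
#endif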
3625
3626 int ixgbe_up(struct ixgbe_adapter *adapter)
3627 {
3628 /* hardware has been reset, we need to reload some things */
3629 ixgbe_configure(adapter);
3630
3631 return ixgbe_up_complete(adapter);
3632 }
3633
3634 void ixgbe_reset(struct ixgbe_adapter *adapter)
3635 {
3636 struct ixgbe_hw *hw = &adapter->hw;
3637 int err;
3638
3639 err = hw->mac.ops.init_hw(hw);
3640 switch (err) {
3641 case 0:
3642 case IXGBE_ERR_SFP_NOT_PRESENT:
3643 break;
3644 case IXGBE_ERR_MASTER_REQUESTS_PENDING:
3645 e_dev_err("master disable timed out\n");
3646 break;
3647 case IXGBE_ERR_EEPROM_VERSION:
3648 /* We are running on a pre-production device, log a warning */
3649 e_dev_warn("This device is a pre-production adapter/LOM. "
3650 "Please be aware there may be issuesassociated with "
3651 "your hardware. If you are experiencing problems "
3652 "please contact your Intel or hardware "
3653 "representative who provided you with this "
3654 "hardware.\n");
3655 break;
3656 default:
3657 e_dev_err("Hardware Error: %d\n", err);
3658 }
3659
3660 /* reprogram the RAR[0] in case user changed it. */
3661 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
3662 IXGBE_RAH_AV);
3663 }
3664
3665 /**
3666 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
3667 * @rx_ring: ring to free buffers from
3668 **/
3669 static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
3670 {
3671 struct device *dev = rx_ring->dev;
3672 unsigned long size;
3673 u16 i;
3674
3675 /* ring already cleared, nothing to do */
3676 if (!rx_ring->rx_buffer_info)
3677 return;
3678
3679 /* Free all the Rx ring sk_buffs */
3680 for (i = 0; i < rx_ring->count; i++) {
3681 struct ixgbe_rx_buffer *rx_buffer_info;
3682
3683 rx_buffer_info = &rx_ring->rx_buffer_info[i];
3684 if (rx_buffer_info->dma) {
3685 dma_unmap_single(rx_ring->dev, rx_buffer_info->dma,
3686 rx_ring->rx_buf_len,
3687 DMA_FROM_DEVICE);
3688 rx_buffer_info->dma = 0;
3689 }
3690 if (rx_buffer_info->skb) {
3691 struct sk_buff *skb = rx_buffer_info->skb;
3692 rx_buffer_info->skb = NULL;
3693 do {
3694 struct sk_buff *this = skb;
3695 if (IXGBE_RSC_CB(this)->delay_unmap) {
3696 dma_unmap_single(dev,
3697 IXGBE_RSC_CB(this)->dma,
3698 rx_ring->rx_buf_len,
3699 DMA_FROM_DEVICE);
3700 IXGBE_RSC_CB(this)->dma = 0;
3701 IXGBE_RSC_CB(this)->delay_unmap = false;
3702 }
3703 skb = skb->prev;
3704 dev_kfree_skb(this);
3705 } while (skb);
3706 }
3707 if (!rx_buffer_info->page)
3708 continue;
3709 if (rx_buffer_info->page_dma) {
3710 dma_unmap_page(dev, rx_buffer_info->page_dma,
3711 PAGE_SIZE / 2, DMA_FROM_DEVICE);
3712 rx_buffer_info->page_dma = 0;
3713 }
3714 put_page(rx_buffer_info->page);
3715 rx_buffer_info->page = NULL;
3716 rx_buffer_info->page_offset = 0;
3717 }
3718
3719 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
3720 memset(rx_ring->rx_buffer_info, 0, size);
3721
3722 /* Zero out the descriptor ring */
3723 memset(rx_ring->desc, 0, rx_ring->size);
3724
3725 rx_ring->next_to_clean = 0;
3726 rx_ring->next_to_use = 0;
3727 }
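
/*
 * Editor's sketch (illustrative addition, compiled out): RSC-coalesced
 * receives are chained through skb->prev, which is why the cleanup
 * above walks and frees a whole chain rather than a single skb.
 * Assuming a chain built the same way, the walk reduces to:
 */
#if 0
static void example_free_skb_chain(struct sk_buff *skb)
{
	while (skb) {
		struct sk_buff *this = skb;

		skb = skb->prev;	/* advance before freeing */
		dev_kfree_skb(this);
	}
}
#endif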
3728
3729 /**
3730 * ixgbe_clean_tx_ring - Free Tx Buffers
3731 * @tx_ring: ring to be cleaned
3732 **/
3733 static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
3734 {
3735 struct ixgbe_tx_buffer *tx_buffer_info;
3736 unsigned long size;
3737 u16 i;
3738
3739 /* ring already cleared, nothing to do */
3740 if (!tx_ring->tx_buffer_info)
3741 return;
3742
3743 /* Free all the Tx ring sk_buffs */
3744 for (i = 0; i < tx_ring->count; i++) {
3745 tx_buffer_info = &tx_ring->tx_buffer_info[i];
3746 ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
3747 }
3748
3749 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
3750 memset(tx_ring->tx_buffer_info, 0, size);
3751
3752 /* Zero out the descriptor ring */
3753 memset(tx_ring->desc, 0, tx_ring->size);
3754
3755 tx_ring->next_to_use = 0;
3756 tx_ring->next_to_clean = 0;
3757 }
3758
3759 /**
3760 * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
3761 * @adapter: board private structure
3762 **/
3763 static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
3764 {
3765 int i;
3766
3767 for (i = 0; i < adapter->num_rx_queues; i++)
3768 ixgbe_clean_rx_ring(adapter->rx_ring[i]);
3769 }
3770
3771 /**
3772 * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
3773 * @adapter: board private structure
3774 **/
3775 static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
3776 {
3777 int i;
3778
3779 for (i = 0; i < adapter->num_tx_queues; i++)
3780 ixgbe_clean_tx_ring(adapter->tx_ring[i]);
3781 }
3782
3783 void ixgbe_down(struct ixgbe_adapter *adapter)
3784 {
3785 struct net_device *netdev = adapter->netdev;
3786 struct ixgbe_hw *hw = &adapter->hw;
3787 u32 rxctrl;
3788 u32 txdctl;
3789 int i, j;
3790 int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
3791
3792 /* signal that we are down to the interrupt handler */
3793 set_bit(__IXGBE_DOWN, &adapter->state);
3794
3795 /* disable receive for all VFs and wait one second */
3796 if (adapter->num_vfs) {
3797 /* ping all the active vfs to let them know we are going down */
3798 ixgbe_ping_all_vfs(adapter);
3799
3800 /* Disable all VFTE/VFRE TX/RX */
3801 ixgbe_disable_tx_rx(adapter);
3802
3803 /* Mark all the VFs as inactive */
3804 for (i = 0; i < adapter->num_vfs; i++)
3805 adapter->vfinfo[i].clear_to_send = 0;
3806 }
3807
3808 /* disable receives */
3809 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3810 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
3811
3812 IXGBE_WRITE_FLUSH(hw);
3813 msleep(10);
3814
3815 netif_tx_stop_all_queues(netdev);
3816
3817 clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
3818 del_timer_sync(&adapter->sfp_timer);
3819 del_timer_sync(&adapter->watchdog_timer);
3820 cancel_work_sync(&adapter->watchdog_task);
3821
3822 netif_carrier_off(netdev);
3823 netif_tx_disable(netdev);
3824
3825 ixgbe_irq_disable(adapter);
3826
3827 ixgbe_napi_disable_all(adapter);
3828
3829 /* Cleanup the affinity_hint CPU mask memory and callback */
3830 for (i = 0; i < num_q_vectors; i++) {
3831 struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
3832 /* clear the affinity_mask in the IRQ descriptor */
3833 irq_set_affinity_hint(adapter->msix_entries[i].vector, NULL);
3834 /* release the CPU mask memory */
3835 free_cpumask_var(q_vector->affinity_mask);
3836 }
3837
3838 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
3839 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
3840 cancel_work_sync(&adapter->fdir_reinit_task);
3841
3842 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
3843 cancel_work_sync(&adapter->check_overtemp_task);
3844
3845 /* disable transmits in the hardware now that interrupts are off */
3846 for (i = 0; i < adapter->num_tx_queues; i++) {
3847 j = adapter->tx_ring[i]->reg_idx;
3848 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
3849 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j),
3850 (txdctl & ~IXGBE_TXDCTL_ENABLE));
3851 }
3852 /* Disable the Tx DMA engine on 82599 */
3853 if (hw->mac.type == ixgbe_mac_82599EB)
3854 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
3855 (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
3856 ~IXGBE_DMATXCTL_TE));
3857
3858 /* power down the optics */
3859 if (hw->phy.multispeed_fiber)
3860 hw->mac.ops.disable_tx_laser(hw);
3861
3862 /* clear n-tuple filters that are cached */
3863 ethtool_ntuple_flush(netdev);
3864
3865 if (!pci_channel_offline(adapter->pdev))
3866 ixgbe_reset(adapter);
3867 ixgbe_clean_all_tx_rings(adapter);
3868 ixgbe_clean_all_rx_rings(adapter);
3869
3870 #ifdef CONFIG_IXGBE_DCA
3871 /* since we reset the hardware DCA settings were cleared */
3872 ixgbe_setup_dca(adapter);
3873 #endif
3874 }
3875
3876 /**
3877 * ixgbe_poll - NAPI Rx polling callback
3878 * @napi: structure for representing this polling device
3879 * @budget: how many packets driver is allowed to clean
3880 *
3881 * This function is used for legacy interrupts and MSI, i.e. single-queue NAPI mode
3882 **/
3883 static int ixgbe_poll(struct napi_struct *napi, int budget)
3884 {
3885 struct ixgbe_q_vector *q_vector =
3886 container_of(napi, struct ixgbe_q_vector, napi);
3887 struct ixgbe_adapter *adapter = q_vector->adapter;
3888 int tx_clean_complete, work_done = 0;
3889
3890 #ifdef CONFIG_IXGBE_DCA
3891 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
3892 ixgbe_update_tx_dca(adapter, adapter->tx_ring[0]);
3893 ixgbe_update_rx_dca(adapter, adapter->rx_ring[0]);
3894 }
3895 #endif
3896
3897 tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring[0]);
3898 ixgbe_clean_rx_irq(q_vector, adapter->rx_ring[0], &work_done, budget);
3899
3900 if (!tx_clean_complete)
3901 work_done = budget;
3902
3903 /* If budget not fully consumed, exit the polling mode */
3904 if (work_done < budget) {
3905 napi_complete(napi);
3906 if (adapter->rx_itr_setting & 1)
3907 ixgbe_set_itr(adapter);
3908 if (!test_bit(__IXGBE_DOWN, &adapter->state))
3909 ixgbe_irq_enable_queues(adapter, IXGBE_EIMS_RTX_QUEUE);
3910 }
3911 return work_done;
3912 }
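
/*
 * Editor's sketch (illustrative addition, compiled out): the NAPI
 * contract that ixgbe_poll() follows.  A poll routine may clean at
 * most 'budget' packets; if it cleans fewer it calls napi_complete()
 * and re-enables its interrupt, otherwise it reports the full budget
 * and will be polled again.  Assuming hypothetical clean_rx() and
 * enable_rx_irq() helpers:
 */
#if 0
static int example_napi_poll(struct napi_struct *napi, int budget)
{
	int work_done = clean_rx(napi, budget);	/* hypothetical */

	if (work_done < budget) {
		napi_complete(napi);
		enable_rx_irq(napi);		/* hypothetical */
	}
	return work_done;
}
#endif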
3913
3914 /**
3915 * ixgbe_tx_timeout - Respond to a Tx Hang
3916 * @netdev: network interface device structure
3917 **/
3918 static void ixgbe_tx_timeout(struct net_device *netdev)
3919 {
3920 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3921
3922 /* Do the reset outside of interrupt context */
3923 schedule_work(&adapter->reset_task);
3924 }
3925
3926 static void ixgbe_reset_task(struct work_struct *work)
3927 {
3928 struct ixgbe_adapter *adapter;
3929 adapter = container_of(work, struct ixgbe_adapter, reset_task);
3930
3931 /* If we're already down or resetting, just bail */
3932 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
3933 test_bit(__IXGBE_RESETTING, &adapter->state))
3934 return;
3935
3936 adapter->tx_timeout_count++;
3937
3938 ixgbe_dump(adapter);
3939 netdev_err(adapter->netdev, "Reset adapter\n");
3940 ixgbe_reinit_locked(adapter);
3941 }
3942
3943 #ifdef CONFIG_IXGBE_DCB
3944 static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
3945 {
3946 bool ret = false;
3947 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_DCB];
3948
3949 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
3950 return ret;
3951
3952 f->mask = 0x7 << 3;
3953 adapter->num_rx_queues = f->indices;
3954 adapter->num_tx_queues = f->indices;
3955 ret = true;
3956
3957 return ret;
3958 }
3959 #endif
3960
3961 /**
3962 * ixgbe_set_rss_queues: Allocate queues for RSS
3963 * @adapter: board private structure to initialize
3964 *
3965 * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try
3966 * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
3967 *
3968 **/
3969 static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
3970 {
3971 bool ret = false;
3972 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_RSS];
3973
3974 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
3975 f->mask = 0xF;
3976 adapter->num_rx_queues = f->indices;
3977 adapter->num_tx_queues = f->indices;
3978 ret = true;
3979 } else {
3980 ret = false;
3981 }
3982
3983 return ret;
3984 }
3985
3986 /**
3987 * ixgbe_set_fdir_queues: Allocate queues for Flow Director
3988 * @adapter: board private structure to initialize
3989 *
3990 * Flow Director is an advanced Rx filter, attempting to get Rx flows back
3991 * to the original CPU that initiated the Tx session. This runs in addition
3992 * to RSS, so if a packet doesn't match an FDIR filter, we can still spread the
3993 * Rx load across CPUs using RSS.
3994 *
3995 **/
3996 static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
3997 {
3998 bool ret = false;
3999 struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR];
4000
4001 f_fdir->indices = min((int)num_online_cpus(), f_fdir->indices);
4002 f_fdir->mask = 0;
4003
4004 /* Flow Director must have RSS enabled */
4005 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED &&
4006 ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
4007 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)))) {
4008 adapter->num_tx_queues = f_fdir->indices;
4009 adapter->num_rx_queues = f_fdir->indices;
4010 ret = true;
4011 } else {
4012 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
4013 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
4014 }
4015 return ret;
4016 }
4017
4018 #ifdef IXGBE_FCOE
4019 /**
4020 * ixgbe_set_fcoe_queues: Allocate queues for Fibre Channel over Ethernet (FCoE)
4021 * @adapter: board private structure to initialize
4022 *
4023 * FCoE RX FCRETA can use up to 8 rx queues for up to 8 different exchanges.
4024 * The ring feature mask is not used as a mask for FCoE; since FCoE can take
4025 * any 8 rx queues out of the max number of rx queues, the mask is instead
4026 * used as the index of the first rx queue used by FCoE.
4027 *
4028 **/
4029 static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
4030 {
4031 bool ret = false;
4032 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
4033
4034 f->indices = min((int)num_online_cpus(), f->indices);
4035 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
4036 adapter->num_rx_queues = 1;
4037 adapter->num_tx_queues = 1;
4038 #ifdef CONFIG_IXGBE_DCB
4039 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
4040 e_info(probe, "FCoE enabled with DCB\n");
4041 ixgbe_set_dcb_queues(adapter);
4042 }
4043 #endif
4044 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
4045 e_info(probe, "FCoE enabled with RSS\n");
4046 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
4047 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
4048 ixgbe_set_fdir_queues(adapter);
4049 else
4050 ixgbe_set_rss_queues(adapter);
4051 }
4052 /* adding FCoE rx rings to the end */
4053 f->mask = adapter->num_rx_queues;
4054 adapter->num_rx_queues += f->indices;
4055 adapter->num_tx_queues += f->indices;
4056
4057 ret = true;
4058 }
4059
4060 return ret;
4061 }
4062
4063 #endif /* IXGBE_FCOE */
4064 /**
4065 * ixgbe_set_sriov_queues: Allocate queues for IOV use
4066 * @adapter: board private structure to initialize
4067 *
4068 * IOV doesn't actually use anything, so just NAK the
4069 * request for now and let the other queue routines
4070 * figure out what to do.
4071 */
4072 static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
4073 {
4074 return false;
4075 }
4076
4077 /**
4078 * ixgbe_set_num_queues: Allocate queues for device, feature dependent
4079 * @adapter: board private structure to initialize
4080 *
4081 * This is the top level queue allocation routine. The order here is very
4082 * important, starting with the "most" number of features turned on at once,
4083 * and ending with the smallest set of features. This way large combinations
4084 * can be allocated if they're turned on, and smaller combinations are the
4085 * fallthrough conditions.
4086 *
4087 **/
4088 static int ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
4089 {
4090 /* Start with base case */
4091 adapter->num_rx_queues = 1;
4092 adapter->num_tx_queues = 1;
4093 adapter->num_rx_pools = adapter->num_rx_queues;
4094 adapter->num_rx_queues_per_pool = 1;
4095
4096 if (ixgbe_set_sriov_queues(adapter))
4097 goto done;
4098
4099 #ifdef IXGBE_FCOE
4100 if (ixgbe_set_fcoe_queues(adapter))
4101 goto done;
4102
4103 #endif /* IXGBE_FCOE */
4104 #ifdef CONFIG_IXGBE_DCB
4105 if (ixgbe_set_dcb_queues(adapter))
4106 goto done;
4107
4108 #endif
4109 if (ixgbe_set_fdir_queues(adapter))
4110 goto done;
4111
4112 if (ixgbe_set_rss_queues(adapter))
4113 goto done;
4114
4115 /* fallback to base case */
4116 adapter->num_rx_queues = 1;
4117 adapter->num_tx_queues = 1;
4118
4119 done:
4120 /* Notify the stack of the (possibly) reduced queue counts. */
4121 netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
4122 return netif_set_real_num_rx_queues(adapter->netdev,
4123 adapter->num_rx_queues);
4124 }
4125
4126 static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
4127 int vectors)
4128 {
4129 int err, vector_threshold;
4130
4131 /* We'll want at least 3 (vector_threshold):
4132 * 1) TxQ[0] Cleanup
4133 * 2) RxQ[0] Cleanup
4134 * 3) Other (Link Status Change, etc.)
4135 * 4) TCP Timer (optional)
4136 */
4137 vector_threshold = MIN_MSIX_COUNT;
4138
4139 /* The more we get, the more we will assign to Tx/Rx Cleanup
4140 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
4141 * Right now, we simply care about how many we'll get; we'll
4142 * set them up later while requesting IRQs.
4143 */
4144 while (vectors >= vector_threshold) {
4145 err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
4146 vectors);
4147 if (!err) /* Success in acquiring all requested vectors. */
4148 break;
4149 else if (err < 0)
4150 vectors = 0; /* Nasty failure, quit now */
4151 else /* err == number of vectors we should try again with */
4152 vectors = err;
4153 }
4154
4155 if (vectors < vector_threshold) {
4156 /* Can't allocate enough MSI-X interrupts? Oh well.
4157 * This just means we'll go with either a single MSI
4158 * vector or fall back to legacy interrupts.
4159 */
4160 netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
4161 "Unable to allocate MSI-X interrupts\n");
4162 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
4163 kfree(adapter->msix_entries);
4164 adapter->msix_entries = NULL;
4165 } else {
4166 adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
4167 /*
4168 * Adjust for only the vectors we'll use, which is minimum
4169 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
4170 * vectors we were allocated.
4171 */
4172 adapter->num_msix_vectors = min(vectors,
4173 adapter->max_msix_q_vectors + NON_Q_VECTORS);
4174 }
4175 }
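
/*
 * Editor's sketch (illustrative addition, compiled out): at this point
 * in the kernel, pci_enable_msix() has tri-state semantics -- 0 on
 * success, a negative errno on hard failure, or a positive count of
 * vectors that could have been allocated.  The loop above shrinks the
 * request accordingly; in isolation the pattern is:
 */
#if 0
static int example_msix_retry(struct pci_dev *pdev,
			      struct msix_entry *entries,
			      int want, int min)
{
	while (want >= min) {
		int err = pci_enable_msix(pdev, entries, want);

		if (!err)
			return want;	/* got everything requested */
		if (err < 0)
			return err;	/* hard failure, give up */
		want = err;		/* retry with what is available */
	}
	return -ENOSPC;
}
#endif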
4176
4177 /**
4178 * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
4179 * @adapter: board private structure to initialize
4180 *
4181 * Cache the descriptor ring offsets for RSS to the assigned rings.
4182 *
4183 **/
4184 static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
4185 {
4186 int i;
4187 bool ret = false;
4188
4189 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
4190 for (i = 0; i < adapter->num_rx_queues; i++)
4191 adapter->rx_ring[i]->reg_idx = i;
4192 for (i = 0; i < adapter->num_tx_queues; i++)
4193 adapter->tx_ring[i]->reg_idx = i;
4194 ret = true;
4195 } else {
4196 ret = false;
4197 }
4198
4199 return ret;
4200 }
4201
4202 #ifdef CONFIG_IXGBE_DCB
4203 /**
4204 * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
4205 * @adapter: board private structure to initialize
4206 *
4207 * Cache the descriptor ring offsets for DCB to the assigned rings.
4208 *
4209 **/
4210 static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
4211 {
4212 int i;
4213 bool ret = false;
4214 int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
4215
4216 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
4217 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
4218 /* the number of queues is assumed to be symmetric */
4219 for (i = 0; i < dcb_i; i++) {
4220 adapter->rx_ring[i]->reg_idx = i << 3;
4221 adapter->tx_ring[i]->reg_idx = i << 2;
4222 }
4223 ret = true;
4224 } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
4225 if (dcb_i == 8) {
4226 /*
4227 * Tx TC0 starts at: descriptor queue 0
4228 * Tx TC1 starts at: descriptor queue 32
4229 * Tx TC2 starts at: descriptor queue 64
4230 * Tx TC3 starts at: descriptor queue 80
4231 * Tx TC4 starts at: descriptor queue 96
4232 * Tx TC5 starts at: descriptor queue 104
4233 * Tx TC6 starts at: descriptor queue 112
4234 * Tx TC7 starts at: descriptor queue 120
4235 *
4236 * Rx TC0-TC7 are offset by 16 queues each
4237 */
4238 for (i = 0; i < 3; i++) {
4239 adapter->tx_ring[i]->reg_idx = i << 5;
4240 adapter->rx_ring[i]->reg_idx = i << 4;
4241 }
4242 for ( ; i < 5; i++) {
4243 adapter->tx_ring[i]->reg_idx =
4244 ((i + 2) << 4);
4245 adapter->rx_ring[i]->reg_idx = i << 4;
4246 }
4247 for ( ; i < dcb_i; i++) {
4248 adapter->tx_ring[i]->reg_idx =
4249 ((i + 8) << 3);
4250 adapter->rx_ring[i]->reg_idx = i << 4;
4251 }
4252
4253 ret = true;
4254 } else if (dcb_i == 4) {
4255 /*
4256 * Tx TC0 starts at: descriptor queue 0
4257 * Tx TC1 starts at: descriptor queue 64
4258 * Tx TC2 starts at: descriptor queue 96
4259 * Tx TC3 starts at: descriptor queue 112
4260 *
4261 * Rx TC0-TC3 are offset by 32 queues each
4262 */
4263 adapter->tx_ring[0]->reg_idx = 0;
4264 adapter->tx_ring[1]->reg_idx = 64;
4265 adapter->tx_ring[2]->reg_idx = 96;
4266 adapter->tx_ring[3]->reg_idx = 112;
4267 for (i = 0; i < dcb_i; i++)
4268 adapter->rx_ring[i]->reg_idx = i << 5;
4269
4270 ret = true;
4271 } else {
4272 ret = false;
4273 }
4274 } else {
4275 ret = false;
4276 }
4277 } else {
4278 ret = false;
4279 }
4280
4281 return ret;
4282 }
4283 #endif
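
/*
 * Worked example (editor's addition): for the 82599 8-TC case above,
 * the three loops produce exactly the Tx offsets listed in the block
 * comment:
 *
 *   i = 0..2: reg_idx = i << 5       ->   0,  32,  64
 *   i = 3..4: reg_idx = (i + 2) << 4 ->  80,  96
 *   i = 5..7: reg_idx = (i + 8) << 3 -> 104, 112, 120
 *
 * while rx reg_idx = i << 4 spaces the Rx TCs 16 queues apart.
 */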
4284
4285 /**
4286 * ixgbe_cache_ring_fdir - Descriptor ring to register mapping for Flow Director
4287 * @adapter: board private structure to initialize
4288 *
4289 * Cache the descriptor ring offsets for Flow Director to the assigned rings.
4290 *
4291 **/
4292 static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
4293 {
4294 int i;
4295 bool ret = false;
4296
4297 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED &&
4298 ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
4299 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))) {
4300 for (i = 0; i < adapter->num_rx_queues; i++)
4301 adapter->rx_ring[i]->reg_idx = i;
4302 for (i = 0; i < adapter->num_tx_queues; i++)
4303 adapter->tx_ring[i]->reg_idx = i;
4304 ret = true;
4305 }
4306
4307 return ret;
4308 }
4309
4310 #ifdef IXGBE_FCOE
4311 /**
4312 * ixgbe_cache_ring_fcoe - Descriptor ring to register mapping for the FCoE
4313 * @adapter: board private structure to initialize
4314 *
4315 * Cache the descriptor ring offsets for FCoE mode to the assigned rings.
4316 *
4317 */
4318 static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
4319 {
4320 int i, fcoe_rx_i = 0, fcoe_tx_i = 0;
4321 bool ret = false;
4322 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
4323
4324 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
4325 #ifdef CONFIG_IXGBE_DCB
4326 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
4327 struct ixgbe_fcoe *fcoe = &adapter->fcoe;
4328
4329 ixgbe_cache_ring_dcb(adapter);
4330 /* find out queues in TC for FCoE */
4331 fcoe_rx_i = adapter->rx_ring[fcoe->tc]->reg_idx + 1;
4332 fcoe_tx_i = adapter->tx_ring[fcoe->tc]->reg_idx + 1;
4333 /*
4334 * In 82599, the number of Tx queues for each traffic
4335 * class for both 8-TC and 4-TC modes are:
4336 * TCs : TC0 TC1 TC2 TC3 TC4 TC5 TC6 TC7
4337 * 8 TCs: 32 32 16 16 8 8 8 8
4338 * 4 TCs: 64 64 32 32
4339 * We have max 8 queues for FCoE, where 8 is the
4340 * FCoE redirection table size. If TC for FCoE is
4341 * less than or equal to TC3, we have enough queues
4342 * to add max of 8 queues for FCoE, so we start FCoE
4343 * tx descriptor from the next one, i.e., reg_idx + 1.
4344 * If TC for FCoE is above TC3, implying 8 TC mode,
4345 * and we need 8 for FCoE, we have to take all queues
4346 * in that traffic class for FCoE.
4347 */
4348 if ((f->indices == IXGBE_FCRETA_SIZE) && (fcoe->tc > 3))
4349 fcoe_tx_i--;
4350 }
4351 #endif /* CONFIG_IXGBE_DCB */
4352 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
4353 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
4354 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
4355 ixgbe_cache_ring_fdir(adapter);
4356 else
4357 ixgbe_cache_ring_rss(adapter);
4358
4359 fcoe_rx_i = f->mask;
4360 fcoe_tx_i = f->mask;
4361 }
4362 for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
4363 adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i;
4364 adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i;
4365 }
4366 ret = true;
4367 }
4368 return ret;
4369 }
4370
4371 #endif /* IXGBE_FCOE */
4372 /**
4373 * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov
4374 * @adapter: board private structure to initialize
4375 *
4376 * SR-IOV doesn't use any descriptor rings but changes the default if
4377 * no other mapping is used.
4378 *
4379 */
4380 static inline bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
4381 {
4382 adapter->rx_ring[0]->reg_idx = adapter->num_vfs * 2;
4383 adapter->tx_ring[0]->reg_idx = adapter->num_vfs * 2;
4384 if (adapter->num_vfs)
4385 return true;
4386 else
4387 return false;
4388 }
4389
4390 /**
4391 * ixgbe_cache_ring_register - Descriptor ring to register mapping
4392 * @adapter: board private structure to initialize
4393 *
4394 * Once we know the feature-set enabled for the device, we'll cache
4395 * the register offset the descriptor ring is assigned to.
4396 *
4397 * Note, the order of the various feature calls is important. It must start with
4398 * the "most" features enabled at the same time, then trickle down to the
4399 * least amount of features turned on at once.
4400 **/
4401 static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
4402 {
4403 /* start with default case */
4404 adapter->rx_ring[0]->reg_idx = 0;
4405 adapter->tx_ring[0]->reg_idx = 0;
4406
4407 if (ixgbe_cache_ring_sriov(adapter))
4408 return;
4409
4410 #ifdef IXGBE_FCOE
4411 if (ixgbe_cache_ring_fcoe(adapter))
4412 return;
4413
4414 #endif /* IXGBE_FCOE */
4415 #ifdef CONFIG_IXGBE_DCB
4416 if (ixgbe_cache_ring_dcb(adapter))
4417 return;
4418
4419 #endif
4420 if (ixgbe_cache_ring_fdir(adapter))
4421 return;
4422
4423 if (ixgbe_cache_ring_rss(adapter))
4424 return;
4425 }
4426
4427 /**
4428 * ixgbe_alloc_queues - Allocate memory for all rings
4429 * @adapter: board private structure to initialize
4430 *
4431 * We allocate one ring per queue at run-time since we don't know the
4432 * number of queues at compile-time. Rings are allocated on the
4433 * adapter's NUMA node when possible, falling back to any node.
4434 **/
4435 static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
4436 {
4437 int i;
4438 int rx_count;
4439 int orig_node = adapter->node;
4440
4441 for (i = 0; i < adapter->num_tx_queues; i++) {
4442 struct ixgbe_ring *ring = adapter->tx_ring[i];
4443 if (orig_node == -1) {
4444 int cur_node = next_online_node(adapter->node);
4445 if (cur_node == MAX_NUMNODES)
4446 cur_node = first_online_node;
4447 adapter->node = cur_node;
4448 }
4449 ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL,
4450 adapter->node);
4451 if (!ring)
4452 ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL);
4453 if (!ring)
4454 goto err_tx_ring_allocation;
4455 ring->count = adapter->tx_ring_count;
4456 ring->queue_index = i;
4457 ring->dev = &adapter->pdev->dev;
4458 ring->numa_node = adapter->node;
4459
4460 adapter->tx_ring[i] = ring;
4461 }
4462
4463 /* Restore the adapter's original node */
4464 adapter->node = orig_node;
4465
4466 rx_count = adapter->rx_ring_count;
4467 for (i = 0; i < adapter->num_rx_queues; i++) {
4468 struct ixgbe_ring *ring = adapter->rx_ring[i];
4469 if (orig_node == -1) {
4470 int cur_node = next_online_node(adapter->node);
4471 if (cur_node == MAX_NUMNODES)
4472 cur_node = first_online_node;
4473 adapter->node = cur_node;
4474 }
4475 ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL,
4476 adapter->node);
4477 if (!ring)
4478 ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL);
4479 if (!ring)
4480 goto err_rx_ring_allocation;
4481 ring->count = rx_count;
4482 ring->queue_index = i;
4483 ring->dev = &adapter->pdev->dev;
4484 ring->numa_node = adapter->node;
4485
4486 adapter->rx_ring[i] = ring;
4487 }
4488
4489 /* Restore the adapter's original node */
4490 adapter->node = orig_node;
4491
4492 ixgbe_cache_ring_register(adapter);
4493
4494 return 0;
4495
4496 err_rx_ring_allocation:
4497 for (i = 0; i < adapter->num_tx_queues; i++)
4498 kfree(adapter->tx_ring[i]);
4499 err_tx_ring_allocation:
4500 return -ENOMEM;
4501 }
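
/*
 * Editor's sketch (illustrative addition, compiled out): the ring
 * allocations above try the queue's NUMA node first and then fall
 * back to any node, keeping descriptor bookkeeping local to the CPU
 * servicing the queue without failing on memory-less nodes:
 */
#if 0
static void *example_alloc_local_then_any(size_t size, int node)
{
	void *p = kzalloc_node(size, GFP_KERNEL, node);

	if (!p)
		p = kzalloc(size, GFP_KERNEL);	/* any-node fallback */
	return p;
}
#endif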
4502
4503 /**
4504 * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
4505 * @adapter: board private structure to initialize
4506 *
4507 * Attempt to configure the interrupts using the best available
4508 * capabilities of the hardware and the kernel.
4509 **/
4510 static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
4511 {
4512 struct ixgbe_hw *hw = &adapter->hw;
4513 int err = 0;
4514 int vector, v_budget;
4515
4516 /*
4517 * It's easy to be greedy for MSI-X vectors, but it really
4518 * doesn't do us much good if we have a lot more vectors
4519 * than CPU's. So let's be conservative and only ask for
4520 * (roughly) the same number of vectors as there are CPU's.
4521 */
4522 v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
4523 (int)num_online_cpus()) + NON_Q_VECTORS;
4524
4525 /*
4526 * At the same time, hardware can only support a maximum of
4527 * hw.mac->max_msix_vectors vectors. With features
4528 * such as RSS and VMDq, we can easily surpass the number of Rx and Tx
4529 * descriptor queues supported by our device. Thus, we cap it off in
4530 * those rare cases where the cpu count also exceeds our vector limit.
4531 */
4532 v_budget = min(v_budget, (int)hw->mac.max_msix_vectors);
4533
4534 /* A failure in MSI-X entry allocation isn't fatal, but it does
4535 * mean we disable MSI-X capabilities of the adapter. */
4536 adapter->msix_entries = kcalloc(v_budget,
4537 sizeof(struct msix_entry), GFP_KERNEL);
4538 if (adapter->msix_entries) {
4539 for (vector = 0; vector < v_budget; vector++)
4540 adapter->msix_entries[vector].entry = vector;
4541
4542 ixgbe_acquire_msix_vectors(adapter, v_budget);
4543
4544 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
4545 goto out;
4546 }
4547
4548 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
4549 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
4550 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
4551 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
4552 adapter->atr_sample_rate = 0;
4553 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
4554 ixgbe_disable_sriov(adapter);
4555
4556 err = ixgbe_set_num_queues(adapter);
4557 if (err)
4558 return err;
4559
4560 err = pci_enable_msi(adapter->pdev);
4561 if (!err) {
4562 adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
4563 } else {
4564 netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
4565 "Unable to allocate MSI interrupt, "
4566 "falling back to legacy. Error: %d\n", err);
4567 /* reset err */
4568 err = 0;
4569 }
4570
4571 out:
4572 return err;
4573 }
4574
4575 /**
4576 * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
4577 * @adapter: board private structure to initialize
4578 *
4579 * We allocate one q_vector per queue interrupt. If allocation fails we
4580 * return -ENOMEM.
4581 **/
4582 static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
4583 {
4584 int q_idx, num_q_vectors;
4585 struct ixgbe_q_vector *q_vector;
4586 int napi_vectors;
4587 int (*poll)(struct napi_struct *, int);
4588
4589 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
4590 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
4591 napi_vectors = adapter->num_rx_queues;
4592 poll = &ixgbe_clean_rxtx_many;
4593 } else {
4594 num_q_vectors = 1;
4595 napi_vectors = 1;
4596 poll = &ixgbe_poll;
4597 }
4598
4599 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
4600 q_vector = kzalloc_node(sizeof(struct ixgbe_q_vector),
4601 GFP_KERNEL, adapter->node);
4602 if (!q_vector)
4603 q_vector = kzalloc(sizeof(struct ixgbe_q_vector),
4604 GFP_KERNEL);
4605 if (!q_vector)
4606 goto err_out;
4607 q_vector->adapter = adapter;
4608 if (q_vector->txr_count && !q_vector->rxr_count)
4609 q_vector->eitr = adapter->tx_eitr_param;
4610 else
4611 q_vector->eitr = adapter->rx_eitr_param;
4612 q_vector->v_idx = q_idx;
4613 netif_napi_add(adapter->netdev, &q_vector->napi, (*poll), 64);
4614 adapter->q_vector[q_idx] = q_vector;
4615 }
4616
4617 return 0;
4618
4619 err_out:
4620 while (q_idx) {
4621 q_idx--;
4622 q_vector = adapter->q_vector[q_idx];
4623 netif_napi_del(&q_vector->napi);
4624 kfree(q_vector);
4625 adapter->q_vector[q_idx] = NULL;
4626 }
4627 return -ENOMEM;
4628 }
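
/*
 * Editor's sketch (illustrative addition, compiled out): the err_out
 * path above is the standard "unwind what you built" idiom -- walk the
 * index back down and undo each successful iteration.  Generic shape,
 * assuming hypothetical make()/destroy() helpers:
 */
#if 0
static int example_build_all(int n)
{
	int i;

	for (i = 0; i < n; i++)
		if (make(i))		/* hypothetical, 0 on success */
			goto err_out;
	return 0;

err_out:
	while (i--)
		destroy(i);		/* hypothetical */
	return -ENOMEM;
}
#endif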
4629
4630 /**
4631 * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
4632 * @adapter: board private structure to initialize
4633 *
4634 * This function frees the memory allocated to the q_vectors. In addition if
4635 * NAPI is enabled it will delete any references to the NAPI struct prior
4636 * to freeing the q_vector.
4637 **/
4638 static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
4639 {
4640 int q_idx, num_q_vectors;
4641
4642 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
4643 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
4644 else
4645 num_q_vectors = 1;
4646
4647 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
4648 struct ixgbe_q_vector *q_vector = adapter->q_vector[q_idx];
4649 adapter->q_vector[q_idx] = NULL;
4650 netif_napi_del(&q_vector->napi);
4651 kfree(q_vector);
4652 }
4653 }
4654
4655 static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
4656 {
4657 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
4658 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
4659 pci_disable_msix(adapter->pdev);
4660 kfree(adapter->msix_entries);
4661 adapter->msix_entries = NULL;
4662 } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
4663 adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
4664 pci_disable_msi(adapter->pdev);
4665 }
4666 }
4667
4668 /**
4669 * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
4670 * @adapter: board private structure to initialize
4671 *
4672 * We determine which interrupt scheme to use based on...
4673 * - Kernel support (MSI, MSI-X)
4674 * - which can be user-defined (via MODULE_PARAM)
4675 * - Hardware queue count (num_*_queues)
4676 * - defined by miscellaneous hardware support/features (RSS, etc.)
4677 **/
4678 int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
4679 {
4680 int err;
4681
4682 /* Number of supported queues */
4683 err = ixgbe_set_num_queues(adapter);
4684 if (err)
4685 return err;
4686
4687 err = ixgbe_set_interrupt_capability(adapter);
4688 if (err) {
4689 e_dev_err("Unable to setup interrupt capabilities\n");
4690 goto err_set_interrupt;
4691 }
4692
4693 err = ixgbe_alloc_q_vectors(adapter);
4694 if (err) {
4695 e_dev_err("Unable to allocate memory for queue vectors\n");
4696 goto err_alloc_q_vectors;
4697 }
4698
4699 err = ixgbe_alloc_queues(adapter);
4700 if (err) {
4701 e_dev_err("Unable to allocate memory for queues\n");
4702 goto err_alloc_queues;
4703 }
4704
4705 e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",
4706 (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
4707 adapter->num_rx_queues, adapter->num_tx_queues);
4708
4709 set_bit(__IXGBE_DOWN, &adapter->state);
4710
4711 return 0;
4712
4713 err_alloc_queues:
4714 ixgbe_free_q_vectors(adapter);
4715 err_alloc_q_vectors:
4716 ixgbe_reset_interrupt_capability(adapter);
4717 err_set_interrupt:
4718 return err;
4719 }
4720
4721 static void ring_free_rcu(struct rcu_head *head)
4722 {
4723 kfree(container_of(head, struct ixgbe_ring, rcu));
4724 }
4725
4726 /**
4727 * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
4728 * @adapter: board private structure to clear interrupt scheme on
4729 *
4730 * We go through and clear interrupt specific resources and reset the structure
4731 * to pre-load conditions
4732 **/
4733 void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
4734 {
4735 int i;
4736
4737 for (i = 0; i < adapter->num_tx_queues; i++) {
4738 kfree(adapter->tx_ring[i]);
4739 adapter->tx_ring[i] = NULL;
4740 }
4741 for (i = 0; i < adapter->num_rx_queues; i++) {
4742 struct ixgbe_ring *ring = adapter->rx_ring[i];
4743
4744 /* ixgbe_get_stats64() might access this ring, we must wait
4745 * a grace period before freeing it.
4746 */
4747 call_rcu(&ring->rcu, ring_free_rcu);
4748 adapter->rx_ring[i] = NULL;
4749 }
4750
4751 ixgbe_free_q_vectors(adapter);
4752 ixgbe_reset_interrupt_capability(adapter);
4753 }
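
/*
 * Editor's sketch (illustrative addition, compiled out): since readers
 * such as ixgbe_get_stats64() may traverse rx_ring[] under
 * rcu_read_lock(), the rings above are handed to call_rcu() instead of
 * being kfree()d directly, deferring the free until all pre-existing
 * readers are done.  The generic shape of the pattern:
 */
#if 0
struct example_obj {
	struct rcu_head rcu;
	/* ... payload ... */
};

static void example_obj_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct example_obj, rcu));
}

static void example_obj_release(struct example_obj *obj)
{
	/* safe even while readers still hold references */
	call_rcu(&obj->rcu, example_obj_free_rcu);
}
#endif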
4754
4755 /**
4756 * ixgbe_sfp_timer - timer callback to schedule the search for a missing module
4757 * @data: pointer to our adapter struct
4758 **/
4759 static void ixgbe_sfp_timer(unsigned long data)
4760 {
4761 struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
4762
4763 /*
4764 * Do the sfp_timer outside of interrupt context due to the
4765 * delays that sfp+ detection requires
4766 */
4767 schedule_work(&adapter->sfp_task);
4768 }
4769
4770 /**
4771 * ixgbe_sfp_task - worker thread to find a missing module
4772 * @work: pointer to work_struct containing our data
4773 **/
4774 static void ixgbe_sfp_task(struct work_struct *work)
4775 {
4776 struct ixgbe_adapter *adapter = container_of(work,
4777 struct ixgbe_adapter,
4778 sfp_task);
4779 struct ixgbe_hw *hw = &adapter->hw;
4780
4781 if ((hw->phy.type == ixgbe_phy_nl) &&
4782 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
4783 s32 ret = hw->phy.ops.identify_sfp(hw);
4784 if (ret == IXGBE_ERR_SFP_NOT_PRESENT)
4785 goto reschedule;
4786 ret = hw->phy.ops.reset(hw);
4787 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4788 e_dev_err("failed to initialize because an unsupported "
4789 "SFP+ module type was detected.\n");
4790 e_dev_err("Reload the driver after installing a "
4791 "supported module.\n");
4792 unregister_netdev(adapter->netdev);
4793 } else {
4794 e_info(probe, "detected SFP+: %d\n", hw->phy.sfp_type);
4795 }
4796 /* don't need this routine any more */
4797 clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
4798 }
4799 return;
4800 reschedule:
4801 if (test_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state))
4802 mod_timer(&adapter->sfp_timer,
4803 round_jiffies(jiffies + (2 * HZ)));
4804 }
4805
4806 /**
4807 * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
4808 * @adapter: board private structure to initialize
4809 *
4810 * ixgbe_sw_init initializes the Adapter private data structure.
4811 * Fields are initialized based on PCI device information and
4812 * OS network device settings (MTU size).
4813 **/
4814 static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
4815 {
4816 struct ixgbe_hw *hw = &adapter->hw;
4817 struct pci_dev *pdev = adapter->pdev;
4818 struct net_device *dev = adapter->netdev;
4819 unsigned int rss;
4820 #ifdef CONFIG_IXGBE_DCB
4821 int j;
4822 struct tc_configuration *tc;
4823 #endif
4824 int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
4825
4826 /* PCI config space info */
4827
4828 hw->vendor_id = pdev->vendor;
4829 hw->device_id = pdev->device;
4830 hw->revision_id = pdev->revision;
4831 hw->subsystem_vendor_id = pdev->subsystem_vendor;
4832 hw->subsystem_device_id = pdev->subsystem_device;
4833
4834 /* Set capability flags */
4835 rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus());
4836 adapter->ring_feature[RING_F_RSS].indices = rss;
4837 adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
4838 adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES;
4839 if (hw->mac.type == ixgbe_mac_82598EB) {
4840 if (hw->device_id == IXGBE_DEV_ID_82598AT)
4841 adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
4842 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598;
4843 } else if (hw->mac.type == ixgbe_mac_82599EB) {
4844 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
4845 adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
4846 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
4847 if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
4848 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
4849 if (dev->features & NETIF_F_NTUPLE) {
4850 /* Flow Director perfect filter enabled */
4851 adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
4852 adapter->atr_sample_rate = 0;
4853 spin_lock_init(&adapter->fdir_perfect_lock);
4854 } else {
4855 /* Flow Director hash filters enabled */
4856 adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
4857 adapter->atr_sample_rate = 20;
4858 }
4859 adapter->ring_feature[RING_F_FDIR].indices =
4860 IXGBE_MAX_FDIR_INDICES;
4861 adapter->fdir_pballoc = 0;
4862 #ifdef IXGBE_FCOE
4863 adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
4864 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
4865 adapter->ring_feature[RING_F_FCOE].indices = 0;
4866 #ifdef CONFIG_IXGBE_DCB
4867 /* Default traffic class to use for FCoE */
4868 adapter->fcoe.tc = IXGBE_FCOE_DEFTC;
4869 adapter->fcoe.up = IXGBE_FCOE_DEFTC;
4870 #endif
4871 #endif /* IXGBE_FCOE */
4872 }
4873
4874 #ifdef CONFIG_IXGBE_DCB
4875 /* Configure DCB traffic classes */
4876 for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
4877 tc = &adapter->dcb_cfg.tc_config[j];
4878 tc->path[DCB_TX_CONFIG].bwg_id = 0;
4879 tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
4880 tc->path[DCB_RX_CONFIG].bwg_id = 0;
4881 tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
4882 tc->dcb_pfc = pfc_disabled;
4883 }
4884 adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
4885 adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
4886 adapter->dcb_cfg.rx_pba_cfg = pba_equal;
4887 adapter->dcb_cfg.pfc_mode_enable = false;
4888 adapter->dcb_cfg.round_robin_enable = false;
4889 adapter->dcb_set_bitmap = 0x00;
4890 ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg,
4891 adapter->ring_feature[RING_F_DCB].indices);
4892
4893 #endif
4894
4895 /* default flow control settings */
4896 hw->fc.requested_mode = ixgbe_fc_full;
4897 hw->fc.current_mode = ixgbe_fc_full; /* init for ethtool output */
4898 #ifdef CONFIG_DCB
4899 adapter->last_lfc_mode = hw->fc.current_mode;
4900 #endif
4901 hw->fc.high_water = FC_HIGH_WATER(max_frame);
4902 hw->fc.low_water = FC_LOW_WATER(max_frame);
4903 hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
4904 hw->fc.send_xon = true;
4905 hw->fc.disable_fc_autoneg = false;
4906
4907 /* enable itr by default in dynamic mode */
4908 adapter->rx_itr_setting = 1;
4909 adapter->rx_eitr_param = 20000;
4910 adapter->tx_itr_setting = 1;
4911 adapter->tx_eitr_param = 10000;
4912
4913 /* set defaults for eitr in MegaBytes */
4914 adapter->eitr_low = 10;
4915 adapter->eitr_high = 20;
4916
4917 /* set default ring sizes */
4918 adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
4919 adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
4920
4921 /* initialize eeprom parameters */
4922 if (ixgbe_init_eeprom_params_generic(hw)) {
4923 e_dev_err("EEPROM initialization failed\n");
4924 return -EIO;
4925 }
4926
4927 /* enable rx csum by default */
4928 adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
4929
4930 /* get assigned NUMA node */
4931 adapter->node = dev_to_node(&pdev->dev);
4932
4933 set_bit(__IXGBE_DOWN, &adapter->state);
4934
4935 return 0;
4936 }
4937
4938 /**
4939 * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
4940 * @tx_ring: tx descriptor ring (for a specific queue) to setup
4941 *
4942 * Return 0 on success, negative on failure
4943 **/
4944 int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
4945 {
4946 struct device *dev = tx_ring->dev;
4947 int size;
4948
4949 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
4950 tx_ring->tx_buffer_info = vmalloc_node(size, tx_ring->numa_node);
4951 if (!tx_ring->tx_buffer_info)
4952 tx_ring->tx_buffer_info = vmalloc(size);
4953 if (!tx_ring->tx_buffer_info)
4954 goto err;
4955 memset(tx_ring->tx_buffer_info, 0, size);
4956
4957 /* round up to nearest 4K */
4958 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
4959 tx_ring->size = ALIGN(tx_ring->size, 4096);
4960
4961 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
4962 &tx_ring->dma, GFP_KERNEL);
4963 if (!tx_ring->desc)
4964 goto err;
4965
4966 tx_ring->next_to_use = 0;
4967 tx_ring->next_to_clean = 0;
4968 tx_ring->work_limit = tx_ring->count;
4969 return 0;
4970
4971 err:
4972 vfree(tx_ring->tx_buffer_info);
4973 tx_ring->tx_buffer_info = NULL;
4974 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
4975 return -ENOMEM;
4976 }
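
/*
 * Worked example (editor's addition): each union ixgbe_adv_tx_desc is
 * 16 bytes, so assuming the default ring of 512 descriptors the ring
 * occupies 512 * 16 = 8192 bytes and ALIGN(8192, 4096) leaves it
 * unchanged; an odd count such as 300 would give 4800 bytes, rounded
 * up to 8192 by the 4K alignment above.
 */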
4977
4978 /**
4979 * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
4980 * @adapter: board private structure
4981 *
4982 * If this function returns with an error, then it's possible one or
4983 * more of the rings is populated (while the rest are not). It is the
4984 * caller's duty to clean those orphaned rings.
4985 *
4986 * Return 0 on success, negative on failure
4987 **/
4988 static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
4989 {
4990 int i, err = 0;
4991
4992 for (i = 0; i < adapter->num_tx_queues; i++) {
4993 err = ixgbe_setup_tx_resources(adapter->tx_ring[i]);
4994 if (!err)
4995 continue;
4996 e_err(probe, "Allocation for Tx Queue %u failed\n", i);
4997 break;
4998 }
4999
5000 return err;
5001 }
5002
5003 /**
5004 * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
5005 * @rx_ring: rx descriptor ring (for a specific queue) to setup
5006 *
5007 * Returns 0 on success, negative on failure
5008 **/
5009 int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
5010 {
5011 struct device *dev = rx_ring->dev;
5012 int size;
5013
5014 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
5015 rx_ring->rx_buffer_info = vmalloc_node(size, rx_ring->numa_node);
5016 if (!rx_ring->rx_buffer_info)
5017 rx_ring->rx_buffer_info = vmalloc(size);
5018 if (!rx_ring->rx_buffer_info)
5019 goto err;
5020 memset(rx_ring->rx_buffer_info, 0, size);
5021
5022 /* Round up to nearest 4K */
5023 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
5024 rx_ring->size = ALIGN(rx_ring->size, 4096);
5025
5026 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
5027 &rx_ring->dma, GFP_KERNEL);
5028
5029 if (!rx_ring->desc)
5030 goto err;
5031
5032 rx_ring->next_to_clean = 0;
5033 rx_ring->next_to_use = 0;
5034
5035 return 0;
5036 err:
5037 vfree(rx_ring->rx_buffer_info);
5038 rx_ring->rx_buffer_info = NULL;
5039 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
5040 return -ENOMEM;
5041 }
5042
5043 /**
5044 * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
5045 * @adapter: board private structure
5046 *
5047 * If this function returns with an error, then it's possible one or
5048 * more of the rings is populated (while the rest are not). It is the
5049 * caller's duty to clean those orphaned rings.
5050 *
5051 * Return 0 on success, negative on failure
5052 **/
5053 static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
5054 {
5055 int i, err = 0;
5056
5057 for (i = 0; i < adapter->num_rx_queues; i++) {
5058 err = ixgbe_setup_rx_resources(adapter->rx_ring[i]);
5059 if (!err)
5060 continue;
5061 e_err(probe, "Allocation for Rx Queue %u failed\n", i);
5062 break;
5063 }
5064
5065 return err;
5066 }
5067
5068 /**
5069 * ixgbe_free_tx_resources - Free Tx Resources per Queue
5070 * @tx_ring: Tx descriptor ring for a specific queue
5071 *
5072 * Free all transmit software resources
5073 **/
5074 void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring)
5075 {
5076 ixgbe_clean_tx_ring(tx_ring);
5077
5078 vfree(tx_ring->tx_buffer_info);
5079 tx_ring->tx_buffer_info = NULL;
5080
5081 /* if not set, then don't free */
5082 if (!tx_ring->desc)
5083 return;
5084
5085 dma_free_coherent(tx_ring->dev, tx_ring->size,
5086 tx_ring->desc, tx_ring->dma);
5087
5088 tx_ring->desc = NULL;
5089 }
5090
5091 /**
5092 * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues
5093 * @adapter: board private structure
5094 *
5095 * Free all transmit software resources
5096 **/
5097 static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
5098 {
5099 int i;
5100
5101 for (i = 0; i < adapter->num_tx_queues; i++)
5102 if (adapter->tx_ring[i]->desc)
5103 ixgbe_free_tx_resources(adapter->tx_ring[i]);
5104 }
5105
5106 /**
5107 * ixgbe_free_rx_resources - Free Rx Resources
5108 * @rx_ring: ring to clean the resources from
5109 *
5110 * Free all receive software resources
5111 **/
5112 void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring)
5113 {
5114 ixgbe_clean_rx_ring(rx_ring);
5115
5116 vfree(rx_ring->rx_buffer_info);
5117 rx_ring->rx_buffer_info = NULL;
5118
5119 /* if not set, then don't free */
5120 if (!rx_ring->desc)
5121 return;
5122
5123 dma_free_coherent(rx_ring->dev, rx_ring->size,
5124 rx_ring->desc, rx_ring->dma);
5125
5126 rx_ring->desc = NULL;
5127 }
5128
5129 /**
5130 * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues
5131 * @adapter: board private structure
5132 *
5133 * Free all receive software resources
5134 **/
5135 static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
5136 {
5137 int i;
5138
5139 for (i = 0; i < adapter->num_rx_queues; i++)
5140 if (adapter->rx_ring[i]->desc)
5141 ixgbe_free_rx_resources(adapter->rx_ring[i]);
5142 }
5143
5144 /**
5145 * ixgbe_change_mtu - Change the Maximum Transfer Unit
5146 * @netdev: network interface device structure
5147 * @new_mtu: new value for maximum frame size
5148 *
5149 * Returns 0 on success, negative on failure
5150 **/
5151 static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
5152 {
5153 struct ixgbe_adapter *adapter = netdev_priv(netdev);
5154 struct ixgbe_hw *hw = &adapter->hw;
5155 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
5156
5157 /* MTU < 68 is an error and causes problems on some kernels */
5158 if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
5159 return -EINVAL;
5160
5161 e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
5162 /* must set new MTU before calling down or up */
5163 netdev->mtu = new_mtu;
5164
5165 hw->fc.high_water = FC_HIGH_WATER(max_frame);
5166 hw->fc.low_water = FC_LOW_WATER(max_frame);
5167
5168 if (netif_running(netdev))
5169 ixgbe_reinit_locked(adapter);
5170
5171 return 0;
5172 }
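
/*
 * Worked example (editor's addition): max_frame adds the 14-byte
 * Ethernet header (ETH_HLEN) and 4-byte FCS (ETH_FCS_LEN) to the MTU,
 * so the standard 1500-byte MTU yields 1500 + 14 + 4 = 1518 bytes on
 * the wire for an untagged frame.
 */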
5173
5174 /**
5175 * ixgbe_open - Called when a network interface is made active
5176 * @netdev: network interface device structure
5177 *
5178 * Returns 0 on success, negative value on failure
5179 *
5180 * The open entry point is called when a network interface is made
5181 * active by the system (IFF_UP). At this point all resources needed
5182 * for transmit and receive operations are allocated, the interrupt
5183 * handler is registered with the OS, the watchdog timer is started,
5184 * and the stack is notified that the interface is ready.
5185 **/
5186 static int ixgbe_open(struct net_device *netdev)
5187 {
5188 struct ixgbe_adapter *adapter = netdev_priv(netdev);
5189 int err;
5190
5191 /* disallow open during test */
5192 if (test_bit(__IXGBE_TESTING, &adapter->state))
5193 return -EBUSY;
5194
5195 netif_carrier_off(netdev);
5196
5197 /* allocate transmit descriptors */
5198 err = ixgbe_setup_all_tx_resources(adapter);
5199 if (err)
5200 goto err_setup_tx;
5201
5202 /* allocate receive descriptors */
5203 err = ixgbe_setup_all_rx_resources(adapter);
5204 if (err)
5205 goto err_setup_rx;
5206
5207 ixgbe_configure(adapter);
5208
5209 err = ixgbe_request_irq(adapter);
5210 if (err)
5211 goto err_req_irq;
5212
5213 err = ixgbe_up_complete(adapter);
5214 if (err)
5215 goto err_up;
5216
5217 netif_tx_start_all_queues(netdev);
5218
5219 return 0;
5220
5221 err_up:
5222 ixgbe_release_hw_control(adapter);
5223 ixgbe_free_irq(adapter);
5224 err_req_irq:
5225 err_setup_rx:
5226 ixgbe_free_all_rx_resources(adapter);
5227 err_setup_tx:
5228 ixgbe_free_all_tx_resources(adapter);
5229 ixgbe_reset(adapter);
5230
5231 return err;
5232 }
5233
5234 /**
5235 * ixgbe_close - Disables a network interface
5236 * @netdev: network interface device structure
5237 *
5238 * Returns 0, this is not allowed to fail
5239 *
5240 * The close entry point is called when an interface is de-activated
5241 * by the OS. The hardware is still under the drivers control, but
5242 * needs to be disabled. A global MAC reset is issued to stop the
5243 * hardware, and all transmit and receive resources are freed.
5244 **/
5245 static int ixgbe_close(struct net_device *netdev)
5246 {
5247 struct ixgbe_adapter *adapter = netdev_priv(netdev);
5248
5249 ixgbe_down(adapter);
5250 ixgbe_free_irq(adapter);
5251
5252 ixgbe_free_all_tx_resources(adapter);
5253 ixgbe_free_all_rx_resources(adapter);
5254
5255 ixgbe_release_hw_control(adapter);
5256
5257 return 0;
5258 }
5259
5260 #ifdef CONFIG_PM
5261 static int ixgbe_resume(struct pci_dev *pdev)
5262 {
5263 struct net_device *netdev = pci_get_drvdata(pdev);
5264 struct ixgbe_adapter *adapter = netdev_priv(netdev);
5265 int err;
5266
5267 pci_set_power_state(pdev, PCI_D0);
5268 pci_restore_state(pdev);
5269 /*
5270 * pci_restore_state clears dev->state_saved so call
5271 * pci_save_state to restore it.
5272 */
5273 pci_save_state(pdev);
5274
5275 err = pci_enable_device_mem(pdev);
5276 if (err) {
5277 e_dev_err("Cannot enable PCI device from suspend\n");
5278 return err;
5279 }
5280 pci_set_master(pdev);
5281
5282 pci_wake_from_d3(pdev, false);
5283
5284 err = ixgbe_init_interrupt_scheme(adapter);
5285 if (err) {
5286 e_dev_err("Cannot initialize interrupts for device\n");
5287 return err;
5288 }
5289
5290 ixgbe_reset(adapter);
5291
5292 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
5293
5294 if (netif_running(netdev)) {
5295 err = ixgbe_open(adapter->netdev);
5296 if (err)
5297 return err;
5298 }
5299
5300 netif_device_attach(netdev);
5301
5302 return 0;
5303 }
5304 #endif /* CONFIG_PM */
5305
5306 static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
5307 {
5308 struct net_device *netdev = pci_get_drvdata(pdev);
5309 struct ixgbe_adapter *adapter = netdev_priv(netdev);
5310 struct ixgbe_hw *hw = &adapter->hw;
5311 u32 ctrl, fctrl;
5312 u32 wufc = adapter->wol;
5313 #ifdef CONFIG_PM
5314 int retval = 0;
5315 #endif
5316
5317 netif_device_detach(netdev);
5318
5319 if (netif_running(netdev)) {
5320 ixgbe_down(adapter);
5321 ixgbe_free_irq(adapter);
5322 ixgbe_free_all_tx_resources(adapter);
5323 ixgbe_free_all_rx_resources(adapter);
5324 }
5325
5326 #ifdef CONFIG_PM
5327 retval = pci_save_state(pdev);
5328 if (retval)
5329 return retval;
5330
5331 #endif
5332 if (wufc) {
5333 ixgbe_set_rx_mode(netdev);
5334
5335 /* turn on all-multi mode if wake on multicast is enabled */
5336 if (wufc & IXGBE_WUFC_MC) {
5337 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
5338 fctrl |= IXGBE_FCTRL_MPE;
5339 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
5340 }
5341
5342 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
5343 ctrl |= IXGBE_CTRL_GIO_DIS;
5344 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
5345
5346 IXGBE_WRITE_REG(hw, IXGBE_WUFC, wufc);
5347 } else {
5348 IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
5349 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
5350 }
5351
5352 if (wufc && hw->mac.type == ixgbe_mac_82599EB)
5353 pci_wake_from_d3(pdev, true);
5354 else
5355 pci_wake_from_d3(pdev, false);
5356
5357 *enable_wake = !!wufc;
5358
5359 ixgbe_clear_interrupt_scheme(adapter);
5360
5361 ixgbe_release_hw_control(adapter);
5362
5363 pci_disable_device(pdev);
5364
5365 return 0;
5366 }
5367
5368 #ifdef CONFIG_PM
5369 static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
5370 {
5371 int retval;
5372 bool wake;
5373
5374 retval = __ixgbe_shutdown(pdev, &wake);
5375 if (retval)
5376 return retval;
5377
5378 if (wake) {
5379 pci_prepare_to_sleep(pdev);
5380 } else {
5381 pci_wake_from_d3(pdev, false);
5382 pci_set_power_state(pdev, PCI_D3hot);
5383 }
5384
5385 return 0;
5386 }
5387 #endif /* CONFIG_PM */
5388
5389 static void ixgbe_shutdown(struct pci_dev *pdev)
5390 {
5391 bool wake;
5392
5393 __ixgbe_shutdown(pdev, &wake);
5394
5395 if (system_state == SYSTEM_POWER_OFF) {
5396 pci_wake_from_d3(pdev, wake);
5397 pci_set_power_state(pdev, PCI_D3hot);
5398 }
5399 }
5400
5401 /**
5402 * ixgbe_update_stats - Update the board statistics counters.
5403 * @adapter: board private structure
5404 **/
5405 void ixgbe_update_stats(struct ixgbe_adapter *adapter)
5406 {
5407 struct net_device *netdev = adapter->netdev;
5408 struct ixgbe_hw *hw = &adapter->hw;
5409 u64 total_mpc = 0;
5410 u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
5411 u64 non_eop_descs = 0, restart_queue = 0;
5412 struct ixgbe_hw_stats *hwstats = &adapter->stats;
5413
5414 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
5415 test_bit(__IXGBE_RESETTING, &adapter->state))
5416 return;
5417
5418 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
5419 u64 rsc_count = 0;
5420 u64 rsc_flush = 0;
5421 for (i = 0; i < 16; i++)
5422 adapter->hw_rx_no_dma_resources +=
5423 IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
5424 for (i = 0; i < adapter->num_rx_queues; i++) {
5425 rsc_count += adapter->rx_ring[i]->rsc_count;
5426 rsc_flush += adapter->rx_ring[i]->rsc_flush;
5427 }
5428 adapter->rsc_total_count = rsc_count;
5429 adapter->rsc_total_flush = rsc_flush;
5430 }
5431
5432 /* gather some stats to the adapter struct that are per queue */
5433 for (i = 0; i < adapter->num_tx_queues; i++)
5434 restart_queue += adapter->tx_ring[i]->restart_queue;
5435 adapter->restart_queue = restart_queue;
5436
5437 for (i = 0; i < adapter->num_rx_queues; i++)
5438 non_eop_descs += adapter->rx_ring[i]->non_eop_descs;
5439 adapter->non_eop_descs = non_eop_descs;
5440
5441 hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
5442 for (i = 0; i < 8; i++) {
5443 /* for packet buffers not used, the register should read 0 */
5444 mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
5445 missed_rx += mpc;
5446 hwstats->mpc[i] += mpc;
5447 total_mpc += hwstats->mpc[i];
5448 if (hw->mac.type == ixgbe_mac_82598EB)
5449 hwstats->rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
5450 hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
5451 hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
5452 hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
5453 hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
5454 if (hw->mac.type == ixgbe_mac_82599EB) {
5455 hwstats->pxonrxc[i] +=
5456 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
5457 hwstats->pxoffrxc[i] +=
5458 IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
5459 hwstats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
5460 } else {
5461 hwstats->pxonrxc[i] +=
5462 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
5463 hwstats->pxoffrxc[i] +=
5464 IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
5465 }
5466 hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
5467 hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
5468 }
5469 hwstats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
5470 /* work around hardware counting issue */
5471 hwstats->gprc -= missed_rx;
5472
5473 /* 82598 hardware only has a 32 bit counter in the high register */
5474 if (hw->mac.type == ixgbe_mac_82599EB) {
5475 u64 tmp;
5476 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
5477 tmp = IXGBE_READ_REG(hw, IXGBE_GORCH) & 0xF;
5478 /* 4 high bits of GORC */
5479 hwstats->gorc += (tmp << 32);
5480 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
5481 tmp = IXGBE_READ_REG(hw, IXGBE_GOTCH) & 0xF;
5482 /* 4 high bits of GOTC */
5483 hwstats->gotc += (tmp << 32);
5484 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
5485 IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
5486 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
5487 hwstats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
5488 hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
5489 hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
5490 #ifdef IXGBE_FCOE
5491 hwstats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
5492 hwstats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
5493 hwstats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
5494 hwstats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
5495 hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
5496 hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
5497 #endif /* IXGBE_FCOE */
5498 } else {
5499 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
5500 hwstats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
5501 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
5502 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
5503 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
5504 }
5505 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
5506 hwstats->bprc += bprc;
5507 hwstats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
5508 if (hw->mac.type == ixgbe_mac_82598EB)
5509 hwstats->mprc -= bprc;
5510 hwstats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
5511 hwstats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
5512 hwstats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
5513 hwstats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
5514 hwstats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
5515 hwstats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
5516 hwstats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
5517 hwstats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
5518 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
5519 hwstats->lxontxc += lxon;
5520 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
5521 hwstats->lxofftxc += lxoff;
5522 hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
5523 hwstats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
5524 hwstats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
5525 /*
5526 * 82598 errata - tx of flow control packets is included in tx counters
5527 */
5528 xon_off_tot = lxon + lxoff;
5529 hwstats->gptc -= xon_off_tot;
5530 hwstats->mptc -= xon_off_tot;
5531 hwstats->gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
5533 hwstats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
5534 hwstats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
5535 hwstats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
5536 hwstats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
5537 hwstats->ptc64 -= xon_off_tot;
5538 hwstats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
5539 hwstats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
5540 hwstats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
5541 hwstats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
5542 hwstats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
5543 hwstats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
5544
5545 /* Fill out the OS statistics structure */
5546 netdev->stats.multicast = hwstats->mprc;
5547
5548 /* Rx Errors */
5549 netdev->stats.rx_errors = hwstats->crcerrs + hwstats->rlec;
5550 netdev->stats.rx_dropped = 0;
5551 netdev->stats.rx_length_errors = hwstats->rlec;
5552 netdev->stats.rx_crc_errors = hwstats->crcerrs;
5553 netdev->stats.rx_missed_errors = total_mpc;
5554 }
5555
5556 /**
5557 * ixgbe_watchdog - Timer Call-back
5558 * @data: pointer to adapter cast into an unsigned long
5559 **/
5560 static void ixgbe_watchdog(unsigned long data)
5561 {
5562 struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
5563 struct ixgbe_hw *hw = &adapter->hw;
5564 u64 eics = 0;
5565 int i;
5566
5567 /*
5568 * Do the watchdog outside of interrupt context due to the lovely
5569 * delays that some of the newer hardware requires
5570 */
5571
5572 if (test_bit(__IXGBE_DOWN, &adapter->state))
5573 goto watchdog_short_circuit;
5574
5575 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
5576 /*
5577 * for legacy and MSI interrupts don't set any bits
5578 * that are enabled for EIAM, because this operation
5579 * would set *both* EIMS and EICS for any bit in EIAM
5580 */
5581 IXGBE_WRITE_REG(hw, IXGBE_EICS,
5582 (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
5583 goto watchdog_reschedule;
5584 }
5585
5586 /* get one bit for every active tx/rx interrupt vector */
5587 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
5588 struct ixgbe_q_vector *qv = adapter->q_vector[i];
5589 if (qv->rxr_count || qv->txr_count)
5590 eics |= ((u64)1 << i);
5591 }
5592
5593 /* Cause software interrupt to ensure rx rings are cleaned */
5594 ixgbe_irq_rearm_queues(adapter, eics);
5595
5596 watchdog_reschedule:
5597 /* Reset the timer */
5598 mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 2 * HZ));
5599
5600 watchdog_short_circuit:
5601 schedule_work(&adapter->watchdog_task);
5602 }
5603
5604 /**
5605 * ixgbe_multispeed_fiber_task - worker thread to configure multispeed fiber
5606 * @work: pointer to work_struct containing our data
5607 **/
5608 static void ixgbe_multispeed_fiber_task(struct work_struct *work)
5609 {
5610 struct ixgbe_adapter *adapter = container_of(work,
5611 struct ixgbe_adapter,
5612 multispeed_fiber_task);
5613 struct ixgbe_hw *hw = &adapter->hw;
5614 u32 autoneg;
5615 bool negotiation;
5616
5617 adapter->flags |= IXGBE_FLAG_IN_SFP_LINK_TASK;
5618 autoneg = hw->phy.autoneg_advertised;
5619 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
5620 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation);
5621 hw->mac.autotry_restart = false;
5622 if (hw->mac.ops.setup_link)
5623 hw->mac.ops.setup_link(hw, autoneg, negotiation, true);
5624 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
5625 adapter->flags &= ~IXGBE_FLAG_IN_SFP_LINK_TASK;
5626 }
5627
5628 /**
5629 * ixgbe_sfp_config_module_task - worker thread to configure a new SFP+ module
5630 * @work: pointer to work_struct containing our data
5631 **/
5632 static void ixgbe_sfp_config_module_task(struct work_struct *work)
5633 {
5634 struct ixgbe_adapter *adapter = container_of(work,
5635 struct ixgbe_adapter,
5636 sfp_config_module_task);
5637 struct ixgbe_hw *hw = &adapter->hw;
5638 	s32 err;
5639
5640 adapter->flags |= IXGBE_FLAG_IN_SFP_MOD_TASK;
5641
5642 /* Time for electrical oscillations to settle down */
5643 msleep(100);
5644 err = hw->phy.ops.identify_sfp(hw);
5645
5646 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
5647 e_dev_err("failed to initialize because an unsupported SFP+ "
5648 "module type was detected.\n");
5649 e_dev_err("Reload the driver after installing a supported "
5650 "module.\n");
5651 unregister_netdev(adapter->netdev);
5652 return;
5653 }
5654 hw->mac.ops.setup_sfp(hw);
5655
5656 if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
5657 /* This will also work for DA Twinax connections */
5658 schedule_work(&adapter->multispeed_fiber_task);
5659 adapter->flags &= ~IXGBE_FLAG_IN_SFP_MOD_TASK;
5660 }
5661
5662 /**
5663 * ixgbe_fdir_reinit_task - worker thread to reinit FDIR filter table
5664 * @work: pointer to work_struct containing our data
5665 **/
5666 static void ixgbe_fdir_reinit_task(struct work_struct *work)
5667 {
5668 struct ixgbe_adapter *adapter = container_of(work,
5669 struct ixgbe_adapter,
5670 fdir_reinit_task);
5671 struct ixgbe_hw *hw = &adapter->hw;
5672 int i;
5673
5674 if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
5675 for (i = 0; i < adapter->num_tx_queues; i++)
5676 set_bit(__IXGBE_FDIR_INIT_DONE,
5677 &(adapter->tx_ring[i]->reinit_state));
5678 } else {
5679 e_err(probe, "failed to finish FDIR re-initialization, "
5680 "ignored adding FDIR ATR filters\n");
5681 }
5682 /* Done FDIR Re-initialization, enable transmits */
5683 netif_tx_start_all_queues(adapter->netdev);
5684 }
5685
5686 static DEFINE_MUTEX(ixgbe_watchdog_lock);
5687
5688 /**
5689 * ixgbe_watchdog_task - worker thread to bring link up
5690 * @work: pointer to work_struct containing our data
5691 **/
5692 static void ixgbe_watchdog_task(struct work_struct *work)
5693 {
5694 struct ixgbe_adapter *adapter = container_of(work,
5695 struct ixgbe_adapter,
5696 watchdog_task);
5697 struct net_device *netdev = adapter->netdev;
5698 struct ixgbe_hw *hw = &adapter->hw;
5699 u32 link_speed;
5700 bool link_up;
5701 int i;
5702 struct ixgbe_ring *tx_ring;
5703 int some_tx_pending = 0;
5704
5705 mutex_lock(&ixgbe_watchdog_lock);
5706
5707 link_up = adapter->link_up;
5708 link_speed = adapter->link_speed;
5709
5710 if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
5711 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
5712 if (link_up) {
5713 #ifdef CONFIG_DCB
5714 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
5715 for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
5716 hw->mac.ops.fc_enable(hw, i);
5717 } else {
5718 hw->mac.ops.fc_enable(hw, 0);
5719 }
5720 #else
5721 hw->mac.ops.fc_enable(hw, 0);
5722 #endif
5723 }
5724
5725 if (link_up ||
5726 time_after(jiffies, (adapter->link_check_timeout +
5727 IXGBE_TRY_LINK_TIMEOUT))) {
5728 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
5729 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
5730 }
5731 adapter->link_up = link_up;
5732 adapter->link_speed = link_speed;
5733 }
5734
5735 if (link_up) {
5736 if (!netif_carrier_ok(netdev)) {
5737 bool flow_rx, flow_tx;
5738
5739 if (hw->mac.type == ixgbe_mac_82599EB) {
5740 u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
5741 u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
5742 flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
5743 flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
5744 } else {
5745 u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
5746 u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
5747 flow_rx = !!(frctl & IXGBE_FCTRL_RFCE);
5748 flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
5749 }
5750
5751 e_info(drv, "NIC Link is Up %s, Flow Control: %s\n",
5752 (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
5753 "10 Gbps" :
5754 (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
5755 "1 Gbps" : "unknown speed")),
5756 ((flow_rx && flow_tx) ? "RX/TX" :
5757 (flow_rx ? "RX" :
5758 (flow_tx ? "TX" : "None"))));
5759
5760 netif_carrier_on(netdev);
5761 } else {
5762 /* Force detection of hung controller */
5763 adapter->detect_tx_hung = true;
5764 }
5765 } else {
5766 adapter->link_up = false;
5767 adapter->link_speed = 0;
5768 if (netif_carrier_ok(netdev)) {
5769 e_info(drv, "NIC Link is Down\n");
5770 netif_carrier_off(netdev);
5771 }
5772 }
5773
5774 if (!netif_carrier_ok(netdev)) {
5775 for (i = 0; i < adapter->num_tx_queues; i++) {
5776 tx_ring = adapter->tx_ring[i];
5777 if (tx_ring->next_to_use != tx_ring->next_to_clean) {
5778 some_tx_pending = 1;
5779 break;
5780 }
5781 }
5782
5783 if (some_tx_pending) {
5784 /* We've lost link, so the controller stops DMA,
5785 * but we've got queued Tx work that's never going
5786 * to get done, so reset controller to flush Tx.
5787 * (Do the reset outside of interrupt context).
5788 */
5789 schedule_work(&adapter->reset_task);
5790 }
5791 }
5792
5793 ixgbe_update_stats(adapter);
5794 mutex_unlock(&ixgbe_watchdog_lock);
5795 }
5796
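/**
 * ixgbe_tso - configure a TSO context descriptor for a frame
 * @adapter: board private structure
 * @tx_ring: ring the frame will be transmitted on
 * @skb: buffer to transmit
 * @tx_flags: IXGBE_TX_FLAGS_* for this frame
 * @hdr_len: updated with the total L2-L4 header length of the frame
 * @protocol: layer 3 protocol of the frame
 *
 * Returns true if a context descriptor was queued, false if the frame
 * is not GSO, or a negative errno if un-cloning the header fails.
 **/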
5797 static int ixgbe_tso(struct ixgbe_adapter *adapter,
5798 struct ixgbe_ring *tx_ring, struct sk_buff *skb,
5799 u32 tx_flags, u8 *hdr_len, __be16 protocol)
5800 {
5801 struct ixgbe_adv_tx_context_desc *context_desc;
5802 unsigned int i;
5803 int err;
5804 struct ixgbe_tx_buffer *tx_buffer_info;
5805 u32 vlan_macip_lens = 0, type_tucmd_mlhl;
5806 u32 mss_l4len_idx, l4len;
5807
5808 if (skb_is_gso(skb)) {
5809 if (skb_header_cloned(skb)) {
5810 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
5811 if (err)
5812 return err;
5813 }
5814 l4len = tcp_hdrlen(skb);
5815 *hdr_len += l4len;
5816
5817 if (protocol == htons(ETH_P_IP)) {
5818 struct iphdr *iph = ip_hdr(skb);
5819 iph->tot_len = 0;
5820 iph->check = 0;
5821 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5822 iph->daddr, 0,
5823 IPPROTO_TCP,
5824 0);
5825 } else if (skb_is_gso_v6(skb)) {
5826 ipv6_hdr(skb)->payload_len = 0;
5827 tcp_hdr(skb)->check =
5828 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
5829 &ipv6_hdr(skb)->daddr,
5830 0, IPPROTO_TCP, 0);
5831 }
5832
5833 i = tx_ring->next_to_use;
5834
5835 tx_buffer_info = &tx_ring->tx_buffer_info[i];
5836 context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i);
5837
5838 /* VLAN MACLEN IPLEN */
5839 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
5840 vlan_macip_lens |=
5841 (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
5842 vlan_macip_lens |= ((skb_network_offset(skb)) <<
5843 IXGBE_ADVTXD_MACLEN_SHIFT);
5844 *hdr_len += skb_network_offset(skb);
5845 vlan_macip_lens |=
5846 (skb_transport_header(skb) - skb_network_header(skb));
5847 *hdr_len +=
5848 (skb_transport_header(skb) - skb_network_header(skb));
5849 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
5850 context_desc->seqnum_seed = 0;
5851
5852 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
5853 type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
5854 IXGBE_ADVTXD_DTYP_CTXT);
5855
5856 if (protocol == htons(ETH_P_IP))
5857 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
5858 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
5859 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
5860
5861 /* MSS L4LEN IDX */
5862 mss_l4len_idx =
5863 (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
5864 mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
5865 /* use index 1 for TSO */
5866 mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
5867 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
5868
5869 tx_buffer_info->time_stamp = jiffies;
5870 tx_buffer_info->next_to_watch = i;
5871
5872 i++;
5873 if (i == tx_ring->count)
5874 i = 0;
5875 tx_ring->next_to_use = i;
5876
5877 return true;
5878 }
5879 return false;
5880 }
5881
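/**
 * ixgbe_psum - build TUCMD checksum flags for a CHECKSUM_PARTIAL frame
 * @adapter: board private structure
 * @skb: buffer to transmit
 * @protocol: layer 3 protocol of the frame
 *
 * Returns the IXGBE_ADVTXD_TUCMD_* bits that tell the hardware which
 * L3 (IPv4) and L4 (TCP/SCTP) checksums to insert.
 **/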
5882 static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb,
5883 __be16 protocol)
5884 {
5885 u32 rtn = 0;
5886
5887 switch (protocol) {
5888 case cpu_to_be16(ETH_P_IP):
5889 rtn |= IXGBE_ADVTXD_TUCMD_IPV4;
5890 switch (ip_hdr(skb)->protocol) {
5891 case IPPROTO_TCP:
5892 rtn |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
5893 break;
5894 case IPPROTO_SCTP:
5895 rtn |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
5896 break;
5897 }
5898 break;
5899 case cpu_to_be16(ETH_P_IPV6):
5900 /* XXX what about other V6 headers?? */
5901 switch (ipv6_hdr(skb)->nexthdr) {
5902 case IPPROTO_TCP:
5903 rtn |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
5904 break;
5905 case IPPROTO_SCTP:
5906 rtn |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
5907 break;
5908 }
5909 break;
5910 default:
5911 if (unlikely(net_ratelimit()))
5912 e_warn(probe, "partial checksum but proto=%x!\n",
5913 protocol);
5914 break;
5915 }
5916
5917 return rtn;
5918 }
5919
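/**
 * ixgbe_tx_csum - queue a context descriptor for Tx checksum offload
 * @adapter: board private structure
 * @tx_ring: ring the frame will be transmitted on
 * @skb: buffer to transmit
 * @tx_flags: IXGBE_TX_FLAGS_* for this frame
 * @protocol: layer 3 protocol of the frame
 *
 * Returns true if a context descriptor was queued because checksum
 * offload or VLAN insertion is needed, false otherwise.
 **/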
5920 static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
5921 struct ixgbe_ring *tx_ring,
5922 struct sk_buff *skb, u32 tx_flags,
5923 __be16 protocol)
5924 {
5925 struct ixgbe_adv_tx_context_desc *context_desc;
5926 unsigned int i;
5927 struct ixgbe_tx_buffer *tx_buffer_info;
5928 u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
5929
5930 if (skb->ip_summed == CHECKSUM_PARTIAL ||
5931 (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
5932 i = tx_ring->next_to_use;
5933 tx_buffer_info = &tx_ring->tx_buffer_info[i];
5934 context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i);
5935
5936 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
5937 vlan_macip_lens |=
5938 (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
5939 vlan_macip_lens |= (skb_network_offset(skb) <<
5940 IXGBE_ADVTXD_MACLEN_SHIFT);
5941 if (skb->ip_summed == CHECKSUM_PARTIAL)
5942 vlan_macip_lens |= (skb_transport_header(skb) -
5943 skb_network_header(skb));
5944
5945 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
5946 context_desc->seqnum_seed = 0;
5947
5948 type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
5949 IXGBE_ADVTXD_DTYP_CTXT);
5950
5951 if (skb->ip_summed == CHECKSUM_PARTIAL)
5952 type_tucmd_mlhl |= ixgbe_psum(adapter, skb, protocol);
5953
5954 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
5955 /* use index zero for tx checksum offload */
5956 context_desc->mss_l4len_idx = 0;
5957
5958 tx_buffer_info->time_stamp = jiffies;
5959 tx_buffer_info->next_to_watch = i;
5960
5961 i++;
5962 if (i == tx_ring->count)
5963 i = 0;
5964 tx_ring->next_to_use = i;
5965
5966 return true;
5967 }
5968
5969 return false;
5970 }
5971
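/**
 * ixgbe_tx_map - map the data and fragments of an skb for DMA
 * @adapter: board private structure
 * @tx_ring: ring the frame will be transmitted on
 * @skb: buffer to transmit
 * @tx_flags: IXGBE_TX_FLAGS_* for this frame
 * @first: index of the first descriptor used by this frame
 * @hdr_len: header length, used for GSO byte accounting
 *
 * Returns the number of buffers mapped, or 0 after unwinding any
 * partial mappings when a DMA mapping fails.
 **/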
5972 static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
5973 struct ixgbe_ring *tx_ring,
5974 struct sk_buff *skb, u32 tx_flags,
5975 unsigned int first, const u8 hdr_len)
5976 {
5977 struct device *dev = tx_ring->dev;
5978 struct ixgbe_tx_buffer *tx_buffer_info;
5979 unsigned int len;
5980 unsigned int total = skb->len;
5981 unsigned int offset = 0, size, count = 0, i;
5982 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
5983 unsigned int f;
5984 unsigned int bytecount = skb->len;
5985 u16 gso_segs = 1;
5986
5987 i = tx_ring->next_to_use;
5988
5989 if (tx_flags & IXGBE_TX_FLAGS_FCOE)
5990 /* excluding fcoe_crc_eof for FCoE */
5991 total -= sizeof(struct fcoe_crc_eof);
5992
5993 len = min(skb_headlen(skb), total);
5994 while (len) {
5995 tx_buffer_info = &tx_ring->tx_buffer_info[i];
5996 size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
5997
5998 tx_buffer_info->length = size;
5999 tx_buffer_info->mapped_as_page = false;
6000 tx_buffer_info->dma = dma_map_single(dev,
6001 skb->data + offset,
6002 size, DMA_TO_DEVICE);
6003 if (dma_mapping_error(dev, tx_buffer_info->dma))
6004 goto dma_error;
6005 tx_buffer_info->time_stamp = jiffies;
6006 tx_buffer_info->next_to_watch = i;
6007
6008 len -= size;
6009 total -= size;
6010 offset += size;
6011 count++;
6012
6013 if (len) {
6014 i++;
6015 if (i == tx_ring->count)
6016 i = 0;
6017 }
6018 }
6019
6020 for (f = 0; f < nr_frags; f++) {
6021 struct skb_frag_struct *frag;
6022
6023 frag = &skb_shinfo(skb)->frags[f];
6024 len = min((unsigned int)frag->size, total);
6025 offset = frag->page_offset;
6026
6027 while (len) {
6028 i++;
6029 if (i == tx_ring->count)
6030 i = 0;
6031
6032 tx_buffer_info = &tx_ring->tx_buffer_info[i];
6033 size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
6034
6035 tx_buffer_info->length = size;
6036 tx_buffer_info->dma = dma_map_page(dev,
6037 frag->page,
6038 offset, size,
6039 DMA_TO_DEVICE);
6040 tx_buffer_info->mapped_as_page = true;
6041 if (dma_mapping_error(dev, tx_buffer_info->dma))
6042 goto dma_error;
6043 tx_buffer_info->time_stamp = jiffies;
6044 tx_buffer_info->next_to_watch = i;
6045
6046 len -= size;
6047 total -= size;
6048 offset += size;
6049 count++;
6050 }
6051 if (total == 0)
6052 break;
6053 }
6054
6055 if (tx_flags & IXGBE_TX_FLAGS_TSO)
6056 gso_segs = skb_shinfo(skb)->gso_segs;
6057 #ifdef IXGBE_FCOE
6058 /* adjust for FCoE Sequence Offload */
6059 else if (tx_flags & IXGBE_TX_FLAGS_FSO)
6060 gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
6061 skb_shinfo(skb)->gso_size);
6062 #endif /* IXGBE_FCOE */
6063 	/* multiply data chunks by size of headers */
6064 	bytecount += (gso_segs - 1) * hdr_len;
6065 
6066 	tx_ring->tx_buffer_info[i].bytecount = bytecount;
6067 tx_ring->tx_buffer_info[i].gso_segs = gso_segs;
6068 tx_ring->tx_buffer_info[i].skb = skb;
6069 tx_ring->tx_buffer_info[first].next_to_watch = i;
6070
6071 return count;
6072
6073 dma_error:
6074 e_dev_err("TX DMA map failed\n");
6075
6076 /* clear timestamp and dma mappings for failed tx_buffer_info map */
6077 tx_buffer_info->dma = 0;
6078 tx_buffer_info->time_stamp = 0;
6079 tx_buffer_info->next_to_watch = 0;
6080 if (count)
6081 count--;
6082
6083 /* clear timestamp and dma mappings for remaining portion of packet */
6084 while (count--) {
6085 if (i == 0)
6086 i += tx_ring->count;
6087 i--;
6088 tx_buffer_info = &tx_ring->tx_buffer_info[i];
6089 ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
6090 }
6091
6092 return 0;
6093 }
6094
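/**
 * ixgbe_tx_queue - write the descriptors and hand the frame to hardware
 * @tx_ring: ring the frame will be transmitted on
 * @tx_flags: IXGBE_TX_FLAGS_* for this frame
 * @count: number of buffers mapped by ixgbe_tx_map()
 * @paylen: total length of the frame
 * @hdr_len: header length, excluded from the olinfo payload length
 **/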
6095 static void ixgbe_tx_queue(struct ixgbe_ring *tx_ring,
6096 int tx_flags, int count, u32 paylen, u8 hdr_len)
6097 {
6098 union ixgbe_adv_tx_desc *tx_desc = NULL;
6099 struct ixgbe_tx_buffer *tx_buffer_info;
6100 u32 olinfo_status = 0, cmd_type_len = 0;
6101 unsigned int i;
6102 u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;
6103
6104 cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
6105
6106 cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
6107
6108 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
6109 cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
6110
6111 if (tx_flags & IXGBE_TX_FLAGS_TSO) {
6112 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
6113
6114 olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
6115 IXGBE_ADVTXD_POPTS_SHIFT;
6116
6117 /* use index 1 context for tso */
6118 olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
6119 if (tx_flags & IXGBE_TX_FLAGS_IPV4)
6120 olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
6121 IXGBE_ADVTXD_POPTS_SHIFT;
6122
6123 } else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
6124 olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
6125 IXGBE_ADVTXD_POPTS_SHIFT;
6126
6127 if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
6128 olinfo_status |= IXGBE_ADVTXD_CC;
6129 olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
6130 if (tx_flags & IXGBE_TX_FLAGS_FSO)
6131 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
6132 }
6133
6134 olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
6135
6136 i = tx_ring->next_to_use;
6137 while (count--) {
6138 tx_buffer_info = &tx_ring->tx_buffer_info[i];
6139 tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
6140 tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
6141 tx_desc->read.cmd_type_len =
6142 cpu_to_le32(cmd_type_len | tx_buffer_info->length);
6143 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
6144 i++;
6145 if (i == tx_ring->count)
6146 i = 0;
6147 }
6148
6149 tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
6150
6151 /*
6152 * Force memory writes to complete before letting h/w
6153 * know there are new descriptors to fetch. (Only
6154 * applicable for weak-ordered memory model archs,
6155 * such as IA-64).
6156 */
6157 wmb();
6158
6159 tx_ring->next_to_use = i;
6160 writel(i, tx_ring->tail);
6161 }
6162
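/**
 * ixgbe_atr - sample an outgoing flow for a flow director signature filter
 * @adapter: board private structure
 * @skb: buffer being transmitted
 * @queue: Tx queue the flow is bound to
 * @tx_flags: IXGBE_TX_FLAGS_* for this frame
 * @protocol: layer 3 protocol of the frame
 *
 * Only IPv4/TCP flows are supported; everything else is ignored.
 **/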
6163 static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
6164 int queue, u32 tx_flags, __be16 protocol)
6165 {
6166 struct ixgbe_atr_input atr_input;
6167 struct tcphdr *th;
6168 struct iphdr *iph = ip_hdr(skb);
6169 struct ethhdr *eth = (struct ethhdr *)skb->data;
6170 u16 vlan_id, src_port, dst_port, flex_bytes;
6171 u32 src_ipv4_addr, dst_ipv4_addr;
6172 u8 l4type = 0;
6173
6174 /* Right now, we support IPv4 only */
6175 if (protocol != htons(ETH_P_IP))
6176 return;
6177 /* check if we're UDP or TCP */
6178 if (iph->protocol == IPPROTO_TCP) {
6179 th = tcp_hdr(skb);
6180 src_port = th->source;
6181 dst_port = th->dest;
6182 l4type |= IXGBE_ATR_L4TYPE_TCP;
6183 /* l4type IPv4 type is 0, no need to assign */
6184 } else {
6185 /* Unsupported L4 header, just bail here */
6186 return;
6187 }
6188
6189 memset(&atr_input, 0, sizeof(struct ixgbe_atr_input));
6190
6191 vlan_id = (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK) >>
6192 IXGBE_TX_FLAGS_VLAN_SHIFT;
6193 src_ipv4_addr = iph->saddr;
6194 dst_ipv4_addr = iph->daddr;
6195 flex_bytes = eth->h_proto;
6196
6197 ixgbe_atr_set_vlan_id_82599(&atr_input, vlan_id);
6198 ixgbe_atr_set_src_port_82599(&atr_input, dst_port);
6199 ixgbe_atr_set_dst_port_82599(&atr_input, src_port);
6200 ixgbe_atr_set_flex_byte_82599(&atr_input, flex_bytes);
6201 ixgbe_atr_set_l4type_82599(&atr_input, l4type);
6202 /* src and dst are inverted, think how the receiver sees them */
6203 ixgbe_atr_set_src_ipv4_82599(&atr_input, dst_ipv4_addr);
6204 ixgbe_atr_set_dst_ipv4_82599(&atr_input, src_ipv4_addr);
6205
6206 /* This assumes the Rx queue and Tx queue are bound to the same CPU */
6207 ixgbe_fdir_add_signature_filter_82599(&adapter->hw, &atr_input, queue);
6208 }
6209
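/**
 * __ixgbe_maybe_stop_tx - stop the queue unless descriptors have freed up
 * @netdev: network interface device structure
 * @tx_ring: ring the frame will be transmitted on
 * @size: number of descriptors needed
 *
 * Returns -EBUSY if the queue had to stay stopped, 0 if it was restarted.
 **/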
6210 static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
6211 struct ixgbe_ring *tx_ring, int size)
6212 {
6213 netif_stop_subqueue(netdev, tx_ring->queue_index);
6214 /* Herbert's original patch had:
6215 * smp_mb__after_netif_stop_queue();
6216 * but since that doesn't exist yet, just open code it. */
6217 smp_mb();
6218
6219 /* We need to check again in a case another CPU has just
6220 * made room available. */
6221 if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
6222 return -EBUSY;
6223
6224 /* A reprieve! - use start_queue because it doesn't call schedule */
6225 netif_start_subqueue(netdev, tx_ring->queue_index);
6226 ++tx_ring->restart_queue;
6227 return 0;
6228 }
6229
6230 static int ixgbe_maybe_stop_tx(struct net_device *netdev,
6231 struct ixgbe_ring *tx_ring, int size)
6232 {
6233 if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
6234 return 0;
6235 return __ixgbe_maybe_stop_tx(netdev, tx_ring, size);
6236 }
6237
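/**
 * ixgbe_select_queue - select a Tx queue for a frame
 * @dev: network interface device structure
 * @skb: buffer to transmit
 *
 * FCoE/FIP, flow director and DCB traffic each get their own mapping;
 * everything else falls through to skb_tx_hash().
 **/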
6238 static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
6239 {
6240 struct ixgbe_adapter *adapter = netdev_priv(dev);
6241 int txq = smp_processor_id();
6242 #ifdef IXGBE_FCOE
6243 __be16 protocol;
6244
6245 protocol = vlan_get_protocol(skb);
6246
6247 if ((protocol == htons(ETH_P_FCOE)) ||
6248 (protocol == htons(ETH_P_FIP))) {
6249 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
6250 txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
6251 txq += adapter->ring_feature[RING_F_FCOE].mask;
6252 return txq;
6253 #ifdef CONFIG_IXGBE_DCB
6254 } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
6255 txq = adapter->fcoe.up;
6256 return txq;
6257 #endif
6258 }
6259 }
6260 #endif
6261
6262 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
6263 while (unlikely(txq >= dev->real_num_tx_queues))
6264 txq -= dev->real_num_tx_queues;
6265 return txq;
6266 }
6267
6268 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
6269 if (skb->priority == TC_PRIO_CONTROL)
6270 txq = adapter->ring_feature[RING_F_DCB].indices-1;
6271 else
6272 txq = (skb->vlan_tci & IXGBE_TX_FLAGS_VLAN_PRIO_MASK)
6273 >> 13;
6274 return txq;
6275 }
6276
6277 return skb_tx_hash(dev, skb);
6278 }
6279
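/**
 * ixgbe_xmit_frame_ring - transmit a frame on a specific ring
 * @skb: buffer to transmit
 * @netdev: network interface device structure
 * @adapter: board private structure
 * @tx_ring: ring the frame will be transmitted on
 *
 * Sets up the VLAN, FCoE, TSO and checksum offloads, maps the buffers,
 * queues the descriptors and stops the queue if it is about to fill.
 **/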
6280 netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev,
6281 struct ixgbe_adapter *adapter,
6282 struct ixgbe_ring *tx_ring)
6283 {
6284 struct netdev_queue *txq;
6285 unsigned int first;
6286 unsigned int tx_flags = 0;
6287 u8 hdr_len = 0;
6288 int tso;
6289 int count = 0;
6290 unsigned int f;
6291 __be16 protocol;
6292
6293 protocol = vlan_get_protocol(skb);
6294
6295 if (vlan_tx_tag_present(skb)) {
6296 tx_flags |= vlan_tx_tag_get(skb);
6297 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
6298 tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
6299 tx_flags |= ((skb->queue_mapping & 0x7) << 13);
6300 }
6301 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
6302 tx_flags |= IXGBE_TX_FLAGS_VLAN;
6303 } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED &&
6304 skb->priority != TC_PRIO_CONTROL) {
6305 tx_flags |= ((skb->queue_mapping & 0x7) << 13);
6306 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
6307 tx_flags |= IXGBE_TX_FLAGS_VLAN;
6308 }
6309
6310 #ifdef IXGBE_FCOE
6311 /* for FCoE with DCB, we force the priority to what
6312 * was specified by the switch */
6313 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED &&
6314 (protocol == htons(ETH_P_FCOE) ||
6315 protocol == htons(ETH_P_FIP))) {
6316 #ifdef CONFIG_IXGBE_DCB
6317 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
6318 tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK
6319 << IXGBE_TX_FLAGS_VLAN_SHIFT);
6320 tx_flags |= ((adapter->fcoe.up << 13)
6321 << IXGBE_TX_FLAGS_VLAN_SHIFT);
6322 }
6323 #endif
6324 /* flag for FCoE offloads */
6325 if (protocol == htons(ETH_P_FCOE))
6326 tx_flags |= IXGBE_TX_FLAGS_FCOE;
6327 }
6328 #endif
6329
6330 /* four things can cause us to need a context descriptor */
6331 if (skb_is_gso(skb) ||
6332 (skb->ip_summed == CHECKSUM_PARTIAL) ||
6333 (tx_flags & IXGBE_TX_FLAGS_VLAN) ||
6334 (tx_flags & IXGBE_TX_FLAGS_FCOE))
6335 count++;
6336
6337 count += TXD_USE_COUNT(skb_headlen(skb));
6338 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
6339 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
6340
6341 if (ixgbe_maybe_stop_tx(netdev, tx_ring, count)) {
6342 adapter->tx_busy++;
6343 return NETDEV_TX_BUSY;
6344 }
6345
6346 first = tx_ring->next_to_use;
6347 if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
6348 #ifdef IXGBE_FCOE
6349 /* setup tx offload for FCoE */
6350 tso = ixgbe_fso(adapter, tx_ring, skb, tx_flags, &hdr_len);
6351 if (tso < 0) {
6352 dev_kfree_skb_any(skb);
6353 return NETDEV_TX_OK;
6354 }
6355 if (tso)
6356 tx_flags |= IXGBE_TX_FLAGS_FSO;
6357 #endif /* IXGBE_FCOE */
6358 } else {
6359 if (protocol == htons(ETH_P_IP))
6360 tx_flags |= IXGBE_TX_FLAGS_IPV4;
6361 tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len,
6362 protocol);
6363 if (tso < 0) {
6364 dev_kfree_skb_any(skb);
6365 return NETDEV_TX_OK;
6366 }
6367
6368 if (tso)
6369 tx_flags |= IXGBE_TX_FLAGS_TSO;
6370 else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags,
6371 protocol) &&
6372 (skb->ip_summed == CHECKSUM_PARTIAL))
6373 tx_flags |= IXGBE_TX_FLAGS_CSUM;
6374 }
6375
6376 count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first, hdr_len);
6377 if (count) {
6378 /* add the ATR filter if ATR is on */
6379 if (tx_ring->atr_sample_rate) {
6380 ++tx_ring->atr_count;
6381 if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) &&
6382 test_bit(__IXGBE_FDIR_INIT_DONE,
6383 &tx_ring->reinit_state)) {
6384 ixgbe_atr(adapter, skb, tx_ring->queue_index,
6385 tx_flags, protocol);
6386 tx_ring->atr_count = 0;
6387 }
6388 }
6389 txq = netdev_get_tx_queue(netdev, tx_ring->queue_index);
6390 txq->tx_bytes += skb->len;
6391 txq->tx_packets++;
6392 ixgbe_tx_queue(tx_ring, tx_flags, count, skb->len, hdr_len);
6393 ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
6394
6395 } else {
6396 dev_kfree_skb_any(skb);
6397 tx_ring->tx_buffer_info[first].time_stamp = 0;
6398 tx_ring->next_to_use = first;
6399 }
6400
6401 return NETDEV_TX_OK;
6402 }
6403
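/**
 * ixgbe_xmit_frame - .ndo_start_xmit entry point
 * @skb: buffer to transmit
 * @netdev: network interface device structure
 *
 * Looks up the Tx ring for skb->queue_mapping and hands off to
 * ixgbe_xmit_frame_ring().
 **/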
6404 static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
6405 {
6406 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6407 struct ixgbe_ring *tx_ring;
6408
6409 tx_ring = adapter->tx_ring[skb->queue_mapping];
6410 return ixgbe_xmit_frame_ring(skb, netdev, adapter, tx_ring);
6411 }
6412
6413 /**
6414 * ixgbe_set_mac - Change the Ethernet Address of the NIC
6415 * @netdev: network interface device structure
6416 * @p: pointer to an address structure
6417 *
6418 * Returns 0 on success, negative on failure
6419 **/
6420 static int ixgbe_set_mac(struct net_device *netdev, void *p)
6421 {
6422 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6423 struct ixgbe_hw *hw = &adapter->hw;
6424 struct sockaddr *addr = p;
6425
6426 if (!is_valid_ether_addr(addr->sa_data))
6427 return -EADDRNOTAVAIL;
6428
6429 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
6430 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
6431
6432 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
6433 IXGBE_RAH_AV);
6434
6435 return 0;
6436 }
6437
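/**
 * ixgbe_mdio_read - read a PHY register over MDIO
 * @netdev: network interface device structure
 * @prtad: MDIO port address, must match the probed PHY
 * @devad: MMD device address
 * @addr: register address
 *
 * Returns the register value on success, or a negative error code.
 **/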
6438 static int
6439 ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr)
6440 {
6441 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6442 struct ixgbe_hw *hw = &adapter->hw;
6443 u16 value;
6444 int rc;
6445
6446 if (prtad != hw->phy.mdio.prtad)
6447 return -EINVAL;
6448 rc = hw->phy.ops.read_reg(hw, addr, devad, &value);
6449 if (!rc)
6450 rc = value;
6451 return rc;
6452 }
6453
6454 static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad,
6455 u16 addr, u16 value)
6456 {
6457 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6458 struct ixgbe_hw *hw = &adapter->hw;
6459
6460 if (prtad != hw->phy.mdio.prtad)
6461 return -EINVAL;
6462 return hw->phy.ops.write_reg(hw, addr, devad, value);
6463 }
6464
6465 static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
6466 {
6467 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6468
6469 return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
6470 }
6471
6472 /**
6473 * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding
6474 * netdev->dev_addrs
6475  * @dev: network interface device structure
6476 *
6477 * Returns non-zero on failure
6478 **/
6479 static int ixgbe_add_sanmac_netdev(struct net_device *dev)
6480 {
6481 int err = 0;
6482 struct ixgbe_adapter *adapter = netdev_priv(dev);
6483 struct ixgbe_mac_info *mac = &adapter->hw.mac;
6484
6485 if (is_valid_ether_addr(mac->san_addr)) {
6486 rtnl_lock();
6487 err = dev_addr_add(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
6488 rtnl_unlock();
6489 }
6490 return err;
6491 }
6492
6493 /**
6494  * ixgbe_del_sanmac_netdev - Removes the SAN MAC address from the corresponding
6495  * netdev->dev_addrs
6496  * @dev: network interface device structure
6497 *
6498 * Returns non-zero on failure
6499 **/
6500 static int ixgbe_del_sanmac_netdev(struct net_device *dev)
6501 {
6502 int err = 0;
6503 struct ixgbe_adapter *adapter = netdev_priv(dev);
6504 struct ixgbe_mac_info *mac = &adapter->hw.mac;
6505
6506 if (is_valid_ether_addr(mac->san_addr)) {
6507 rtnl_lock();
6508 err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
6509 rtnl_unlock();
6510 }
6511 return err;
6512 }
6513
6514 #ifdef CONFIG_NET_POLL_CONTROLLER
6515 /*
6516 * Polling 'interrupt' - used by things like netconsole to send skbs
6517 * without having to re-enable interrupts. It's not called while
6518 * the interrupt routine is executing.
6519 */
6520 static void ixgbe_netpoll(struct net_device *netdev)
6521 {
6522 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6523 int i;
6524
6525 /* if interface is down do nothing */
6526 if (test_bit(__IXGBE_DOWN, &adapter->state))
6527 return;
6528
6529 adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
6530 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
6531 int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
6532 for (i = 0; i < num_q_vectors; i++) {
6533 struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
6534 ixgbe_msix_clean_many(0, q_vector);
6535 }
6536 } else {
6537 ixgbe_intr(adapter->pdev->irq, netdev);
6538 }
6539 adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
6540 }
6541 #endif
6542
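/**
 * ixgbe_get_stats64 - fill in a 64 bit view of the device statistics
 * @netdev: network interface device structure
 * @stats: storage for the statistics
 *
 * Tx and Rx byte/packet counts are folded from the per-queue counters
 * under RCU; the error counters are the ones last refreshed by the
 * watchdog via ixgbe_update_stats().
 **/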
6543 static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
6544 struct rtnl_link_stats64 *stats)
6545 {
6546 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6547 int i;
6548
6549 /* accurate rx/tx bytes/packets stats */
6550 dev_txq_stats_fold(netdev, stats);
6551 rcu_read_lock();
6552 for (i = 0; i < adapter->num_rx_queues; i++) {
6553 struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]);
6554 u64 bytes, packets;
6555 unsigned int start;
6556
6557 if (ring) {
6558 do {
6559 start = u64_stats_fetch_begin_bh(&ring->syncp);
6560 packets = ring->stats.packets;
6561 bytes = ring->stats.bytes;
6562 } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
6563 stats->rx_packets += packets;
6564 stats->rx_bytes += bytes;
6565 }
6566 }
6567 rcu_read_unlock();
6568 /* following stats updated by ixgbe_watchdog_task() */
6569 stats->multicast = netdev->stats.multicast;
6570 stats->rx_errors = netdev->stats.rx_errors;
6571 stats->rx_length_errors = netdev->stats.rx_length_errors;
6572 stats->rx_crc_errors = netdev->stats.rx_crc_errors;
6573 stats->rx_missed_errors = netdev->stats.rx_missed_errors;
6574 return stats;
6575 }
6576 
6578 static const struct net_device_ops ixgbe_netdev_ops = {
6579 .ndo_open = ixgbe_open,
6580 .ndo_stop = ixgbe_close,
6581 .ndo_start_xmit = ixgbe_xmit_frame,
6582 .ndo_select_queue = ixgbe_select_queue,
6583 .ndo_set_rx_mode = ixgbe_set_rx_mode,
6584 .ndo_set_multicast_list = ixgbe_set_rx_mode,
6585 .ndo_validate_addr = eth_validate_addr,
6586 .ndo_set_mac_address = ixgbe_set_mac,
6587 .ndo_change_mtu = ixgbe_change_mtu,
6588 .ndo_tx_timeout = ixgbe_tx_timeout,
6589 .ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid,
6590 .ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid,
6591 .ndo_do_ioctl = ixgbe_ioctl,
6592 .ndo_set_vf_mac = ixgbe_ndo_set_vf_mac,
6593 .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan,
6594 .ndo_set_vf_tx_rate = ixgbe_ndo_set_vf_bw,
6595 .ndo_get_vf_config = ixgbe_ndo_get_vf_config,
6596 .ndo_get_stats64 = ixgbe_get_stats64,
6597 #ifdef CONFIG_NET_POLL_CONTROLLER
6598 .ndo_poll_controller = ixgbe_netpoll,
6599 #endif
6600 #ifdef IXGBE_FCOE
6601 .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,
6602 .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put,
6603 .ndo_fcoe_enable = ixgbe_fcoe_enable,
6604 .ndo_fcoe_disable = ixgbe_fcoe_disable,
6605 .ndo_fcoe_get_wwn = ixgbe_fcoe_get_wwn,
6606 #endif /* IXGBE_FCOE */
6607 };
6608
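/**
 * ixgbe_probe_vf - enable SR-IOV and allocate the per-VF state
 * @adapter: board private structure
 * @ii: board specific info, supplies the PF mailbox operations
 *
 * Compiled out without CONFIG_PCI_IOV; otherwise a no-op unless the MAC
 * is an 82599 and the max_vfs module parameter is set. On any failure
 * SR-IOV is left disabled.
 **/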
6609 static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter,
6610 const struct ixgbe_info *ii)
6611 {
6612 #ifdef CONFIG_PCI_IOV
6613 struct ixgbe_hw *hw = &adapter->hw;
6614 int err;
6615
6616 if (hw->mac.type != ixgbe_mac_82599EB || !max_vfs)
6617 return;
6618
6619 /* The 82599 supports up to 64 VFs per physical function
6620 * but this implementation limits allocation to 63 so that
6621 * basic networking resources are still available to the
6622 * physical function
6623 */
6624 adapter->num_vfs = (max_vfs > 63) ? 63 : max_vfs;
6625 adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;
6626 err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
6627 if (err) {
6628 e_err(probe, "Failed to enable PCI sriov: %d\n", err);
6629 goto err_novfs;
6630 }
6631 	/* If the call to enable VFs succeeded then allocate memory
6632 	 * for per-VF control structures.
6633 */
6634 adapter->vfinfo =
6635 kcalloc(adapter->num_vfs,
6636 sizeof(struct vf_data_storage), GFP_KERNEL);
6637 if (adapter->vfinfo) {
6638 /* Now that we're sure SR-IOV is enabled
6639 * and memory allocated set up the mailbox parameters
6640 */
6641 ixgbe_init_mbx_params_pf(hw);
6642 memcpy(&hw->mbx.ops, ii->mbx_ops,
6643 sizeof(hw->mbx.ops));
6644
6645 /* Disable RSC when in SR-IOV mode */
6646 adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
6647 IXGBE_FLAG2_RSC_ENABLED);
6648 return;
6649 }
6650
6651 /* Oh oh */
6652 e_err(probe, "Unable to allocate memory for VF Data Storage - "
6653 "SRIOV disabled\n");
6654 pci_disable_sriov(adapter->pdev);
6655
6656 err_novfs:
6657 adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
6658 adapter->num_vfs = 0;
6659 #endif /* CONFIG_PCI_IOV */
6660 }
6661
6662 /**
6663 * ixgbe_probe - Device Initialization Routine
6664 * @pdev: PCI device information struct
6665 * @ent: entry in ixgbe_pci_tbl
6666 *
6667 * Returns 0 on success, negative on failure
6668 *
6669 * ixgbe_probe initializes an adapter identified by a pci_dev structure.
6670 * The OS initialization, configuring of the adapter private structure,
6671 * and a hardware reset occur.
6672 **/
6673 static int __devinit ixgbe_probe(struct pci_dev *pdev,
6674 const struct pci_device_id *ent)
6675 {
6676 struct net_device *netdev;
6677 struct ixgbe_adapter *adapter = NULL;
6678 struct ixgbe_hw *hw;
6679 const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
6680 static int cards_found;
6681 int i, err, pci_using_dac;
6682 unsigned int indices = num_possible_cpus();
6683 #ifdef IXGBE_FCOE
6684 u16 device_caps;
6685 #endif
6686 u32 part_num, eec;
6687
6688 /* Catch broken hardware that put the wrong VF device ID in
6689 * the PCIe SR-IOV capability.
6690 */
6691 if (pdev->is_virtfn) {
6692 		WARN(1, "%s (%hx:%hx) should not be a VF!\n",
6693 pci_name(pdev), pdev->vendor, pdev->device);
6694 return -EINVAL;
6695 }
6696
6697 err = pci_enable_device_mem(pdev);
6698 if (err)
6699 return err;
6700
6701 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
6702 !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
6703 pci_using_dac = 1;
6704 } else {
6705 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
6706 if (err) {
6707 err = dma_set_coherent_mask(&pdev->dev,
6708 DMA_BIT_MASK(32));
6709 if (err) {
6710 dev_err(&pdev->dev,
6711 "No usable DMA configuration, aborting\n");
6712 goto err_dma;
6713 }
6714 }
6715 pci_using_dac = 0;
6716 }
6717
6718 err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
6719 IORESOURCE_MEM), ixgbe_driver_name);
6720 if (err) {
6721 dev_err(&pdev->dev,
6722 "pci_request_selected_regions failed 0x%x\n", err);
6723 goto err_pci_reg;
6724 }
6725
6726 pci_enable_pcie_error_reporting(pdev);
6727
6728 pci_set_master(pdev);
6729 pci_save_state(pdev);
6730
6731 if (ii->mac == ixgbe_mac_82598EB)
6732 indices = min_t(unsigned int, indices, IXGBE_MAX_RSS_INDICES);
6733 else
6734 indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES);
6735
6736 indices = max_t(unsigned int, indices, IXGBE_MAX_DCB_INDICES);
6737 #ifdef IXGBE_FCOE
6738 indices += min_t(unsigned int, num_possible_cpus(),
6739 IXGBE_MAX_FCOE_INDICES);
6740 #endif
6741 netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
6742 if (!netdev) {
6743 err = -ENOMEM;
6744 goto err_alloc_etherdev;
6745 }
6746
6747 SET_NETDEV_DEV(netdev, &pdev->dev);
6748
6749 pci_set_drvdata(pdev, netdev);
6750 adapter = netdev_priv(netdev);
6751
6752 adapter->netdev = netdev;
6753 adapter->pdev = pdev;
6754 hw = &adapter->hw;
6755 hw->back = adapter;
6756 adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
6757
6758 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
6759 pci_resource_len(pdev, 0));
6760 if (!hw->hw_addr) {
6761 err = -EIO;
6762 goto err_ioremap;
6763 }
6764
6770 netdev->netdev_ops = &ixgbe_netdev_ops;
6771 ixgbe_set_ethtool_ops(netdev);
6772 netdev->watchdog_timeo = 5 * HZ;
6773 strcpy(netdev->name, pci_name(pdev));
6774
6775 adapter->bd_number = cards_found;
6776
6777 /* Setup hw api */
6778 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
6779 hw->mac.type = ii->mac;
6780
6781 /* EEPROM */
6782 memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
6783 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
6784 /* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */
6785 if (!(eec & (1 << 8)))
6786 hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;
6787
6788 /* PHY */
6789 memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
6790 hw->phy.sfp_type = ixgbe_sfp_type_unknown;
6791 /* ixgbe_identify_phy_generic will set prtad and mmds properly */
6792 hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
6793 hw->phy.mdio.mmds = 0;
6794 hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
6795 hw->phy.mdio.dev = netdev;
6796 hw->phy.mdio.mdio_read = ixgbe_mdio_read;
6797 hw->phy.mdio.mdio_write = ixgbe_mdio_write;
6798
6799 /* set up this timer and work struct before calling get_invariants
6800 * which might start the timer
6801 */
6802 init_timer(&adapter->sfp_timer);
6803 adapter->sfp_timer.function = ixgbe_sfp_timer;
6804 adapter->sfp_timer.data = (unsigned long) adapter;
6805
6806 INIT_WORK(&adapter->sfp_task, ixgbe_sfp_task);
6807
6808 /* multispeed fiber has its own tasklet, called from GPI SDP1 context */
6809 INIT_WORK(&adapter->multispeed_fiber_task, ixgbe_multispeed_fiber_task);
6810
6811 /* a new SFP+ module arrival, called from GPI SDP2 context */
6812 INIT_WORK(&adapter->sfp_config_module_task,
6813 ixgbe_sfp_config_module_task);
6814
6815 ii->get_invariants(hw);
6816
6817 /* setup the private structure */
6818 err = ixgbe_sw_init(adapter);
6819 if (err)
6820 goto err_sw_init;
6821
6822 	/* Make it possible for the adapter to be woken up via WOL */
6823 if (adapter->hw.mac.type == ixgbe_mac_82599EB)
6824 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
6825
6826 /*
6827 	 * If there is a fan on this device and it has failed, log the
6828 * failure.
6829 */
6830 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
6831 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
6832 if (esdp & IXGBE_ESDP_SDP1)
6833 e_crit(probe, "Fan has stopped, replace the adapter\n");
6834 }
6835
6836 /* reset_hw fills in the perm_addr as well */
6837 hw->phy.reset_if_overtemp = true;
6838 err = hw->mac.ops.reset_hw(hw);
6839 hw->phy.reset_if_overtemp = false;
6840 if (err == IXGBE_ERR_SFP_NOT_PRESENT &&
6841 hw->mac.type == ixgbe_mac_82598EB) {
6842 /*
6843 * Start a kernel thread to watch for a module to arrive.
6844 * Only do this for 82598, since 82599 will generate
6845 * interrupts on module arrival.
6846 */
6847 set_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
6848 mod_timer(&adapter->sfp_timer,
6849 round_jiffies(jiffies + (2 * HZ)));
6850 err = 0;
6851 } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
6852 e_dev_err("failed to initialize because an unsupported SFP+ "
6853 "module type was detected.\n");
6854 e_dev_err("Reload the driver after installing a supported "
6855 "module.\n");
6856 goto err_sw_init;
6857 } else if (err) {
6858 e_dev_err("HW Init failed: %d\n", err);
6859 goto err_sw_init;
6860 }
6861
6862 ixgbe_probe_vf(adapter, ii);
6863
6864 netdev->features = NETIF_F_SG |
6865 NETIF_F_IP_CSUM |
6866 NETIF_F_HW_VLAN_TX |
6867 NETIF_F_HW_VLAN_RX |
6868 NETIF_F_HW_VLAN_FILTER;
6869
6870 netdev->features |= NETIF_F_IPV6_CSUM;
6871 netdev->features |= NETIF_F_TSO;
6872 netdev->features |= NETIF_F_TSO6;
6873 netdev->features |= NETIF_F_GRO;
6874
6875 if (adapter->hw.mac.type == ixgbe_mac_82599EB)
6876 netdev->features |= NETIF_F_SCTP_CSUM;
6877
6878 netdev->vlan_features |= NETIF_F_TSO;
6879 netdev->vlan_features |= NETIF_F_TSO6;
6880 netdev->vlan_features |= NETIF_F_IP_CSUM;
6881 netdev->vlan_features |= NETIF_F_IPV6_CSUM;
6882 netdev->vlan_features |= NETIF_F_SG;
6883
6884 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
6885 adapter->flags &= ~(IXGBE_FLAG_RSS_ENABLED |
6886 IXGBE_FLAG_DCB_ENABLED);
6887 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
6888 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
6889
6890 #ifdef CONFIG_IXGBE_DCB
6891 netdev->dcbnl_ops = &dcbnl_ops;
6892 #endif
6893
6894 #ifdef IXGBE_FCOE
6895 if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
6896 if (hw->mac.ops.get_device_caps) {
6897 hw->mac.ops.get_device_caps(hw, &device_caps);
6898 if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
6899 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
6900 }
6901 }
6902 if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
6903 netdev->vlan_features |= NETIF_F_FCOE_CRC;
6904 netdev->vlan_features |= NETIF_F_FSO;
6905 netdev->vlan_features |= NETIF_F_FCOE_MTU;
6906 }
6907 #endif /* IXGBE_FCOE */
6908 if (pci_using_dac) {
6909 netdev->features |= NETIF_F_HIGHDMA;
6910 netdev->vlan_features |= NETIF_F_HIGHDMA;
6911 }
6912
6913 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
6914 netdev->features |= NETIF_F_LRO;
6915
6916 /* make sure the EEPROM is good */
6917 if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
6918 e_dev_err("The EEPROM Checksum Is Not Valid\n");
6919 err = -EIO;
6920 goto err_eeprom;
6921 }
6922
6923 memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
6924 memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len);
6925
6926 if (ixgbe_validate_mac_addr(netdev->perm_addr)) {
6927 e_dev_err("invalid MAC address\n");
6928 err = -EIO;
6929 goto err_eeprom;
6930 }
6931
6932 /* power down the optics */
6933 if (hw->phy.multispeed_fiber)
6934 hw->mac.ops.disable_tx_laser(hw);
6935
6936 init_timer(&adapter->watchdog_timer);
6937 adapter->watchdog_timer.function = ixgbe_watchdog;
6938 adapter->watchdog_timer.data = (unsigned long)adapter;
6939
6940 INIT_WORK(&adapter->reset_task, ixgbe_reset_task);
6941 INIT_WORK(&adapter->watchdog_task, ixgbe_watchdog_task);
6942
6943 err = ixgbe_init_interrupt_scheme(adapter);
6944 if (err)
6945 goto err_sw_init;
6946
6947 switch (pdev->device) {
6948 case IXGBE_DEV_ID_82599_KX4:
6949 adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
6950 IXGBE_WUFC_MC | IXGBE_WUFC_BC);
6951 break;
6952 default:
6953 adapter->wol = 0;
6954 break;
6955 }
6956 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
6957
6958 /* pick up the PCI bus settings for reporting later */
6959 hw->mac.ops.get_bus_info(hw);
6960
6961 /* print bus type/speed/width info */
6962 e_dev_info("(PCI Express:%s:%s) %pM\n",
6963 (hw->bus.speed == ixgbe_bus_speed_5000 ? "5.0Gb/s" :
6964 hw->bus.speed == ixgbe_bus_speed_2500 ? "2.5Gb/s" :
6965 "Unknown"),
6966 (hw->bus.width == ixgbe_bus_width_pcie_x8 ? "Width x8" :
6967 hw->bus.width == ixgbe_bus_width_pcie_x4 ? "Width x4" :
6968 hw->bus.width == ixgbe_bus_width_pcie_x1 ? "Width x1" :
6969 "Unknown"),
6970 netdev->dev_addr);
6971 ixgbe_read_pba_num_generic(hw, &part_num);
6972 if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
6973 e_dev_info("MAC: %d, PHY: %d, SFP+: %d, "
6974 "PBA No: %06x-%03x\n",
6975 hw->mac.type, hw->phy.type, hw->phy.sfp_type,
6976 (part_num >> 8), (part_num & 0xff));
6977 else
6978 e_dev_info("MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
6979 hw->mac.type, hw->phy.type,
6980 (part_num >> 8), (part_num & 0xff));
6981
6982 if (hw->bus.width <= ixgbe_bus_width_pcie_x4) {
6983 e_dev_warn("PCI-Express bandwidth available for this card is "
6984 "not sufficient for optimal performance.\n");
6985 e_dev_warn("For optimal performance a x8 PCI-Express slot "
6986 "is required.\n");
6987 }
6988
6989 /* save off EEPROM version number */
6990 hw->eeprom.ops.read(hw, 0x29, &adapter->eeprom_version);
6991
6992 /* reset the hardware with the new settings */
6993 err = hw->mac.ops.start_hw(hw);
6994
6995 if (err == IXGBE_ERR_EEPROM_VERSION) {
6996 /* We are running on a pre-production device, log a warning */
6997 e_dev_warn("This device is a pre-production adapter/LOM. "
6998 "Please be aware there may be issues associated "
6999 "with your hardware. If you are experiencing "
7000 "problems please contact your Intel or hardware "
7001 "representative who provided you with this "
7002 "hardware.\n");
7003 }
7004 strcpy(netdev->name, "eth%d");
7005 err = register_netdev(netdev);
7006 if (err)
7007 goto err_register;
7008
7009 /* carrier off reporting is important to ethtool even BEFORE open */
7010 netif_carrier_off(netdev);
7011
7012 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
7013 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
7014 INIT_WORK(&adapter->fdir_reinit_task, ixgbe_fdir_reinit_task);
7015
7016 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
7017 INIT_WORK(&adapter->check_overtemp_task,
7018 ixgbe_check_overtemp_task);
7019 #ifdef CONFIG_IXGBE_DCA
7020 if (dca_add_requester(&pdev->dev) == 0) {
7021 adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
7022 ixgbe_setup_dca(adapter);
7023 }
7024 #endif
7025 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
7026 e_info(probe, "IOV is enabled with %d VFs\n", adapter->num_vfs);
7027 for (i = 0; i < adapter->num_vfs; i++)
7028 ixgbe_vf_configuration(pdev, (i | 0x10000000));
7029 }
7030
7031 /* add san mac addr to netdev */
7032 ixgbe_add_sanmac_netdev(netdev);
7033
7034 e_dev_info("Intel(R) 10 Gigabit Network Connection\n");
7035 cards_found++;
7036 return 0;
7037
7038 err_register:
7039 ixgbe_release_hw_control(adapter);
7040 ixgbe_clear_interrupt_scheme(adapter);
7041 err_sw_init:
7042 err_eeprom:
7043 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
7044 ixgbe_disable_sriov(adapter);
7045 clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
7046 del_timer_sync(&adapter->sfp_timer);
7047 cancel_work_sync(&adapter->sfp_task);
7048 cancel_work_sync(&adapter->multispeed_fiber_task);
7049 cancel_work_sync(&adapter->sfp_config_module_task);
7050 iounmap(hw->hw_addr);
7051 err_ioremap:
7052 free_netdev(netdev);
7053 err_alloc_etherdev:
7054 pci_release_selected_regions(pdev,
7055 pci_select_bars(pdev, IORESOURCE_MEM));
7056 err_pci_reg:
7057 err_dma:
7058 pci_disable_device(pdev);
7059 return err;
7060 }
7061
7062 /**
7063 * ixgbe_remove - Device Removal Routine
7064 * @pdev: PCI device information struct
7065 *
7066 * ixgbe_remove is called by the PCI subsystem to alert the driver
7067  * that it should release a PCI device. This could be caused by a
7068 * Hot-Plug event, or because the driver is going to be removed from
7069 * memory.
7070 **/
7071 static void __devexit ixgbe_remove(struct pci_dev *pdev)
7072 {
7073 struct net_device *netdev = pci_get_drvdata(pdev);
7074 struct ixgbe_adapter *adapter = netdev_priv(netdev);
7075
7076 set_bit(__IXGBE_DOWN, &adapter->state);
7077 /* clear the module not found bit to make sure the worker won't
7078 * reschedule
7079 */
7080 clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
7081 del_timer_sync(&adapter->watchdog_timer);
7082
7083 del_timer_sync(&adapter->sfp_timer);
7084 cancel_work_sync(&adapter->watchdog_task);
7085 cancel_work_sync(&adapter->sfp_task);
7086 cancel_work_sync(&adapter->multispeed_fiber_task);
7087 cancel_work_sync(&adapter->sfp_config_module_task);
7088 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
7089 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
7090 cancel_work_sync(&adapter->fdir_reinit_task);
7091 flush_scheduled_work();
7092
7093 #ifdef CONFIG_IXGBE_DCA
7094 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
7095 adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
7096 dca_remove_requester(&pdev->dev);
7097 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
7098 }
7099
7100 #endif
7101 #ifdef IXGBE_FCOE
7102 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
7103 ixgbe_cleanup_fcoe(adapter);
7104
7105 #endif /* IXGBE_FCOE */
7106
7107 /* remove the added san mac */
7108 ixgbe_del_sanmac_netdev(netdev);
7109
7110 if (netdev->reg_state == NETREG_REGISTERED)
7111 unregister_netdev(netdev);
7112
7113 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
7114 ixgbe_disable_sriov(adapter);
7115
7116 ixgbe_clear_interrupt_scheme(adapter);
7117
7118 ixgbe_release_hw_control(adapter);
7119
7120 iounmap(adapter->hw.hw_addr);
7121 pci_release_selected_regions(pdev, pci_select_bars(pdev,
7122 IORESOURCE_MEM));
7123
7124 e_dev_info("complete\n");
7125
7126 free_netdev(netdev);
7127
7128 pci_disable_pcie_error_reporting(pdev);
7129
7130 pci_disable_device(pdev);
7131 }
7132
7133 /**
7134 * ixgbe_io_error_detected - called when PCI error is detected
7135 * @pdev: Pointer to PCI device
7136 * @state: The current pci connection state
7137 *
7138 * This function is called after a PCI bus error affecting
7139 * this device has been detected.
7140 */
7141 static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
7142 pci_channel_state_t state)
7143 {
7144 struct net_device *netdev = pci_get_drvdata(pdev);
7145 struct ixgbe_adapter *adapter = netdev_priv(netdev);
7146
7147 netif_device_detach(netdev);
7148
7149 if (state == pci_channel_io_perm_failure)
7150 return PCI_ERS_RESULT_DISCONNECT;
7151
7152 if (netif_running(netdev))
7153 ixgbe_down(adapter);
7154 pci_disable_device(pdev);
7155
7156 /* Request a slot reset. */
7157 return PCI_ERS_RESULT_NEED_RESET;
7158 }
7159
7160 /**
7161 * ixgbe_io_slot_reset - called after the pci bus has been reset.
7162 * @pdev: Pointer to PCI device
7163 *
7164 * Restart the card from scratch, as if from a cold-boot.
7165 */
7166 static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
7167 {
7168 struct net_device *netdev = pci_get_drvdata(pdev);
7169 struct ixgbe_adapter *adapter = netdev_priv(netdev);
7170 pci_ers_result_t result;
7171 int err;
7172
7173 if (pci_enable_device_mem(pdev)) {
7174 e_err(probe, "Cannot re-enable PCI device after reset.\n");
7175 result = PCI_ERS_RESULT_DISCONNECT;
7176 } else {
7177 pci_set_master(pdev);
7178 pci_restore_state(pdev);
7179 pci_save_state(pdev);
7180
7181 pci_wake_from_d3(pdev, false);
7182
7183 ixgbe_reset(adapter);
7184 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
7185 result = PCI_ERS_RESULT_RECOVERED;
7186 }
7187
7188 err = pci_cleanup_aer_uncorrect_error_status(pdev);
7189 if (err) {
7190 e_dev_err("pci_cleanup_aer_uncorrect_error_status "
7191 "failed 0x%0x\n", err);
7192 /* non-fatal, continue */
7193 }
7194
7195 return result;
7196 }
7197
7198 /**
7199 * ixgbe_io_resume - called when traffic can start flowing again.
7200 * @pdev: Pointer to PCI device
7201 *
7202 * This callback is called when the error recovery driver tells us that
7203  * it's OK to resume normal operation.
7204 */
7205 static void ixgbe_io_resume(struct pci_dev *pdev)
7206 {
7207 struct net_device *netdev = pci_get_drvdata(pdev);
7208 struct ixgbe_adapter *adapter = netdev_priv(netdev);
7209
7210 if (netif_running(netdev)) {
7211 if (ixgbe_up(adapter)) {
7212 e_info(probe, "ixgbe_up failed after reset\n");
7213 return;
7214 }
7215 }
7216
7217 netif_device_attach(netdev);
7218 }
7219
7220 static struct pci_error_handlers ixgbe_err_handler = {
7221 .error_detected = ixgbe_io_error_detected,
7222 .slot_reset = ixgbe_io_slot_reset,
7223 .resume = ixgbe_io_resume,
7224 };
7225
7226 static struct pci_driver ixgbe_driver = {
7227 .name = ixgbe_driver_name,
7228 .id_table = ixgbe_pci_tbl,
7229 .probe = ixgbe_probe,
7230 .remove = __devexit_p(ixgbe_remove),
7231 #ifdef CONFIG_PM
7232 .suspend = ixgbe_suspend,
7233 .resume = ixgbe_resume,
7234 #endif
7235 .shutdown = ixgbe_shutdown,
7236 .err_handler = &ixgbe_err_handler
7237 };
7238
7239 /**
7240 * ixgbe_init_module - Driver Registration Routine
7241 *
7242 * ixgbe_init_module is the first routine called when the driver is
7243 * loaded. All it does is register with the PCI subsystem.
7244 **/
7245 static int __init ixgbe_init_module(void)
7246 {
7247 int ret;
7248 pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version);
7249 pr_info("%s\n", ixgbe_copyright);
7250
7251 #ifdef CONFIG_IXGBE_DCA
7252 dca_register_notify(&dca_notifier);
7253 #endif
7254
7255 ret = pci_register_driver(&ixgbe_driver);
7256 return ret;
7257 }
7258
7259 module_init(ixgbe_init_module);
7260
7261 /**
7262 * ixgbe_exit_module - Driver Exit Cleanup Routine
7263 *
7264 * ixgbe_exit_module is called just before the driver is removed
7265 * from memory.
7266 **/
7267 static void __exit ixgbe_exit_module(void)
7268 {
7269 #ifdef CONFIG_IXGBE_DCA
7270 dca_unregister_notify(&dca_notifier);
7271 #endif
7272 pci_unregister_driver(&ixgbe_driver);
7273 rcu_barrier(); /* Wait for completion of call_rcu()'s */
7274 }
7275
7276 #ifdef CONFIG_IXGBE_DCA
7277 static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
7278 void *p)
7279 {
7280 int ret_val;
7281
7282 ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
7283 __ixgbe_notify_dca);
7284
7285 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
7286 }
7287
7288 #endif /* CONFIG_IXGBE_DCA */
7289
7290 /**
7291  * ixgbe_get_hw_dev - return the net_device for the given hardware struct;
7292  * used by the hardware layer to print debugging information
7293 **/
7294 struct net_device *ixgbe_get_hw_dev(struct ixgbe_hw *hw)
7295 {
7296 struct ixgbe_adapter *adapter = hw->back;
7297 return adapter->netdev;
7298 }
7299
7300 module_exit(ixgbe_exit_module);
7301
7302 /* ixgbe_main.c */