ixgbe: fix bug when EITR=0 causing no writebacks
drivers/net/ixgbe/ixgbe_main.c
1 /*******************************************************************************
2
3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26 *******************************************************************************/
27
28 #include <linux/types.h>
29 #include <linux/module.h>
30 #include <linux/pci.h>
31 #include <linux/netdevice.h>
32 #include <linux/vmalloc.h>
33 #include <linux/string.h>
34 #include <linux/in.h>
35 #include <linux/ip.h>
36 #include <linux/tcp.h>
37 #include <linux/pkt_sched.h>
38 #include <linux/ipv6.h>
39 #include <linux/slab.h>
40 #include <net/checksum.h>
41 #include <net/ip6_checksum.h>
42 #include <linux/ethtool.h>
43 #include <linux/if_vlan.h>
44 #include <scsi/fc/fc_fcoe.h>
45
46 #include "ixgbe.h"
47 #include "ixgbe_common.h"
48 #include "ixgbe_dcb_82599.h"
49 #include "ixgbe_sriov.h"
50
51 char ixgbe_driver_name[] = "ixgbe";
52 static const char ixgbe_driver_string[] =
53 "Intel(R) 10 Gigabit PCI Express Network Driver";
54
55 #define DRV_VERSION "2.0.62-k2"
56 const char ixgbe_driver_version[] = DRV_VERSION;
57 static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation.";
58
59 static const struct ixgbe_info *ixgbe_info_tbl[] = {
60 [board_82598] = &ixgbe_82598_info,
61 [board_82599] = &ixgbe_82599_info,
62 };
63
64 /* ixgbe_pci_tbl - PCI Device ID Table
65 *
66 * Wildcard entries (PCI_ANY_ID) should come last
67 * Last entry must be all 0s
68 *
69 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
70 * Class, Class Mask, private data (not used) }
71 */
72 static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
73 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598),
74 board_82598 },
75 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT),
76 board_82598 },
77 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT),
78 board_82598 },
79 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT),
80 board_82598 },
81 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2),
82 board_82598 },
83 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4),
84 board_82598 },
85 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT),
86 board_82598 },
87 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT),
88 board_82598 },
89 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM),
90 board_82598 },
91 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR),
92 board_82598 },
93 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM),
94 board_82598 },
95 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX),
96 board_82598 },
97 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4),
98 board_82599 },
99 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM),
100 board_82599 },
101 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR),
102 board_82599 },
103 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP),
104 board_82599 },
105 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM),
106 board_82599 },
107 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ),
108 board_82599 },
109 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4),
110 board_82599 },
111 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE),
112 board_82599 },
113
114 /* required last entry */
115 {0, }
116 };
117 MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);
118
119 #ifdef CONFIG_IXGBE_DCA
120 static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
121 void *p);
122 static struct notifier_block dca_notifier = {
123 .notifier_call = ixgbe_notify_dca,
124 .next = NULL,
125 .priority = 0
126 };
127 #endif
128
129 #ifdef CONFIG_PCI_IOV
130 static unsigned int max_vfs;
131 module_param(max_vfs, uint, 0);
132 MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
133 "per physical function");
134 #endif /* CONFIG_PCI_IOV */
135
136 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
137 MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
138 MODULE_LICENSE("GPL");
139 MODULE_VERSION(DRV_VERSION);
140
141 #define DEFAULT_DEBUG_LEVEL_SHIFT 3
142
143 static inline void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
144 {
145 struct ixgbe_hw *hw = &adapter->hw;
146 u32 gcr;
147 u32 gpie;
148 u32 vmdctl;
149
150 #ifdef CONFIG_PCI_IOV
151 /* disable iov and allow time for transactions to clear */
152 pci_disable_sriov(adapter->pdev);
153 #endif
154
155 /* turn off device IOV mode */
156 gcr = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
157 gcr &= ~(IXGBE_GCR_EXT_SRIOV);
158 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr);
159 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
160 gpie &= ~IXGBE_GPIE_VTMODE_MASK;
161 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
162
163 /* set default pool back to 0 */
164 vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
165 vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
166 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
167
168 /* take a breather then clean up driver data */
169 msleep(100);
170 /* kfree(NULL) is a no-op, so no NULL check is needed */
171 kfree(adapter->vfinfo);
172 adapter->vfinfo = NULL;
173
174 adapter->num_vfs = 0;
175 adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
176 }
177
178 static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
179 {
180 u32 ctrl_ext;
181
182 /* Let firmware take over control of h/w */
183 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
184 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
185 ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
186 }
187
188 static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
189 {
190 u32 ctrl_ext;
191
192 /* Let firmware know the driver has taken over */
193 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
194 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
195 ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
196 }
197
198 /**
199 * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
200 * @adapter: pointer to adapter struct
201 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
202 * @queue: queue to map the corresponding interrupt to
203 * @msix_vector: the vector to map to the corresponding queue
204 *
205 */
206 static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
207 u8 queue, u8 msix_vector)
208 {
209 u32 ivar, index;
210 struct ixgbe_hw *hw = &adapter->hw;
211 switch (hw->mac.type) {
212 case ixgbe_mac_82598EB:
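		/*
		 * Each 82598 IVAR register packs four 8-bit allocation
		 * entries; the cause index is the queue number for Rx and
		 * 64 + queue for Tx, so (cause >> 2) selects the register
		 * and (cause & 0x3) the byte lane within it.
		 */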
213 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
214 if (direction == -1)
215 direction = 0;
216 index = (((direction * 64) + queue) >> 2) & 0x1F;
217 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
218 ivar &= ~(0xFF << (8 * (queue & 0x3)));
219 ivar |= (msix_vector << (8 * (queue & 0x3)));
220 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
221 break;
222 case ixgbe_mac_82599EB:
223 if (direction == -1) {
224 /* other causes */
225 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
226 index = ((queue & 1) * 8);
227 ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC);
228 ivar &= ~(0xFF << index);
229 ivar |= (msix_vector << index);
230 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar);
231 break;
232 } else {
233 /* tx or rx causes */
234 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
235 index = ((16 * (queue & 1)) + (8 * direction));
236 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
237 ivar &= ~(0xFF << index);
238 ivar |= (msix_vector << index);
239 IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar);
240 break;
241 }
242 default:
243 break;
244 }
245 }
246
247 static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
248 u64 qmask)
249 {
250 u32 mask;
251
252 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
253 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
254 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
255 } else {
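		/*
		 * 82599 carries 64 queue-interrupt bits split across two
		 * 32-bit EICS registers, so the low and high halves of the
		 * queue mask are triggered separately.
		 */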
256 mask = (qmask & 0xFFFFFFFF);
257 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
258 mask = (qmask >> 32);
259 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
260 }
261 }
262
263 static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
264 struct ixgbe_tx_buffer
265 *tx_buffer_info)
266 {
267 if (tx_buffer_info->dma) {
268 if (tx_buffer_info->mapped_as_page)
269 pci_unmap_page(adapter->pdev,
270 tx_buffer_info->dma,
271 tx_buffer_info->length,
272 PCI_DMA_TODEVICE);
273 else
274 pci_unmap_single(adapter->pdev,
275 tx_buffer_info->dma,
276 tx_buffer_info->length,
277 PCI_DMA_TODEVICE);
278 tx_buffer_info->dma = 0;
279 }
280 if (tx_buffer_info->skb) {
281 dev_kfree_skb_any(tx_buffer_info->skb);
282 tx_buffer_info->skb = NULL;
283 }
284 tx_buffer_info->time_stamp = 0;
285 /* tx_buffer_info must be completely set up in the transmit path */
286 }
287
288 /**
289 * ixgbe_tx_is_paused - check if the tx ring is paused
290 * @adapter: the ixgbe adapter
291 * @tx_ring: the corresponding tx_ring
292 *
293 * If not in DCB mode, checks TFCS.TXOFF; otherwise, finds the
294 * corresponding TC of this tx_ring and checks that TC's TFCS bit.
295 *
296 * Returns : true if paused
297 */
298 static inline bool ixgbe_tx_is_paused(struct ixgbe_adapter *adapter,
299 struct ixgbe_ring *tx_ring)
300 {
301 u32 txoff = IXGBE_TFCS_TXOFF;
302
303 #ifdef CONFIG_IXGBE_DCB
304 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
305 int tc;
306 int reg_idx = tx_ring->reg_idx;
307 int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
308
309 switch (adapter->hw.mac.type) {
310 case ixgbe_mac_82598EB:
311 tc = reg_idx >> 2;
312 txoff = IXGBE_TFCS_TXOFF0;
313 break;
314 case ixgbe_mac_82599EB:
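			/*
			 * 82599 spreads Tx queues unevenly across TCs: with
			 * 8 TCs the first two own 32 queues each, the next
			 * two 16 each, and the last four 8 each, which is
			 * what the shift ladder below decodes.
			 */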
315 tc = 0;
316 txoff = IXGBE_TFCS_TXOFF;
317 if (dcb_i == 8) {
318 /* TC0, TC1 */
319 tc = reg_idx >> 5;
320 if (tc == 2) /* TC2, TC3 */
321 tc += (reg_idx - 64) >> 4;
322 else if (tc == 3) /* TC4, TC5, TC6, TC7 */
323 tc += 1 + ((reg_idx - 96) >> 3);
324 } else if (dcb_i == 4) {
325 /* TC0, TC1 */
326 tc = reg_idx >> 6;
327 if (tc == 1) {
328 tc += (reg_idx - 64) >> 5;
329 if (tc == 2) /* TC2, TC3 */
330 tc += (reg_idx - 96) >> 4;
331 }
332 }
333 break;
334 default:
335 tc = 0;
336 }
337 txoff <<= tc;
338 }
339 #endif
340 return IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & txoff;
341 }
342
343 static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
344 struct ixgbe_ring *tx_ring,
345 unsigned int eop)
346 {
347 struct ixgbe_hw *hw = &adapter->hw;
348
349 /* Detect a transmit hang in hardware, this serializes the
350 * check with the clearing of time_stamp and movement of eop */
351 adapter->detect_tx_hung = false;
352 if (tx_ring->tx_buffer_info[eop].time_stamp &&
353 time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
354 !ixgbe_tx_is_paused(adapter, tx_ring)) {
355 /* detected Tx unit hang */
356 union ixgbe_adv_tx_desc *tx_desc;
357 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
358 DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
359 " Tx Queue <%d>\n"
360 " TDH, TDT <%x>, <%x>\n"
361 " next_to_use <%x>\n"
362 " next_to_clean <%x>\n"
363 "tx_buffer_info[next_to_clean]\n"
364 " time_stamp <%lx>\n"
365 " jiffies <%lx>\n",
366 tx_ring->queue_index,
367 IXGBE_READ_REG(hw, tx_ring->head),
368 IXGBE_READ_REG(hw, tx_ring->tail),
369 tx_ring->next_to_use, eop,
370 tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
371 return true;
372 }
373
374 return false;
375 }
376
377 #define IXGBE_MAX_TXD_PWR 14
378 #define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
379
380 /* Tx Descriptors needed, worst case */
381 #define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
382 (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
383 #define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
384 MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
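/*
 * Worked example, assuming 4K pages: TXD_USE_COUNT(S) is simply
 * ceil(S / 16K), so DESC_NEEDED comes to 1 descriptor for up to 16K
 * of skb->data, one per page fragment, plus 1 context descriptor.
 */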
385
386 static void ixgbe_tx_timeout(struct net_device *netdev);
387
388 /**
389 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
390 * @q_vector: structure containing interrupt and ring information
391 * @tx_ring: tx ring to clean
392 **/
393 static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
394 struct ixgbe_ring *tx_ring)
395 {
396 struct ixgbe_adapter *adapter = q_vector->adapter;
397 struct net_device *netdev = adapter->netdev;
398 union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
399 struct ixgbe_tx_buffer *tx_buffer_info;
400 unsigned int i, eop, count = 0;
401 unsigned int total_bytes = 0, total_packets = 0;
402
403 i = tx_ring->next_to_clean;
404 eop = tx_ring->tx_buffer_info[i].next_to_watch;
405 eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
406
407 while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
408 (count < tx_ring->work_limit)) {
409 bool cleaned = false;
410 for ( ; !cleaned; count++) {
411 struct sk_buff *skb;
412 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
413 tx_buffer_info = &tx_ring->tx_buffer_info[i];
414 cleaned = (i == eop);
415 skb = tx_buffer_info->skb;
416
417 if (cleaned && skb) {
418 unsigned int segs, bytecount;
419 unsigned int hlen = skb_headlen(skb);
420
421 /* gso_segs is currently only valid for tcp */
422 segs = skb_shinfo(skb)->gso_segs ?: 1;
423 #ifdef IXGBE_FCOE
424 /* adjust for FCoE Sequence Offload */
425 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
426 && (skb->protocol == htons(ETH_P_FCOE)) &&
427 skb_is_gso(skb)) {
428 hlen = skb_transport_offset(skb) +
429 sizeof(struct fc_frame_header) +
430 sizeof(struct fcoe_crc_eof);
431 segs = DIV_ROUND_UP(skb->len - hlen,
432 skb_shinfo(skb)->gso_size);
433 }
434 #endif /* IXGBE_FCOE */
435 /* add the headers replicated on each extra segment to the byte count */
436 bytecount = ((segs - 1) * hlen) + skb->len;
437 total_packets += segs;
438 total_bytes += bytecount;
439 }
440
441 ixgbe_unmap_and_free_tx_resource(adapter,
442 tx_buffer_info);
443
444 tx_desc->wb.status = 0;
445
446 i++;
447 if (i == tx_ring->count)
448 i = 0;
449 }
450
451 eop = tx_ring->tx_buffer_info[i].next_to_watch;
452 eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
453 }
454
455 tx_ring->next_to_clean = i;
456
457 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
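	/*
	 * Only restart the queue once at least two worst-case packets
	 * worth of descriptors are free, so the queue does not bounce
	 * between the stopped and running states.
	 */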
458 if (unlikely(count && netif_carrier_ok(netdev) &&
459 (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
460 /* Make sure that anybody stopping the queue after this
461 * sees the new next_to_clean.
462 */
463 smp_mb();
464 if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
465 !test_bit(__IXGBE_DOWN, &adapter->state)) {
466 netif_wake_subqueue(netdev, tx_ring->queue_index);
467 ++tx_ring->restart_queue;
468 }
469 }
470
471 if (adapter->detect_tx_hung) {
472 if (ixgbe_check_tx_hang(adapter, tx_ring, i)) {
473 /* schedule immediate reset if we believe we hung */
474 DPRINTK(PROBE, INFO,
475 "tx hang %d detected, resetting adapter\n",
476 adapter->tx_timeout_count + 1);
477 ixgbe_tx_timeout(adapter->netdev);
478 }
479 }
480
481 /* re-arm the interrupt */
482 if (count >= tx_ring->work_limit)
483 ixgbe_irq_rearm_queues(adapter, ((u64)1 << q_vector->v_idx));
484
485 tx_ring->total_bytes += total_bytes;
486 tx_ring->total_packets += total_packets;
487 tx_ring->stats.packets += total_packets;
488 tx_ring->stats.bytes += total_bytes;
489 return (count < tx_ring->work_limit);
490 }
491
492 #ifdef CONFIG_IXGBE_DCA
493 static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
494 struct ixgbe_ring *rx_ring)
495 {
496 u32 rxctrl;
497 int cpu = get_cpu();
498 int q = rx_ring->reg_idx;
499
500 if (rx_ring->cpu != cpu) {
501 rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q));
502 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
503 rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
504 rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
505 } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
506 rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599;
507 rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
508 IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599);
509 }
510 rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
511 rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
512 rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN);
513 rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
514 IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
515 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl);
516 rx_ring->cpu = cpu;
517 }
518 put_cpu();
519 }
520
521 static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
522 struct ixgbe_ring *tx_ring)
523 {
524 u32 txctrl;
525 int cpu = get_cpu();
526 int q = tx_ring->reg_idx;
527 struct ixgbe_hw *hw = &adapter->hw;
528
529 if (tx_ring->cpu != cpu) {
530 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
531 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(q));
532 txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
533 txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
534 txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
535 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(q), txctrl);
536 } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
537 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(q));
538 txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599;
539 txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
540 IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
541 txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
542 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(q), txctrl);
543 }
544 tx_ring->cpu = cpu;
545 }
546 put_cpu();
547 }
548
549 static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
550 {
551 int i;
552
553 if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
554 return;
555
556 /* always use CB2 mode, difference is masked in the CB driver */
557 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
558
559 for (i = 0; i < adapter->num_tx_queues; i++) {
560 adapter->tx_ring[i]->cpu = -1;
561 ixgbe_update_tx_dca(adapter, adapter->tx_ring[i]);
562 }
563 for (i = 0; i < adapter->num_rx_queues; i++) {
564 adapter->rx_ring[i]->cpu = -1;
565 ixgbe_update_rx_dca(adapter, adapter->rx_ring[i]);
566 }
567 }
568
569 static int __ixgbe_notify_dca(struct device *dev, void *data)
570 {
571 struct net_device *netdev = dev_get_drvdata(dev);
572 struct ixgbe_adapter *adapter = netdev_priv(netdev);
573 unsigned long event = *(unsigned long *)data;
574
575 switch (event) {
576 case DCA_PROVIDER_ADD:
577 /* if we're already enabled, don't do it again */
578 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
579 break;
580 if (dca_add_requester(dev) == 0) {
581 adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
582 ixgbe_setup_dca(adapter);
583 break;
584 }
585 /* Fall Through since DCA is disabled. */
586 case DCA_PROVIDER_REMOVE:
587 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
588 dca_remove_requester(dev);
589 adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
590 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
591 }
592 break;
593 }
594
595 return 0;
596 }
597
598 #endif /* CONFIG_IXGBE_DCA */
599 /**
600 * ixgbe_receive_skb - Send a completed packet up the stack
601 * @q_vector: structure containing interrupt and ring information
602 * @skb: packet to send up
603 * @status: hardware indication of status of receive
604 * @ring: rx descriptor ring (for a specific queue) the packet came from
605 * @rx_desc: rx descriptor
606 **/
607 static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector,
608 struct sk_buff *skb, u8 status,
609 struct ixgbe_ring *ring,
610 union ixgbe_adv_rx_desc *rx_desc)
611 {
612 struct ixgbe_adapter *adapter = q_vector->adapter;
613 struct napi_struct *napi = &q_vector->napi;
614 bool is_vlan = (status & IXGBE_RXD_STAT_VP);
615 u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
616
617 skb_record_rx_queue(skb, ring->queue_index);
618 if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
619 if (adapter->vlgrp && is_vlan && (tag & VLAN_VID_MASK))
620 vlan_gro_receive(napi, adapter->vlgrp, tag, skb);
621 else
622 napi_gro_receive(napi, skb);
623 } else {
624 if (adapter->vlgrp && is_vlan && (tag & VLAN_VID_MASK))
625 vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
626 else
627 netif_rx(skb);
628 }
629 }
630
631 /**
632 * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
633 * @adapter: address of board private structure
634 * @rx_desc: rx descriptor carrying the status and error bits
635 * @skb: skb currently being received and modified
636 **/
637 static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
638 union ixgbe_adv_rx_desc *rx_desc,
639 struct sk_buff *skb)
640 {
641 u32 status_err = le32_to_cpu(rx_desc->wb.upper.status_error);
642
643 skb->ip_summed = CHECKSUM_NONE;
644
645 /* Rx csum disabled */
646 if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
647 return;
648
649 /* if IP and error */
650 if ((status_err & IXGBE_RXD_STAT_IPCS) &&
651 (status_err & IXGBE_RXDADV_ERR_IPE)) {
652 adapter->hw_csum_rx_error++;
653 return;
654 }
655
656 if (!(status_err & IXGBE_RXD_STAT_L4CS))
657 return;
658
659 if (status_err & IXGBE_RXDADV_ERR_TCPE) {
660 u16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
661
662 /*
663 * 82599 errata, UDP frames with a 0 checksum can be marked as
664 * checksum errors.
665 */
666 if ((pkt_info & IXGBE_RXDADV_PKTTYPE_UDP) &&
667 (adapter->hw.mac.type == ixgbe_mac_82599EB))
668 return;
669
670 adapter->hw_csum_rx_error++;
671 return;
672 }
673
674 /* It must be a TCP or UDP packet with a valid checksum */
675 skb->ip_summed = CHECKSUM_UNNECESSARY;
676 }
677
678 static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw,
679 struct ixgbe_ring *rx_ring, u32 val)
680 {
681 /*
682 * Force memory writes to complete before letting h/w
683 * know there are new descriptors to fetch. (Only
684 * applicable for weak-ordered memory model archs,
685 * such as IA-64).
686 */
687 wmb();
688 IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->reg_idx), val);
689 }
690
691 /**
692 * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split
693 * @adapter: address of board private structure
694 **/
695 static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
696 struct ixgbe_ring *rx_ring,
697 int cleaned_count)
698 {
699 struct pci_dev *pdev = adapter->pdev;
700 union ixgbe_adv_rx_desc *rx_desc;
701 struct ixgbe_rx_buffer *bi;
702 unsigned int i;
703
704 i = rx_ring->next_to_use;
705 bi = &rx_ring->rx_buffer_info[i];
706
707 while (cleaned_count--) {
708 rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
709
710 if (!bi->page_dma &&
711 (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)) {
712 if (!bi->page) {
713 bi->page = alloc_page(GFP_ATOMIC);
714 if (!bi->page) {
715 adapter->alloc_rx_page_failed++;
716 goto no_buffers;
717 }
718 bi->page_offset = 0;
719 } else {
720 /* use a half page if we're re-using */
721 bi->page_offset ^= (PAGE_SIZE / 2);
722 }
723
724 bi->page_dma = pci_map_page(pdev, bi->page,
725 bi->page_offset,
726 (PAGE_SIZE / 2),
727 PCI_DMA_FROMDEVICE);
728 }
729
730 if (!bi->skb) {
731 struct sk_buff *skb;
732 /* netdev_alloc_skb reserves 32 bytes up front!! */
733 uint bufsz = rx_ring->rx_buf_len + SMP_CACHE_BYTES;
734 skb = netdev_alloc_skb(adapter->netdev, bufsz);
735
736 if (!skb) {
737 adapter->alloc_rx_buff_failed++;
738 goto no_buffers;
739 }
740
741 /* advance the data pointer to the next cache line */
742 skb_reserve(skb, (PTR_ALIGN(skb->data, SMP_CACHE_BYTES)
743 - skb->data));
744
745 bi->skb = skb;
746 bi->dma = pci_map_single(pdev, skb->data,
747 rx_ring->rx_buf_len,
748 PCI_DMA_FROMDEVICE);
749 }
750 /* Refresh the desc even if buffer_addrs didn't change because
751 * each write-back erases this info. */
752 if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
753 rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
754 rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
755 } else {
756 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
757 }
758
759 i++;
760 if (i == rx_ring->count)
761 i = 0;
762 bi = &rx_ring->rx_buffer_info[i];
763 }
764
765 no_buffers:
766 if (rx_ring->next_to_use != i) {
767 rx_ring->next_to_use = i;
768 if (i-- == 0)
769 i = (rx_ring->count - 1);
770
771 ixgbe_release_rx_desc(&adapter->hw, rx_ring, i);
772 }
773 }
774
775 static inline u16 ixgbe_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
776 {
777 return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
778 }
779
780 static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
781 {
782 return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
783 }
784
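/*
 * A non-zero RSCCNT in the writeback descriptor means hardware
 * receive-side coalescing took place; NEXTP (consumed in
 * ixgbe_clean_rx_irq) then points at the next buffer of the same
 * aggregate.
 */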
785 static inline u32 ixgbe_get_rsc_count(union ixgbe_adv_rx_desc *rx_desc)
786 {
787 return (le32_to_cpu(rx_desc->wb.lower.lo_dword.data) &
788 IXGBE_RXDADV_RSCCNT_MASK) >>
789 IXGBE_RXDADV_RSCCNT_SHIFT;
790 }
791
792 /**
793 * ixgbe_transform_rsc_queue - change rsc queue into a full packet
794 * @skb: pointer to the last skb in the rsc queue
795 * @count: pointer to number of packets coalesced in this context
796 *
797 * This function changes a queue full of hw rsc buffers into a completed
798 * packet. It uses the ->prev pointers to find the first packet and then
799 * turns it into the frag list owner.
800 **/
801 static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb,
802 u64 *count)
803 {
804 unsigned int frag_list_size = 0;
805
806 while (skb->prev) {
807 struct sk_buff *prev = skb->prev;
808 frag_list_size += skb->len;
809 skb->prev = NULL;
810 skb = prev;
811 *count += 1;
812 }
813
814 skb_shinfo(skb)->frag_list = skb->next;
815 skb->next = NULL;
816 skb->len += frag_list_size;
817 skb->data_len += frag_list_size;
818 skb->truesize += frag_list_size;
819 return skb;
820 }
821
822 struct ixgbe_rsc_cb {
823 dma_addr_t dma;
824 };
825
826 #define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb)
827
828 static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
829 struct ixgbe_ring *rx_ring,
830 int *work_done, int work_to_do)
831 {
832 struct ixgbe_adapter *adapter = q_vector->adapter;
833 struct net_device *netdev = adapter->netdev;
834 struct pci_dev *pdev = adapter->pdev;
835 union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
836 struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
837 struct sk_buff *skb;
838 unsigned int i, rsc_count = 0;
839 u32 len, staterr;
840 u16 hdr_info;
841 bool cleaned = false;
842 int cleaned_count = 0;
843 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
844 #ifdef IXGBE_FCOE
845 int ddp_bytes = 0;
846 #endif /* IXGBE_FCOE */
847
848 i = rx_ring->next_to_clean;
849 rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
850 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
851 rx_buffer_info = &rx_ring->rx_buffer_info[i];
852
853 while (staterr & IXGBE_RXD_STAT_DD) {
854 u32 upper_len = 0;
855 if (*work_done >= work_to_do)
856 break;
857 (*work_done)++;
858
859 rmb(); /* read descriptor and rx_buffer_info after status DD */
860 if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
861 hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
862 len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
863 IXGBE_RXDADV_HDRBUFLEN_SHIFT;
864 if (len > IXGBE_RX_HDR_SIZE)
865 len = IXGBE_RX_HDR_SIZE;
866 upper_len = le16_to_cpu(rx_desc->wb.upper.length);
867 } else {
868 len = le16_to_cpu(rx_desc->wb.upper.length);
869 }
870
871 cleaned = true;
872 skb = rx_buffer_info->skb;
873 prefetch(skb->data);
874 rx_buffer_info->skb = NULL;
875
876 if (rx_buffer_info->dma) {
877 if ((adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
878 (!(staterr & IXGBE_RXD_STAT_EOP)) &&
879 (!(skb->prev)))
880 /*
881 * When HWRSC is enabled, delay unmapping
882 * of the first packet. It carries the
883 * header information; HW may still
884 * access the header after the writeback.
885 * Only unmap it when EOP is reached.
886 */
887 IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma;
888 else
889 pci_unmap_single(pdev, rx_buffer_info->dma,
890 rx_ring->rx_buf_len,
891 PCI_DMA_FROMDEVICE);
892 rx_buffer_info->dma = 0;
893 skb_put(skb, len);
894 }
895
896 if (upper_len) {
897 pci_unmap_page(pdev, rx_buffer_info->page_dma,
898 PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
899 rx_buffer_info->page_dma = 0;
900 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
901 rx_buffer_info->page,
902 rx_buffer_info->page_offset,
903 upper_len);
904
905 if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
906 (page_count(rx_buffer_info->page) != 1))
907 rx_buffer_info->page = NULL;
908 else
909 get_page(rx_buffer_info->page);
910
911 skb->len += upper_len;
912 skb->data_len += upper_len;
913 skb->truesize += upper_len;
914 }
915
916 i++;
917 if (i == rx_ring->count)
918 i = 0;
919
920 next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i);
921 prefetch(next_rxd);
922 cleaned_count++;
923
924 if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
925 rsc_count = ixgbe_get_rsc_count(rx_desc);
926
927 if (rsc_count) {
928 u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
929 IXGBE_RXDADV_NEXTP_SHIFT;
930 next_buffer = &rx_ring->rx_buffer_info[nextp];
931 } else {
932 next_buffer = &rx_ring->rx_buffer_info[i];
933 }
934
935 if (staterr & IXGBE_RXD_STAT_EOP) {
936 if (skb->prev)
937 skb = ixgbe_transform_rsc_queue(skb, &(rx_ring->rsc_count));
938 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
939 if (IXGBE_RSC_CB(skb)->dma) {
940 pci_unmap_single(pdev, IXGBE_RSC_CB(skb)->dma,
941 rx_ring->rx_buf_len,
942 PCI_DMA_FROMDEVICE);
943 IXGBE_RSC_CB(skb)->dma = 0;
944 }
945 if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)
946 rx_ring->rsc_count += skb_shinfo(skb)->nr_frags;
947 else
948 rx_ring->rsc_count++;
949 rx_ring->rsc_flush++;
950 }
951 rx_ring->stats.packets++;
952 rx_ring->stats.bytes += skb->len;
953 } else {
954 if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
955 rx_buffer_info->skb = next_buffer->skb;
956 rx_buffer_info->dma = next_buffer->dma;
957 next_buffer->skb = skb;
958 next_buffer->dma = 0;
959 } else {
960 skb->next = next_buffer->skb;
961 skb->next->prev = skb;
962 }
963 rx_ring->non_eop_descs++;
964 goto next_desc;
965 }
966
967 if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) {
968 dev_kfree_skb_irq(skb);
969 goto next_desc;
970 }
971
972 ixgbe_rx_checksum(adapter, rx_desc, skb);
973
974 /* probably a little skewed due to removing CRC */
975 total_rx_bytes += skb->len;
976 total_rx_packets++;
977
978 skb->protocol = eth_type_trans(skb, adapter->netdev);
979 #ifdef IXGBE_FCOE
980 /* if ddp, not passing to ULD unless for FCP_RSP or error */
981 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
982 ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
983 if (!ddp_bytes)
984 goto next_desc;
985 }
986 #endif /* IXGBE_FCOE */
987 ixgbe_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);
988
989 next_desc:
990 rx_desc->wb.upper.status_error = 0;
991
992 /* return some buffers to hardware, one at a time is too slow */
993 if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
994 ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
995 cleaned_count = 0;
996 }
997
998 /* use prefetched values */
999 rx_desc = next_rxd;
1000 rx_buffer_info = &rx_ring->rx_buffer_info[i];
1001
1002 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
1003 }
1004
1005 rx_ring->next_to_clean = i;
1006 cleaned_count = IXGBE_DESC_UNUSED(rx_ring);
1007
1008 if (cleaned_count)
1009 ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
1010
1011 #ifdef IXGBE_FCOE
1012 /* include DDPed FCoE data */
1013 if (ddp_bytes > 0) {
1014 unsigned int mss;
1015
1016 mss = adapter->netdev->mtu - sizeof(struct fcoe_hdr) -
1017 sizeof(struct fc_frame_header) -
1018 sizeof(struct fcoe_crc_eof);
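		/* mss is rounded down to a multiple of 512 below,
		 * presumably so the DDP byte count divides into
		 * roughly full-sized FC frames */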
1019 if (mss > 512)
1020 mss &= ~511;
1021 total_rx_bytes += ddp_bytes;
1022 total_rx_packets += DIV_ROUND_UP(ddp_bytes, mss);
1023 }
1024 #endif /* IXGBE_FCOE */
1025
1026 rx_ring->total_packets += total_rx_packets;
1027 rx_ring->total_bytes += total_rx_bytes;
1028 netdev->stats.rx_bytes += total_rx_bytes;
1029 netdev->stats.rx_packets += total_rx_packets;
1030
1031 return cleaned;
1032 }
1033
1034 static int ixgbe_clean_rxonly(struct napi_struct *, int);
1035 /**
1036 * ixgbe_configure_msix - Configure MSI-X hardware
1037 * @adapter: board private structure
1038 *
1039 * ixgbe_configure_msix sets up the hardware to properly generate MSI-X
1040 * interrupts.
1041 **/
1042 static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
1043 {
1044 struct ixgbe_q_vector *q_vector;
1045 int i, j, q_vectors, v_idx, r_idx;
1046 u32 mask;
1047
1048 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1049
1050 /*
1051 * Populate the IVAR table and set the ITR values to the
1052 * corresponding register.
1053 */
1054 for (v_idx = 0; v_idx < q_vectors; v_idx++) {
1055 q_vector = adapter->q_vector[v_idx];
1056 /* XXX for_each_set_bit(...) */
1057 r_idx = find_first_bit(q_vector->rxr_idx,
1058 adapter->num_rx_queues);
1059
1060 for (i = 0; i < q_vector->rxr_count; i++) {
1061 j = adapter->rx_ring[r_idx]->reg_idx;
1062 ixgbe_set_ivar(adapter, 0, j, v_idx);
1063 r_idx = find_next_bit(q_vector->rxr_idx,
1064 adapter->num_rx_queues,
1065 r_idx + 1);
1066 }
1067 r_idx = find_first_bit(q_vector->txr_idx,
1068 adapter->num_tx_queues);
1069
1070 for (i = 0; i < q_vector->txr_count; i++) {
1071 j = adapter->tx_ring[r_idx]->reg_idx;
1072 ixgbe_set_ivar(adapter, 1, j, v_idx);
1073 r_idx = find_next_bit(q_vector->txr_idx,
1074 adapter->num_tx_queues,
1075 r_idx + 1);
1076 }
1077
1078 if (q_vector->txr_count && !q_vector->rxr_count)
1079 /* tx only */
1080 q_vector->eitr = adapter->tx_eitr_param;
1081 else if (q_vector->rxr_count)
1082 /* rx or mixed */
1083 q_vector->eitr = adapter->rx_eitr_param;
1084
1085 ixgbe_write_eitr(q_vector);
1086 }
1087
1088 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
1089 ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
1090 v_idx);
1091 else if (adapter->hw.mac.type == ixgbe_mac_82599EB)
1092 ixgbe_set_ivar(adapter, -1, 1, v_idx);
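	/* the slow-path vector gets a fixed raw EITR value written
	 * directly, rather than an ints/sec rate via ixgbe_write_eitr() */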
1093 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);
1094
1095 /* set up to autoclear timer, and the vectors */
1096 mask = IXGBE_EIMS_ENABLE_MASK;
1097 if (adapter->num_vfs)
1098 mask &= ~(IXGBE_EIMS_OTHER |
1099 IXGBE_EIMS_MAILBOX |
1100 IXGBE_EIMS_LSC);
1101 else
1102 mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
1103 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
1104 }
1105
1106 enum latency_range {
1107 lowest_latency = 0,
1108 low_latency = 1,
1109 bulk_latency = 2,
1110 latency_invalid = 255
1111 };
1112
1113 /**
1114 * ixgbe_update_itr - update the dynamic ITR value based on statistics
1115 * @adapter: pointer to adapter
1116 * @eitr: eitr setting (ints per sec) used for the last timeslice
1117 * @itr_setting: current latency class (see enum latency_range)
1118 * @packets: the number of packets during this measurement interval
1119 * @bytes: the number of bytes during this measurement interval
1120 *
1121 * Returns a new latency class based on packet and byte
1122 * counts during the last interrupt. The advantage of per interrupt
1123 * computation is faster updates and more accurate ITR for the current
1124 * traffic pattern. Constants in this function were computed
1125 * based on theoretical maximum wire speed and thresholds were set based
1126 * on testing data as well as attempting to minimize response time
1127 * while increasing bulk throughput.
1128 * this functionality is controlled by the ethtool interrupt
1129 * coalescing settings rather than a module parameter
1130 **/
1131 static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter,
1132 u32 eitr, u8 itr_setting,
1133 int packets, int bytes)
1134 {
1135 unsigned int retval = itr_setting;
1136 u32 timepassed_us;
1137 u64 bytes_perint;
1138
1139 if (packets == 0)
1140 goto update_itr_done;
1141
1142
1143 /* simple throttlerate management
1144 * 0-20MB/s lowest (100000 ints/s)
1145 * 20-100MB/s low (20000 ints/s)
1146 * 100-1249MB/s bulk (8000 ints/s)
1147 */
1148 /* what was last interrupt timeslice? */
1149 timepassed_us = 1000000/eitr;
1150 bytes_perint = bytes / timepassed_us; /* bytes/usec */
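	/*
	 * Worked example, assuming the driver defaults of eitr_low = 10
	 * and eitr_high = 20: at eitr = 20000 ints/s the timeslice is
	 * 50 us, so 1500 bytes in one interrupt gives bytes_perint = 30,
	 * which crosses eitr_high and steps low_latency up to
	 * bulk_latency.
	 */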
1151
1152 switch (itr_setting) {
1153 case lowest_latency:
1154 if (bytes_perint > adapter->eitr_low)
1155 retval = low_latency;
1156 break;
1157 case low_latency:
1158 if (bytes_perint > adapter->eitr_high)
1159 retval = bulk_latency;
1160 else if (bytes_perint <= adapter->eitr_low)
1161 retval = lowest_latency;
1162 break;
1163 case bulk_latency:
1164 if (bytes_perint <= adapter->eitr_high)
1165 retval = low_latency;
1166 break;
1167 }
1168
1169 update_itr_done:
1170 return retval;
1171 }
1172
1173 /**
1174 * ixgbe_write_eitr - write EITR register in hardware specific way
1175 * @q_vector: structure containing interrupt and ring information
1176 *
1177 * This function is made to be called by ethtool and by the driver
1178 * when it needs to update EITR registers at runtime. Hardware
1179 * specific quirks/differences are taken care of here.
1180 */
1181 void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
1182 {
1183 struct ixgbe_adapter *adapter = q_vector->adapter;
1184 struct ixgbe_hw *hw = &adapter->hw;
1185 int v_idx = q_vector->v_idx;
1186 u32 itr_reg = EITR_INTS_PER_SEC_TO_REG(q_vector->eitr);
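	/*
	 * EITR_INTS_PER_SEC_TO_REG converts the ints/sec target into the
	 * EITR interval field; a smaller register value means a shorter
	 * throttle interval and therefore a higher interrupt rate.
	 */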
1187
1188 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
1189 /* must write high and low 16 bits to reset counter */
1190 itr_reg |= (itr_reg << 16);
1191 } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
1192 /*
1193 * 82599 can support a value of zero, so allow it for
1194 * max interrupt rate, but there is an erratum where it
1195 * cannot be zero with RSC
1196 */
1197 if (itr_reg == 8 &&
1198 !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
1199 itr_reg = 0;
1200
1201 /*
1202 * set the WDIS bit so this write does not clear the timer bits,
1203 * which would cause an immediate assertion of the interrupt
1204 */
1205 itr_reg |= IXGBE_EITR_CNT_WDIS;
1206 }
1207 IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
1208 }
1209
1210 static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
1211 {
1212 struct ixgbe_adapter *adapter = q_vector->adapter;
1213 u32 new_itr;
1214 u8 current_itr, ret_itr;
1215 int i, r_idx;
1216 struct ixgbe_ring *rx_ring, *tx_ring;
1217
1218 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
1219 for (i = 0; i < q_vector->txr_count; i++) {
1220 tx_ring = adapter->tx_ring[r_idx];
1221 ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
1222 q_vector->tx_itr,
1223 tx_ring->total_packets,
1224 tx_ring->total_bytes);
1225 /* if the result for this queue would decrease interrupt
1226 * rate for this vector then use that result */
1227 q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
1228 q_vector->tx_itr - 1 : ret_itr);
1229 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
1230 r_idx + 1);
1231 }
1232
1233 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1234 for (i = 0; i < q_vector->rxr_count; i++) {
1235 rx_ring = adapter->rx_ring[r_idx];
1236 ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
1237 q_vector->rx_itr,
1238 rx_ring->total_packets,
1239 rx_ring->total_bytes);
1240 /* if the result for this queue would decrease interrupt
1241 * rate for this vector then use that result */
1242 q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
1243 q_vector->rx_itr - 1 : ret_itr);
1244 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
1245 r_idx + 1);
1246 }
1247
1248 current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
1249
1250 switch (current_itr) {
1251 /* counts and packets in update_itr are dependent on these numbers */
1252 case lowest_latency:
1253 new_itr = 100000;
1254 break;
1255 case low_latency:
1256 new_itr = 20000; /* aka hwitr = ~200 */
1257 break;
1258 case bulk_latency:
1259 default:
1260 new_itr = 8000;
1261 break;
1262 }
1263
1264 if (new_itr != q_vector->eitr) {
1265 /* do an exponential smoothing */
1266 new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
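		/* e.g. stepping from 8000 toward a 20000 target yields
		 * 0.9 * 8000 + 0.1 * 20000 = 9200 ints/s on this pass */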
1267
1268 /* save the algorithm value here, not the smoothed one */
1269 q_vector->eitr = new_itr;
1270
1271 ixgbe_write_eitr(q_vector);
1272 }
1273
1274 return;
1275 }
1276
1277 static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
1278 {
1279 struct ixgbe_hw *hw = &adapter->hw;
1280
1281 if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
1282 (eicr & IXGBE_EICR_GPI_SDP1)) {
1283 DPRINTK(PROBE, CRIT, "Fan has stopped, replace the adapter\n");
1284 /* write to clear the interrupt */
1285 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
1286 }
1287 }
1288
1289 static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
1290 {
1291 struct ixgbe_hw *hw = &adapter->hw;
1292
1293 if (eicr & IXGBE_EICR_GPI_SDP1) {
1294 /* Clear the interrupt */
1295 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
1296 schedule_work(&adapter->multispeed_fiber_task);
1297 } else if (eicr & IXGBE_EICR_GPI_SDP2) {
1298 /* Clear the interrupt */
1299 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
1300 schedule_work(&adapter->sfp_config_module_task);
1301 } else {
1302 /* Interrupt isn't for us... */
1303 return;
1304 }
1305 }
1306
1307 static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
1308 {
1309 struct ixgbe_hw *hw = &adapter->hw;
1310
1311 adapter->lsc_int++;
1312 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
1313 adapter->link_check_timeout = jiffies;
1314 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
1315 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
1316 IXGBE_WRITE_FLUSH(hw);
1317 schedule_work(&adapter->watchdog_task);
1318 }
1319 }
1320
1321 static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
1322 {
1323 struct net_device *netdev = data;
1324 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1325 struct ixgbe_hw *hw = &adapter->hw;
1326 u32 eicr;
1327
1328 /*
1329 * Workaround for Silicon errata. Use clear-by-write instead
1330 * of clear-by-read. Reading with EICS will return the
1331 * interrupt causes without clearing, which will later be done
1332 * with the write to EICR.
1333 */
1334 eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
1335 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
1336
1337 if (eicr & IXGBE_EICR_LSC)
1338 ixgbe_check_lsc(adapter);
1339
1340 if (eicr & IXGBE_EICR_MAILBOX)
1341 ixgbe_msg_task(adapter);
1342
1343 if (hw->mac.type == ixgbe_mac_82598EB)
1344 ixgbe_check_fan_failure(adapter, eicr);
1345
1346 if (hw->mac.type == ixgbe_mac_82599EB) {
1347 ixgbe_check_sfp_event(adapter, eicr);
1348
1349 /* Handle Flow Director Full threshold interrupt */
1350 if (eicr & IXGBE_EICR_FLOW_DIR) {
1351 int i;
1352 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_FLOW_DIR);
1353 /* Disable transmits before FDIR Re-initialization */
1354 netif_tx_stop_all_queues(netdev);
1355 for (i = 0; i < adapter->num_tx_queues; i++) {
1356 struct ixgbe_ring *tx_ring =
1357 adapter->tx_ring[i];
1358 if (test_and_clear_bit(__IXGBE_FDIR_INIT_DONE,
1359 &tx_ring->reinit_state))
1360 schedule_work(&adapter->fdir_reinit_task);
1361 }
1362 }
1363 }
1364 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1365 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
1366
1367 return IRQ_HANDLED;
1368 }
1369
1370 static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
1371 u64 qmask)
1372 {
1373 u32 mask;
1374
1375 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
1376 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
1377 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
1378 } else {
1379 mask = (qmask & 0xFFFFFFFF);
1380 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(0), mask);
1381 mask = (qmask >> 32);
1382 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(1), mask);
1383 }
1384 /* skip the flush */
1385 }
1386
1387 static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
1388 u64 qmask)
1389 {
1390 u32 mask;
1391
1392 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
1393 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
1394 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, mask);
1395 } else {
1396 mask = (qmask & 0xFFFFFFFF);
1397 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), mask);
1398 mask = (qmask >> 32);
1399 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), mask);
1400 }
1401 /* skip the flush */
1402 }
1403
1404 static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
1405 {
1406 struct ixgbe_q_vector *q_vector = data;
1407 struct ixgbe_adapter *adapter = q_vector->adapter;
1408 struct ixgbe_ring *tx_ring;
1409 int i, r_idx;
1410
1411 if (!q_vector->txr_count)
1412 return IRQ_HANDLED;
1413
1414 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
1415 for (i = 0; i < q_vector->txr_count; i++) {
1416 tx_ring = adapter->tx_ring[r_idx];
1417 tx_ring->total_bytes = 0;
1418 tx_ring->total_packets = 0;
1419 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
1420 r_idx + 1);
1421 }
1422
1423 /* EIAM disabled interrupts (on this vector) for us */
1424 napi_schedule(&q_vector->napi);
1425
1426 return IRQ_HANDLED;
1427 }
1428
1429 /**
1430 * ixgbe_msix_clean_rx - single unshared vector rx clean (all queues)
1431 * @irq: unused
1432 * @data: pointer to our q_vector struct for this interrupt vector
1433 **/
1434 static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
1435 {
1436 struct ixgbe_q_vector *q_vector = data;
1437 struct ixgbe_adapter *adapter = q_vector->adapter;
1438 struct ixgbe_ring *rx_ring;
1439 int r_idx;
1440 int i;
1441
1442 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1443 for (i = 0; i < q_vector->rxr_count; i++) {
1444 rx_ring = adapter->rx_ring[r_idx];
1445 rx_ring->total_bytes = 0;
1446 rx_ring->total_packets = 0;
1447 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
1448 r_idx + 1);
1449 }
1450
1451 if (!q_vector->rxr_count)
1452 return IRQ_HANDLED;
1453
1454 /* no need to disable interrupts on this vector here: */
1455 /* EIAM disabled interrupts (on this vector) for us */
1456 napi_schedule(&q_vector->napi);
1457
1458 return IRQ_HANDLED;
1459 }
1460
1461 static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
1462 {
1463 struct ixgbe_q_vector *q_vector = data;
1464 struct ixgbe_adapter *adapter = q_vector->adapter;
1465 struct ixgbe_ring *ring;
1466 int r_idx;
1467 int i;
1468
1469 if (!q_vector->txr_count && !q_vector->rxr_count)
1470 return IRQ_HANDLED;
1471
1472 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
1473 for (i = 0; i < q_vector->txr_count; i++) {
1474 ring = adapter->tx_ring[r_idx];
1475 ring->total_bytes = 0;
1476 ring->total_packets = 0;
1477 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
1478 r_idx + 1);
1479 }
1480
1481 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1482 for (i = 0; i < q_vector->rxr_count; i++) {
1483 ring = adapter->rx_ring[r_idx];
1484 ring->total_bytes = 0;
1485 ring->total_packets = 0;
1486 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
1487 r_idx + 1);
1488 }
1489
1490 /* EIAM disabled interrupts (on this vector) for us */
1491 napi_schedule(&q_vector->napi);
1492
1493 return IRQ_HANDLED;
1494 }
1495
1496 /**
1497 * ixgbe_clean_rxonly - msix (aka one shot) rx clean routine
1498 * @napi: napi struct with our device's info in it
1499 * @budget: amount of work driver is allowed to do this pass, in packets
1500 *
1501 * This function is optimized for cleaning one queue only on a single
1502 * q_vector!!!
1503 **/
1504 static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
1505 {
1506 struct ixgbe_q_vector *q_vector =
1507 container_of(napi, struct ixgbe_q_vector, napi);
1508 struct ixgbe_adapter *adapter = q_vector->adapter;
1509 struct ixgbe_ring *rx_ring = NULL;
1510 int work_done = 0;
1511 long r_idx;
1512
1513 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1514 rx_ring = adapter->rx_ring[r_idx];
1515 #ifdef CONFIG_IXGBE_DCA
1516 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1517 ixgbe_update_rx_dca(adapter, rx_ring);
1518 #endif
1519
1520 ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
1521
1522 /* If all Rx work done, exit the polling mode */
1523 if (work_done < budget) {
1524 napi_complete(napi);
1525 if (adapter->rx_itr_setting & 1)
1526 ixgbe_set_itr_msix(q_vector);
1527 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1528 ixgbe_irq_enable_queues(adapter,
1529 ((u64)1 << q_vector->v_idx));
1530 }
1531
1532 return work_done;
1533 }
1534
1535 /**
1536 * ixgbe_clean_rxtx_many - msix (aka one shot) rx/tx clean routine
1537 * @napi: napi struct with our device's info in it
1538 * @budget: amount of work driver is allowed to do this pass, in packets
1539 *
1540 * This function will clean more than one rx and tx queue associated
1541 * with a q_vector.
1542 **/
1543 static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
1544 {
1545 struct ixgbe_q_vector *q_vector =
1546 container_of(napi, struct ixgbe_q_vector, napi);
1547 struct ixgbe_adapter *adapter = q_vector->adapter;
1548 struct ixgbe_ring *ring = NULL;
1549 int work_done = 0, i;
1550 long r_idx;
1551 bool tx_clean_complete = true;
1552
1553 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
1554 for (i = 0; i < q_vector->txr_count; i++) {
1555 ring = adapter->tx_ring[r_idx];
1556 #ifdef CONFIG_IXGBE_DCA
1557 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1558 ixgbe_update_tx_dca(adapter, ring);
1559 #endif
1560 tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring);
1561 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
1562 r_idx + 1);
1563 }
1564
1565 /* attempt to distribute budget to each queue fairly, but don't allow
1566 * the budget to go below 1 because we'll exit polling */
1567 budget /= (q_vector->rxr_count ?: 1);
1568 budget = max(budget, 1);
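	/* e.g. a budget of 64 across 3 Rx rings allows 64 / 3 = 21
	 * packets per ring on this polling pass */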
1569 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1570 for (i = 0; i < q_vector->rxr_count; i++) {
1571 ring = adapter->rx_ring[r_idx];
1572 #ifdef CONFIG_IXGBE_DCA
1573 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1574 ixgbe_update_rx_dca(adapter, ring);
1575 #endif
1576 ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget);
1577 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
1578 r_idx + 1);
1579 }
1580
1581 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1582 ring = adapter->rx_ring[r_idx];
1583 /* If all Rx work done, exit the polling mode */
1584 if (work_done < budget) {
1585 napi_complete(napi);
1586 if (adapter->rx_itr_setting & 1)
1587 ixgbe_set_itr_msix(q_vector);
1588 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1589 ixgbe_irq_enable_queues(adapter,
1590 ((u64)1 << q_vector->v_idx));
1591 return 0;
1592 }
1593
1594 return work_done;
1595 }
1596
1597 /**
1598 * ixgbe_clean_txonly - msix (aka one shot) tx clean routine
1599 * @napi: napi struct with our device's info in it
1600 * @budget: amount of work driver is allowed to do this pass, in packets
1601 *
1602 * This function is optimized for cleaning one queue only on a single
1603 * q_vector!!!
1604 **/
1605 static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
1606 {
1607 struct ixgbe_q_vector *q_vector =
1608 container_of(napi, struct ixgbe_q_vector, napi);
1609 struct ixgbe_adapter *adapter = q_vector->adapter;
1610 struct ixgbe_ring *tx_ring = NULL;
1611 int work_done = 0;
1612 long r_idx;
1613
1614 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
1615 tx_ring = adapter->tx_ring[r_idx];
1616 #ifdef CONFIG_IXGBE_DCA
1617 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1618 ixgbe_update_tx_dca(adapter, tx_ring);
1619 #endif
1620
1621 if (!ixgbe_clean_tx_irq(q_vector, tx_ring))
1622 work_done = budget;
1623
1624 /* If all Tx work done, exit the polling mode */
1625 if (work_done < budget) {
1626 napi_complete(napi);
1627 if (adapter->tx_itr_setting & 1)
1628 ixgbe_set_itr_msix(q_vector);
1629 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1630 ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));
1631 }
1632
1633 return work_done;
1634 }
1635
1636 static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
1637 int r_idx)
1638 {
1639 struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
1640
1641 set_bit(r_idx, q_vector->rxr_idx);
1642 q_vector->rxr_count++;
1643 }
1644
1645 static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
1646 int t_idx)
1647 {
1648 struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
1649
1650 set_bit(t_idx, q_vector->txr_idx);
1651 q_vector->txr_count++;
1652 }
1653
1654 /**
1655 * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors
1656 * @adapter: board private structure to initialize
1657 * @vectors: allotted vector count for descriptor rings
1658 *
1659 * This function maps descriptor rings to the queue-specific vectors
1660 * we were allotted through the MSI-X enabling code. Ideally, we'd have
1661 * one vector per ring/queue, but on a constrained vector budget, we
1662 * group the rings as "efficiently" as possible. You would add new
1663 * mapping configurations in here.
1664 **/
1665 static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
1666 int vectors)
1667 {
1668 int v_start = 0;
1669 int rxr_idx = 0, txr_idx = 0;
1670 int rxr_remaining = adapter->num_rx_queues;
1671 int txr_remaining = adapter->num_tx_queues;
1672 int i, j;
1673 int rqpv, tqpv;
1674 int err = 0;
1675
1676 /* No mapping required if MSI-X is disabled. */
1677 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
1678 goto out;
1679
1680 /*
1681 * The ideal configuration...
1682 * We have enough vectors to map one per queue.
1683 */
1684 if (vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
1685 for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
1686 map_vector_to_rxq(adapter, v_start, rxr_idx);
1687
1688 for (; txr_idx < txr_remaining; v_start++, txr_idx++)
1689 map_vector_to_txq(adapter, v_start, txr_idx);
1690
1691 goto out;
1692 }
1693
1694 /*
1695 * If we don't have enough vectors for a 1-to-1
1696 * mapping, we'll have to group them so there are
1697 * multiple queues per vector.
1698 */
1699 /* Re-adjusting *qpv takes care of the remainder. */
1700 for (i = v_start; i < vectors; i++) {
1701 rqpv = DIV_ROUND_UP(rxr_remaining, vectors - i);
1702 for (j = 0; j < rqpv; j++) {
1703 map_vector_to_rxq(adapter, i, rxr_idx);
1704 rxr_idx++;
1705 rxr_remaining--;
1706 }
1707 }
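	/*
	 * Example: 10 Rx rings over 4 vectors get DIV_ROUND_UP shares
	 * of 3, 3, 2 and 2 rings as rxr_remaining shrinks, consuming
	 * every ring without overloading any single vector.
	 */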
1708 for (i = v_start; i < vectors; i++) {
1709 tqpv = DIV_ROUND_UP(txr_remaining, vectors - i);
1710 for (j = 0; j < tqpv; j++) {
1711 map_vector_to_txq(adapter, i, txr_idx);
1712 txr_idx++;
1713 txr_remaining--;
1714 }
1715 }
1716
1717 out:
1718 return err;
1719 }
1720
1721 /**
1722 * ixgbe_request_msix_irqs - Initialize MSI-X interrupts
1723 * @adapter: board private structure
1724 *
1725 * ixgbe_request_msix_irqs allocates MSI-X vectors and requests
1726 * interrupts from the kernel.
1727 **/
1728 static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
1729 {
1730 struct net_device *netdev = adapter->netdev;
1731 irqreturn_t (*handler)(int, void *);
1732 int i, vector, q_vectors, err;
733 int ri = 0, ti = 0;
1734
1735 /* Decrement for Other and TCP Timer vectors */
1736 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1737
1738 /* Map the Tx/Rx rings to the vectors we were allotted. */
1739 err = ixgbe_map_rings_to_vectors(adapter, q_vectors);
1740 if (err)
1741 goto out;
1742
1743 #define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \
1744 (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \
1745 &ixgbe_msix_clean_many)
1746 for (vector = 0; vector < q_vectors; vector++) {
1747 handler = SET_HANDLER(adapter->q_vector[vector]);
1748
749 if (handler == &ixgbe_msix_clean_rx) {
1750 sprintf(adapter->name[vector], "%s-%s-%d",
1751 netdev->name, "rx", ri++);
1752 }
753 else if (handler == &ixgbe_msix_clean_tx) {
1754 sprintf(adapter->name[vector], "%s-%s-%d",
1755 netdev->name, "tx", ti++);
1756 }
1757 else
1758 sprintf(adapter->name[vector], "%s-%s-%d",
1759 netdev->name, "TxRx", vector);
1760
1761 err = request_irq(adapter->msix_entries[vector].vector,
1762 handler, 0, adapter->name[vector],
1763 adapter->q_vector[vector]);
1764 if (err) {
1765 DPRINTK(PROBE, ERR,
1766 "request_irq failed for MSIX interrupt "
1767 "Error: %d\n", err);
1768 goto free_queue_irqs;
1769 }
1770 }
1771
1772 sprintf(adapter->name[vector], "%s:lsc", netdev->name);
1773 err = request_irq(adapter->msix_entries[vector].vector,
1774 ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
1775 if (err) {
1776 DPRINTK(PROBE, ERR,
1777 "request_irq for msix_lsc failed: %d\n", err);
1778 goto free_queue_irqs;
1779 }
1780
1781 return 0;
1782
1783 free_queue_irqs:
1784 for (i = vector - 1; i >= 0; i--)
1785 free_irq(adapter->msix_entries[--vector].vector,
1786 adapter->q_vector[i]);
1787 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
1788 pci_disable_msix(adapter->pdev);
1789 kfree(adapter->msix_entries);
1790 adapter->msix_entries = NULL;
1791 out:
1792 return err;
1793 }
1794
1795 static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
1796 {
1797 struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
1798 u8 current_itr;
1799 u32 new_itr = q_vector->eitr;
1800 struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
1801 struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
1802
1803 q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr,
1804 q_vector->tx_itr,
1805 tx_ring->total_packets,
1806 tx_ring->total_bytes);
1807 q_vector->rx_itr = ixgbe_update_itr(adapter, new_itr,
1808 q_vector->rx_itr,
1809 rx_ring->total_packets,
1810 rx_ring->total_bytes);
1811
1812 current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
1813
1814 switch (current_itr) {
1815 /* counts and packets in update_itr are dependent on these numbers */
1816 case lowest_latency:
1817 new_itr = 100000;
1818 break;
1819 case low_latency:
1820 new_itr = 20000; /* aka hwitr = ~200 */
1821 break;
1822 case bulk_latency:
1823 new_itr = 8000;
1824 break;
1825 default:
1826 break;
1827 }
1828
1829 if (new_itr != q_vector->eitr) {
1830 /* do an exponential smoothing */
1831 new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
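		/* 90/10 EWMA toward the new target: e.g. smoothing from
		 * eitr = 8000 toward 20000 yields 7200 + 2000 = 9200 */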
1832
1833 /* save the algorithm value here, not the smoothed one */
1834 q_vector->eitr = new_itr;
1835
1836 ixgbe_write_eitr(q_vector);
1837 }
1840 }
1841
1842 /**
1843 * ixgbe_irq_enable - Enable default interrupt generation settings
1844 * @adapter: board private structure
1845 **/
1846 static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
1847 {
1848 u32 mask;
1849
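	/* enable every cause except the per-queue bits, which
	 * ixgbe_irq_enable_queues() below handles separately */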
1850 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
1851 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
1852 mask |= IXGBE_EIMS_GPI_SDP1;
1853 if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
1854 mask |= IXGBE_EIMS_ECC;
1855 mask |= IXGBE_EIMS_GPI_SDP1;
1856 mask |= IXGBE_EIMS_GPI_SDP2;
1857 if (adapter->num_vfs)
1858 mask |= IXGBE_EIMS_MAILBOX;
1859 }
1860 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
1861 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
1862 mask |= IXGBE_EIMS_FLOW_DIR;
1863
1864 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
1865 ixgbe_irq_enable_queues(adapter, ~0);
1866 IXGBE_WRITE_FLUSH(&adapter->hw);
1867
1868 if (adapter->num_vfs > 32) {
1869 u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
1870 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
1871 }
1872 }
1873
1874 /**
1875 * ixgbe_intr - legacy mode Interrupt Handler
1876 * @irq: interrupt number
1877 * @data: pointer to a network interface device structure
1878 **/
1879 static irqreturn_t ixgbe_intr(int irq, void *data)
1880 {
1881 struct net_device *netdev = data;
1882 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1883 struct ixgbe_hw *hw = &adapter->hw;
1884 struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
1885 u32 eicr;
1886
1887 /*
1888 * Workaround for silicon errata. Mask the interrupts
1889 * before the read of EICR.
1890 */
1891 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
1892
1893 /* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read
1894 	 * therefore no explicit interrupt disable is necessary */
1895 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
1896 if (!eicr) {
1897 /* shared interrupt alert!
1898 * make sure interrupts are enabled because the read will
1899 * have disabled interrupts due to EIAM */
1900 ixgbe_irq_enable(adapter);
1901 return IRQ_NONE; /* Not our interrupt */
1902 }
1903
1904 if (eicr & IXGBE_EICR_LSC)
1905 ixgbe_check_lsc(adapter);
1906
1907 if (hw->mac.type == ixgbe_mac_82599EB)
1908 ixgbe_check_sfp_event(adapter, eicr);
1909
1910 ixgbe_check_fan_failure(adapter, eicr);
1911
1912 if (napi_schedule_prep(&(q_vector->napi))) {
1913 adapter->tx_ring[0]->total_packets = 0;
1914 adapter->tx_ring[0]->total_bytes = 0;
1915 adapter->rx_ring[0]->total_packets = 0;
1916 adapter->rx_ring[0]->total_bytes = 0;
1917 /* would disable interrupts here but EIAM disabled it */
1918 __napi_schedule(&(q_vector->napi));
1919 }
1920
1921 return IRQ_HANDLED;
1922 }
1923
1924 static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter)
1925 {
1926 int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1927
1928 for (i = 0; i < q_vectors; i++) {
1929 struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
1930 bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
1931 bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
1932 q_vector->rxr_count = 0;
1933 q_vector->txr_count = 0;
1934 }
1935 }
1936
1937 /**
1938 * ixgbe_request_irq - initialize interrupts
1939 * @adapter: board private structure
1940 *
1941 * Attempts to configure interrupts using the best available
1942 * capabilities of the hardware and kernel.
1943 **/
1944 static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
1945 {
1946 struct net_device *netdev = adapter->netdev;
1947 int err;
1948
1949 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
1950 err = ixgbe_request_msix_irqs(adapter);
1951 } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
1952 err = request_irq(adapter->pdev->irq, ixgbe_intr, 0,
1953 netdev->name, netdev);
1954 } else {
1955 err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
1956 netdev->name, netdev);
1957 }
1958
1959 if (err)
1960 DPRINTK(PROBE, ERR, "request_irq failed, Error %d\n", err);
1961
1962 return err;
1963 }
1964
1965 static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
1966 {
1967 struct net_device *netdev = adapter->netdev;
1968
1969 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
1970 int i, q_vectors;
1971
1972 q_vectors = adapter->num_msix_vectors;
1973
1974 i = q_vectors - 1;
1975 free_irq(adapter->msix_entries[i].vector, netdev);
1976
1977 i--;
1978 for (; i >= 0; i--) {
1979 free_irq(adapter->msix_entries[i].vector,
1980 adapter->q_vector[i]);
1981 }
1982
1983 ixgbe_reset_q_vectors(adapter);
1984 } else {
1985 free_irq(adapter->pdev->irq, netdev);
1986 }
1987 }
1988
1989 /**
1990 * ixgbe_irq_disable - Mask off interrupt generation on the NIC
1991 * @adapter: board private structure
1992 **/
1993 static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
1994 {
1995 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
1996 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
1997 } else {
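		/* on 82599 the queue interrupt bits live in the EIMC_EX
		 * pair, so mask the remaining causes via EIMC's upper half */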
1998 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
1999 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
2000 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
2001 if (adapter->num_vfs > 32)
2002 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
2003 }
2004 IXGBE_WRITE_FLUSH(&adapter->hw);
2005 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
2006 int i;
2007 for (i = 0; i < adapter->num_msix_vectors; i++)
2008 synchronize_irq(adapter->msix_entries[i].vector);
2009 } else {
2010 synchronize_irq(adapter->pdev->irq);
2011 }
2012 }
2013
2014 /**
2015 * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
2016  * @adapter: board private structure
2017 **/
2018 static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
2019 {
2020 struct ixgbe_hw *hw = &adapter->hw;
2021
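	/* EITR_INTS_PER_SEC_TO_REG converts an interrupts/sec target into
	 * the register's interval field (assuming the usual
	 * 1000000000 / (rate * 256) conversion, 8000 ints/sec is ~488) */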
2022 IXGBE_WRITE_REG(hw, IXGBE_EITR(0),
2023 EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr_param));
2024
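	/* route queue 0 to vector 0 for both directions
	 * (0 = Rx, 1 = Tx in ixgbe_set_ivar) */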
2025 ixgbe_set_ivar(adapter, 0, 0, 0);
2026 ixgbe_set_ivar(adapter, 1, 0, 0);
2027
2028 map_vector_to_rxq(adapter, 0, 0);
2029 map_vector_to_txq(adapter, 0, 0);
2030
2031 DPRINTK(HW, INFO, "Legacy interrupt IVAR setup done\n");
2032 }
2033
2034 /**
2035 * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset
2036 * @adapter: board private structure
2037 *
2038 * Configure the Tx unit of the MAC after a reset.
2039 **/
2040 static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
2041 {
2042 u64 tdba;
2043 struct ixgbe_hw *hw = &adapter->hw;
2044 u32 i, j, tdlen, txctrl;
2045
2046 /* Setup the HW Tx Head and Tail descriptor pointers */
2047 for (i = 0; i < adapter->num_tx_queues; i++) {
2048 struct ixgbe_ring *ring = adapter->tx_ring[i];
2049 j = ring->reg_idx;
2050 tdba = ring->dma;
2051 tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
2052 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
2053 (tdba & DMA_BIT_MASK(32)));
2054 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
2055 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), tdlen);
2056 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
2057 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
2058 adapter->tx_ring[i]->head = IXGBE_TDH(j);
2059 adapter->tx_ring[i]->tail = IXGBE_TDT(j);
2060 /*
2061 * Disable Tx Head Writeback RO bit, since this hoses
2062 * bookkeeping if things aren't delivered in order.
2063 */
2064 switch (hw->mac.type) {
2065 case ixgbe_mac_82598EB:
2066 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
2067 break;
2068 case ixgbe_mac_82599EB:
2069 default:
2070 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
2071 break;
2072 }
2073 txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
2074 switch (hw->mac.type) {
2075 case ixgbe_mac_82598EB:
2076 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
2077 break;
2078 case ixgbe_mac_82599EB:
2079 default:
2080 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
2081 break;
2082 }
2083 }
2084
2085 if (hw->mac.type == ixgbe_mac_82599EB) {
2086 u32 rttdcs;
2087 u32 mask;
2088
2089 /* disable the arbiter while setting MTQC */
2090 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
2091 rttdcs |= IXGBE_RTTDCS_ARBDIS;
2092 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2093
2094 /* set transmit pool layout */
2095 mask = (IXGBE_FLAG_SRIOV_ENABLED | IXGBE_FLAG_DCB_ENABLED);
2096 switch (adapter->flags & mask) {
2097
2098 case (IXGBE_FLAG_SRIOV_ENABLED):
2099 IXGBE_WRITE_REG(hw, IXGBE_MTQC,
2100 (IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF));
2101 break;
2102
2103 case (IXGBE_FLAG_DCB_ENABLED):
2104 /* We enable 8 traffic classes, DCB only */
2105 IXGBE_WRITE_REG(hw, IXGBE_MTQC,
2106 (IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ));
2107 break;
2108
2109 default:
2110 IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
2111 break;
2112 }
2113
2114 		/* re-enable the arbiter */
2115 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
2116 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2117 }
2118 }
2119
2120 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
2121
2122 static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
2123 struct ixgbe_ring *rx_ring)
2124 {
2125 u32 srrctl;
2126 int index;
2127 struct ixgbe_ring_feature *feature = adapter->ring_feature;
2128
2129 index = rx_ring->reg_idx;
2130 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
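		/* 82598 shares SRRCTL[0..15] across its queues (see the
		 * RDRXCTL.MVMEN note in ixgbe_configure_rx), so fold the
		 * ring index into that range with the RSS mask */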
2131 unsigned long mask;
2132 mask = (unsigned long) feature[RING_F_RSS].mask;
2133 index = index & mask;
2134 }
2135 srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(index));
2136
2137 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
2138 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
2139
2140 srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
2141 IXGBE_SRRCTL_BSIZEHDR_MASK;
2142
2143 if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
2144 #if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER
2145 srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
2146 #else
2147 srrctl |= (PAGE_SIZE / 2) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
2148 #endif
2149 srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
2150 } else {
2151 srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
2152 IXGBE_SRRCTL_BSIZEPKT_SHIFT;
2153 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
2154 }
2155
2156 IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl);
2157 }
2158
2159 static u32 ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
2160 {
2161 u32 mrqc = 0;
2162 int mask;
2163
2164 	if (adapter->hw.mac.type != ixgbe_mac_82599EB)
2165 return mrqc;
2166
2167 mask = adapter->flags & (IXGBE_FLAG_RSS_ENABLED
2168 #ifdef CONFIG_IXGBE_DCB
2169 | IXGBE_FLAG_DCB_ENABLED
2170 #endif
2171 | IXGBE_FLAG_SRIOV_ENABLED
2172 );
2173
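	/* only one feature may own MRQC; if several of these flags are
	 * set, no case matches and mrqc is left at 0 */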
2174 switch (mask) {
2175 case (IXGBE_FLAG_RSS_ENABLED):
2176 mrqc = IXGBE_MRQC_RSSEN;
2177 break;
2178 case (IXGBE_FLAG_SRIOV_ENABLED):
2179 mrqc = IXGBE_MRQC_VMDQEN;
2180 break;
2181 #ifdef CONFIG_IXGBE_DCB
2182 case (IXGBE_FLAG_DCB_ENABLED):
2183 mrqc = IXGBE_MRQC_RT8TCEN;
2184 break;
2185 #endif /* CONFIG_IXGBE_DCB */
2186 default:
2187 break;
2188 }
2189
2190 return mrqc;
2191 }
2192
2193 /**
2194 * ixgbe_configure_rscctl - enable RSC for the indicated ring
2195 * @adapter: address of board private structure
2196 * @index: index of ring to set
2197 **/
2198 static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, int index)
2199 {
2200 struct ixgbe_ring *rx_ring;
2201 struct ixgbe_hw *hw = &adapter->hw;
2202 int j;
2203 u32 rscctrl;
2204 int rx_buf_len;
2205
2206 rx_ring = adapter->rx_ring[index];
2207 j = rx_ring->reg_idx;
2208 rx_buf_len = rx_ring->rx_buf_len;
2209 rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(j));
2210 rscctrl |= IXGBE_RSCCTL_RSCEN;
2211 /*
2212 * we must limit the number of descriptors so that the
2213 * total size of max desc * buf_len is not greater
2214 * than 65535
2215 */
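	/* e.g. buffers just under 4 KB allow 16 descriptors:
	 * 4095 * 16 = 65520, within the limit */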
2216 if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
2217 #if (MAX_SKB_FRAGS > 16)
2218 rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
2219 #elif (MAX_SKB_FRAGS > 8)
2220 rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
2221 #elif (MAX_SKB_FRAGS > 4)
2222 rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
2223 #else
2224 rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
2225 #endif
2226 } else {
2227 if (rx_buf_len < IXGBE_RXBUFFER_4096)
2228 rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
2229 else if (rx_buf_len < IXGBE_RXBUFFER_8192)
2230 rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
2231 else
2232 rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
2233 }
2234 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(j), rscctrl);
2235 }
2236
2237 /**
2238 * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
2239 * @adapter: board private structure
2240 *
2241 * Configure the Rx unit of the MAC after a reset.
2242 **/
2243 static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
2244 {
2245 u64 rdba;
2246 struct ixgbe_hw *hw = &adapter->hw;
2247 struct ixgbe_ring *rx_ring;
2248 struct net_device *netdev = adapter->netdev;
2249 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
2250 int i, j;
2251 u32 rdlen, rxctrl, rxcsum;
2252 static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D,
2253 0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE,
2254 0x6A3E67EA, 0x14364D17, 0x3BED200D};
2255 u32 fctrl, hlreg0;
2256 u32 reta = 0, mrqc = 0;
2257 u32 rdrxctl;
2258 int rx_buf_len;
2259
2260 /* Decide whether to use packet split mode or not */
2261 /* Do not use packet split if we're in SR-IOV Mode */
2262 if (!adapter->num_vfs)
2263 adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
2264
2265 /* Set the RX buffer length according to the mode */
2266 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
2267 rx_buf_len = IXGBE_RX_HDR_SIZE;
2268 if (hw->mac.type == ixgbe_mac_82599EB) {
2269 /* PSRTYPE must be initialized in 82599 */
2270 u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
2271 IXGBE_PSRTYPE_UDPHDR |
2272 IXGBE_PSRTYPE_IPV4HDR |
2273 IXGBE_PSRTYPE_IPV6HDR |
2274 IXGBE_PSRTYPE_L2HDR;
2275 IXGBE_WRITE_REG(hw,
2276 IXGBE_PSRTYPE(adapter->num_vfs),
2277 psrtype);
2278 }
2279 } else {
2280 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
2281 (netdev->mtu <= ETH_DATA_LEN))
2282 rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
2283 else
2284 rx_buf_len = ALIGN(max_frame, 1024);
2285 }
2286
2287 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
2288 fctrl |= IXGBE_FCTRL_BAM;
2289 fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */
2290 fctrl |= IXGBE_FCTRL_PMCF;
2291 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
2292
2293 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2294 if (adapter->netdev->mtu <= ETH_DATA_LEN)
2295 hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
2296 else
2297 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
2298 #ifdef IXGBE_FCOE
2299 if (netdev->features & NETIF_F_FCOE_MTU)
2300 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
2301 #endif
2302 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
2303
2304 rdlen = adapter->rx_ring[0]->count * sizeof(union ixgbe_adv_rx_desc);
2305 /* disable receives while setting up the descriptors */
2306 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
2307 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
2308
2309 /*
2310 * Setup the HW Rx Head and Tail Descriptor Pointers and
2311 * the Base and Length of the Rx Descriptor Ring
2312 */
2313 for (i = 0; i < adapter->num_rx_queues; i++) {
2314 rx_ring = adapter->rx_ring[i];
2315 rdba = rx_ring->dma;
2316 j = rx_ring->reg_idx;
2317 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j), (rdba & DMA_BIT_MASK(32)));
2318 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
2319 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j), rdlen);
2320 IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
2321 IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
2322 rx_ring->head = IXGBE_RDH(j);
2323 rx_ring->tail = IXGBE_RDT(j);
2324 rx_ring->rx_buf_len = rx_buf_len;
2325
2326 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)
2327 rx_ring->flags |= IXGBE_RING_RX_PS_ENABLED;
2328 else
2329 rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED;
2330
2331 #ifdef IXGBE_FCOE
2332 if (netdev->features & NETIF_F_FCOE_MTU) {
2333 struct ixgbe_ring_feature *f;
2334 f = &adapter->ring_feature[RING_F_FCOE];
2335 if ((i >= f->mask) && (i < f->mask + f->indices)) {
2336 rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED;
2337 if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE)
2338 rx_ring->rx_buf_len =
2339 IXGBE_FCOE_JUMBO_FRAME_SIZE;
2340 }
2341 }
2342
2343 #endif /* IXGBE_FCOE */
2344 ixgbe_configure_srrctl(adapter, rx_ring);
2345 }
2346
2347 if (hw->mac.type == ixgbe_mac_82598EB) {
2348 /*
2349 * For VMDq support of different descriptor types or
2350 * buffer sizes through the use of multiple SRRCTL
2351 * registers, RDRXCTL.MVMEN must be set to 1
2352 *
2353 		 * Also, although the manual doesn't state it clearly, DCA
2354 		 * hints will use only queue 0's tags unless this bit is set.
2355 		 * The only side effect of setting it is that SRRCTL[0..15]
2356 		 * must all be fully programmed.
2357 */
2358 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2359 rdrxctl |= IXGBE_RDRXCTL_MVMEN;
2360 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
2361 }
2362
2363 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
2364 u32 vt_reg_bits;
2365 u32 reg_offset, vf_shift;
2366 u32 vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
2367 vt_reg_bits = IXGBE_VMD_CTL_VMDQ_EN
2368 | IXGBE_VT_CTL_REPLEN;
2369 vt_reg_bits |= (adapter->num_vfs <<
2370 IXGBE_VT_CTL_POOL_SHIFT);
2371 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl | vt_reg_bits);
2372 IXGBE_WRITE_REG(hw, IXGBE_MRQC, 0);
2373
2374 vf_shift = adapter->num_vfs % 32;
2375 reg_offset = adapter->num_vfs / 32;
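		/* VFRE/VFTE come in pairs of 32-bit registers covering 64
		 * pools; the PF's pool sits just above the VFs, so num_vfs
		 * selects its register (/ 32) and bit (% 32) */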
2376 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0);
2377 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0);
2378 IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0);
2379 IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0);
2380 /* Enable only the PF's pool for Tx/Rx */
2381 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift));
2382 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift));
2383 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
2384 ixgbe_set_vmolr(hw, adapter->num_vfs);
2385 }
2386
2387 /* Program MRQC for the distribution of queues */
2388 mrqc = ixgbe_setup_mrqc(adapter);
2389
2390 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
2391 /* Fill out redirection table */
2392 for (i = 0, j = 0; i < 128; i++, j++) {
2393 if (j == adapter->ring_feature[RING_F_RSS].indices)
2394 j = 0;
2395 /* reta = 4-byte sliding window of
2396 * 0x00..(indices-1)(indices-1)00..etc. */
2397 reta = (reta << 8) | (j * 0x11);
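			/* j * 0x11 mirrors the queue index into both nibbles
			 * of the entry, presumably so it reads correctly
			 * whichever nibble the MAC consumes; with 4 indices
			 * the bytes cycle 0x00, 0x11, 0x22, 0x33 */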
2398 if ((i & 3) == 3)
2399 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
2400 }
2401
2402 /* Fill out hash function seeds */
2403 for (i = 0; i < 10; i++)
2404 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]);
2405
2406 if (hw->mac.type == ixgbe_mac_82598EB)
2407 mrqc |= IXGBE_MRQC_RSSEN;
2408 /* Perform hash on these packet types */
2409 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
2410 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
2411 | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
2412 | IXGBE_MRQC_RSS_FIELD_IPV6
2413 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
2414 | IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
2415 }
2416 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2417
2418 if (adapter->num_vfs) {
2419 u32 reg;
2420
2421 /* Map PF MAC address in RAR Entry 0 to first pool
2422 * following VFs */
2423 hw->mac.ops.set_vmdq(hw, 0, adapter->num_vfs);
2424
2425 /* Set up VF register offsets for selected VT Mode, i.e.
2426 * 64 VFs for SR-IOV */
2427 reg = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
2428 reg |= IXGBE_GCR_EXT_SRIOV;
2429 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, reg);
2430 }
2431
2432 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2433
2434 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED ||
2435 adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED) {
2436 		/* Setting PCSD disables checksum reporting in the
2437 		 * descriptor and enables the RSS hash field instead */
2438 rxcsum |= IXGBE_RXCSUM_PCSD;
2439 }
2440 if (!(rxcsum & IXGBE_RXCSUM_PCSD)) {
2441 /* Enable IPv4 payload checksum for UDP fragments
2442 * if PCSD is not set */
2443 rxcsum |= IXGBE_RXCSUM_IPPCSE;
2444 }
2445
2446 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
2447
2448 if (hw->mac.type == ixgbe_mac_82599EB) {
2449 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2450 rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
2451 rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
2452 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
2453 }
2454
2455 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
2456 /* Enable 82599 HW-RSC */
2457 for (i = 0; i < adapter->num_rx_queues; i++)
2458 ixgbe_configure_rscctl(adapter, i);
2459
2460 /* Disable RSC for ACK packets */
2461 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
2462 (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
2463 }
2464 }
2465
2466 static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
2467 {
2468 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2469 struct ixgbe_hw *hw = &adapter->hw;
2470 int pool_ndx = adapter->num_vfs;
2471
2472 /* add VID to filter table */
2473 hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, true);
2474 }
2475
2476 static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
2477 {
2478 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2479 struct ixgbe_hw *hw = &adapter->hw;
2480 int pool_ndx = adapter->num_vfs;
2481
2482 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2483 ixgbe_irq_disable(adapter);
2484
2485 vlan_group_set_device(adapter->vlgrp, vid, NULL);
2486
2487 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2488 ixgbe_irq_enable(adapter);
2489
2490 /* remove VID from filter table */
2491 hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false);
2492 }
2493
2494 /**
2495 * ixgbe_vlan_filter_disable - helper to disable hw vlan filtering
2496 * @adapter: driver data
2497 */
2498 static void ixgbe_vlan_filter_disable(struct ixgbe_adapter *adapter)
2499 {
2500 struct ixgbe_hw *hw = &adapter->hw;
2501 u32 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2502 int i, j;
2503
2504 switch (hw->mac.type) {
2505 case ixgbe_mac_82598EB:
2506 vlnctrl &= ~(IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE);
2507 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
2508 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
2509 break;
2510 case ixgbe_mac_82599EB:
2511 vlnctrl &= ~IXGBE_VLNCTRL_VFE;
2512 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
2513 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
2514 for (i = 0; i < adapter->num_rx_queues; i++) {
2515 j = adapter->rx_ring[i]->reg_idx;
2516 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
2517 vlnctrl &= ~IXGBE_RXDCTL_VME;
2518 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
2519 }
2520 break;
2521 default:
2522 break;
2523 }
2524 }
2525
2526 /**
2527 * ixgbe_vlan_filter_enable - helper to enable hw vlan filtering
2528 * @adapter: driver data
2529 */
2530 static void ixgbe_vlan_filter_enable(struct ixgbe_adapter *adapter)
2531 {
2532 struct ixgbe_hw *hw = &adapter->hw;
2533 u32 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2534 int i, j;
2535
2536 switch (hw->mac.type) {
2537 case ixgbe_mac_82598EB:
2538 vlnctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE;
2539 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
2540 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
2541 break;
2542 case ixgbe_mac_82599EB:
2543 vlnctrl |= IXGBE_VLNCTRL_VFE;
2544 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
2545 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
2546 for (i = 0; i < adapter->num_rx_queues; i++) {
2547 j = adapter->rx_ring[i]->reg_idx;
2548 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
2549 vlnctrl |= IXGBE_RXDCTL_VME;
2550 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
2551 }
2552 break;
2553 default:
2554 break;
2555 }
2556 }
2557
2558 static void ixgbe_vlan_rx_register(struct net_device *netdev,
2559 struct vlan_group *grp)
2560 {
2561 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2562
2563 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2564 ixgbe_irq_disable(adapter);
2565 adapter->vlgrp = grp;
2566
2567 /*
2568 * For a DCB driver, always enable VLAN tag stripping so we can
2569 * still receive traffic from a DCB-enabled host even if we're
2570 * not in DCB mode.
2571 */
2572 ixgbe_vlan_filter_enable(adapter);
2573
2574 ixgbe_vlan_rx_add_vid(netdev, 0);
2575
2576 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2577 ixgbe_irq_enable(adapter);
2578 }
2579
2580 static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
2581 {
2582 ixgbe_vlan_rx_register(adapter->netdev, adapter->vlgrp);
2583
2584 if (adapter->vlgrp) {
2585 u16 vid;
2586 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
2587 if (!vlan_group_get_device(adapter->vlgrp, vid))
2588 continue;
2589 ixgbe_vlan_rx_add_vid(adapter->netdev, vid);
2590 }
2591 }
2592 }
2593
2594 /**
2595 * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
2596 * @netdev: network interface device structure
2597 *
2598  * The set_rx_mode entry point is called whenever the unicast/multicast
2599 * address list or the network interface flags are updated. This routine is
2600 * responsible for configuring the hardware for proper unicast, multicast and
2601 * promiscuous mode.
2602 **/
2603 void ixgbe_set_rx_mode(struct net_device *netdev)
2604 {
2605 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2606 struct ixgbe_hw *hw = &adapter->hw;
2607 u32 fctrl;
2608
2609 /* Check for Promiscuous and All Multicast modes */
2610
2611 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2612
2613 if (netdev->flags & IFF_PROMISC) {
2614 hw->addr_ctrl.user_set_promisc = 1;
2615 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2616 /* don't hardware filter vlans in promisc mode */
2617 ixgbe_vlan_filter_disable(adapter);
2618 } else {
2619 if (netdev->flags & IFF_ALLMULTI) {
2620 fctrl |= IXGBE_FCTRL_MPE;
2621 fctrl &= ~IXGBE_FCTRL_UPE;
2622 } else {
2623 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2624 }
2625 ixgbe_vlan_filter_enable(adapter);
2626 hw->addr_ctrl.user_set_promisc = 0;
2627 }
2628
2629 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2630
2631 /* reprogram secondary unicast list */
2632 hw->mac.ops.update_uc_addr_list(hw, netdev);
2633
2634 /* reprogram multicast list */
2635 hw->mac.ops.update_mc_addr_list(hw, netdev);
2636
2637 if (adapter->num_vfs)
2638 ixgbe_restore_vf_multicasts(adapter);
2639 }
2640
2641 static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
2642 {
2643 int q_idx;
2644 struct ixgbe_q_vector *q_vector;
2645 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2646
2647 /* legacy and MSI only use one vector */
2648 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
2649 q_vectors = 1;
2650
2651 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
2652 struct napi_struct *napi;
2653 q_vector = adapter->q_vector[q_idx];
2654 napi = &q_vector->napi;
2655 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
2656 if (!q_vector->rxr_count || !q_vector->txr_count) {
2657 if (q_vector->txr_count == 1)
2658 napi->poll = &ixgbe_clean_txonly;
2659 else if (q_vector->rxr_count == 1)
2660 napi->poll = &ixgbe_clean_rxonly;
2661 }
2662 }
2663
2664 napi_enable(napi);
2665 }
2666 }
2667
2668 static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
2669 {
2670 int q_idx;
2671 struct ixgbe_q_vector *q_vector;
2672 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2673
2674 /* legacy and MSI only use one vector */
2675 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
2676 q_vectors = 1;
2677
2678 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
2679 q_vector = adapter->q_vector[q_idx];
2680 napi_disable(&q_vector->napi);
2681 }
2682 }
2683
2684 #ifdef CONFIG_IXGBE_DCB
2685 /**
2686 * ixgbe_configure_dcb - Configure DCB hardware
2687 * @adapter: ixgbe adapter struct
2688 *
2689 * This is called by the driver on open to configure the DCB hardware.
2690  * This is also called by the generic netlink interface when reconfiguring
2691 * the DCB state.
2692 */
2693 static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
2694 {
2695 struct ixgbe_hw *hw = &adapter->hw;
2696 u32 txdctl;
2697 int i, j;
2698
2699 ixgbe_dcb_check_config(&adapter->dcb_cfg);
2700 ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_TX_CONFIG);
2701 ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_RX_CONFIG);
2702
2703 /* reconfigure the hardware */
2704 ixgbe_dcb_hw_config(&adapter->hw, &adapter->dcb_cfg);
2705
2706 for (i = 0; i < adapter->num_tx_queues; i++) {
2707 j = adapter->tx_ring[i]->reg_idx;
2708 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
2709 /* PThresh workaround for Tx hang with DFP enabled. */
2710 txdctl |= 32;
2711 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
2712 }
2713 /* Enable VLAN tag insert/strip */
2714 ixgbe_vlan_filter_enable(adapter);
2715
2716 hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
2717 }
2718
2719 #endif
2720 static void ixgbe_configure(struct ixgbe_adapter *adapter)
2721 {
2722 struct net_device *netdev = adapter->netdev;
2723 struct ixgbe_hw *hw = &adapter->hw;
2724 int i;
2725
2726 ixgbe_set_rx_mode(netdev);
2727
2728 ixgbe_restore_vlan(adapter);
2729 #ifdef CONFIG_IXGBE_DCB
2730 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
2731 if (hw->mac.type == ixgbe_mac_82598EB)
2732 netif_set_gso_max_size(netdev, 32768);
2733 else
2734 netif_set_gso_max_size(netdev, 65536);
2735 ixgbe_configure_dcb(adapter);
2736 } else {
2737 netif_set_gso_max_size(netdev, 65536);
2738 }
2739 #else
2740 netif_set_gso_max_size(netdev, 65536);
2741 #endif
2742
2743 #ifdef IXGBE_FCOE
2744 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
2745 ixgbe_configure_fcoe(adapter);
2746
2747 #endif /* IXGBE_FCOE */
2748 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
2749 for (i = 0; i < adapter->num_tx_queues; i++)
2750 adapter->tx_ring[i]->atr_sample_rate =
2751 adapter->atr_sample_rate;
2752 ixgbe_init_fdir_signature_82599(hw, adapter->fdir_pballoc);
2753 } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
2754 ixgbe_init_fdir_perfect_82599(hw, adapter->fdir_pballoc);
2755 }
2756
2757 ixgbe_configure_tx(adapter);
2758 ixgbe_configure_rx(adapter);
2759 for (i = 0; i < adapter->num_rx_queues; i++)
2760 ixgbe_alloc_rx_buffers(adapter, adapter->rx_ring[i],
2761 (adapter->rx_ring[i]->count - 1));
2762 }
2763
2764 static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
2765 {
2766 switch (hw->phy.type) {
2767 case ixgbe_phy_sfp_avago:
2768 case ixgbe_phy_sfp_ftl:
2769 case ixgbe_phy_sfp_intel:
2770 case ixgbe_phy_sfp_unknown:
2771 case ixgbe_phy_tw_tyco:
2772 case ixgbe_phy_tw_unknown:
2773 return true;
2774 default:
2775 return false;
2776 }
2777 }
2778
2779 /**
2780 * ixgbe_sfp_link_config - set up SFP+ link
2781 * @adapter: pointer to private adapter struct
2782 **/
2783 static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
2784 {
2785 struct ixgbe_hw *hw = &adapter->hw;
2786
2787 if (hw->phy.multispeed_fiber) {
2788 /*
2789 * In multispeed fiber setups, the device may not have
2790 * had a physical connection when the driver loaded.
2791 * If that's the case, the initial link configuration
2792 * couldn't get the MAC into 10G or 1G mode, so we'll
2793 * never have a link status change interrupt fire.
2794 * We need to try and force an autonegotiation
2795 * session, then bring up link.
2796 */
2797 hw->mac.ops.setup_sfp(hw);
2798 if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
2799 schedule_work(&adapter->multispeed_fiber_task);
2800 } else {
2801 /*
2802 * Direct Attach Cu and non-multispeed fiber modules
2803 * still need to be configured properly prior to
2804 * attempting link.
2805 */
2806 if (!(adapter->flags & IXGBE_FLAG_IN_SFP_MOD_TASK))
2807 schedule_work(&adapter->sfp_config_module_task);
2808 }
2809 }
2810
2811 /**
2812 * ixgbe_non_sfp_link_config - set up non-SFP+ link
2813 * @hw: pointer to private hardware struct
2814 *
2815 * Returns 0 on success, negative on failure
2816 **/
2817 static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
2818 {
2819 u32 autoneg;
2820 bool negotiation, link_up = false;
2821 u32 ret = IXGBE_ERR_LINK_SETUP;
2822
2823 if (hw->mac.ops.check_link)
2824 ret = hw->mac.ops.check_link(hw, &autoneg, &link_up, false);
2825
2826 if (ret)
2827 goto link_cfg_out;
2828
2829 if (hw->mac.ops.get_link_capabilities)
2830 ret = hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation);
2831 if (ret)
2832 goto link_cfg_out;
2833
2834 if (hw->mac.ops.setup_link)
2835 ret = hw->mac.ops.setup_link(hw, autoneg, negotiation, link_up);
2836 link_cfg_out:
2837 return ret;
2838 }
2839
2840 #define IXGBE_MAX_RX_DESC_POLL 10
2841 static inline void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
2842 int rxr)
2843 {
2844 int j = adapter->rx_ring[rxr]->reg_idx;
2845 int k;
2846
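	/* the enable bit may take a moment to latch in hardware; poll for
	 * it before posting the ring's initial buffers below */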
2847 for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
2848 if (IXGBE_READ_REG(&adapter->hw,
2849 IXGBE_RXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
2850 break;
2851 else
2852 msleep(1);
2853 }
2854 if (k >= IXGBE_MAX_RX_DESC_POLL) {
2855 DPRINTK(DRV, ERR, "RXDCTL.ENABLE on Rx queue %d "
2856 "not set within the polling period\n", rxr);
2857 }
2858 ixgbe_release_rx_desc(&adapter->hw, adapter->rx_ring[rxr],
2859 (adapter->rx_ring[rxr]->count - 1));
2860 }
2861
2862 static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
2863 {
2864 struct net_device *netdev = adapter->netdev;
2865 struct ixgbe_hw *hw = &adapter->hw;
2866 int i, j = 0;
2867 int num_rx_rings = adapter->num_rx_queues;
2868 int err;
2869 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
2870 u32 txdctl, rxdctl, mhadd;
2871 u32 dmatxctl;
2872 u32 gpie;
2873 u32 ctrl_ext;
2874
2875 ixgbe_get_hw_control(adapter);
2876
2877 if ((adapter->flags & IXGBE_FLAG_MSIX_ENABLED) ||
2878 (adapter->flags & IXGBE_FLAG_MSI_ENABLED)) {
2879 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
2880 gpie = (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME |
2881 IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD);
2882 } else {
2883 /* MSI only */
2884 gpie = 0;
2885 }
2886 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
2887 gpie &= ~IXGBE_GPIE_VTMODE_MASK;
2888 gpie |= IXGBE_GPIE_VTMODE_64;
2889 }
2890 /* XXX: to interrupt immediately for EICS writes, enable this */
2891 /* gpie |= IXGBE_GPIE_EIMEN; */
2892 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
2893 }
2894
2895 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
2896 /*
2897 * use EIAM to auto-mask when MSI-X interrupt is asserted
2898 * this saves a register write for every interrupt
2899 */
2900 switch (hw->mac.type) {
2901 case ixgbe_mac_82598EB:
2902 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
2903 break;
2904 default:
2905 case ixgbe_mac_82599EB:
2906 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
2907 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
2908 break;
2909 }
2910 } else {
2911 /* legacy interrupts, use EIAM to auto-mask when reading EICR,
2912 * specifically only auto mask tx and rx interrupts */
2913 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
2914 }
2915
2916 /* Enable fan failure interrupt if media type is copper */
2917 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
2918 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
2919 gpie |= IXGBE_SDP1_GPIEN;
2920 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
2921 }
2922
2923 if (hw->mac.type == ixgbe_mac_82599EB) {
2924 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
2925 gpie |= IXGBE_SDP1_GPIEN;
2926 gpie |= IXGBE_SDP2_GPIEN;
2927 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
2928 }
2929
2930 #ifdef IXGBE_FCOE
2931 /* adjust max frame to be able to do baby jumbo for FCoE */
2932 if ((netdev->features & NETIF_F_FCOE_MTU) &&
2933 (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE))
2934 max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;
2935
2936 #endif /* IXGBE_FCOE */
2937 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
2938 if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
2939 mhadd &= ~IXGBE_MHADD_MFS_MASK;
2940 mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
2941
2942 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
2943 }
2944
2945 for (i = 0; i < adapter->num_tx_queues; i++) {
2946 j = adapter->tx_ring[i]->reg_idx;
2947 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
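		/* with throttling disabled (EITR == 0) no ITR event flushes
		 * batched descriptor writebacks, so a nonzero WTHRESH could
		 * leave completions unwritten; clear TXDCTL[22:16] instead */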
2948 if (adapter->rx_itr_setting == 0) {
2949 /* cannot set wthresh when itr==0 */
2950 txdctl &= ~0x007F0000;
2951 } else {
2952 /* enable WTHRESH=8 descriptors, to encourage burst writeback */
2953 txdctl |= (8 << 16);
2954 }
2955 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
2956 }
2957
2958 if (hw->mac.type == ixgbe_mac_82599EB) {
2959 /* DMATXCTL.EN must be set after all Tx queue config is done */
2960 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2961 dmatxctl |= IXGBE_DMATXCTL_TE;
2962 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
2963 }
2964 for (i = 0; i < adapter->num_tx_queues; i++) {
2965 j = adapter->tx_ring[i]->reg_idx;
2966 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
2967 txdctl |= IXGBE_TXDCTL_ENABLE;
2968 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
2969 if (hw->mac.type == ixgbe_mac_82599EB) {
2970 int wait_loop = 10;
2971 /* poll for Tx Enable ready */
2972 do {
2973 msleep(1);
2974 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
2975 } while (--wait_loop &&
2976 !(txdctl & IXGBE_TXDCTL_ENABLE));
2977 if (!wait_loop)
2978 DPRINTK(DRV, ERR, "Could not enable "
2979 "Tx Queue %d\n", j);
2980 }
2981 }
2982
2983 for (i = 0; i < num_rx_rings; i++) {
2984 j = adapter->rx_ring[i]->reg_idx;
2985 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
2986 		/* enable PTHRESH=32 descriptors (half the internal cache)
2987 		 * and HTHRESH=0 descriptors (to minimize latency on fetch);
2988 		 * this also avoids a pesky rx_no_buffer_count increment */
2989 rxdctl |= 0x0020;
2990 rxdctl |= IXGBE_RXDCTL_ENABLE;
2991 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), rxdctl);
2992 if (hw->mac.type == ixgbe_mac_82599EB)
2993 ixgbe_rx_desc_queue_enable(adapter, i);
2994 }
2995 /* enable all receives */
2996 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
2997 if (hw->mac.type == ixgbe_mac_82598EB)
2998 rxdctl |= (IXGBE_RXCTRL_DMBYPS | IXGBE_RXCTRL_RXEN);
2999 else
3000 rxdctl |= IXGBE_RXCTRL_RXEN;
3001 hw->mac.ops.enable_rx_dma(hw, rxdctl);
3002
3003 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
3004 ixgbe_configure_msix(adapter);
3005 else
3006 ixgbe_configure_msi_and_legacy(adapter);
3007
3008 /* enable the optics */
3009 if (hw->phy.multispeed_fiber)
3010 hw->mac.ops.enable_tx_laser(hw);
3011
3012 clear_bit(__IXGBE_DOWN, &adapter->state);
3013 ixgbe_napi_enable_all(adapter);
3014
3015 /* clear any pending interrupts, may auto mask */
3016 IXGBE_READ_REG(hw, IXGBE_EICR);
3017
3018 ixgbe_irq_enable(adapter);
3019
3020 /*
3021 * If this adapter has a fan, check to see if we had a failure
3022 * before we enabled the interrupt.
3023 */
3024 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
3025 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
3026 if (esdp & IXGBE_ESDP_SDP1)
3027 DPRINTK(DRV, CRIT,
3028 "Fan has stopped, replace the adapter\n");
3029 }
3030
3031 /*
3032 * For hot-pluggable SFP+ devices, a new SFP+ module may have
3033 * arrived before interrupts were enabled but after probe. Such
3034 * devices wouldn't have their type identified yet. We need to
3035 * kick off the SFP+ module setup first, then try to bring up link.
3036 * If we're not hot-pluggable SFP+, we just need to configure link
3037 * and bring it up.
3038 */
3039 if (hw->phy.type == ixgbe_phy_unknown) {
3040 err = hw->phy.ops.identify(hw);
3041 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3042 /*
3043 			 * Take the device down and schedule the SFP config
3044 			 * work, which will unregister_netdev and log it.
3045 */
3046 ixgbe_down(adapter);
3047 schedule_work(&adapter->sfp_config_module_task);
3048 return err;
3049 }
3050 }
3051
3052 if (ixgbe_is_sfp(hw)) {
3053 ixgbe_sfp_link_config(adapter);
3054 } else {
3055 err = ixgbe_non_sfp_link_config(hw);
3056 if (err)
3057 DPRINTK(PROBE, ERR, "link_config FAILED %d\n", err);
3058 }
3059
3060 for (i = 0; i < adapter->num_tx_queues; i++)
3061 set_bit(__IXGBE_FDIR_INIT_DONE,
3062 &(adapter->tx_ring[i]->reinit_state));
3063
3064 /* enable transmits */
3065 netif_tx_start_all_queues(netdev);
3066
3067 	/* bring the link up in the watchdog; this could race with our first
3068 	 * link-up interrupt but shouldn't be a problem */
3069 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
3070 adapter->link_check_timeout = jiffies;
3071 mod_timer(&adapter->watchdog_timer, jiffies);
3072
3073 /* Set PF Reset Done bit so PF/VF Mail Ops can work */
3074 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
3075 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
3076 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
3077
3078 return 0;
3079 }
3080
3081 void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
3082 {
3083 WARN_ON(in_interrupt());
3084 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
3085 msleep(1);
3086 ixgbe_down(adapter);
3087 /*
3088 * If SR-IOV enabled then wait a bit before bringing the adapter
3089 * back up to give the VFs time to respond to the reset. The
3090 * two second wait is based upon the watchdog timer cycle in
3091 * the VF driver.
3092 */
3093 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
3094 msleep(2000);
3095 ixgbe_up(adapter);
3096 clear_bit(__IXGBE_RESETTING, &adapter->state);
3097 }
3098
3099 int ixgbe_up(struct ixgbe_adapter *adapter)
3100 {
3101 /* hardware has been reset, we need to reload some things */
3102 ixgbe_configure(adapter);
3103
3104 return ixgbe_up_complete(adapter);
3105 }
3106
3107 void ixgbe_reset(struct ixgbe_adapter *adapter)
3108 {
3109 struct ixgbe_hw *hw = &adapter->hw;
3110 int err;
3111
3112 err = hw->mac.ops.init_hw(hw);
3113 switch (err) {
3114 case 0:
3115 case IXGBE_ERR_SFP_NOT_PRESENT:
3116 break;
3117 case IXGBE_ERR_MASTER_REQUESTS_PENDING:
3118 dev_err(&adapter->pdev->dev, "master disable timed out\n");
3119 break;
3120 case IXGBE_ERR_EEPROM_VERSION:
3121 /* We are running on a pre-production device, log a warning */
3122 dev_warn(&adapter->pdev->dev, "This device is a pre-production "
3123 "adapter/LOM. Please be aware there may be issues "
3124 "associated with your hardware. If you are "
3125 "experiencing problems please contact your Intel or "
3126 "hardware representative who provided you with this "
3127 "hardware.\n");
3128 break;
3129 default:
3130 dev_err(&adapter->pdev->dev, "Hardware Error: %d\n", err);
3131 }
3132
3133 /* reprogram the RAR[0] in case user changed it. */
3134 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
3135 IXGBE_RAH_AV);
3136 }
3137
3138 /**
3139 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
3140 * @adapter: board private structure
3141 * @rx_ring: ring to free buffers from
3142 **/
3143 static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
3144 struct ixgbe_ring *rx_ring)
3145 {
3146 struct pci_dev *pdev = adapter->pdev;
3147 unsigned long size;
3148 unsigned int i;
3149
3150 /* Free all the Rx ring sk_buffs */
3151
3152 for (i = 0; i < rx_ring->count; i++) {
3153 struct ixgbe_rx_buffer *rx_buffer_info;
3154
3155 rx_buffer_info = &rx_ring->rx_buffer_info[i];
3156 if (rx_buffer_info->dma) {
3157 pci_unmap_single(pdev, rx_buffer_info->dma,
3158 rx_ring->rx_buf_len,
3159 PCI_DMA_FROMDEVICE);
3160 rx_buffer_info->dma = 0;
3161 }
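		/* HW RSC can chain several buffers onto one skb via
		 * skb->prev; walk the chain, unmapping and freeing each */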
3162 if (rx_buffer_info->skb) {
3163 struct sk_buff *skb = rx_buffer_info->skb;
3164 rx_buffer_info->skb = NULL;
3165 do {
3166 struct sk_buff *this = skb;
3167 if (IXGBE_RSC_CB(this)->dma) {
3168 pci_unmap_single(pdev, IXGBE_RSC_CB(this)->dma,
3169 rx_ring->rx_buf_len,
3170 PCI_DMA_FROMDEVICE);
3171 IXGBE_RSC_CB(this)->dma = 0;
3172 }
3173 skb = skb->prev;
3174 dev_kfree_skb(this);
3175 } while (skb);
3176 }
3177 if (!rx_buffer_info->page)
3178 continue;
3179 if (rx_buffer_info->page_dma) {
3180 pci_unmap_page(pdev, rx_buffer_info->page_dma,
3181 PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
3182 rx_buffer_info->page_dma = 0;
3183 }
3184 put_page(rx_buffer_info->page);
3185 rx_buffer_info->page = NULL;
3186 rx_buffer_info->page_offset = 0;
3187 }
3188
3189 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
3190 memset(rx_ring->rx_buffer_info, 0, size);
3191
3192 /* Zero out the descriptor ring */
3193 memset(rx_ring->desc, 0, rx_ring->size);
3194
3195 rx_ring->next_to_clean = 0;
3196 rx_ring->next_to_use = 0;
3197
3198 if (rx_ring->head)
3199 writel(0, adapter->hw.hw_addr + rx_ring->head);
3200 if (rx_ring->tail)
3201 writel(0, adapter->hw.hw_addr + rx_ring->tail);
3202 }
3203
3204 /**
3205 * ixgbe_clean_tx_ring - Free Tx Buffers
3206 * @adapter: board private structure
3207 * @tx_ring: ring to be cleaned
3208 **/
3209 static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
3210 struct ixgbe_ring *tx_ring)
3211 {
3212 struct ixgbe_tx_buffer *tx_buffer_info;
3213 unsigned long size;
3214 unsigned int i;
3215
3216 /* Free all the Tx ring sk_buffs */
3217
3218 for (i = 0; i < tx_ring->count; i++) {
3219 tx_buffer_info = &tx_ring->tx_buffer_info[i];
3220 ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
3221 }
3222
3223 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
3224 memset(tx_ring->tx_buffer_info, 0, size);
3225
3226 /* Zero out the descriptor ring */
3227 memset(tx_ring->desc, 0, tx_ring->size);
3228
3229 tx_ring->next_to_use = 0;
3230 tx_ring->next_to_clean = 0;
3231
3232 if (tx_ring->head)
3233 writel(0, adapter->hw.hw_addr + tx_ring->head);
3234 if (tx_ring->tail)
3235 writel(0, adapter->hw.hw_addr + tx_ring->tail);
3236 }
3237
3238 /**
3239 * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
3240 * @adapter: board private structure
3241 **/
3242 static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
3243 {
3244 int i;
3245
3246 for (i = 0; i < adapter->num_rx_queues; i++)
3247 ixgbe_clean_rx_ring(adapter, adapter->rx_ring[i]);
3248 }
3249
3250 /**
3251 * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
3252 * @adapter: board private structure
3253 **/
3254 static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
3255 {
3256 int i;
3257
3258 for (i = 0; i < adapter->num_tx_queues; i++)
3259 ixgbe_clean_tx_ring(adapter, adapter->tx_ring[i]);
3260 }
3261
3262 void ixgbe_down(struct ixgbe_adapter *adapter)
3263 {
3264 struct net_device *netdev = adapter->netdev;
3265 struct ixgbe_hw *hw = &adapter->hw;
3266 u32 rxctrl;
3267 u32 txdctl;
3268 int i, j;
3269
3270 /* signal that we are down to the interrupt handler */
3271 set_bit(__IXGBE_DOWN, &adapter->state);
3272
3273 /* power down the optics */
3274 if (hw->phy.multispeed_fiber)
3275 hw->mac.ops.disable_tx_laser(hw);
3276
3277 /* disable receive for all VFs and wait one second */
3278 if (adapter->num_vfs) {
3279 /* ping all the active vfs to let them know we are going down */
3280 ixgbe_ping_all_vfs(adapter);
3281
3282 /* Disable all VFTE/VFRE TX/RX */
3283 ixgbe_disable_tx_rx(adapter);
3284
3285 /* Mark all the VFs as inactive */
3286 for (i = 0 ; i < adapter->num_vfs; i++)
3287 adapter->vfinfo[i].clear_to_send = 0;
3288 }
3289
3290 /* disable receives */
3291 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3292 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
3293
3294 netif_tx_disable(netdev);
3295
3296 IXGBE_WRITE_FLUSH(hw);
3297 msleep(10);
3298
3299 netif_tx_stop_all_queues(netdev);
3300
3301 ixgbe_irq_disable(adapter);
3302
3303 ixgbe_napi_disable_all(adapter);
3304
3305 clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
3306 del_timer_sync(&adapter->sfp_timer);
3307 del_timer_sync(&adapter->watchdog_timer);
3308 cancel_work_sync(&adapter->watchdog_task);
3309
3310 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
3311 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
3312 cancel_work_sync(&adapter->fdir_reinit_task);
3313
3314 /* disable transmits in the hardware now that interrupts are off */
3315 for (i = 0; i < adapter->num_tx_queues; i++) {
3316 j = adapter->tx_ring[i]->reg_idx;
3317 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
3318 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j),
3319 (txdctl & ~IXGBE_TXDCTL_ENABLE));
3320 }
3321 /* Disable the Tx DMA engine on 82599 */
3322 if (hw->mac.type == ixgbe_mac_82599EB)
3323 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
3324 (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
3325 ~IXGBE_DMATXCTL_TE));
3326
3327 netif_carrier_off(netdev);
3328
3329 /* clear n-tuple filters that are cached */
3330 ethtool_ntuple_flush(netdev);
3331
3332 if (!pci_channel_offline(adapter->pdev))
3333 ixgbe_reset(adapter);
3334 ixgbe_clean_all_tx_rings(adapter);
3335 ixgbe_clean_all_rx_rings(adapter);
3336
3337 #ifdef CONFIG_IXGBE_DCA
3338 /* since we reset the hardware DCA settings were cleared */
3339 ixgbe_setup_dca(adapter);
3340 #endif
3341 }
3342
3343 /**
3344 * ixgbe_poll - NAPI Rx polling callback
3345 * @napi: structure for representing this polling device
3346 * @budget: how many packets driver is allowed to clean
3347 *
3348 * This function is used for legacy and MSI, NAPI mode
3349 **/
3350 static int ixgbe_poll(struct napi_struct *napi, int budget)
3351 {
3352 struct ixgbe_q_vector *q_vector =
3353 container_of(napi, struct ixgbe_q_vector, napi);
3354 struct ixgbe_adapter *adapter = q_vector->adapter;
3355 int tx_clean_complete, work_done = 0;
3356
3357 #ifdef CONFIG_IXGBE_DCA
3358 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
3359 ixgbe_update_tx_dca(adapter, adapter->tx_ring[0]);
3360 ixgbe_update_rx_dca(adapter, adapter->rx_ring[0]);
3361 }
3362 #endif
3363
3364 tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring[0]);
3365 ixgbe_clean_rx_irq(q_vector, adapter->rx_ring[0], &work_done, budget);
3366
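	/* if Tx cleanup didn't finish, claim the whole budget so NAPI
	 * keeps polling rather than re-enabling interrupts */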
3367 if (!tx_clean_complete)
3368 work_done = budget;
3369
3370 /* If budget not fully consumed, exit the polling mode */
3371 if (work_done < budget) {
3372 napi_complete(napi);
3373 if (adapter->rx_itr_setting & 1)
3374 ixgbe_set_itr(adapter);
3375 if (!test_bit(__IXGBE_DOWN, &adapter->state))
3376 ixgbe_irq_enable_queues(adapter, IXGBE_EIMS_RTX_QUEUE);
3377 }
3378 return work_done;
3379 }
3380
3381 /**
3382 * ixgbe_tx_timeout - Respond to a Tx Hang
3383 * @netdev: network interface device structure
3384 **/
3385 static void ixgbe_tx_timeout(struct net_device *netdev)
3386 {
3387 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3388
3389 /* Do the reset outside of interrupt context */
3390 schedule_work(&adapter->reset_task);
3391 }
3392
3393 static void ixgbe_reset_task(struct work_struct *work)
3394 {
3395 struct ixgbe_adapter *adapter;
3396 adapter = container_of(work, struct ixgbe_adapter, reset_task);
3397
3398 /* If we're already down or resetting, just bail */
3399 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
3400 test_bit(__IXGBE_RESETTING, &adapter->state))
3401 return;
3402
3403 adapter->tx_timeout_count++;
3404
3405 ixgbe_reinit_locked(adapter);
3406 }
3407
3408 #ifdef CONFIG_IXGBE_DCB
3409 static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
3410 {
3411 bool ret = false;
3412 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_DCB];
3413
3414 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
3415 return ret;
3416
3417 f->mask = 0x7 << 3;
3418 adapter->num_rx_queues = f->indices;
3419 adapter->num_tx_queues = f->indices;
3420 ret = true;
3421
3422 return ret;
3423 }
3424 #endif
3425
3426 /**
3427 * ixgbe_set_rss_queues: Allocate queues for RSS
3428 * @adapter: board private structure to initialize
3429 *
3430 * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try
3431 * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
3432 *
3433 **/
3434 static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
3435 {
3436 bool ret = false;
3437 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_RSS];
3438
3439 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
3440 f->mask = 0xF;
3441 adapter->num_rx_queues = f->indices;
3442 adapter->num_tx_queues = f->indices;
3443 ret = true;
3444 } else {
3445 ret = false;
3446 }
3447
3448 return ret;
3449 }
3450
3451 /**
3452 * ixgbe_set_fdir_queues: Allocate queues for Flow Director
3453 * @adapter: board private structure to initialize
3454 *
3455 * Flow Director is an advanced Rx filter, attempting to get Rx flows back
3456 * to the original CPU that initiated the Tx session. This runs in addition
3457 * to RSS, so if a packet doesn't match an FDIR filter, we can still spread the
3458 * Rx load across CPUs using RSS.
3459 *
3460 **/
3461 static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
3462 {
3463 bool ret = false;
3464 struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR];
3465
3466 f_fdir->indices = min((int)num_online_cpus(), f_fdir->indices);
3467 f_fdir->mask = 0;
3468
3469 /* Flow Director must have RSS enabled */
3470 	if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
3471 	    ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
3472 	     (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))) {
3473 adapter->num_tx_queues = f_fdir->indices;
3474 adapter->num_rx_queues = f_fdir->indices;
3475 ret = true;
3476 } else {
3477 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
3478 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
3479 }
3480 return ret;
3481 }
3482
3483 #ifdef IXGBE_FCOE
3484 /**
3485  * ixgbe_set_fcoe_queues: Allocate queues for Fibre Channel over Ethernet (FCoE)
3486 * @adapter: board private structure to initialize
3487 *
3488 * FCoE RX FCRETA can use up to 8 rx queues for up to 8 different exchanges.
3489 * The ring feature mask is not used as a mask for FCoE, as it can take any 8
3490 * rx queues out of the max number of rx queues, instead, it is used as the
3491 * index of the first rx queue used by FCoE.
3492 *
3493 **/
3494 static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
3495 {
3496 bool ret = false;
3497 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
3498
3499 f->indices = min((int)num_online_cpus(), f->indices);
3500 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
3501 adapter->num_rx_queues = 1;
3502 adapter->num_tx_queues = 1;
3503 #ifdef CONFIG_IXGBE_DCB
3504 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
3505 DPRINTK(PROBE, INFO, "FCoE enabled with DCB\n");
3506 ixgbe_set_dcb_queues(adapter);
3507 }
3508 #endif
3509 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
3510 DPRINTK(PROBE, INFO, "FCoE enabled with RSS\n");
3511 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
3512 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
3513 ixgbe_set_fdir_queues(adapter);
3514 else
3515 ixgbe_set_rss_queues(adapter);
3516 }
3517 /* adding FCoE rx rings to the end */
3518 f->mask = adapter->num_rx_queues;
3519 adapter->num_rx_queues += f->indices;
3520 adapter->num_tx_queues += f->indices;
3521
3522 ret = true;
3523 }
3524
3525 return ret;
3526 }
3527
3528 #endif /* IXGBE_FCOE */
3529 /**
3530 * ixgbe_set_sriov_queues: Allocate queues for IOV use
3531 * @adapter: board private structure to initialize
3532 *
3533 * IOV doesn't actually use anything, so just NAK the
3534 * request for now and let the other queue routines
3535 * figure out what to do.
3536 */
3537 static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
3538 {
3539 return false;
3540 }
3541
3542 /**
3543  * ixgbe_set_num_queues: Allocate queues for device, feature dependent
3544 * @adapter: board private structure to initialize
3545 *
3546 * This is the top level queue allocation routine. The order here is very
3547 * important, starting with the "most" number of features turned on at once,
3548 * and ending with the smallest set of features. This way large combinations
3549 * can be allocated if they're turned on, and smaller combinations are the
3550 * fallthrough conditions.
3551 *
3552 **/
3553 static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
3554 {
3555 /* Start with base case */
3556 adapter->num_rx_queues = 1;
3557 adapter->num_tx_queues = 1;
3558 adapter->num_rx_pools = adapter->num_rx_queues;
3559 adapter->num_rx_queues_per_pool = 1;
3560
3561 if (ixgbe_set_sriov_queues(adapter))
3562 return;
3563
3564 #ifdef IXGBE_FCOE
3565 if (ixgbe_set_fcoe_queues(adapter))
3566 goto done;
3567
3568 #endif /* IXGBE_FCOE */
3569 #ifdef CONFIG_IXGBE_DCB
3570 if (ixgbe_set_dcb_queues(adapter))
3571 goto done;
3572
3573 #endif
3574 if (ixgbe_set_fdir_queues(adapter))
3575 goto done;
3576
3577 if (ixgbe_set_rss_queues(adapter))
3578 goto done;
3579
3580 /* fallback to base case */
3581 adapter->num_rx_queues = 1;
3582 adapter->num_tx_queues = 1;
3583
3584 done:
3585 /* Notify the stack of the (possibly) reduced Tx Queue count. */
3586 adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
3587 }
3588
3589 static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
3590 int vectors)
3591 {
3592 int err, vector_threshold;
3593
3594 /* We'll want at least 3 (vector_threshold):
3595 * 1) TxQ[0] Cleanup
3596 * 2) RxQ[0] Cleanup
3597 * 3) Other (Link Status Change, etc.)
3598 * 4) TCP Timer (optional)
3599 */
3600 vector_threshold = MIN_MSIX_COUNT;
3601
3602 /* The more we get, the more we will assign to Tx/Rx Cleanup
3603 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
3604 * Right now, we simply care about how many we'll get; we'll
3605 * set them up later while requesting IRQs.
3606 */
3607 while (vectors >= vector_threshold) {
3608 err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
3609 vectors);
3610 if (!err) /* Success in acquiring all requested vectors. */
3611 break;
3612 else if (err < 0)
3613 vectors = 0; /* Nasty failure, quit now */
3614 else /* err == number of vectors we should try again with */
3615 vectors = err;
3616 }
3617
3618 if (vectors < vector_threshold) {
3619 /* Can't allocate enough MSI-X interrupts? Oh well.
3620 * This just means we'll go with either a single MSI
3621 * vector or fall back to legacy interrupts.
3622 */
3623 DPRINTK(HW, DEBUG, "Unable to allocate MSI-X interrupts\n");
3624 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
3625 kfree(adapter->msix_entries);
3626 adapter->msix_entries = NULL;
3627 } else {
3628 adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
3629 /*
3630 * Adjust for only the vectors we'll use, which is minimum
3631 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
3632 * vectors we were allocated.
3633 */
3634 adapter->num_msix_vectors = min(vectors,
3635 adapter->max_msix_q_vectors + NON_Q_VECTORS);
3636 }
3637 }
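
/*
 * Example of the pci_enable_msix() retry protocol used above, with
 * hypothetical vector counts: a request for 16 entries that returns 9
 * means only 9 vectors are available, so the loop retries with 9; a
 * negative return aborts by forcing vectors to 0.
 *
 *	request 16 -> returns 9	(only 9 available, retry)
 *	request  9 -> returns 0	(success, 9 vectors acquired)
 */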
3638
3639 /**
3640 * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
3641 * @adapter: board private structure to initialize
3642 *
3643 * Cache the descriptor ring offsets for RSS to the assigned rings.
3644 *
3645 **/
3646 static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
3647 {
3648 int i;
3649 bool ret = false;
3650
3651 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
3652 for (i = 0; i < adapter->num_rx_queues; i++)
3653 adapter->rx_ring[i]->reg_idx = i;
3654 for (i = 0; i < adapter->num_tx_queues; i++)
3655 adapter->tx_ring[i]->reg_idx = i;
3656 ret = true;
3657 } else {
3658 ret = false;
3659 }
3660
3661 return ret;
3662 }
3663
3664 #ifdef CONFIG_IXGBE_DCB
3665 /**
3666 * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
3667 * @adapter: board private structure to initialize
3668 *
3669 * Cache the descriptor ring offsets for DCB to the assigned rings.
3670 *
3671 **/
3672 static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
3673 {
3674 int i;
3675 bool ret = false;
3676 int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
3677
3678 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
3679 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3680 /* the number of queues is assumed to be symmetric */
3681 for (i = 0; i < dcb_i; i++) {
3682 adapter->rx_ring[i]->reg_idx = i << 3;
3683 adapter->tx_ring[i]->reg_idx = i << 2;
3684 }
3685 ret = true;
3686 } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
3687 if (dcb_i == 8) {
3688 /*
3689 * Tx TC0 starts at: descriptor queue 0
3690 * Tx TC1 starts at: descriptor queue 32
3691 * Tx TC2 starts at: descriptor queue 64
3692 * Tx TC3 starts at: descriptor queue 80
3693 * Tx TC4 starts at: descriptor queue 96
3694 * Tx TC5 starts at: descriptor queue 104
3695 * Tx TC6 starts at: descriptor queue 112
3696 * Tx TC7 starts at: descriptor queue 120
3697 *
3698 * Rx TC0-TC7 are offset by 16 queues each
3699 */
3700 for (i = 0; i < 3; i++) {
3701 adapter->tx_ring[i]->reg_idx = i << 5;
3702 adapter->rx_ring[i]->reg_idx = i << 4;
3703 }
3704 for ( ; i < 5; i++) {
3705 adapter->tx_ring[i]->reg_idx =
3706 ((i + 2) << 4);
3707 adapter->rx_ring[i]->reg_idx = i << 4;
3708 }
3709 for ( ; i < dcb_i; i++) {
3710 adapter->tx_ring[i]->reg_idx =
3711 ((i + 8) << 3);
3712 adapter->rx_ring[i]->reg_idx = i << 4;
3713 }
3714
3715 ret = true;
3716 } else if (dcb_i == 4) {
3717 /*
3718 * Tx TC0 starts at: descriptor queue 0
3719 * Tx TC1 starts at: descriptor queue 64
3720 * Tx TC2 starts at: descriptor queue 96
3721 * Tx TC3 starts at: descriptor queue 112
3722 *
3723 * Rx TC0-TC3 are offset by 32 queues each
3724 */
3725 adapter->tx_ring[0]->reg_idx = 0;
3726 adapter->tx_ring[1]->reg_idx = 64;
3727 adapter->tx_ring[2]->reg_idx = 96;
3728 adapter->tx_ring[3]->reg_idx = 112;
3729 for (i = 0 ; i < dcb_i; i++)
3730 adapter->rx_ring[i]->reg_idx = i << 5;
3731
3732 ret = true;
3733 } else {
3734 ret = false;
3735 }
3736 } else {
3737 ret = false;
3738 }
3739 } else {
3740 ret = false;
3741 }
3742
3743 return ret;
3744 }
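
/*
 * Worked example for the 82598 mapping above (illustrative): with
 * dcb_i == 8, Rx rings land on register indices 0, 8, 16, ..., 56
 * (i << 3) and Tx rings on 0, 4, 8, ..., 28 (i << 2), matching the
 * per-TC register strides of that MAC.
 */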
3745 #endif
3746
3747 /**
3748 * ixgbe_cache_ring_fdir - Descriptor ring to register mapping for Flow Director
3749 * @adapter: board private structure to initialize
3750 *
3751 * Cache the descriptor ring offsets for Flow Director to the assigned rings.
3752 *
3753 **/
3754 static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
3755 {
3756 int i;
3757 bool ret = false;
3758
3759 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED &&
3760 ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
3761 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))) {
3762 for (i = 0; i < adapter->num_rx_queues; i++)
3763 adapter->rx_ring[i]->reg_idx = i;
3764 for (i = 0; i < adapter->num_tx_queues; i++)
3765 adapter->tx_ring[i]->reg_idx = i;
3766 ret = true;
3767 }
3768
3769 return ret;
3770 }
3771
3772 #ifdef IXGBE_FCOE
3773 /**
3774 * ixgbe_cache_ring_fcoe - Descriptor ring to register mapping for the FCoE
3775 * @adapter: board private structure to initialize
3776 *
3777 * Cache the descriptor ring offsets for FCoE mode to the assigned rings.
3778 *
3779 */
3780 static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
3781 {
3782 int i, fcoe_rx_i = 0, fcoe_tx_i = 0;
3783 bool ret = false;
3784 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
3785
3786 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
3787 #ifdef CONFIG_IXGBE_DCB
3788 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
3789 struct ixgbe_fcoe *fcoe = &adapter->fcoe;
3790
3791 ixgbe_cache_ring_dcb(adapter);
3792 /* find out queues in TC for FCoE */
3793 fcoe_rx_i = adapter->rx_ring[fcoe->tc]->reg_idx + 1;
3794 fcoe_tx_i = adapter->tx_ring[fcoe->tc]->reg_idx + 1;
3795 /*
3796 * In 82599, the number of Tx queues for each traffic
3797 * class for both 8-TC and 4-TC modes are:
3798 * TCs : TC0 TC1 TC2 TC3 TC4 TC5 TC6 TC7
3799 * 8 TCs: 32 32 16 16 8 8 8 8
3800 * 4 TCs: 64 64 32 32
3801 * We have max 8 queues for FCoE, where 8 is the
3802 * FCoE redirection table size. If TC for FCoE is
3803 * less than or equal to TC3, we have enough queues
3804 * to add max of 8 queues for FCoE, so we start FCoE
3805 * tx descriptor from the next one, i.e., reg_idx + 1.
3806 * If TC for FCoE is above TC3, implying 8 TC mode,
3807 * and we need 8 for FCoE, we have to take all queues
3808 * in that traffic class for FCoE.
3809 */
3810 if ((f->indices == IXGBE_FCRETA_SIZE) && (fcoe->tc > 3))
3811 fcoe_tx_i--;
3812 }
3813 #endif /* CONFIG_IXGBE_DCB */
3814 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
3815 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
3816 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
3817 ixgbe_cache_ring_fdir(adapter);
3818 else
3819 ixgbe_cache_ring_rss(adapter);
3820
3821 fcoe_rx_i = f->mask;
3822 fcoe_tx_i = f->mask;
3823 }
3824 for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
3825 adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i;
3826 adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i;
3827 }
3828 ret = true;
3829 }
3830 return ret;
3831 }
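
/*
 * Example of the DCB adjustment above (assumed values): with a full
 * redirection table (f->indices == IXGBE_FCRETA_SIZE, i.e. 8 queues)
 * and fcoe->tc == 5, the TC only owns 8 Tx queues, so fcoe_tx_i is
 * backed up by one to start at the TC's own base queue instead of
 * reg_idx + 1.
 */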
3832
3833 #endif /* IXGBE_FCOE */
3834 /**
3835 * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov
3836 * @adapter: board private structure to initialize
3837 *
3838 * SR-IOV doesn't use any descriptor rings but changes the default if
3839 * no other mapping is used.
3840 *
3841 */
3842 static inline bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
3843 {
3844 adapter->rx_ring[0]->reg_idx = adapter->num_vfs * 2;
3845 adapter->tx_ring[0]->reg_idx = adapter->num_vfs * 2;
3846 if (adapter->num_vfs)
3847 return true;
3848 else
3849 return false;
3850 }
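
/*
 * Illustrative SR-IOV case: with num_vfs == 4, the PF's single Rx/Tx
 * pair is parked at register index 8 (num_vfs * 2), past the queue
 * pairs reserved for the virtual functions.
 */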
3851
3852 /**
3853 * ixgbe_cache_ring_register - Descriptor ring to register mapping
3854 * @adapter: board private structure to initialize
3855 *
3856 * Once we know the feature-set enabled for the device, we'll cache
3857 * the register offset the descriptor ring is assigned to.
3858 *
3859 * Note, the order of the various feature calls is important. It must start
3860 * with the most features enabled at the same time, then trickle down to the
3861 * fewest features turned on at once.
3862 **/
3863 static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
3864 {
3865 /* start with default case */
3866 adapter->rx_ring[0]->reg_idx = 0;
3867 adapter->tx_ring[0]->reg_idx = 0;
3868
3869 if (ixgbe_cache_ring_sriov(adapter))
3870 return;
3871
3872 #ifdef IXGBE_FCOE
3873 if (ixgbe_cache_ring_fcoe(adapter))
3874 return;
3875
3876 #endif /* IXGBE_FCOE */
3877 #ifdef CONFIG_IXGBE_DCB
3878 if (ixgbe_cache_ring_dcb(adapter))
3879 return;
3880
3881 #endif
3882 if (ixgbe_cache_ring_fdir(adapter))
3883 return;
3884
3885 if (ixgbe_cache_ring_rss(adapter))
3886 return;
3887 }
3888
3889 /**
3890 * ixgbe_alloc_queues - Allocate memory for all rings
3891 * @adapter: board private structure to initialize
3892 *
3893 * We allocate one ring per queue at run-time since we don't know the
3894 * number of queues at compile-time. Rings are allocated on the adapter's
3895 * NUMA node when one is assigned, falling back to any node otherwise.
3896 **/
3897 static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
3898 {
3899 int i;
3900 int orig_node = adapter->node;
3901
3902 for (i = 0; i < adapter->num_tx_queues; i++) {
3903 struct ixgbe_ring *ring = adapter->tx_ring[i];
3904 if (orig_node == -1) {
3905 int cur_node = next_online_node(adapter->node);
3906 if (cur_node == MAX_NUMNODES)
3907 cur_node = first_online_node;
3908 adapter->node = cur_node;
3909 }
3910 ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL,
3911 adapter->node);
3912 if (!ring)
3913 ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL);
3914 if (!ring)
3915 goto err_tx_ring_allocation;
3916 ring->count = adapter->tx_ring_count;
3917 ring->queue_index = i;
3918 ring->numa_node = adapter->node;
3919
3920 adapter->tx_ring[i] = ring;
3921 }
3922
3923 /* Restore the adapter's original node */
3924 adapter->node = orig_node;
3925
3926 for (i = 0; i < adapter->num_rx_queues; i++) {
3927 struct ixgbe_ring *ring = adapter->rx_ring[i];
3928 if (orig_node == -1) {
3929 int cur_node = next_online_node(adapter->node);
3930 if (cur_node == MAX_NUMNODES)
3931 cur_node = first_online_node;
3932 adapter->node = cur_node;
3933 }
3934 ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL,
3935 adapter->node);
3936 if (!ring)
3937 ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL);
3938 if (!ring)
3939 goto err_rx_ring_allocation;
3940 ring->count = adapter->rx_ring_count;
3941 ring->queue_index = i;
3942 ring->numa_node = adapter->node;
3943
3944 adapter->rx_ring[i] = ring;
3945 }
3946
3947 /* Restore the adapter's original node */
3948 adapter->node = orig_node;
3949
3950 ixgbe_cache_ring_register(adapter);
3951
3952 return 0;
3953
3954 err_rx_ring_allocation:
3955 for (i = 0; i < adapter->num_tx_queues; i++)
3956 kfree(adapter->tx_ring[i]);
3957 err_tx_ring_allocation:
3958 return -ENOMEM;
3959 }
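
/*
 * NUMA sketch for the loops above (hypothetical two-node system): when
 * adapter->node == -1, next_online_node() walks node 0 -> node 1 ->
 * MAX_NUMNODES, wrapping back to first_online_node, so successive ring
 * allocations alternate between node 0 and node 1.
 */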
3960
3961 /**
3962 * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
3963 * @adapter: board private structure to initialize
3964 *
3965 * Attempt to configure the interrupts using the best available
3966 * capabilities of the hardware and the kernel.
3967 **/
3968 static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
3969 {
3970 struct ixgbe_hw *hw = &adapter->hw;
3971 int err = 0;
3972 int vector, v_budget;
3973
3974 /*
3975 * It's easy to be greedy for MSI-X vectors, but it really
3976 * doesn't do us much good if we have a lot more vectors
3977 * than CPUs. So let's be conservative and only ask for
3978 * (roughly) the same number of vectors as there are CPUs.
3979 */
3980 v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
3981 (int)num_online_cpus()) + NON_Q_VECTORS;
3982
3983 /*
3984 * At the same time, hardware can only support a maximum of
3985 * hw.mac->max_msix_vectors vectors. With features
3986 * such as RSS and VMDq, we can easily surpass the number of Rx and Tx
3987 * descriptor queues supported by our device. Thus, we cap it off in
3988 * those rare cases where the cpu count also exceeds our vector limit.
3989 */
3990 v_budget = min(v_budget, (int)hw->mac.max_msix_vectors);
3991
3992 /* A failure in MSI-X entry allocation isn't fatal, but it does
3993 * mean we disable MSI-X capabilities of the adapter. */
3994 adapter->msix_entries = kcalloc(v_budget,
3995 sizeof(struct msix_entry), GFP_KERNEL);
3996 if (adapter->msix_entries) {
3997 for (vector = 0; vector < v_budget; vector++)
3998 adapter->msix_entries[vector].entry = vector;
3999
4000 ixgbe_acquire_msix_vectors(adapter, v_budget);
4001
4002 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
4003 goto out;
4004 }
4005
4006 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
4007 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
4008 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
4009 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
4010 adapter->atr_sample_rate = 0;
4011 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
4012 ixgbe_disable_sriov(adapter);
4013
4014 ixgbe_set_num_queues(adapter);
4015
4016 err = pci_enable_msi(adapter->pdev);
4017 if (!err) {
4018 adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
4019 } else {
4020 DPRINTK(HW, DEBUG, "Unable to allocate MSI interrupt, "
4021 "falling back to legacy. Error: %d\n", err);
4022 /* reset err */
4023 err = 0;
4024 }
4025
4026 out:
4027 return err;
4028 }
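
/*
 * Budget example (made-up counts): 8 Rx + 8 Tx queues on a 4-CPU system
 * gives v_budget = min(16, 4) + NON_Q_VECTORS, which is then clamped
 * again by hw->mac.max_msix_vectors before any MSI-X request is made.
 */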
4029
4030 /**
4031 * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
4032 * @adapter: board private structure to initialize
4033 *
4034 * We allocate one q_vector per queue interrupt. If allocation fails we
4035 * return -ENOMEM.
4036 **/
4037 static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
4038 {
4039 int q_idx, num_q_vectors;
4040 struct ixgbe_q_vector *q_vector;
4041 int napi_vectors;
4042 int (*poll)(struct napi_struct *, int);
4043
4044 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
4045 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
4046 napi_vectors = adapter->num_rx_queues;
4047 poll = &ixgbe_clean_rxtx_many;
4048 } else {
4049 num_q_vectors = 1;
4050 napi_vectors = 1;
4051 poll = &ixgbe_poll;
4052 }
4053
4054 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
4055 q_vector = kzalloc_node(sizeof(struct ixgbe_q_vector),
4056 GFP_KERNEL, adapter->node);
4057 if (!q_vector)
4058 q_vector = kzalloc(sizeof(struct ixgbe_q_vector),
4059 GFP_KERNEL);
4060 if (!q_vector)
4061 goto err_out;
4062 q_vector->adapter = adapter;
4063 if (q_vector->txr_count && !q_vector->rxr_count)
4064 q_vector->eitr = adapter->tx_eitr_param;
4065 else
4066 q_vector->eitr = adapter->rx_eitr_param;
4067 q_vector->v_idx = q_idx;
4068 netif_napi_add(adapter->netdev, &q_vector->napi, (*poll), 64);
4069 adapter->q_vector[q_idx] = q_vector;
4070 }
4071
4072 return 0;
4073
4074 err_out:
4075 while (q_idx) {
4076 q_idx--;
4077 q_vector = adapter->q_vector[q_idx];
4078 netif_napi_del(&q_vector->napi);
4079 kfree(q_vector);
4080 adapter->q_vector[q_idx] = NULL;
4081 }
4082 return -ENOMEM;
4083 }
4084
4085 /**
4086 * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
4087 * @adapter: board private structure to initialize
4088 *
4089 * This function frees the memory allocated to the q_vectors. In addition if
4090 * NAPI is enabled it will delete any references to the NAPI struct prior
4091 * to freeing the q_vector.
4092 **/
4093 static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
4094 {
4095 int q_idx, num_q_vectors;
4096
4097 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
4098 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
4099 else
4100 num_q_vectors = 1;
4101
4102 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
4103 struct ixgbe_q_vector *q_vector = adapter->q_vector[q_idx];
4104 adapter->q_vector[q_idx] = NULL;
4105 netif_napi_del(&q_vector->napi);
4106 kfree(q_vector);
4107 }
4108 }
4109
4110 static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
4111 {
4112 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
4113 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
4114 pci_disable_msix(adapter->pdev);
4115 kfree(adapter->msix_entries);
4116 adapter->msix_entries = NULL;
4117 } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
4118 adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
4119 pci_disable_msi(adapter->pdev);
4120 }
4121 return;
4122 }
4123
4124 /**
4125 * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
4126 * @adapter: board private structure to initialize
4127 *
4128 * We determine which interrupt scheme to use based on...
4129 * - Kernel support (MSI, MSI-X)
4130 * - which can be user-defined (via MODULE_PARAM)
4131 * - Hardware queue count (num_*_queues)
4132 * - defined by miscellaneous hardware support/features (RSS, etc.)
4133 **/
4134 int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
4135 {
4136 int err;
4137
4138 /* Number of supported queues */
4139 ixgbe_set_num_queues(adapter);
4140
4141 err = ixgbe_set_interrupt_capability(adapter);
4142 if (err) {
4143 DPRINTK(PROBE, ERR, "Unable to setup interrupt capabilities\n");
4144 goto err_set_interrupt;
4145 }
4146
4147 err = ixgbe_alloc_q_vectors(adapter);
4148 if (err) {
4149 DPRINTK(PROBE, ERR, "Unable to allocate memory for queue "
4150 "vectors\n");
4151 goto err_alloc_q_vectors;
4152 }
4153
4154 err = ixgbe_alloc_queues(adapter);
4155 if (err) {
4156 DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
4157 goto err_alloc_queues;
4158 }
4159
4160 DPRINTK(DRV, INFO, "Multiqueue %s: Rx Queue count = %u, "
4161 "Tx Queue count = %u\n",
4162 (adapter->num_rx_queues > 1) ? "Enabled" :
4163 "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
4164
4165 set_bit(__IXGBE_DOWN, &adapter->state);
4166
4167 return 0;
4168
4169 err_alloc_queues:
4170 ixgbe_free_q_vectors(adapter);
4171 err_alloc_q_vectors:
4172 ixgbe_reset_interrupt_capability(adapter);
4173 err_set_interrupt:
4174 return err;
4175 }
4176
4177 /**
4178 * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
4179 * @adapter: board private structure to clear interrupt scheme on
4180 *
4181 * We go through and clear interrupt-specific resources and reset the
4182 * structure to pre-load conditions.
4183 **/
4184 void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
4185 {
4186 int i;
4187
4188 for (i = 0; i < adapter->num_tx_queues; i++) {
4189 kfree(adapter->tx_ring[i]);
4190 adapter->tx_ring[i] = NULL;
4191 }
4192 for (i = 0; i < adapter->num_rx_queues; i++) {
4193 kfree(adapter->rx_ring[i]);
4194 adapter->rx_ring[i] = NULL;
4195 }
4196
4197 ixgbe_free_q_vectors(adapter);
4198 ixgbe_reset_interrupt_capability(adapter);
4199 }
4200
4201 /**
4202 * ixgbe_sfp_timer - timer callback to kick off the search for a missing module
4203 * @data: pointer to our adapter struct
4204 **/
4205 static void ixgbe_sfp_timer(unsigned long data)
4206 {
4207 struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
4208
4209 /*
4210 * Do the sfp_timer outside of interrupt context due to the
4211 * delays that sfp+ detection requires
4212 */
4213 schedule_work(&adapter->sfp_task);
4214 }
4215
4216 /**
4217 * ixgbe_sfp_task - worker thread to find a missing module
4218 * @work: pointer to work_struct containing our data
4219 **/
4220 static void ixgbe_sfp_task(struct work_struct *work)
4221 {
4222 struct ixgbe_adapter *adapter = container_of(work,
4223 struct ixgbe_adapter,
4224 sfp_task);
4225 struct ixgbe_hw *hw = &adapter->hw;
4226
4227 if ((hw->phy.type == ixgbe_phy_nl) &&
4228 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
4229 s32 ret = hw->phy.ops.identify_sfp(hw);
4230 if (ret == IXGBE_ERR_SFP_NOT_PRESENT)
4231 goto reschedule;
4232 ret = hw->phy.ops.reset(hw);
4233 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4234 dev_err(&adapter->pdev->dev, "failed to initialize "
4235 "because an unsupported SFP+ module type "
4236 "was detected.\n"
4237 "Reload the driver after installing a "
4238 "supported module.\n");
4239 unregister_netdev(adapter->netdev);
4240 } else {
4241 DPRINTK(PROBE, INFO, "detected SFP+: %d\n",
4242 hw->phy.sfp_type);
4243 }
4244 /* don't need this routine any more */
4245 clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
4246 }
4247 return;
4248 reschedule:
4249 if (test_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state))
4250 mod_timer(&adapter->sfp_timer,
4251 round_jiffies(jiffies + (2 * HZ)));
4252 }
4253
4254 /**
4255 * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
4256 * @adapter: board private structure to initialize
4257 *
4258 * ixgbe_sw_init initializes the Adapter private data structure.
4259 * Fields are initialized based on PCI device information and
4260 * OS network device settings (MTU size).
4261 **/
4262 static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
4263 {
4264 struct ixgbe_hw *hw = &adapter->hw;
4265 struct pci_dev *pdev = adapter->pdev;
4266 struct net_device *dev = adapter->netdev;
4267 unsigned int rss;
4268 #ifdef CONFIG_IXGBE_DCB
4269 int j;
4270 struct tc_configuration *tc;
4271 #endif
4272
4273 /* PCI config space info */
4274
4275 hw->vendor_id = pdev->vendor;
4276 hw->device_id = pdev->device;
4277 hw->revision_id = pdev->revision;
4278 hw->subsystem_vendor_id = pdev->subsystem_vendor;
4279 hw->subsystem_device_id = pdev->subsystem_device;
4280
4281 /* Set capability flags */
4282 rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus());
4283 adapter->ring_feature[RING_F_RSS].indices = rss;
4284 adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
4285 adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES;
4286 if (hw->mac.type == ixgbe_mac_82598EB) {
4287 if (hw->device_id == IXGBE_DEV_ID_82598AT)
4288 adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
4289 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598;
4290 } else if (hw->mac.type == ixgbe_mac_82599EB) {
4291 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
4292 adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
4293 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
4294 if (dev->features & NETIF_F_NTUPLE) {
4295 /* Flow Director perfect filter enabled */
4296 adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
4297 adapter->atr_sample_rate = 0;
4298 spin_lock_init(&adapter->fdir_perfect_lock);
4299 } else {
4300 /* Flow Director hash filters enabled */
4301 adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
4302 adapter->atr_sample_rate = 20;
4303 }
4304 adapter->ring_feature[RING_F_FDIR].indices =
4305 IXGBE_MAX_FDIR_INDICES;
4306 adapter->fdir_pballoc = 0;
4307 #ifdef IXGBE_FCOE
4308 adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
4309 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
4310 adapter->ring_feature[RING_F_FCOE].indices = 0;
4311 #ifdef CONFIG_IXGBE_DCB
4312 /* Default traffic class to use for FCoE */
4313 adapter->fcoe.tc = IXGBE_FCOE_DEFTC;
4314 #endif
4315 #endif /* IXGBE_FCOE */
4316 }
4317
4318 #ifdef CONFIG_IXGBE_DCB
4319 /* Configure DCB traffic classes */
4320 for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
4321 tc = &adapter->dcb_cfg.tc_config[j];
4322 tc->path[DCB_TX_CONFIG].bwg_id = 0;
4323 tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
4324 tc->path[DCB_RX_CONFIG].bwg_id = 0;
4325 tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
4326 tc->dcb_pfc = pfc_disabled;
4327 }
4328 adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
4329 adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
4330 adapter->dcb_cfg.rx_pba_cfg = pba_equal;
4331 adapter->dcb_cfg.pfc_mode_enable = false;
4332 adapter->dcb_cfg.round_robin_enable = false;
4333 adapter->dcb_set_bitmap = 0x00;
4334 ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg,
4335 adapter->ring_feature[RING_F_DCB].indices);
4336
4337 #endif
4338
4339 /* default flow control settings */
4340 hw->fc.requested_mode = ixgbe_fc_full;
4341 hw->fc.current_mode = ixgbe_fc_full; /* init for ethtool output */
4342 #ifdef CONFIG_DCB
4343 adapter->last_lfc_mode = hw->fc.current_mode;
4344 #endif
4345 hw->fc.high_water = IXGBE_DEFAULT_FCRTH;
4346 hw->fc.low_water = IXGBE_DEFAULT_FCRTL;
4347 hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
4348 hw->fc.send_xon = true;
4349 hw->fc.disable_fc_autoneg = false;
4350
4351 /* enable itr by default in dynamic mode */
4352 adapter->rx_itr_setting = 1;
4353 adapter->rx_eitr_param = 20000;
4354 adapter->tx_itr_setting = 1;
4355 adapter->tx_eitr_param = 10000;
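/*
 * Note: the *_eitr_param values above are interrupt-rate targets in
 * interrupts/second (20000 Rx, 10000 Tx), while an itr_setting of 1
 * requests dynamic moderation around those rates.
 */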
4356
4357 /* set default throughput thresholds (in megabytes) for eitr tuning */
4358 adapter->eitr_low = 10;
4359 adapter->eitr_high = 20;
4360
4361 /* set default ring sizes */
4362 adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
4363 adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
4364
4365 /* initialize eeprom parameters */
4366 if (ixgbe_init_eeprom_params_generic(hw)) {
4367 dev_err(&pdev->dev, "EEPROM initialization failed\n");
4368 return -EIO;
4369 }
4370
4371 /* enable rx csum by default */
4372 adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
4373
4374 /* get assigned NUMA node */
4375 adapter->node = dev_to_node(&pdev->dev);
4376
4377 set_bit(__IXGBE_DOWN, &adapter->state);
4378
4379 return 0;
4380 }
4381
4382 /**
4383 * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
4384 * @adapter: board private structure
4385 * @tx_ring: tx descriptor ring (for a specific queue) to setup
4386 *
4387 * Return 0 on success, negative on failure
4388 **/
4389 int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
4390 struct ixgbe_ring *tx_ring)
4391 {
4392 struct pci_dev *pdev = adapter->pdev;
4393 int size;
4394
4395 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
4396 tx_ring->tx_buffer_info = vmalloc_node(size, tx_ring->numa_node);
4397 if (!tx_ring->tx_buffer_info)
4398 tx_ring->tx_buffer_info = vmalloc(size);
4399 if (!tx_ring->tx_buffer_info)
4400 goto err;
4401 memset(tx_ring->tx_buffer_info, 0, size);
4402
4403 /* round up to nearest 4K */
4404 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
4405 tx_ring->size = ALIGN(tx_ring->size, 4096);
4406
4407 tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
4408 &tx_ring->dma);
4409 if (!tx_ring->desc)
4410 goto err;
4411
4412 tx_ring->next_to_use = 0;
4413 tx_ring->next_to_clean = 0;
4414 tx_ring->work_limit = tx_ring->count;
4415 return 0;
4416
4417 err:
4418 vfree(tx_ring->tx_buffer_info);
4419 tx_ring->tx_buffer_info = NULL;
4420 DPRINTK(PROBE, ERR, "Unable to allocate memory for the transmit "
4421 "descriptor ring\n");
4422 return -ENOMEM;
4423 }
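
/*
 * Sizing example (assuming the default of 512 descriptors): 512 entries
 * at 16 bytes each (sizeof(union ixgbe_adv_tx_desc)) is 8192 bytes,
 * which ALIGN(size, 4096) leaves untouched; a count of 520 would round
 * 8320 up to 12288.
 */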
4424
4425 /**
4426 * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
4427 * @adapter: board private structure
4428 *
4429 * If this function returns with an error, then it's possible one or
4430 * more of the rings is populated (while the rest are not). It is the
4431 * caller's duty to clean those orphaned rings.
4432 *
4433 * Return 0 on success, negative on failure
4434 **/
4435 static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
4436 {
4437 int i, err = 0;
4438
4439 for (i = 0; i < adapter->num_tx_queues; i++) {
4440 err = ixgbe_setup_tx_resources(adapter, adapter->tx_ring[i]);
4441 if (!err)
4442 continue;
4443 DPRINTK(PROBE, ERR, "Allocation for Tx Queue %u failed\n", i);
4444 break;
4445 }
4446
4447 return err;
4448 }
4449
4450 /**
4451 * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
4452 * @adapter: board private structure
4453 * @rx_ring: rx descriptor ring (for a specific queue) to setup
4454 *
4455 * Returns 0 on success, negative on failure
4456 **/
4457 int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
4458 struct ixgbe_ring *rx_ring)
4459 {
4460 struct pci_dev *pdev = adapter->pdev;
4461 int size;
4462
4463 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
4464 rx_ring->rx_buffer_info = vmalloc_node(size, adapter->node);
4465 if (!rx_ring->rx_buffer_info)
4466 rx_ring->rx_buffer_info = vmalloc(size);
4467 if (!rx_ring->rx_buffer_info) {
4468 DPRINTK(PROBE, ERR,
4469 "vmalloc allocation failed for the rx desc ring\n");
4470 goto alloc_failed;
4471 }
4472 memset(rx_ring->rx_buffer_info, 0, size);
4473
4474 /* Round up to nearest 4K */
4475 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
4476 rx_ring->size = ALIGN(rx_ring->size, 4096);
4477
4478 rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size, &rx_ring->dma);
4479
4480 if (!rx_ring->desc) {
4481 DPRINTK(PROBE, ERR,
4482 "Memory allocation failed for the rx desc ring\n");
4483 vfree(rx_ring->rx_buffer_info);
4484 goto alloc_failed;
4485 }
4486
4487 rx_ring->next_to_clean = 0;
4488 rx_ring->next_to_use = 0;
4489
4490 return 0;
4491
4492 alloc_failed:
4493 return -ENOMEM;
4494 }
4495
4496 /**
4497 * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
4498 * @adapter: board private structure
4499 *
4500 * If this function returns with an error, then it's possible one or
4501 * more of the rings is populated (while the rest are not). It is the
4502 * caller's duty to clean those orphaned rings.
4503 *
4504 * Return 0 on success, negative on failure
4505 **/
4506
4507 static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
4508 {
4509 int i, err = 0;
4510
4511 for (i = 0; i < adapter->num_rx_queues; i++) {
4512 err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]);
4513 if (!err)
4514 continue;
4515 DPRINTK(PROBE, ERR, "Allocation for Rx Queue %u failed\n", i);
4516 break;
4517 }
4518
4519 return err;
4520 }
4521
4522 /**
4523 * ixgbe_free_tx_resources - Free Tx Resources per Queue
4524 * @adapter: board private structure
4525 * @tx_ring: Tx descriptor ring for a specific queue
4526 *
4527 * Free all transmit software resources
4528 **/
4529 void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter,
4530 struct ixgbe_ring *tx_ring)
4531 {
4532 struct pci_dev *pdev = adapter->pdev;
4533
4534 ixgbe_clean_tx_ring(adapter, tx_ring);
4535
4536 vfree(tx_ring->tx_buffer_info);
4537 tx_ring->tx_buffer_info = NULL;
4538
4539 pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
4540
4541 tx_ring->desc = NULL;
4542 }
4543
4544 /**
4545 * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues
4546 * @adapter: board private structure
4547 *
4548 * Free all transmit software resources
4549 **/
4550 static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
4551 {
4552 int i;
4553
4554 for (i = 0; i < adapter->num_tx_queues; i++)
4555 if (adapter->tx_ring[i]->desc)
4556 ixgbe_free_tx_resources(adapter, adapter->tx_ring[i]);
4557 }
4558
4559 /**
4560 * ixgbe_free_rx_resources - Free Rx Resources
4561 * @adapter: board private structure
4562 * @rx_ring: ring to clean the resources from
4563 *
4564 * Free all receive software resources
4565 **/
4566 void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
4567 struct ixgbe_ring *rx_ring)
4568 {
4569 struct pci_dev *pdev = adapter->pdev;
4570
4571 ixgbe_clean_rx_ring(adapter, rx_ring);
4572
4573 vfree(rx_ring->rx_buffer_info);
4574 rx_ring->rx_buffer_info = NULL;
4575
4576 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
4577
4578 rx_ring->desc = NULL;
4579 }
4580
4581 /**
4582 * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues
4583 * @adapter: board private structure
4584 *
4585 * Free all receive software resources
4586 **/
4587 static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
4588 {
4589 int i;
4590
4591 for (i = 0; i < adapter->num_rx_queues; i++)
4592 if (adapter->rx_ring[i]->desc)
4593 ixgbe_free_rx_resources(adapter, adapter->rx_ring[i]);
4594 }
4595
4596 /**
4597 * ixgbe_change_mtu - Change the Maximum Transfer Unit
4598 * @netdev: network interface device structure
4599 * @new_mtu: new value for maximum frame size
4600 *
4601 * Returns 0 on success, negative on failure
4602 **/
4603 static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
4604 {
4605 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4606 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
4607
4608 /* MTU < 68 is an error and causes problems on some kernels */
4609 if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
4610 return -EINVAL;
4611
4612 DPRINTK(PROBE, INFO, "changing MTU from %d to %d\n",
4613 netdev->mtu, new_mtu);
4614 /* must set new MTU before calling down or up */
4615 netdev->mtu = new_mtu;
4616
4617 if (netif_running(netdev))
4618 ixgbe_reinit_locked(adapter);
4619
4620 return 0;
4621 }
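
/*
 * Frame-size arithmetic used above: a jumbo MTU of 9000 yields
 * max_frame = 9000 + ETH_HLEN (14) + ETH_FCS_LEN (4) = 9018 bytes,
 * which must still fit under IXGBE_MAX_JUMBO_FRAME_SIZE.
 */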
4622
4623 /**
4624 * ixgbe_open - Called when a network interface is made active
4625 * @netdev: network interface device structure
4626 *
4627 * Returns 0 on success, negative value on failure
4628 *
4629 * The open entry point is called when a network interface is made
4630 * active by the system (IFF_UP). At this point all resources needed
4631 * for transmit and receive operations are allocated, the interrupt
4632 * handler is registered with the OS, the watchdog timer is started,
4633 * and the stack is notified that the interface is ready.
4634 **/
4635 static int ixgbe_open(struct net_device *netdev)
4636 {
4637 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4638 int err;
4639
4640 /* disallow open during test */
4641 if (test_bit(__IXGBE_TESTING, &adapter->state))
4642 return -EBUSY;
4643
4644 netif_carrier_off(netdev);
4645
4646 /* allocate transmit descriptors */
4647 err = ixgbe_setup_all_tx_resources(adapter);
4648 if (err)
4649 goto err_setup_tx;
4650
4651 /* allocate receive descriptors */
4652 err = ixgbe_setup_all_rx_resources(adapter);
4653 if (err)
4654 goto err_setup_rx;
4655
4656 ixgbe_configure(adapter);
4657
4658 err = ixgbe_request_irq(adapter);
4659 if (err)
4660 goto err_req_irq;
4661
4662 err = ixgbe_up_complete(adapter);
4663 if (err)
4664 goto err_up;
4665
4666 netif_tx_start_all_queues(netdev);
4667
4668 return 0;
4669
4670 err_up:
4671 ixgbe_release_hw_control(adapter);
4672 ixgbe_free_irq(adapter);
4673 err_req_irq:
4674 err_setup_rx:
4675 ixgbe_free_all_rx_resources(adapter);
4676 err_setup_tx:
4677 ixgbe_free_all_tx_resources(adapter);
4678 ixgbe_reset(adapter);
4679
4680 return err;
4681 }
4682
4683 /**
4684 * ixgbe_close - Disables a network interface
4685 * @netdev: network interface device structure
4686 *
4687 * Returns 0, this is not allowed to fail
4688 *
4689 * The close entry point is called when an interface is de-activated
4690 * by the OS. The hardware is still under the drivers control, but
4691 * needs to be disabled. A global MAC reset is issued to stop the
4692 * hardware, and all transmit and receive resources are freed.
4693 **/
4694 static int ixgbe_close(struct net_device *netdev)
4695 {
4696 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4697
4698 ixgbe_down(adapter);
4699 ixgbe_free_irq(adapter);
4700
4701 ixgbe_free_all_tx_resources(adapter);
4702 ixgbe_free_all_rx_resources(adapter);
4703
4704 ixgbe_release_hw_control(adapter);
4705
4706 return 0;
4707 }
4708
4709 #ifdef CONFIG_PM
4710 static int ixgbe_resume(struct pci_dev *pdev)
4711 {
4712 struct net_device *netdev = pci_get_drvdata(pdev);
4713 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4714 int err;
4715
4716 pci_set_power_state(pdev, PCI_D0);
4717 pci_restore_state(pdev);
4718 /*
4719 * pci_restore_state clears dev->state_saved so call
4720 * pci_save_state to restore it.
4721 */
4722 pci_save_state(pdev);
4723
4724 err = pci_enable_device_mem(pdev);
4725 if (err) {
4726 printk(KERN_ERR "ixgbe: Cannot enable PCI device from "
4727 "suspend\n");
4728 return err;
4729 }
4730 pci_set_master(pdev);
4731
4732 pci_wake_from_d3(pdev, false);
4733
4734 err = ixgbe_init_interrupt_scheme(adapter);
4735 if (err) {
4736 printk(KERN_ERR "ixgbe: Cannot initialize interrupts for "
4737 "device\n");
4738 return err;
4739 }
4740
4741 ixgbe_reset(adapter);
4742
4743 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
4744
4745 if (netif_running(netdev)) {
4746 err = ixgbe_open(adapter->netdev);
4747 if (err)
4748 return err;
4749 }
4750
4751 netif_device_attach(netdev);
4752
4753 return 0;
4754 }
4755 #endif /* CONFIG_PM */
4756
4757 static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
4758 {
4759 struct net_device *netdev = pci_get_drvdata(pdev);
4760 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4761 struct ixgbe_hw *hw = &adapter->hw;
4762 u32 ctrl, fctrl;
4763 u32 wufc = adapter->wol;
4764 #ifdef CONFIG_PM
4765 int retval = 0;
4766 #endif
4767
4768 netif_device_detach(netdev);
4769
4770 if (netif_running(netdev)) {
4771 ixgbe_down(adapter);
4772 ixgbe_free_irq(adapter);
4773 ixgbe_free_all_tx_resources(adapter);
4774 ixgbe_free_all_rx_resources(adapter);
4775 }
4776 ixgbe_clear_interrupt_scheme(adapter);
4777
4778 #ifdef CONFIG_PM
4779 retval = pci_save_state(pdev);
4780 if (retval)
4781 return retval;
4782
4783 #endif
4784 if (wufc) {
4785 ixgbe_set_rx_mode(netdev);
4786
4787 /* turn on all-multi mode if wake on multicast is enabled */
4788 if (wufc & IXGBE_WUFC_MC) {
4789 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4790 fctrl |= IXGBE_FCTRL_MPE;
4791 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4792 }
4793
4794 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
4795 ctrl |= IXGBE_CTRL_GIO_DIS;
4796 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
4797
4798 IXGBE_WRITE_REG(hw, IXGBE_WUFC, wufc);
4799 } else {
4800 IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
4801 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
4802 }
4803
4804 if (wufc && hw->mac.type == ixgbe_mac_82599EB)
4805 pci_wake_from_d3(pdev, true);
4806 else
4807 pci_wake_from_d3(pdev, false);
4808
4809 *enable_wake = !!wufc;
4810
4811 ixgbe_release_hw_control(adapter);
4812
4813 pci_disable_device(pdev);
4814
4815 return 0;
4816 }
4817
4818 #ifdef CONFIG_PM
4819 static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
4820 {
4821 int retval;
4822 bool wake;
4823
4824 retval = __ixgbe_shutdown(pdev, &wake);
4825 if (retval)
4826 return retval;
4827
4828 if (wake) {
4829 pci_prepare_to_sleep(pdev);
4830 } else {
4831 pci_wake_from_d3(pdev, false);
4832 pci_set_power_state(pdev, PCI_D3hot);
4833 }
4834
4835 return 0;
4836 }
4837 #endif /* CONFIG_PM */
4838
4839 static void ixgbe_shutdown(struct pci_dev *pdev)
4840 {
4841 bool wake;
4842
4843 __ixgbe_shutdown(pdev, &wake);
4844
4845 if (system_state == SYSTEM_POWER_OFF) {
4846 pci_wake_from_d3(pdev, wake);
4847 pci_set_power_state(pdev, PCI_D3hot);
4848 }
4849 }
4850
4851 /**
4852 * ixgbe_update_stats - Update the board statistics counters.
4853 * @adapter: board private structure
4854 **/
4855 void ixgbe_update_stats(struct ixgbe_adapter *adapter)
4856 {
4857 struct net_device *netdev = adapter->netdev;
4858 struct ixgbe_hw *hw = &adapter->hw;
4859 u64 total_mpc = 0;
4860 u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
4861 u64 non_eop_descs = 0, restart_queue = 0;
4862
4863 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
4864 u64 rsc_count = 0;
4865 u64 rsc_flush = 0;
4866 for (i = 0; i < 16; i++)
4867 adapter->hw_rx_no_dma_resources +=
4868 IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
4869 for (i = 0; i < adapter->num_rx_queues; i++) {
4870 rsc_count += adapter->rx_ring[i]->rsc_count;
4871 rsc_flush += adapter->rx_ring[i]->rsc_flush;
4872 }
4873 adapter->rsc_total_count = rsc_count;
4874 adapter->rsc_total_flush = rsc_flush;
4875 }
4876
4877 /* gather some stats to the adapter struct that are per queue */
4878 for (i = 0; i < adapter->num_tx_queues; i++)
4879 restart_queue += adapter->tx_ring[i]->restart_queue;
4880 adapter->restart_queue = restart_queue;
4881
4882 for (i = 0; i < adapter->num_rx_queues; i++)
4883 non_eop_descs += adapter->rx_ring[i]->non_eop_descs;
4884 adapter->non_eop_descs = non_eop_descs;
4885
4886 adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
4887 for (i = 0; i < 8; i++) {
4888 /* for packet buffers not used, the register should read 0 */
4889 mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
4890 missed_rx += mpc;
4891 adapter->stats.mpc[i] += mpc;
4892 total_mpc += adapter->stats.mpc[i];
4893 if (hw->mac.type == ixgbe_mac_82598EB)
4894 adapter->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
4895 adapter->stats.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
4896 adapter->stats.qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
4897 adapter->stats.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
4898 adapter->stats.qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
4899 if (hw->mac.type == ixgbe_mac_82599EB) {
4900 adapter->stats.pxonrxc[i] += IXGBE_READ_REG(hw,
4901 IXGBE_PXONRXCNT(i));
4902 adapter->stats.pxoffrxc[i] += IXGBE_READ_REG(hw,
4903 IXGBE_PXOFFRXCNT(i));
4904 adapter->stats.qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
4905 } else {
4906 adapter->stats.pxonrxc[i] += IXGBE_READ_REG(hw,
4907 IXGBE_PXONRXC(i));
4908 adapter->stats.pxoffrxc[i] += IXGBE_READ_REG(hw,
4909 IXGBE_PXOFFRXC(i));
4910 }
4911 adapter->stats.pxontxc[i] += IXGBE_READ_REG(hw,
4912 IXGBE_PXONTXC(i));
4913 adapter->stats.pxofftxc[i] += IXGBE_READ_REG(hw,
4914 IXGBE_PXOFFTXC(i));
4915 }
4916 adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
4917 /* work around hardware counting issue */
4918 adapter->stats.gprc -= missed_rx;
4919
4920 /* 82598 hardware only has a 32 bit counter in the high register */
4921 if (hw->mac.type == ixgbe_mac_82599EB) {
4922 u64 tmp;
4923 adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
4924 tmp = IXGBE_READ_REG(hw, IXGBE_GORCH) & 0xF; /* 4 high bits of GORC */
4925 adapter->stats.gorc += (tmp << 32);
4926 adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
4927 tmp = IXGBE_READ_REG(hw, IXGBE_GOTCH) & 0xF; /* 4 high bits of GOTC */
4928 adapter->stats.gotc += (tmp << 32);
4929 adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORL);
4930 IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
4931 adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
4932 adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
4933 adapter->stats.fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
4934 adapter->stats.fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
4935 #ifdef IXGBE_FCOE
4936 adapter->stats.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
4937 adapter->stats.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
4938 adapter->stats.fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
4939 adapter->stats.fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
4940 adapter->stats.fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
4941 adapter->stats.fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
4942 #endif /* IXGBE_FCOE */
4943 } else {
4944 adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
4945 adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
4946 adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
4947 adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
4948 adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
4949 }
4950 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
4951 adapter->stats.bprc += bprc;
4952 adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
4953 if (hw->mac.type == ixgbe_mac_82598EB)
4954 adapter->stats.mprc -= bprc;
4955 adapter->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
4956 adapter->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
4957 adapter->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
4958 adapter->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
4959 adapter->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
4960 adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
4961 adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
4962 adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
4963 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
4964 adapter->stats.lxontxc += lxon;
4965 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
4966 adapter->stats.lxofftxc += lxoff;
4967 adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
4968 adapter->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
4969 adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
4970 /*
4971 * 82598 errata - tx of flow control packets is included in tx counters
4972 */
4973 xon_off_tot = lxon + lxoff;
4974 adapter->stats.gptc -= xon_off_tot;
4975 adapter->stats.mptc -= xon_off_tot;
4976 adapter->stats.gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
4977 adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
4978 adapter->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
4979 adapter->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
4980 adapter->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
4981 adapter->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
4982 adapter->stats.ptc64 -= xon_off_tot;
4983 adapter->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
4984 adapter->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
4985 adapter->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
4986 adapter->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
4987 adapter->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
4988 adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
4989
4990 /* Fill out the OS statistics structure */
4991 netdev->stats.multicast = adapter->stats.mprc;
4992
4993 /* Rx Errors */
4994 netdev->stats.rx_errors = adapter->stats.crcerrs +
4995 adapter->stats.rlec;
4996 netdev->stats.rx_dropped = 0;
4997 netdev->stats.rx_length_errors = adapter->stats.rlec;
4998 netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
4999 netdev->stats.rx_missed_errors = total_mpc;
5000 }
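
/*
 * 82599 octet-counter example (illustrative values): GORC is 36 bits
 * wide, so a GORCL of 0x12345678 with a GORCH low nibble of 0x3
 * accumulates as (0x3ULL << 32) | 0x12345678 -- the (tmp << 32) above
 * splices the 4 high bits onto the 32-bit low half.
 */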
5001
5002 /**
5003 * ixgbe_watchdog - Timer Call-back
5004 * @data: pointer to adapter cast into an unsigned long
5005 **/
5006 static void ixgbe_watchdog(unsigned long data)
5007 {
5008 struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
5009 struct ixgbe_hw *hw = &adapter->hw;
5010 u64 eics = 0;
5011 int i;
5012
5013 /*
5014 * Do the watchdog outside of interrupt context due to the lovely
5015 * delays that some of the newer hardware requires
5016 */
5017
5018 if (test_bit(__IXGBE_DOWN, &adapter->state))
5019 goto watchdog_short_circuit;
5020
5021 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
5022 /*
5023 * for legacy and MSI interrupts don't set any bits
5024 * that are enabled for EIAM, because this operation
5025 * would set *both* EIMS and EICS for any bit in EIAM
5026 */
5027 IXGBE_WRITE_REG(hw, IXGBE_EICS,
5028 (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
5029 goto watchdog_reschedule;
5030 }
5031
5032 /* get one bit for every active tx/rx interrupt vector */
5033 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
5034 struct ixgbe_q_vector *qv = adapter->q_vector[i];
5035 if (qv->rxr_count || qv->txr_count)
5036 eics |= ((u64)1 << i);
5037 }
5038
5039 /* Cause software interrupt to ensure rx rings are cleaned */
5040 ixgbe_irq_rearm_queues(adapter, eics);
5041
5042 watchdog_reschedule:
5043 /* Reset the timer */
5044 mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 2 * HZ));
5045
5046 watchdog_short_circuit:
5047 schedule_work(&adapter->watchdog_task);
5048 }
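
/*
 * EICS example (assumed vector layout): with four MSI-X vectors that
 * each own at least one ring, the loop above builds eics = 0xf, and the
 * rearm forces a software interrupt on vectors 0-3 so any rings with
 * stalled cleanup still get serviced.
 */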
5049
5050 /**
5051 * ixgbe_multispeed_fiber_task - worker thread to configure multispeed fiber
5052 * @work: pointer to work_struct containing our data
5053 **/
5054 static void ixgbe_multispeed_fiber_task(struct work_struct *work)
5055 {
5056 struct ixgbe_adapter *adapter = container_of(work,
5057 struct ixgbe_adapter,
5058 multispeed_fiber_task);
5059 struct ixgbe_hw *hw = &adapter->hw;
5060 u32 autoneg;
5061 bool negotiation;
5062
5063 adapter->flags |= IXGBE_FLAG_IN_SFP_LINK_TASK;
5064 autoneg = hw->phy.autoneg_advertised;
5065 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
5066 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation);
5067 hw->mac.autotry_restart = false;
5068 if (hw->mac.ops.setup_link)
5069 hw->mac.ops.setup_link(hw, autoneg, negotiation, true);
5070 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
5071 adapter->flags &= ~IXGBE_FLAG_IN_SFP_LINK_TASK;
5072 }
5073
5074 /**
5075 * ixgbe_sfp_config_module_task - worker thread to configure a new SFP+ module
5076 * @work: pointer to work_struct containing our data
5077 **/
5078 static void ixgbe_sfp_config_module_task(struct work_struct *work)
5079 {
5080 struct ixgbe_adapter *adapter = container_of(work,
5081 struct ixgbe_adapter,
5082 sfp_config_module_task);
5083 struct ixgbe_hw *hw = &adapter->hw;
5084 s32 err;
5085
5086 adapter->flags |= IXGBE_FLAG_IN_SFP_MOD_TASK;
5087
5088 /* Time for electrical oscillations to settle down */
5089 msleep(100);
5090 err = hw->phy.ops.identify_sfp(hw);
5091
5092 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
5093 dev_err(&adapter->pdev->dev, "failed to initialize because "
5094 "an unsupported SFP+ module type was detected.\n"
5095 "Reload the driver after installing a supported "
5096 "module.\n");
5097 unregister_netdev(adapter->netdev);
5098 return;
5099 }
5100 hw->mac.ops.setup_sfp(hw);
5101
5102 if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
5103 /* This will also work for DA Twinax connections */
5104 schedule_work(&adapter->multispeed_fiber_task);
5105 adapter->flags &= ~IXGBE_FLAG_IN_SFP_MOD_TASK;
5106 }
5107
5108 /**
5109 * ixgbe_fdir_reinit_task - worker thread to reinit FDIR filter table
5110 * @work: pointer to work_struct containing our data
5111 **/
5112 static void ixgbe_fdir_reinit_task(struct work_struct *work)
5113 {
5114 struct ixgbe_adapter *adapter = container_of(work,
5115 struct ixgbe_adapter,
5116 fdir_reinit_task);
5117 struct ixgbe_hw *hw = &adapter->hw;
5118 int i;
5119
5120 if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
5121 for (i = 0; i < adapter->num_tx_queues; i++)
5122 set_bit(__IXGBE_FDIR_INIT_DONE,
5123 &(adapter->tx_ring[i]->reinit_state));
5124 } else {
5125 DPRINTK(PROBE, ERR, "failed to finish FDIR re-initialization, "
5126 "ignoring FDIR ATR filter additions\n");
5127 }
5128 /* Done FDIR Re-initialization, enable transmits */
5129 netif_tx_start_all_queues(adapter->netdev);
5130 }
5131
5132 static DEFINE_MUTEX(ixgbe_watchdog_lock);
5133
5134 /**
5135 * ixgbe_watchdog_task - worker thread to bring link up
5136 * @work: pointer to work_struct containing our data
5137 **/
5138 static void ixgbe_watchdog_task(struct work_struct *work)
5139 {
5140 struct ixgbe_adapter *adapter = container_of(work,
5141 struct ixgbe_adapter,
5142 watchdog_task);
5143 struct net_device *netdev = adapter->netdev;
5144 struct ixgbe_hw *hw = &adapter->hw;
5145 u32 link_speed;
5146 bool link_up;
5147 int i;
5148 struct ixgbe_ring *tx_ring;
5149 int some_tx_pending = 0;
5150
5151 mutex_lock(&ixgbe_watchdog_lock);
5152
5153 link_up = adapter->link_up;
5154 link_speed = adapter->link_speed;
5155
5156 if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
5157 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
5158 if (link_up) {
5159 #ifdef CONFIG_DCB
5160 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
5161 for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
5162 hw->mac.ops.fc_enable(hw, i);
5163 } else {
5164 hw->mac.ops.fc_enable(hw, 0);
5165 }
5166 #else
5167 hw->mac.ops.fc_enable(hw, 0);
5168 #endif
5169 }
5170
5171 if (link_up ||
5172 time_after(jiffies, (adapter->link_check_timeout +
5173 IXGBE_TRY_LINK_TIMEOUT))) {
5174 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
5175 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
5176 }
5177 adapter->link_up = link_up;
5178 adapter->link_speed = link_speed;
5179 }
5180
5181 if (link_up) {
5182 if (!netif_carrier_ok(netdev)) {
5183 bool flow_rx, flow_tx;
5184
5185 if (hw->mac.type == ixgbe_mac_82599EB) {
5186 u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
5187 u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
5188 flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
5189 flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
5190 } else {
5191 u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
5192 u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
5193 flow_rx = !!(frctl & IXGBE_FCTRL_RFCE);
5194 flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
5195 }
5196
5197 printk(KERN_INFO "ixgbe: %s NIC Link is Up %s, "
5198 "Flow Control: %s\n",
5199 netdev->name,
5200 (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
5201 "10 Gbps" :
5202 (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
5203 "1 Gbps" : "unknown speed")),
5204 ((flow_rx && flow_tx) ? "RX/TX" :
5205 (flow_rx ? "RX" :
5206 (flow_tx ? "TX" : "None"))));
5207
5208 netif_carrier_on(netdev);
5209 } else {
5210 /* Force detection of hung controller */
5211 adapter->detect_tx_hung = true;
5212 }
5213 } else {
5214 adapter->link_up = false;
5215 adapter->link_speed = 0;
5216 if (netif_carrier_ok(netdev)) {
5217 printk(KERN_INFO "ixgbe: %s NIC Link is Down\n",
5218 netdev->name);
5219 netif_carrier_off(netdev);
5220 }
5221 }
5222
5223 if (!netif_carrier_ok(netdev)) {
5224 for (i = 0; i < adapter->num_tx_queues; i++) {
5225 tx_ring = adapter->tx_ring[i];
5226 if (tx_ring->next_to_use != tx_ring->next_to_clean) {
5227 some_tx_pending = 1;
5228 break;
5229 }
5230 }
5231
5232 if (some_tx_pending) {
5233 /* We've lost link, so the controller stops DMA,
5234 * but we've got queued Tx work that's never going
5235 * to get done, so reset controller to flush Tx.
5236 * (Do the reset outside of interrupt context).
5237 */
5238 schedule_work(&adapter->reset_task);
5239 }
5240 }
5241
5242 ixgbe_update_stats(adapter);
5243 mutex_unlock(&ixgbe_watchdog_lock);
5244 }
5245
5246 static int ixgbe_tso(struct ixgbe_adapter *adapter,
5247 struct ixgbe_ring *tx_ring, struct sk_buff *skb,
5248 u32 tx_flags, u8 *hdr_len)
5249 {
5250 struct ixgbe_adv_tx_context_desc *context_desc;
5251 unsigned int i;
5252 int err;
5253 struct ixgbe_tx_buffer *tx_buffer_info;
5254 u32 vlan_macip_lens = 0, type_tucmd_mlhl;
5255 u32 mss_l4len_idx, l4len;
5256
5257 if (skb_is_gso(skb)) {
5258 if (skb_header_cloned(skb)) {
5259 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
5260 if (err)
5261 return err;
5262 }
5263 l4len = tcp_hdrlen(skb);
5264 *hdr_len += l4len;
5265
5266 if (skb->protocol == htons(ETH_P_IP)) {
5267 struct iphdr *iph = ip_hdr(skb);
5268 iph->tot_len = 0;
5269 iph->check = 0;
5270 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5271 iph->daddr, 0,
5272 IPPROTO_TCP,
5273 0);
5274 } else if (skb_is_gso_v6(skb)) {
5275 ipv6_hdr(skb)->payload_len = 0;
5276 tcp_hdr(skb)->check =
5277 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
5278 &ipv6_hdr(skb)->daddr,
5279 0, IPPROTO_TCP, 0);
5280 }
5281
5282 i = tx_ring->next_to_use;
5283
5284 tx_buffer_info = &tx_ring->tx_buffer_info[i];
5285 context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
5286
5287 /* VLAN MACLEN IPLEN */
5288 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
5289 vlan_macip_lens |=
5290 (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
5291 vlan_macip_lens |= ((skb_network_offset(skb)) <<
5292 IXGBE_ADVTXD_MACLEN_SHIFT);
5293 *hdr_len += skb_network_offset(skb);
5294 vlan_macip_lens |=
5295 (skb_transport_header(skb) - skb_network_header(skb));
5296 *hdr_len +=
5297 (skb_transport_header(skb) - skb_network_header(skb));
5298 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
5299 context_desc->seqnum_seed = 0;
5300
5301 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
5302 type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
5303 IXGBE_ADVTXD_DTYP_CTXT);
5304
5305 if (skb->protocol == htons(ETH_P_IP))
5306 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
5307 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
5308 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
5309
5310 /* MSS L4LEN IDX */
5311 mss_l4len_idx =
5312 (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
5313 mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
5314 /* use index 1 for TSO */
5315 mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
5316 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
5317
5318 tx_buffer_info->time_stamp = jiffies;
5319 tx_buffer_info->next_to_watch = i;
5320
5321 i++;
5322 if (i == tx_ring->count)
5323 i = 0;
5324 tx_ring->next_to_use = i;
5325
5326 return true;
5327 }
5328 return false;
5329 }
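
/*
 * Context-descriptor example for the TSO path (hypothetical frame): an
 * MSS of 1460 with a 20-byte TCP header packs into mss_l4len_idx as
 * (1460 << IXGBE_ADVTXD_MSS_SHIFT) | (20 << IXGBE_ADVTXD_L4LEN_SHIFT) |
 * (1 << IXGBE_ADVTXD_IDX_SHIFT), selecting context index 1 for TSO.
 */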
5330
5331 static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
5332 struct ixgbe_ring *tx_ring,
5333 struct sk_buff *skb, u32 tx_flags)
5334 {
5335 struct ixgbe_adv_tx_context_desc *context_desc;
5336 unsigned int i;
5337 struct ixgbe_tx_buffer *tx_buffer_info;
5338 u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
5339
5340 if (skb->ip_summed == CHECKSUM_PARTIAL ||
5341 (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
5342 i = tx_ring->next_to_use;
5343 tx_buffer_info = &tx_ring->tx_buffer_info[i];
5344 context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
5345
5346 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
5347 vlan_macip_lens |=
5348 (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
5349 vlan_macip_lens |= (skb_network_offset(skb) <<
5350 IXGBE_ADVTXD_MACLEN_SHIFT);
5351 if (skb->ip_summed == CHECKSUM_PARTIAL)
5352 vlan_macip_lens |= (skb_transport_header(skb) -
5353 skb_network_header(skb));
5354
5355 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
5356 context_desc->seqnum_seed = 0;
5357
5358 type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
5359 IXGBE_ADVTXD_DTYP_CTXT);
5360
5361 if (skb->ip_summed == CHECKSUM_PARTIAL) {
5362 __be16 protocol;
5363
5364 if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
5365 const struct vlan_ethhdr *vhdr =
5366 (const struct vlan_ethhdr *)skb->data;
5367
5368 protocol = vhdr->h_vlan_encapsulated_proto;
5369 } else {
5370 protocol = skb->protocol;
5371 }
5372
5373 switch (protocol) {
5374 case cpu_to_be16(ETH_P_IP):
5375 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
5376 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
5377 type_tucmd_mlhl |=
5378 IXGBE_ADVTXD_TUCMD_L4T_TCP;
5379 else if (ip_hdr(skb)->protocol == IPPROTO_SCTP)
5380 type_tucmd_mlhl |=
5381 IXGBE_ADVTXD_TUCMD_L4T_SCTP;
5382 break;
5383 case cpu_to_be16(ETH_P_IPV6):
5384 /* XXX what about other V6 headers?? */
5385 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
5386 type_tucmd_mlhl |=
5387 IXGBE_ADVTXD_TUCMD_L4T_TCP;
5388 else if (ipv6_hdr(skb)->nexthdr == IPPROTO_SCTP)
5389 type_tucmd_mlhl |=
5390 IXGBE_ADVTXD_TUCMD_L4T_SCTP;
5391 break;
5392 default:
5393 if (unlikely(net_ratelimit())) {
5394 DPRINTK(PROBE, WARNING,
5395 "partial checksum but proto=%x!\n",
5396 skb->protocol);
5397 }
5398 break;
5399 }
5400 }
5401
5402 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
5403 /* use index zero for tx checksum offload */
5404 context_desc->mss_l4len_idx = 0;
5405
5406 tx_buffer_info->time_stamp = jiffies;
5407 tx_buffer_info->next_to_watch = i;
5408
5409 i++;
5410 if (i == tx_ring->count)
5411 i = 0;
5412 tx_ring->next_to_use = i;
5413
5414 return true;
5415 }
5416
5417 return false;
5418 }
5419
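/*
 * ixgbe_tx_map - map skb data for transmit DMA
 *
 * Maps the linear part of the skb and then each page fragment,
 * splitting any region larger than IXGBE_MAX_DATA_PER_TXD across
 * several tx_buffer_info entries.  Returns the number of buffers
 * mapped, or 0 after unwinding partial mappings on a DMA error.
 */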
5420 static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
5421 struct ixgbe_ring *tx_ring,
5422 struct sk_buff *skb, u32 tx_flags,
5423 unsigned int first)
5424 {
5425 struct pci_dev *pdev = adapter->pdev;
5426 struct ixgbe_tx_buffer *tx_buffer_info;
5427 unsigned int len;
5428 unsigned int total = skb->len;
5429 unsigned int offset = 0, size, count = 0, i;
5430 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
5431 unsigned int f;
5432
5433 i = tx_ring->next_to_use;
5434
5435 if (tx_flags & IXGBE_TX_FLAGS_FCOE)
5436 /* excluding fcoe_crc_eof for FCoE */
5437 total -= sizeof(struct fcoe_crc_eof);
5438
5439 len = min(skb_headlen(skb), total);
5440 while (len) {
5441 tx_buffer_info = &tx_ring->tx_buffer_info[i];
5442 		size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
5443
5444 tx_buffer_info->length = size;
5445 tx_buffer_info->mapped_as_page = false;
5446 tx_buffer_info->dma = pci_map_single(pdev,
5447 skb->data + offset,
5448 size, PCI_DMA_TODEVICE);
5449 if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
5450 goto dma_error;
5451 tx_buffer_info->time_stamp = jiffies;
5452 tx_buffer_info->next_to_watch = i;
5453
5454 len -= size;
5455 total -= size;
5456 offset += size;
5457 count++;
5458
5459 if (len) {
5460 i++;
5461 if (i == tx_ring->count)
5462 i = 0;
5463 }
5464 }
5465
5466 for (f = 0; f < nr_frags; f++) {
5467 struct skb_frag_struct *frag;
5468
5469 frag = &skb_shinfo(skb)->frags[f];
5470 len = min((unsigned int)frag->size, total);
5471 offset = frag->page_offset;
5472
5473 while (len) {
5474 i++;
5475 if (i == tx_ring->count)
5476 i = 0;
5477
5478 tx_buffer_info = &tx_ring->tx_buffer_info[i];
5479 			size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
5480
5481 tx_buffer_info->length = size;
5482 tx_buffer_info->dma = pci_map_page(adapter->pdev,
5483 frag->page,
5484 offset, size,
5485 PCI_DMA_TODEVICE);
5486 tx_buffer_info->mapped_as_page = true;
5487 if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
5488 goto dma_error;
5489 tx_buffer_info->time_stamp = jiffies;
5490 tx_buffer_info->next_to_watch = i;
5491
5492 len -= size;
5493 total -= size;
5494 offset += size;
5495 count++;
5496 }
5497 if (total == 0)
5498 break;
5499 }
5500
5501 tx_ring->tx_buffer_info[i].skb = skb;
5502 tx_ring->tx_buffer_info[first].next_to_watch = i;
5503
5504 return count;
5505
5506 dma_error:
5507 dev_err(&pdev->dev, "TX DMA map failed\n");
5508
5509 /* clear timestamp and dma mappings for failed tx_buffer_info map */
5510 tx_buffer_info->dma = 0;
5511 tx_buffer_info->time_stamp = 0;
5512 tx_buffer_info->next_to_watch = 0;
5513 if (count)
5514 count--;
5515
5516 /* clear timestamp and dma mappings for remaining portion of packet */
5517 while (count--) {
5518 		if (i == 0)
5519 i += tx_ring->count;
5520 i--;
5521 tx_buffer_info = &tx_ring->tx_buffer_info[i];
5522 ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
5523 }
5524
5525 return 0;
5526 }
5527
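/*
 * ixgbe_tx_queue - post data descriptors and notify the hardware
 *
 * Turns the buffers mapped by ixgbe_tx_map() into advanced data
 * descriptors, sets EOP/RS on the last one, and bumps the ring's
 * tail register so the hardware starts fetching.
 */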
5528 static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
5529 struct ixgbe_ring *tx_ring,
5530 int tx_flags, int count, u32 paylen, u8 hdr_len)
5531 {
5532 union ixgbe_adv_tx_desc *tx_desc = NULL;
5533 struct ixgbe_tx_buffer *tx_buffer_info;
5534 u32 olinfo_status = 0, cmd_type_len = 0;
5535 unsigned int i;
5536 u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;
5537
5538 cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
5539
5540 cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
5541
5542 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
5543 cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
5544
5545 if (tx_flags & IXGBE_TX_FLAGS_TSO) {
5546 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
5547
5548 olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
5549 IXGBE_ADVTXD_POPTS_SHIFT;
5550
5551 /* use index 1 context for tso */
5552 olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
5553 if (tx_flags & IXGBE_TX_FLAGS_IPV4)
5554 olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
5555 IXGBE_ADVTXD_POPTS_SHIFT;
5556
5557 } else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
5558 olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
5559 IXGBE_ADVTXD_POPTS_SHIFT;
5560
5561 if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
5562 olinfo_status |= IXGBE_ADVTXD_CC;
5563 olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
5564 if (tx_flags & IXGBE_TX_FLAGS_FSO)
5565 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
5566 }
5567
5568 olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
5569
5570 i = tx_ring->next_to_use;
5571 while (count--) {
5572 tx_buffer_info = &tx_ring->tx_buffer_info[i];
5573 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
5574 tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
5575 tx_desc->read.cmd_type_len =
5576 cpu_to_le32(cmd_type_len | tx_buffer_info->length);
5577 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
5578 i++;
5579 if (i == tx_ring->count)
5580 i = 0;
5581 }
5582
5583 tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
5584
5585 /*
5586 * Force memory writes to complete before letting h/w
5587 * know there are new descriptors to fetch. (Only
5588 * applicable for weak-ordered memory model archs,
5589 * such as IA-64).
5590 */
5591 wmb();
5592
5593 tx_ring->next_to_use = i;
5594 writel(i, adapter->hw.hw_addr + tx_ring->tail);
5595 }
5596
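/*
 * ixgbe_atr - program a flow director signature filter for this flow
 *
 * Application Targeted Routing samples transmitted TCP/IPv4 flows and
 * programs the 82599 flow director so the matching *receive* traffic
 * is steered to the paired queue (and, ideally, the same CPU).  That
 * is why the source/destination fields are swapped when the filter is
 * built: it has to match the frames as the receiver sees them.
 */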
5597 static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
5598 int queue, u32 tx_flags)
5599 {
5600 /* Right now, we support IPv4 only */
5601 struct ixgbe_atr_input atr_input;
5602 struct tcphdr *th;
5603 struct iphdr *iph = ip_hdr(skb);
5604 struct ethhdr *eth = (struct ethhdr *)skb->data;
5605 u16 vlan_id, src_port, dst_port, flex_bytes;
5606 u32 src_ipv4_addr, dst_ipv4_addr;
5607 u8 l4type = 0;
5608
5609 /* check if we're UDP or TCP */
5610 if (iph->protocol == IPPROTO_TCP) {
5611 th = tcp_hdr(skb);
5612 src_port = th->source;
5613 dst_port = th->dest;
5614 l4type |= IXGBE_ATR_L4TYPE_TCP;
5615 		/* the l4type value for IPv4 is 0, so there is nothing to assign */
5616 } else {
5617 /* Unsupported L4 header, just bail here */
5618 return;
5619 }
5620
5621 memset(&atr_input, 0, sizeof(struct ixgbe_atr_input));
5622
5623 vlan_id = (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK) >>
5624 IXGBE_TX_FLAGS_VLAN_SHIFT;
5625 src_ipv4_addr = iph->saddr;
5626 dst_ipv4_addr = iph->daddr;
5627 flex_bytes = eth->h_proto;
5628
5629 ixgbe_atr_set_vlan_id_82599(&atr_input, vlan_id);
5630 ixgbe_atr_set_src_port_82599(&atr_input, dst_port);
5631 ixgbe_atr_set_dst_port_82599(&atr_input, src_port);
5632 ixgbe_atr_set_flex_byte_82599(&atr_input, flex_bytes);
5633 ixgbe_atr_set_l4type_82599(&atr_input, l4type);
5634 /* src and dst are inverted, think how the receiver sees them */
5635 ixgbe_atr_set_src_ipv4_82599(&atr_input, dst_ipv4_addr);
5636 ixgbe_atr_set_dst_ipv4_82599(&atr_input, src_ipv4_addr);
5637
5638 /* This assumes the Rx queue and Tx queue are bound to the same CPU */
5639 ixgbe_fdir_add_signature_filter_82599(&adapter->hw, &atr_input, queue);
5640 }
5641
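/*
 * Standard lockless stop-queue pattern: stop the queue, then re-check
 * the free descriptor count after a full memory barrier.  If the
 * clean path freed descriptors between the caller's first check and
 * the stop, the queue is restarted immediately.
 */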
5642 static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
5643 struct ixgbe_ring *tx_ring, int size)
5644 {
5645 netif_stop_subqueue(netdev, tx_ring->queue_index);
5646 /* Herbert's original patch had:
5647 * smp_mb__after_netif_stop_queue();
5648 * but since that doesn't exist yet, just open code it. */
5649 smp_mb();
5650
5651 	/* We need to check again in case another CPU has just
5652 	 * made room available. */
5653 if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
5654 return -EBUSY;
5655
5656 /* A reprieve! - use start_queue because it doesn't call schedule */
5657 netif_start_subqueue(netdev, tx_ring->queue_index);
5658 ++tx_ring->restart_queue;
5659 return 0;
5660 }
5661
5662 static int ixgbe_maybe_stop_tx(struct net_device *netdev,
5663 struct ixgbe_ring *tx_ring, int size)
5664 {
5665 if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
5666 return 0;
5667 return __ixgbe_maybe_stop_tx(netdev, tx_ring, size);
5668 }
5669
5670 static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
5671 {
5672 struct ixgbe_adapter *adapter = netdev_priv(dev);
5673 int txq = smp_processor_id();
5674
5675 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
5676 while (unlikely(txq >= dev->real_num_tx_queues))
5677 txq -= dev->real_num_tx_queues;
5678 return txq;
5679 }
5680
5681 #ifdef IXGBE_FCOE
5682 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
5683 ((skb->protocol == htons(ETH_P_FCOE)) ||
5684 (skb->protocol == htons(ETH_P_FIP)))) {
5685 txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
5686 txq += adapter->ring_feature[RING_F_FCOE].mask;
5687 return txq;
5688 }
5689 #endif
5690 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
5691 if (skb->priority == TC_PRIO_CONTROL)
5692 txq = adapter->ring_feature[RING_F_DCB].indices-1;
5693 else
5694 txq = (skb->vlan_tci & IXGBE_TX_FLAGS_VLAN_PRIO_MASK)
5695 >> 13;
5696 return txq;
5697 }
5698
5699 return skb_tx_hash(dev, skb);
5700 }
5701
5702 static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
5703 struct net_device *netdev)
5704 {
5705 struct ixgbe_adapter *adapter = netdev_priv(netdev);
5706 struct ixgbe_ring *tx_ring;
5707 struct netdev_queue *txq;
5708 unsigned int first;
5709 unsigned int tx_flags = 0;
5710 u8 hdr_len = 0;
5711 int tso;
5712 int count = 0;
5713 unsigned int f;
5714
5715 if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
5716 tx_flags |= vlan_tx_tag_get(skb);
5717 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
5718 tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
5719 tx_flags |= ((skb->queue_mapping & 0x7) << 13);
5720 }
5721 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
5722 tx_flags |= IXGBE_TX_FLAGS_VLAN;
5723 } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
5724 tx_flags |= ((skb->queue_mapping & 0x7) << 13);
5725 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
5726 tx_flags |= IXGBE_TX_FLAGS_VLAN;
5727 }
5728
5729 tx_ring = adapter->tx_ring[skb->queue_mapping];
5730
5731 #ifdef IXGBE_FCOE
5732 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
5733 #ifdef CONFIG_IXGBE_DCB
5734 /* for FCoE with DCB, we force the priority to what
5735 * was specified by the switch */
5736 if ((skb->protocol == htons(ETH_P_FCOE)) ||
5737 (skb->protocol == htons(ETH_P_FIP))) {
5738 tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK
5739 << IXGBE_TX_FLAGS_VLAN_SHIFT);
5740 tx_flags |= ((adapter->fcoe.up << 13)
5741 << IXGBE_TX_FLAGS_VLAN_SHIFT);
5742 }
5743 #endif
5744 /* flag for FCoE offloads */
5745 if (skb->protocol == htons(ETH_P_FCOE))
5746 tx_flags |= IXGBE_TX_FLAGS_FCOE;
5747 }
5748 #endif
5749
5750 /* four things can cause us to need a context descriptor */
5751 if (skb_is_gso(skb) ||
5752 (skb->ip_summed == CHECKSUM_PARTIAL) ||
5753 (tx_flags & IXGBE_TX_FLAGS_VLAN) ||
5754 (tx_flags & IXGBE_TX_FLAGS_FCOE))
5755 count++;
5756
5757 count += TXD_USE_COUNT(skb_headlen(skb));
5758 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
5759 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
5760
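/*
 * Note: TXD_USE_COUNT(size) presumably evaluates to roughly
 * DIV_ROUND_UP(size, IXGBE_MAX_DATA_PER_TXD), the number of data
 * descriptors needed to carry 'size' bytes.  Reserving the worst case
 * up front means ixgbe_tx_map() cannot run out of descriptors halfway
 * through a packet.
 */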
5761 if (ixgbe_maybe_stop_tx(netdev, tx_ring, count)) {
5762 adapter->tx_busy++;
5763 return NETDEV_TX_BUSY;
5764 }
5765
5766 first = tx_ring->next_to_use;
5767 if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
5768 #ifdef IXGBE_FCOE
5769 /* setup tx offload for FCoE */
5770 tso = ixgbe_fso(adapter, tx_ring, skb, tx_flags, &hdr_len);
5771 if (tso < 0) {
5772 dev_kfree_skb_any(skb);
5773 return NETDEV_TX_OK;
5774 }
5775 if (tso)
5776 tx_flags |= IXGBE_TX_FLAGS_FSO;
5777 #endif /* IXGBE_FCOE */
5778 } else {
5779 if (skb->protocol == htons(ETH_P_IP))
5780 tx_flags |= IXGBE_TX_FLAGS_IPV4;
5781 tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
5782 if (tso < 0) {
5783 dev_kfree_skb_any(skb);
5784 return NETDEV_TX_OK;
5785 }
5786
5787 if (tso)
5788 tx_flags |= IXGBE_TX_FLAGS_TSO;
5789 else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) &&
5790 (skb->ip_summed == CHECKSUM_PARTIAL))
5791 tx_flags |= IXGBE_TX_FLAGS_CSUM;
5792 }
5793
5794 count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first);
5795 if (count) {
5796 /* add the ATR filter if ATR is on */
5797 if (tx_ring->atr_sample_rate) {
5798 ++tx_ring->atr_count;
5799 if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) &&
5800 test_bit(__IXGBE_FDIR_INIT_DONE,
5801 &tx_ring->reinit_state)) {
5802 ixgbe_atr(adapter, skb, tx_ring->queue_index,
5803 tx_flags);
5804 tx_ring->atr_count = 0;
5805 }
5806 }
5807 txq = netdev_get_tx_queue(netdev, tx_ring->queue_index);
5808 txq->tx_bytes += skb->len;
5809 txq->tx_packets++;
5810 ixgbe_tx_queue(adapter, tx_ring, tx_flags, count, skb->len,
5811 hdr_len);
5812 ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
5813
5814 } else {
5815 dev_kfree_skb_any(skb);
5816 tx_ring->tx_buffer_info[first].time_stamp = 0;
5817 tx_ring->next_to_use = first;
5818 }
5819
5820 return NETDEV_TX_OK;
5821 }
5822
5823 /**
5824 * ixgbe_set_mac - Change the Ethernet Address of the NIC
5825 * @netdev: network interface device structure
5826 * @p: pointer to an address structure
5827 *
5828 * Returns 0 on success, negative on failure
5829 **/
5830 static int ixgbe_set_mac(struct net_device *netdev, void *p)
5831 {
5832 struct ixgbe_adapter *adapter = netdev_priv(netdev);
5833 struct ixgbe_hw *hw = &adapter->hw;
5834 struct sockaddr *addr = p;
5835
5836 if (!is_valid_ether_addr(addr->sa_data))
5837 return -EADDRNOTAVAIL;
5838
5839 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
5840 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
5841
5842 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
5843 IXGBE_RAH_AV);
5844
5845 return 0;
5846 }
5847
5848 static int
5849 ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr)
5850 {
5851 struct ixgbe_adapter *adapter = netdev_priv(netdev);
5852 struct ixgbe_hw *hw = &adapter->hw;
5853 u16 value;
5854 int rc;
5855
5856 if (prtad != hw->phy.mdio.prtad)
5857 return -EINVAL;
5858 rc = hw->phy.ops.read_reg(hw, addr, devad, &value);
5859 if (!rc)
5860 rc = value;
5861 return rc;
5862 }
5863
5864 static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad,
5865 u16 addr, u16 value)
5866 {
5867 struct ixgbe_adapter *adapter = netdev_priv(netdev);
5868 struct ixgbe_hw *hw = &adapter->hw;
5869
5870 if (prtad != hw->phy.mdio.prtad)
5871 return -EINVAL;
5872 return hw->phy.ops.write_reg(hw, addr, devad, value);
5873 }
5874
5875 static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
5876 {
5877 struct ixgbe_adapter *adapter = netdev_priv(netdev);
5878
5879 return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
5880 }
5881
5882 /**
5883 * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding
5884 * netdev->dev_addrs
5885  * @dev: network interface device structure
5886 *
5887 * Returns non-zero on failure
5888 **/
5889 static int ixgbe_add_sanmac_netdev(struct net_device *dev)
5890 {
5891 int err = 0;
5892 struct ixgbe_adapter *adapter = netdev_priv(dev);
5893 struct ixgbe_mac_info *mac = &adapter->hw.mac;
5894
5895 if (is_valid_ether_addr(mac->san_addr)) {
5896 rtnl_lock();
5897 err = dev_addr_add(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
5898 rtnl_unlock();
5899 }
5900 return err;
5901 }
5902
5903 /**
5904  * ixgbe_del_sanmac_netdev - Removes the SAN MAC address from the corresponding
5905  * netdev->dev_addrs
5906  * @dev: network interface device structure
5907 *
5908 * Returns non-zero on failure
5909 **/
5910 static int ixgbe_del_sanmac_netdev(struct net_device *dev)
5911 {
5912 int err = 0;
5913 struct ixgbe_adapter *adapter = netdev_priv(dev);
5914 struct ixgbe_mac_info *mac = &adapter->hw.mac;
5915
5916 if (is_valid_ether_addr(mac->san_addr)) {
5917 rtnl_lock();
5918 err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
5919 rtnl_unlock();
5920 }
5921 return err;
5922 }
5923
5924 #ifdef CONFIG_NET_POLL_CONTROLLER
5925 /*
5926 * Polling 'interrupt' - used by things like netconsole to send skbs
5927 * without having to re-enable interrupts. It's not called while
5928 * the interrupt routine is executing.
5929 */
5930 static void ixgbe_netpoll(struct net_device *netdev)
5931 {
5932 struct ixgbe_adapter *adapter = netdev_priv(netdev);
5933 int i;
5934
5935 /* if interface is down do nothing */
5936 if (test_bit(__IXGBE_DOWN, &adapter->state))
5937 return;
5938
5939 adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
5940 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
5941 int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
5942 for (i = 0; i < num_q_vectors; i++) {
5943 struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
5944 ixgbe_msix_clean_many(0, q_vector);
5945 }
5946 } else {
5947 ixgbe_intr(adapter->pdev->irq, netdev);
5948 }
5949 adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
5950 }
5951 #endif
5952
5953 static const struct net_device_ops ixgbe_netdev_ops = {
5954 .ndo_open = ixgbe_open,
5955 .ndo_stop = ixgbe_close,
5956 .ndo_start_xmit = ixgbe_xmit_frame,
5957 .ndo_select_queue = ixgbe_select_queue,
5958 .ndo_set_rx_mode = ixgbe_set_rx_mode,
5959 .ndo_set_multicast_list = ixgbe_set_rx_mode,
5960 .ndo_validate_addr = eth_validate_addr,
5961 .ndo_set_mac_address = ixgbe_set_mac,
5962 .ndo_change_mtu = ixgbe_change_mtu,
5963 .ndo_tx_timeout = ixgbe_tx_timeout,
5964 .ndo_vlan_rx_register = ixgbe_vlan_rx_register,
5965 .ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid,
5966 .ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid,
5967 .ndo_do_ioctl = ixgbe_ioctl,
5968 #ifdef CONFIG_NET_POLL_CONTROLLER
5969 .ndo_poll_controller = ixgbe_netpoll,
5970 #endif
5971 #ifdef IXGBE_FCOE
5972 .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,
5973 .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put,
5974 .ndo_fcoe_enable = ixgbe_fcoe_enable,
5975 .ndo_fcoe_disable = ixgbe_fcoe_disable,
5976 .ndo_fcoe_get_wwn = ixgbe_fcoe_get_wwn,
5977 #endif /* IXGBE_FCOE */
5978 };
5979
5980 static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter,
5981 const struct ixgbe_info *ii)
5982 {
5983 #ifdef CONFIG_PCI_IOV
5984 struct ixgbe_hw *hw = &adapter->hw;
5985 int err;
5986
5987 if (hw->mac.type != ixgbe_mac_82599EB || !max_vfs)
5988 return;
5989
5990 /* The 82599 supports up to 64 VFs per physical function
5991 * but this implementation limits allocation to 63 so that
5992 * basic networking resources are still available to the
5993 * physical function
5994 */
5995 adapter->num_vfs = (max_vfs > 63) ? 63 : max_vfs;
5996 adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;
5997 err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
5998 if (err) {
5999 DPRINTK(PROBE, ERR,
6000 "Failed to enable PCI sriov: %d\n", err);
6001 goto err_novfs;
6002 }
6003 	/* If the call to enable VFs succeeded then allocate memory
6004 	 * for per-VF control structures.
6005 */
6006 adapter->vfinfo =
6007 kcalloc(adapter->num_vfs,
6008 sizeof(struct vf_data_storage), GFP_KERNEL);
6009 if (adapter->vfinfo) {
6010 /* Now that we're sure SR-IOV is enabled
6011 		/* Now that we're sure SR-IOV is enabled
6012 		 * and memory is allocated, set up the mailbox parameters
6012 		 */
6013 ixgbe_init_mbx_params_pf(hw);
6014 memcpy(&hw->mbx.ops, ii->mbx_ops,
6015 sizeof(hw->mbx.ops));
6016
6017 /* Disable RSC when in SR-IOV mode */
6018 adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
6019 IXGBE_FLAG2_RSC_ENABLED);
6020 return;
6021 }
6022
6023 	/* Allocation failed - fall back to operation without SR-IOV */
6024 DPRINTK(PROBE, ERR,
6025 "Unable to allocate memory for VF "
6026 "Data Storage - SRIOV disabled\n");
6027 pci_disable_sriov(adapter->pdev);
6028
6029 err_novfs:
6030 adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
6031 adapter->num_vfs = 0;
6032 #endif /* CONFIG_PCI_IOV */
6033 }
6034
6035 /**
6036 * ixgbe_probe - Device Initialization Routine
6037 * @pdev: PCI device information struct
6038 * @ent: entry in ixgbe_pci_tbl
6039 *
6040 * Returns 0 on success, negative on failure
6041 *
6042 * ixgbe_probe initializes an adapter identified by a pci_dev structure.
6043 * The OS initialization, configuring of the adapter private structure,
6044 * and a hardware reset occur.
6045 **/
6046 static int __devinit ixgbe_probe(struct pci_dev *pdev,
6047 const struct pci_device_id *ent)
6048 {
6049 struct net_device *netdev;
6050 struct ixgbe_adapter *adapter = NULL;
6051 struct ixgbe_hw *hw;
6052 const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
6053 static int cards_found;
6054 int i, err, pci_using_dac;
6055 unsigned int indices = num_possible_cpus();
6056 #ifdef IXGBE_FCOE
6057 u16 device_caps;
6058 #endif
6059 u32 part_num, eec;
6060
6061 err = pci_enable_device_mem(pdev);
6062 if (err)
6063 return err;
6064
6065 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
6066 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
6067 pci_using_dac = 1;
6068 } else {
6069 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
6070 if (err) {
6071 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
6072 if (err) {
6073 dev_err(&pdev->dev, "No usable DMA "
6074 "configuration, aborting\n");
6075 goto err_dma;
6076 }
6077 }
6078 pci_using_dac = 0;
6079 }
6080
6081 err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
6082 IORESOURCE_MEM), ixgbe_driver_name);
6083 if (err) {
6084 dev_err(&pdev->dev,
6085 "pci_request_selected_regions failed 0x%x\n", err);
6086 goto err_pci_reg;
6087 }
6088
6089 pci_enable_pcie_error_reporting(pdev);
6090
6091 pci_set_master(pdev);
6092 pci_save_state(pdev);
6093
6094 if (ii->mac == ixgbe_mac_82598EB)
6095 indices = min_t(unsigned int, indices, IXGBE_MAX_RSS_INDICES);
6096 else
6097 indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES);
6098
6099 indices = max_t(unsigned int, indices, IXGBE_MAX_DCB_INDICES);
6100 #ifdef IXGBE_FCOE
6101 indices += min_t(unsigned int, num_possible_cpus(),
6102 IXGBE_MAX_FCOE_INDICES);
6103 #endif
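/*
 * 'indices' caps how many TX/RX queues the netdev is created with:
 * one per possible CPU, clamped by the per-MAC RSS/FDIR limits above,
 * raised to the DCB minimum, plus extra queues for FCoE.
 */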
6104 netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
6105 if (!netdev) {
6106 err = -ENOMEM;
6107 goto err_alloc_etherdev;
6108 }
6109
6110 SET_NETDEV_DEV(netdev, &pdev->dev);
6111
6112 pci_set_drvdata(pdev, netdev);
6113 adapter = netdev_priv(netdev);
6114
6115 adapter->netdev = netdev;
6116 adapter->pdev = pdev;
6117 hw = &adapter->hw;
6118 hw->back = adapter;
6119 adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
6120
6121 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
6122 pci_resource_len(pdev, 0));
6123 if (!hw->hw_addr) {
6124 err = -EIO;
6125 goto err_ioremap;
6126 }
6127
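/*
 * Note: this loop walks BARs 1-5 but its body is empty; it appears to
 * be a vestigial remnant (possibly of earlier flash/IO BAR handling)
 * and has no effect.
 */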
6128 for (i = 1; i <= 5; i++) {
6129 if (pci_resource_len(pdev, i) == 0)
6130 continue;
6131 }
6132
6133 netdev->netdev_ops = &ixgbe_netdev_ops;
6134 ixgbe_set_ethtool_ops(netdev);
6135 netdev->watchdog_timeo = 5 * HZ;
6136 strcpy(netdev->name, pci_name(pdev));
6137
6138 adapter->bd_number = cards_found;
6139
6140 /* Setup hw api */
6141 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
6142 hw->mac.type = ii->mac;
6143
6144 /* EEPROM */
6145 memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
6146 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
6147 	/* If EEPROM is valid (bit 8 = 1) use the default, otherwise use bit-bang */
6148 if (!(eec & (1 << 8)))
6149 hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;
6150
6151 /* PHY */
6152 memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
6153 hw->phy.sfp_type = ixgbe_sfp_type_unknown;
6154 /* ixgbe_identify_phy_generic will set prtad and mmds properly */
6155 hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
6156 hw->phy.mdio.mmds = 0;
6157 hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
6158 hw->phy.mdio.dev = netdev;
6159 hw->phy.mdio.mdio_read = ixgbe_mdio_read;
6160 hw->phy.mdio.mdio_write = ixgbe_mdio_write;
6161
6162 /* set up this timer and work struct before calling get_invariants
6163 * which might start the timer
6164 */
6165 init_timer(&adapter->sfp_timer);
6166 adapter->sfp_timer.function = &ixgbe_sfp_timer;
6167 adapter->sfp_timer.data = (unsigned long) adapter;
6168
6169 INIT_WORK(&adapter->sfp_task, ixgbe_sfp_task);
6170
6171 /* multispeed fiber has its own tasklet, called from GPI SDP1 context */
6172 INIT_WORK(&adapter->multispeed_fiber_task, ixgbe_multispeed_fiber_task);
6173
6174 /* a new SFP+ module arrival, called from GPI SDP2 context */
6175 INIT_WORK(&adapter->sfp_config_module_task,
6176 ixgbe_sfp_config_module_task);
6177
6178 ii->get_invariants(hw);
6179
6180 /* setup the private structure */
6181 err = ixgbe_sw_init(adapter);
6182 if (err)
6183 goto err_sw_init;
6184
6185 	/* Make it possible for the adapter to be woken up via WOL */
6186 if (adapter->hw.mac.type == ixgbe_mac_82599EB)
6187 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
6188
6189 /*
6190 	 * If there is a fan on this device and it has failed, log the
6191 * failure.
6192 */
6193 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
6194 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
6195 if (esdp & IXGBE_ESDP_SDP1)
6196 DPRINTK(PROBE, CRIT,
6197 "Fan has stopped, replace the adapter\n");
6198 }
6199
6200 /* reset_hw fills in the perm_addr as well */
6201 err = hw->mac.ops.reset_hw(hw);
6202 if (err == IXGBE_ERR_SFP_NOT_PRESENT &&
6203 hw->mac.type == ixgbe_mac_82598EB) {
6204 /*
6205 		 * Arm a timer to poll for a module to arrive.
6206 * Only do this for 82598, since 82599 will generate
6207 * interrupts on module arrival.
6208 */
6209 set_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
6210 mod_timer(&adapter->sfp_timer,
6211 round_jiffies(jiffies + (2 * HZ)));
6212 err = 0;
6213 } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
6214 dev_err(&adapter->pdev->dev, "failed to initialize because "
6215 "an unsupported SFP+ module type was detected.\n"
6216 "Reload the driver after installing a supported "
6217 "module.\n");
6218 goto err_sw_init;
6219 } else if (err) {
6220 dev_err(&adapter->pdev->dev, "HW Init failed: %d\n", err);
6221 goto err_sw_init;
6222 }
6223
6224 ixgbe_probe_vf(adapter, ii);
6225
6226 netdev->features = NETIF_F_SG |
6227 NETIF_F_IP_CSUM |
6228 NETIF_F_HW_VLAN_TX |
6229 NETIF_F_HW_VLAN_RX |
6230 NETIF_F_HW_VLAN_FILTER;
6231
6232 netdev->features |= NETIF_F_IPV6_CSUM;
6233 netdev->features |= NETIF_F_TSO;
6234 netdev->features |= NETIF_F_TSO6;
6235 netdev->features |= NETIF_F_GRO;
6236
6237 if (adapter->hw.mac.type == ixgbe_mac_82599EB)
6238 netdev->features |= NETIF_F_SCTP_CSUM;
6239
6240 netdev->vlan_features |= NETIF_F_TSO;
6241 netdev->vlan_features |= NETIF_F_TSO6;
6242 netdev->vlan_features |= NETIF_F_IP_CSUM;
6243 netdev->vlan_features |= NETIF_F_IPV6_CSUM;
6244 netdev->vlan_features |= NETIF_F_SG;
6245
6246 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
6247 adapter->flags &= ~(IXGBE_FLAG_RSS_ENABLED |
6248 IXGBE_FLAG_DCB_ENABLED);
6249 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
6250 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
6251
6252 #ifdef CONFIG_IXGBE_DCB
6253 netdev->dcbnl_ops = &dcbnl_ops;
6254 #endif
6255
6256 #ifdef IXGBE_FCOE
6257 if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
6258 if (hw->mac.ops.get_device_caps) {
6259 hw->mac.ops.get_device_caps(hw, &device_caps);
6260 if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
6261 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
6262 }
6263 }
6264 #endif /* IXGBE_FCOE */
6265 if (pci_using_dac)
6266 netdev->features |= NETIF_F_HIGHDMA;
6267
6268 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
6269 netdev->features |= NETIF_F_LRO;
6270
6271 /* make sure the EEPROM is good */
6272 if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
6273 dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n");
6274 err = -EIO;
6275 goto err_eeprom;
6276 }
6277
6278 memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
6279 memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len);
6280
6281 if (ixgbe_validate_mac_addr(netdev->perm_addr)) {
6282 dev_err(&pdev->dev, "invalid MAC address\n");
6283 err = -EIO;
6284 goto err_eeprom;
6285 }
6286
6287 /* power down the optics */
6288 if (hw->phy.multispeed_fiber)
6289 hw->mac.ops.disable_tx_laser(hw);
6290
6291 init_timer(&adapter->watchdog_timer);
6292 adapter->watchdog_timer.function = &ixgbe_watchdog;
6293 adapter->watchdog_timer.data = (unsigned long)adapter;
6294
6295 INIT_WORK(&adapter->reset_task, ixgbe_reset_task);
6296 INIT_WORK(&adapter->watchdog_task, ixgbe_watchdog_task);
6297
6298 err = ixgbe_init_interrupt_scheme(adapter);
6299 if (err)
6300 goto err_sw_init;
6301
6302 switch (pdev->device) {
6303 case IXGBE_DEV_ID_82599_KX4:
6304 adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
6305 IXGBE_WUFC_MC | IXGBE_WUFC_BC);
6306 break;
6307 default:
6308 adapter->wol = 0;
6309 break;
6310 }
6311 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
6312
6313 /* pick up the PCI bus settings for reporting later */
6314 hw->mac.ops.get_bus_info(hw);
6315
6316 /* print bus type/speed/width info */
6317 dev_info(&pdev->dev, "(PCI Express:%s:%s) %pM\n",
6318 ((hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0Gb/s":
6319 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5Gb/s":"Unknown"),
6320 ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
6321 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
6322 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
6323 "Unknown"),
6324 netdev->dev_addr);
6325 ixgbe_read_pba_num_generic(hw, &part_num);
6326 if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
6327 dev_info(&pdev->dev, "MAC: %d, PHY: %d, SFP+: %d, PBA No: %06x-%03x\n",
6328 hw->mac.type, hw->phy.type, hw->phy.sfp_type,
6329 (part_num >> 8), (part_num & 0xff));
6330 else
6331 dev_info(&pdev->dev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
6332 hw->mac.type, hw->phy.type,
6333 (part_num >> 8), (part_num & 0xff));
6334
6335 if (hw->bus.width <= ixgbe_bus_width_pcie_x4) {
6336 dev_warn(&pdev->dev, "PCI-Express bandwidth available for "
6337 "this card is not sufficient for optimal "
6338 "performance.\n");
6339 dev_warn(&pdev->dev, "For optimal performance a x8 "
6340 "PCI-Express slot is required.\n");
6341 }
6342
6343 /* save off EEPROM version number */
6344 hw->eeprom.ops.read(hw, 0x29, &adapter->eeprom_version);
6345
6346 /* reset the hardware with the new settings */
6347 err = hw->mac.ops.start_hw(hw);
6348
6349 if (err == IXGBE_ERR_EEPROM_VERSION) {
6350 /* We are running on a pre-production device, log a warning */
6351 dev_warn(&pdev->dev, "This device is a pre-production "
6352 "adapter/LOM. Please be aware there may be issues "
6353 "associated with your hardware. If you are "
6354 "experiencing problems please contact your Intel or "
6355 "hardware representative who provided you with this "
6356 "hardware.\n");
6357 }
6358 strcpy(netdev->name, "eth%d");
6359 err = register_netdev(netdev);
6360 if (err)
6361 goto err_register;
6362
6363 /* carrier off reporting is important to ethtool even BEFORE open */
6364 netif_carrier_off(netdev);
6365
6366 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
6367 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
6368 INIT_WORK(&adapter->fdir_reinit_task, ixgbe_fdir_reinit_task);
6369
6370 #ifdef CONFIG_IXGBE_DCA
6371 if (dca_add_requester(&pdev->dev) == 0) {
6372 adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
6373 ixgbe_setup_dca(adapter);
6374 }
6375 #endif
6376 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
6377 DPRINTK(PROBE, INFO, "IOV is enabled with %d VFs\n",
6378 adapter->num_vfs);
6379 for (i = 0; i < adapter->num_vfs; i++)
6380 ixgbe_vf_configuration(pdev, (i | 0x10000000));
6381 }
6382
6383 /* add san mac addr to netdev */
6384 ixgbe_add_sanmac_netdev(netdev);
6385
6386 dev_info(&pdev->dev, "Intel(R) 10 Gigabit Network Connection\n");
6387 cards_found++;
6388 return 0;
6389
6390 err_register:
6391 ixgbe_release_hw_control(adapter);
6392 ixgbe_clear_interrupt_scheme(adapter);
6393 err_sw_init:
6394 err_eeprom:
6395 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
6396 ixgbe_disable_sriov(adapter);
6397 clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
6398 del_timer_sync(&adapter->sfp_timer);
6399 cancel_work_sync(&adapter->sfp_task);
6400 cancel_work_sync(&adapter->multispeed_fiber_task);
6401 cancel_work_sync(&adapter->sfp_config_module_task);
6402 iounmap(hw->hw_addr);
6403 err_ioremap:
6404 free_netdev(netdev);
6405 err_alloc_etherdev:
6406 pci_release_selected_regions(pdev, pci_select_bars(pdev,
6407 IORESOURCE_MEM));
6408 err_pci_reg:
6409 err_dma:
6410 pci_disable_device(pdev);
6411 return err;
6412 }
6413
6414 /**
6415 * ixgbe_remove - Device Removal Routine
6416 * @pdev: PCI device information struct
6417 *
6418 * ixgbe_remove is called by the PCI subsystem to alert the driver
6419  * that it should release a PCI device.  This could be caused by a
6420 * Hot-Plug event, or because the driver is going to be removed from
6421 * memory.
6422 **/
6423 static void __devexit ixgbe_remove(struct pci_dev *pdev)
6424 {
6425 struct net_device *netdev = pci_get_drvdata(pdev);
6426 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6427
6428 set_bit(__IXGBE_DOWN, &adapter->state);
6429 /* clear the module not found bit to make sure the worker won't
6430 * reschedule
6431 */
6432 clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
6433 del_timer_sync(&adapter->watchdog_timer);
6434
6435 del_timer_sync(&adapter->sfp_timer);
6436 cancel_work_sync(&adapter->watchdog_task);
6437 cancel_work_sync(&adapter->sfp_task);
6438 cancel_work_sync(&adapter->multispeed_fiber_task);
6439 cancel_work_sync(&adapter->sfp_config_module_task);
6440 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
6441 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
6442 cancel_work_sync(&adapter->fdir_reinit_task);
6443 flush_scheduled_work();
6444
6445 #ifdef CONFIG_IXGBE_DCA
6446 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
6447 adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
6448 dca_remove_requester(&pdev->dev);
6449 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
6450 }
6451
6452 #endif
6453 #ifdef IXGBE_FCOE
6454 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
6455 ixgbe_cleanup_fcoe(adapter);
6456
6457 #endif /* IXGBE_FCOE */
6458
6459 /* remove the added san mac */
6460 ixgbe_del_sanmac_netdev(netdev);
6461
6462 if (netdev->reg_state == NETREG_REGISTERED)
6463 unregister_netdev(netdev);
6464
6465 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
6466 ixgbe_disable_sriov(adapter);
6467
6468 ixgbe_clear_interrupt_scheme(adapter);
6469
6470 ixgbe_release_hw_control(adapter);
6471
6472 iounmap(adapter->hw.hw_addr);
6473 pci_release_selected_regions(pdev, pci_select_bars(pdev,
6474 IORESOURCE_MEM));
6475
6476 DPRINTK(PROBE, INFO, "complete\n");
6477
6478 free_netdev(netdev);
6479
6480 pci_disable_pcie_error_reporting(pdev);
6481
6482 pci_disable_device(pdev);
6483 }
6484
6485 /**
6486 * ixgbe_io_error_detected - called when PCI error is detected
6487 * @pdev: Pointer to PCI device
6488 * @state: The current pci connection state
6489 *
6490 * This function is called after a PCI bus error affecting
6491 * this device has been detected.
6492 */
6493 static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
6494 pci_channel_state_t state)
6495 {
6496 struct net_device *netdev = pci_get_drvdata(pdev);
6497 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6498
6499 netif_device_detach(netdev);
6500
6501 if (state == pci_channel_io_perm_failure)
6502 return PCI_ERS_RESULT_DISCONNECT;
6503
6504 if (netif_running(netdev))
6505 ixgbe_down(adapter);
6506 pci_disable_device(pdev);
6507
6508 /* Request a slot reset. */
6509 return PCI_ERS_RESULT_NEED_RESET;
6510 }
6511
6512 /**
6513 * ixgbe_io_slot_reset - called after the pci bus has been reset.
6514 * @pdev: Pointer to PCI device
6515 *
6516 * Restart the card from scratch, as if from a cold-boot.
6517 */
6518 static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
6519 {
6520 struct net_device *netdev = pci_get_drvdata(pdev);
6521 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6522 pci_ers_result_t result;
6523 int err;
6524
6525 if (pci_enable_device_mem(pdev)) {
6526 DPRINTK(PROBE, ERR,
6527 "Cannot re-enable PCI device after reset.\n");
6528 result = PCI_ERS_RESULT_DISCONNECT;
6529 } else {
6530 pci_set_master(pdev);
6531 pci_restore_state(pdev);
6532 pci_save_state(pdev);
6533
6534 pci_wake_from_d3(pdev, false);
6535
6536 ixgbe_reset(adapter);
6537 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
6538 result = PCI_ERS_RESULT_RECOVERED;
6539 }
6540
6541 err = pci_cleanup_aer_uncorrect_error_status(pdev);
6542 if (err) {
6543 dev_err(&pdev->dev,
6544 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n", err);
6545 /* non-fatal, continue */
6546 }
6547
6548 return result;
6549 }
6550
6551 /**
6552 * ixgbe_io_resume - called when traffic can start flowing again.
6553 * @pdev: Pointer to PCI device
6554 *
6555 * This callback is called when the error recovery driver tells us that
6556  * it's OK to resume normal operation.
6557 */
6558 static void ixgbe_io_resume(struct pci_dev *pdev)
6559 {
6560 struct net_device *netdev = pci_get_drvdata(pdev);
6561 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6562
6563 if (netif_running(netdev)) {
6564 if (ixgbe_up(adapter)) {
6565 DPRINTK(PROBE, INFO, "ixgbe_up failed after reset\n");
6566 return;
6567 }
6568 }
6569
6570 netif_device_attach(netdev);
6571 }
6572
6573 static struct pci_error_handlers ixgbe_err_handler = {
6574 .error_detected = ixgbe_io_error_detected,
6575 .slot_reset = ixgbe_io_slot_reset,
6576 .resume = ixgbe_io_resume,
6577 };
6578
6579 static struct pci_driver ixgbe_driver = {
6580 .name = ixgbe_driver_name,
6581 .id_table = ixgbe_pci_tbl,
6582 .probe = ixgbe_probe,
6583 .remove = __devexit_p(ixgbe_remove),
6584 #ifdef CONFIG_PM
6585 .suspend = ixgbe_suspend,
6586 .resume = ixgbe_resume,
6587 #endif
6588 .shutdown = ixgbe_shutdown,
6589 .err_handler = &ixgbe_err_handler
6590 };
6591
6592 /**
6593 * ixgbe_init_module - Driver Registration Routine
6594 *
6595 * ixgbe_init_module is the first routine called when the driver is
6596 * loaded. All it does is register with the PCI subsystem.
6597 **/
6598 static int __init ixgbe_init_module(void)
6599 {
6600 int ret;
6601 printk(KERN_INFO "%s: %s - version %s\n", ixgbe_driver_name,
6602 ixgbe_driver_string, ixgbe_driver_version);
6603
6604 printk(KERN_INFO "%s: %s\n", ixgbe_driver_name, ixgbe_copyright);
6605
6606 #ifdef CONFIG_IXGBE_DCA
6607 dca_register_notify(&dca_notifier);
6608 #endif
6609
6610 ret = pci_register_driver(&ixgbe_driver);
6611 return ret;
6612 }
6613
6614 module_init(ixgbe_init_module);
6615
6616 /**
6617 * ixgbe_exit_module - Driver Exit Cleanup Routine
6618 *
6619 * ixgbe_exit_module is called just before the driver is removed
6620 * from memory.
6621 **/
6622 static void __exit ixgbe_exit_module(void)
6623 {
6624 #ifdef CONFIG_IXGBE_DCA
6625 dca_unregister_notify(&dca_notifier);
6626 #endif
6627 pci_unregister_driver(&ixgbe_driver);
6628 }
6629
6630 #ifdef CONFIG_IXGBE_DCA
6631 static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
6632 void *p)
6633 {
6634 int ret_val;
6635
6636 ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
6637 __ixgbe_notify_dca);
6638
6639 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
6640 }
6641
6642 #endif /* CONFIG_IXGBE_DCA */
6643 #ifdef DEBUG
6644 /**
6645 * ixgbe_get_hw_dev_name - return device name string
6646 * used by hardware layer to print debugging information
6647 **/
6648 char *ixgbe_get_hw_dev_name(struct ixgbe_hw *hw)
6649 {
6650 struct ixgbe_adapter *adapter = hw->back;
6651 return adapter->netdev->name;
6652 }
6653
6654 #endif
6655 module_exit(ixgbe_exit_module);
6656
6657 /* ixgbe_main.c */