igb: fix comments
drivers/net/igb/igb_main.c
/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#ifdef CONFIG_DCA
#include <linux/dca.h>
#endif
#include "igb.h"

#define DRV_VERSION "1.2.45-k2"
char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
				"Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] = "Copyright (c) 2008 Intel Corporation.";

static const struct e1000_info *igb_info_tbl[] = {
	[board_82575] = &e1000_82575_info,
};

static struct pci_device_id igb_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, igb_pci_tbl);

void igb_reset(struct igb_adapter *);
static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
static void igb_free_tx_resources(struct igb_ring *);
static void igb_free_rx_resources(struct igb_ring *);
void igb_update_stats(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void __devexit igb_remove(struct pci_dev *pdev);
static int igb_sw_init(struct igb_adapter *);
static int igb_open(struct net_device *);
static int igb_close(struct net_device *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_setup_rctl(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
static void igb_clean_tx_ring(struct igb_ring *);
static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_multi(struct net_device *);
static void igb_update_phy_info(unsigned long);
static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
static int igb_xmit_frame_ring_adv(struct sk_buff *, struct net_device *,
				   struct igb_ring *);
static int igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *);
static struct net_device_stats *igb_get_stats(struct net_device *);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
static irqreturn_t igb_msix_rx(int irq, void *);
static irqreturn_t igb_msix_tx(int irq, void *);
static int igb_clean_rx_ring_msix(struct napi_struct *, int);
#ifdef CONFIG_DCA
static void igb_update_rx_dca(struct igb_ring *);
static void igb_update_tx_dca(struct igb_ring *);
static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_DCA */
static bool igb_clean_tx_irq(struct igb_ring *);
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_rx_irq_adv(struct igb_ring *, int *, int);
static void igb_alloc_rx_buffers_adv(struct igb_ring *, int);
#ifdef CONFIG_IGB_LRO
static int igb_get_skb_hdr(struct sk_buff *skb, void **, void **, u64 *, void *);
#endif
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_rx_register(struct net_device *, struct vlan_group *);
static void igb_vlan_rx_add_vid(struct net_device *, u16);
static void igb_vlan_rx_kill_vid(struct net_device *, u16);
static void igb_restore_vlan(struct igb_adapter *);

static int igb_suspend(struct pci_dev *, pm_message_t);
#ifdef CONFIG_PM
static int igb_resume(struct pci_dev *);
#endif
static void igb_shutdown(struct pci_dev *);
#ifdef CONFIG_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
	.notifier_call	= igb_notify_dca,
	.next		= NULL,
	.priority	= 0
};
#endif

#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void igb_netpoll(struct net_device *);
#endif

static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
		     pci_channel_state_t);
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
static void igb_io_resume(struct pci_dev *);

static struct pci_error_handlers igb_err_handler = {
	.error_detected = igb_io_error_detected,
	.slot_reset = igb_io_slot_reset,
	.resume = igb_io_resume,
};

static struct pci_driver igb_driver = {
	.name     = igb_driver_name,
	.id_table = igb_pci_tbl,
	.probe    = igb_probe,
	.remove   = __devexit_p(igb_remove),
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend  = igb_suspend,
	.resume   = igb_resume,
#endif
	.shutdown = igb_shutdown,
	.err_handler = &igb_err_handler
};

static int global_quad_port_a; /* global quad port a indication */

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#ifdef DEBUG
/**
 * igb_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *igb_get_hw_dev_name(struct e1000_hw *hw)
{
	struct igb_adapter *adapter = hw->back;
	return adapter->netdev->name;
}
#endif

/**
 * igb_init_module - Driver Registration Routine
 *
 * igb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init igb_init_module(void)
{
	int ret;
	printk(KERN_INFO "%s - version %s\n",
	       igb_driver_string, igb_driver_version);

	printk(KERN_INFO "%s\n", igb_copyright);

	global_quad_port_a = 0;

	ret = pci_register_driver(&igb_driver);
#ifdef CONFIG_DCA
	dca_register_notify(&dca_notifier);
#endif
	return ret;
}

module_init(igb_init_module);

/**
 * igb_exit_module - Driver Exit Cleanup Routine
 *
 * igb_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit igb_exit_module(void)
{
#ifdef CONFIG_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&igb_driver);
}

module_exit(igb_exit_module);

/**
 * igb_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int igb_alloc_queues(struct igb_adapter *adapter)
{
	int i;

	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
				   sizeof(struct igb_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		return -ENOMEM;

	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
				   sizeof(struct igb_ring), GFP_KERNEL);
	if (!adapter->rx_ring) {
		kfree(adapter->tx_ring);
		return -ENOMEM;
	}

	adapter->rx_ring->buddy = adapter->tx_ring;

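	/* (The buddy pointer appears to pair the default Rx ring with the
	 * Tx ring so shared interrupt moderation can account for both
	 * directions; igb_configure_msix clears it again per Rx ring once
	 * each queue owns its own MSI-X vector.) */
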
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *ring = &(adapter->tx_ring[i]);
		ring->adapter = adapter;
		ring->queue_index = i;
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = &(adapter->rx_ring[i]);
		ring->adapter = adapter;
		ring->queue_index = i;
		ring->itr_register = E1000_ITR;

		/* set a default napi handler for each rx_ring */
		netif_napi_add(adapter->netdev, &ring->napi, igb_poll, 64);
	}
	return 0;
}

static void igb_free_queues(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		netif_napi_del(&adapter->rx_ring[i].napi);

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
}

#define IGB_N0_QUEUE -1
static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue,
			      int tx_queue, int msix_vector)
{
	u32 msixbm = 0;
	struct e1000_hw *hw = &adapter->hw;
	u32 ivar, index;

	switch (hw->mac.type) {
	case e1000_82575:
		/* The 82575 assigns vectors using a bitmask, which matches the
		   bitmask for the EICR/EIMS/EIMC registers.  To assign one
		   or more queues to a vector, we write the appropriate bits
		   into the MSIXBM register for that vector. */
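		/* (For example, assigning Rx queue 1 to a vector sets the
		   bit E1000_EICR_RX_QUEUE0 << 1 in that vector's MSIXBM
		   entry -- the same bit the queue raises in EICR.) */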
		if (rx_queue > IGB_N0_QUEUE) {
			msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
			adapter->rx_ring[rx_queue].eims_value = msixbm;
		}
		if (tx_queue > IGB_N0_QUEUE) {
			msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
			adapter->tx_ring[tx_queue].eims_value =
				  E1000_EICR_TX_QUEUE0 << tx_queue;
		}
		array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
		break;
	case e1000_82576:
		/* The 82576 uses a table-based method for assigning vectors.
		   Each queue has a single entry in the table to which we write
		   a vector number along with a "valid" bit.  Sadly, the layout
		   of the table is somewhat counterintuitive. */
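		/* (Illustrative layout, derived from the byte-lane choices
		   below -- each 32-bit IVAR0[n] entry packs four mappings:

		     byte 0: Rx queue n      byte 1: Tx queue n
		     byte 2: Rx queue n+8    byte 3: Tx queue n+8

		   where each byte holds the MSI-X vector number OR'd with
		   E1000_IVAR_VALID.) */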
		if (rx_queue > IGB_N0_QUEUE) {
			index = (rx_queue & 0x7);
			ivar = array_rd32(E1000_IVAR0, index);
			if (rx_queue < 8) {
				/* vector goes into low byte of register */
				ivar = ivar & 0xFFFFFF00;
				ivar |= msix_vector | E1000_IVAR_VALID;
			} else {
				/* vector goes into third byte of register */
				ivar = ivar & 0xFF00FFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
			}
			adapter->rx_ring[rx_queue].eims_value = 1 << msix_vector;
			array_wr32(E1000_IVAR0, index, ivar);
		}
		if (tx_queue > IGB_N0_QUEUE) {
			index = (tx_queue & 0x7);
			ivar = array_rd32(E1000_IVAR0, index);
			if (tx_queue < 8) {
				/* vector goes into second byte of register */
				ivar = ivar & 0xFFFF00FF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
			} else {
				/* vector goes into high byte of register */
				ivar = ivar & 0x00FFFFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
			}
			adapter->tx_ring[tx_queue].eims_value = 1 << msix_vector;
			array_wr32(E1000_IVAR0, index, ivar);
		}
		break;
	default:
		BUG();
		break;
	}
}

/**
 * igb_configure_msix - Configure MSI-X hardware
 *
 * igb_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void igb_configure_msix(struct igb_adapter *adapter)
{
	u32 tmp;
	int i, vector = 0;
	struct e1000_hw *hw = &adapter->hw;

	adapter->eims_enable_mask = 0;
	if (hw->mac.type == e1000_82576)
		/* Turn on MSI-X capability first, or our settings
		 * won't stick.  And it will take days to debug. */
		wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
				 E1000_GPIE_PBA | E1000_GPIE_EIAME |
				 E1000_GPIE_NSICR);

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *tx_ring = &adapter->tx_ring[i];
		igb_assign_vector(adapter, IGB_N0_QUEUE, i, vector++);
		adapter->eims_enable_mask |= tx_ring->eims_value;
		if (tx_ring->itr_val)
			writel(tx_ring->itr_val,
			       hw->hw_addr + tx_ring->itr_register);
		else
			writel(1, hw->hw_addr + tx_ring->itr_register);
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *rx_ring = &adapter->rx_ring[i];
		rx_ring->buddy = NULL;
		igb_assign_vector(adapter, i, IGB_N0_QUEUE, vector++);
		adapter->eims_enable_mask |= rx_ring->eims_value;
		if (rx_ring->itr_val)
			writel(rx_ring->itr_val,
			       hw->hw_addr + rx_ring->itr_register);
		else
			writel(1, hw->hw_addr + rx_ring->itr_register);
	}

	/* set vector for other causes, i.e. link changes */
	switch (hw->mac.type) {
	case e1000_82575:
		array_wr32(E1000_MSIXBM(0), vector++,
				      E1000_EIMS_OTHER);

		tmp = rd32(E1000_CTRL_EXT);
		/* enable MSI-X PBA support */
		tmp |= E1000_CTRL_EXT_PBA_CLR;

		/* Auto-Mask interrupts upon ICR read. */
		tmp |= E1000_CTRL_EXT_EIAME;
		tmp |= E1000_CTRL_EXT_IRCA;

		wr32(E1000_CTRL_EXT, tmp);
		adapter->eims_enable_mask |= E1000_EIMS_OTHER;
		adapter->eims_other = E1000_EIMS_OTHER;

		break;

	case e1000_82576:
		tmp = (vector++ | E1000_IVAR_VALID) << 8;
		wr32(E1000_IVAR_MISC, tmp);

		adapter->eims_enable_mask = (1 << (vector)) - 1;
		adapter->eims_other = 1 << (vector - 1);
		break;
	default:
		/* do nothing, since nothing else supports MSI-X */
		break;
	} /* switch (hw->mac.type) */
	wrfl();
}

/**
 * igb_request_msix - Initialize MSI-X interrupts
 *
 * igb_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int igb_request_msix(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i, err = 0, vector = 0;

	vector = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *ring = &(adapter->tx_ring[i]);
		sprintf(ring->name, "%s-tx%d", netdev->name, i);
		err = request_irq(adapter->msix_entries[vector].vector,
				  &igb_msix_tx, 0, ring->name,
				  &(adapter->tx_ring[i]));
		if (err)
			goto out;
		ring->itr_register = E1000_EITR(0) + (vector << 2);
		ring->itr_val = 976; /* ~4000 ints/sec */
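		/* (Assuming the hardware's 256 ns EITR tick: 976 * 256 ns
		 * is roughly 250 us between interrupts, i.e. about 4000
		 * interrupts per second, matching the note above.) */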
		vector++;
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = &(adapter->rx_ring[i]);
		if (strlen(netdev->name) < (IFNAMSIZ - 5))
			sprintf(ring->name, "%s-rx%d", netdev->name, i);
		else
			memcpy(ring->name, netdev->name, IFNAMSIZ);
		err = request_irq(adapter->msix_entries[vector].vector,
				  &igb_msix_rx, 0, ring->name,
				  &(adapter->rx_ring[i]));
		if (err)
			goto out;
		ring->itr_register = E1000_EITR(0) + (vector << 2);
		ring->itr_val = adapter->itr;
		/* overwrite the poll routine for MSIX, we've already done
		 * netif_napi_add */
		ring->napi.poll = &igb_clean_rx_ring_msix;
		vector++;
	}

	err = request_irq(adapter->msix_entries[vector].vector,
			  &igb_msix_other, 0, netdev->name, netdev);
	if (err)
		goto out;

	igb_configure_msix(adapter);
	return 0;
out:
	return err;
}

static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IGB_FLAG_HAS_MSI)
		pci_disable_msi(adapter->pdev);
	return;
}

/**
 * igb_set_interrupt_capability - set MSI or MSI-X if supported
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static void igb_set_interrupt_capability(struct igb_adapter *adapter)
{
	int err;
	int numvecs, i;

	numvecs = adapter->num_tx_queues + adapter->num_rx_queues + 1;
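	/* one vector per Tx queue and per Rx queue, plus one extra for
	 * "other" causes such as link-status changes */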
	adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
					GFP_KERNEL);
	if (!adapter->msix_entries)
		goto msi_only;

	for (i = 0; i < numvecs; i++)
		adapter->msix_entries[i].entry = i;

	err = pci_enable_msix(adapter->pdev,
			      adapter->msix_entries,
			      numvecs);
	if (err == 0)
		return;

	igb_reset_interrupt_capability(adapter);

	/* If we can't do MSI-X, try MSI */
msi_only:
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	if (!pci_enable_msi(adapter->pdev))
		adapter->flags |= IGB_FLAG_HAS_MSI;

	/* Notify the stack of the (possibly) reduced Tx Queue count. */
	adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
	return;
}

/**
 * igb_request_irq - initialize interrupts
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_request_irq(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	int err = 0;

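	/* try MSI-X first, fall back to MSI, and finally to a shared
	 * legacy (INTx) interrupt */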
	if (adapter->msix_entries) {
		err = igb_request_msix(adapter);
		if (!err)
			goto request_done;
		/* fall back to MSI */
		igb_reset_interrupt_capability(adapter);
		if (!pci_enable_msi(adapter->pdev))
			adapter->flags |= IGB_FLAG_HAS_MSI;
		igb_free_all_tx_resources(adapter);
		igb_free_all_rx_resources(adapter);
		adapter->num_rx_queues = 1;
		igb_alloc_queues(adapter);
	} else {
		switch (hw->mac.type) {
		case e1000_82575:
			wr32(E1000_MSIXBM(0),
			     (E1000_EICR_RX_QUEUE0 | E1000_EIMS_OTHER));
			break;
		case e1000_82576:
			wr32(E1000_IVAR0, E1000_IVAR_VALID);
			break;
		default:
			break;
		}
	}

	if (adapter->flags & IGB_FLAG_HAS_MSI) {
		err = request_irq(adapter->pdev->irq, &igb_intr_msi, 0,
				  netdev->name, netdev);
		if (!err)
			goto request_done;
		/* fall back to legacy interrupts */
		igb_reset_interrupt_capability(adapter);
		adapter->flags &= ~IGB_FLAG_HAS_MSI;
	}

	err = request_irq(adapter->pdev->irq, &igb_intr, IRQF_SHARED,
			  netdev->name, netdev);

	if (err)
		dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n",
			err);

request_done:
	return err;
}

static void igb_free_irq(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->msix_entries) {
		int vector = 0, i;

		for (i = 0; i < adapter->num_tx_queues; i++)
			free_irq(adapter->msix_entries[vector++].vector,
				&(adapter->tx_ring[i]));
		for (i = 0; i < adapter->num_rx_queues; i++)
			free_irq(adapter->msix_entries[vector++].vector,
				&(adapter->rx_ring[i]));

		free_irq(adapter->msix_entries[vector++].vector, netdev);
		return;
	}

	free_irq(adapter->pdev->irq, netdev);
}

/**
 * igb_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void igb_irq_disable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->msix_entries) {
		wr32(E1000_EIAM, 0);
		wr32(E1000_EIMC, ~0);
		wr32(E1000_EIAC, 0);
	}

	wr32(E1000_IAM, 0);
	wr32(E1000_IMC, ~0);
	wrfl();
	synchronize_irq(adapter->pdev->irq);
}

/**
 * igb_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void igb_irq_enable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->msix_entries) {
		wr32(E1000_EIAC, adapter->eims_enable_mask);
		wr32(E1000_EIAM, adapter->eims_enable_mask);
		wr32(E1000_EIMS, adapter->eims_enable_mask);
		wr32(E1000_IMS, E1000_IMS_LSC);
	} else {
		wr32(E1000_IMS, IMS_ENABLE_MASK);
		wr32(E1000_IAM, IMS_ENABLE_MASK);
	}
}

static void igb_update_mng_vlan(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u16 vid = adapter->hw.mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;
	if (adapter->vlgrp) {
		if (!vlan_group_get_device(adapter->vlgrp, vid)) {
			if (adapter->hw.mng_cookie.status &
				E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
				igb_vlan_rx_add_vid(netdev, vid);
				adapter->mng_vlan_id = vid;
			} else
				adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;

			if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
			    (vid != old_vid) &&
			    !vlan_group_get_device(adapter->vlgrp, old_vid))
				igb_vlan_rx_kill_vid(netdev, old_vid);
		} else
			adapter->mng_vlan_id = vid;
	}
}

/**
 * igb_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 *
 **/
static void igb_release_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
			ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
}


/**
 * igb_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 *
 **/
static void igb_get_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
			ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
}

static void igb_init_manageability(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->en_mng_pt) {
		u32 manc2h = rd32(E1000_MANC2H);
		u32 manc = rd32(E1000_MANC);

		/* enable receiving management packets to the host */
		/* this will probably generate destination unreachable messages
		 * from the host OS, but the packets will be handled on SMBUS */
		manc |= E1000_MANC_EN_MNG2HOST;
#define E1000_MNG2HOST_PORT_623 (1 << 5)
#define E1000_MNG2HOST_PORT_664 (1 << 6)
		manc2h |= E1000_MNG2HOST_PORT_623;
		manc2h |= E1000_MNG2HOST_PORT_664;
		wr32(E1000_MANC2H, manc2h);

		wr32(E1000_MANC, manc);
	}
}

/**
 * igb_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 **/
static void igb_configure(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	igb_get_hw_control(adapter);
	igb_set_multi(netdev);

	igb_restore_vlan(adapter);
	igb_init_manageability(adapter);

	igb_configure_tx(adapter);
	igb_setup_rctl(adapter);
	igb_configure_rx(adapter);

	igb_rx_fifo_flush_82575(&adapter->hw);

	/* call IGB_DESC_UNUSED which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean */
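	/* (if every descriptor were filled, next_to_use would catch up
	 * with next_to_clean and a completely full ring would be
	 * indistinguishable from a completely empty one) */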
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = &adapter->rx_ring[i];
		igb_alloc_rx_buffers_adv(ring, IGB_DESC_UNUSED(ring));
	}

	adapter->tx_queue_len = netdev->tx_queue_len;
}

/**
 * igb_up - Open the interface and prepare it to handle traffic
 * @adapter: board private structure
 **/
int igb_up(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	/* hardware has been reset, we need to reload some things */
	igb_configure(adapter);

	clear_bit(__IGB_DOWN, &adapter->state);

	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_enable(&adapter->rx_ring[i].napi);
	if (adapter->msix_entries)
		igb_configure_msix(adapter);

	/* Clear any pending interrupts. */
	rd32(E1000_ICR);
	igb_irq_enable(adapter);

	/* Fire a link change interrupt to start the watchdog. */
	wr32(E1000_ICS, E1000_ICS_LSC);
	return 0;
}

void igb_down(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 tctl, rctl;
	int i;

	/* signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer */
	set_bit(__IGB_DOWN, &adapter->state);

	/* disable receives in the hardware */
	rctl = rd32(E1000_RCTL);
	wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_tx_stop_all_queues(netdev);

	/* disable transmits in the hardware */
	tctl = rd32(E1000_TCTL);
	tctl &= ~E1000_TCTL_EN;
	wr32(E1000_TCTL, tctl);
	/* flush both disables and wait for them to finish */
	wrfl();
	msleep(10);

	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_disable(&adapter->rx_ring[i].napi);

	igb_irq_disable(adapter);

	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	netdev->tx_queue_len = adapter->tx_queue_len;
	netif_carrier_off(netdev);
	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	if (!pci_channel_offline(adapter->pdev))
		igb_reset(adapter);
	igb_clean_all_tx_rings(adapter);
	igb_clean_all_rx_rings(adapter);
}

void igb_reinit_locked(struct igb_adapter *adapter)
{
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		msleep(1);
	igb_down(adapter);
	igb_up(adapter);
	clear_bit(__IGB_RESETTING, &adapter->state);
}

void igb_reset(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_mac_info *mac = &hw->mac;
	struct e1000_fc_info *fc = &hw->fc;
	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
	u16 hwm;

	/* Repartition the PBA for an MTU greater than 9k.
	 * CTRL.RST is required for the change to take effect.
	 */
	if (mac->type != e1000_82576)
		pba = E1000_PBA_34K;
	else
		pba = E1000_PBA_64K;

	if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
	    (mac->type < e1000_82576)) {
		/* adjust PBA for jumbo frames */
		wr32(E1000_PBA, pba);

		/* To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB.  Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB. */
		pba = rd32(E1000_PBA);
		/* upper 16 bits has Tx packet buffer allocation size in KB */
		tx_space = pba >> 16;
		/* lower 16 bits has Rx packet buffer allocation size in KB */
		pba &= 0xffff;
		/* the Tx FIFO also stores 16 bytes of information about each
		 * Tx packet, but don't include the Ethernet FCS because
		 * hardware appends it */
		min_tx_space = (adapter->max_frame_size +
				sizeof(struct e1000_tx_desc) -
				ETH_FCS_LEN) * 2;
		min_tx_space = ALIGN(min_tx_space, 1024);
		min_tx_space >>= 10;
		/* software strips receive CRC, so leave room for it */
		min_rx_space = adapter->max_frame_size;
		min_rx_space = ALIGN(min_rx_space, 1024);
		min_rx_space >>= 10;

		/* If current Tx allocation is less than the min Tx FIFO size,
		 * and the min Tx FIFO size is less than the current Rx FIFO
		 * allocation, take space away from current Rx allocation */
		if (tx_space < min_tx_space &&
		    ((min_tx_space - tx_space) < pba)) {
			pba = pba - (min_tx_space - tx_space);

			/* if short on Rx space, Rx wins and must trump Tx
			 * adjustment */
			if (pba < min_rx_space)
				pba = min_rx_space;
		}
		wr32(E1000_PBA, pba);
	}

	/* flow control settings */
	/* The high water mark must be low enough to fit one full frame
	 * (or the size used for early receive) above it in the Rx FIFO.
	 * Set it to the lower of:
	 * - 90% of the Rx FIFO size, or
	 * - the full Rx FIFO size minus one full frame */
	hwm = min(((pba << 10) * 9 / 10),
		  ((pba << 10) - 2 * adapter->max_frame_size));

	if (mac->type < e1000_82576) {
		fc->high_water = hwm & 0xFFF8;	/* 8-byte granularity */
		fc->low_water = fc->high_water - 8;
	} else {
		fc->high_water = hwm & 0xFFF0;	/* 16-byte granularity */
		fc->low_water = fc->high_water - 16;
	}
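	/* (Illustrative numbers, assuming E1000_PBA_34K leaves a 34 KB Rx
	 * FIFO and a 1522-byte max frame: hwm = min(34816 * 9 / 10,
	 * 34816 - 2 * 1522) = 31334, so high_water is 31328 after 8-byte
	 * alignment and low_water is 31320.) */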
	fc->pause_time = 0xFFFF;
	fc->send_xon = 1;
	fc->type = fc->original_type;

	/* Allow time for pending master requests to run */
	adapter->hw.mac.ops.reset_hw(&adapter->hw);
	wr32(E1000_WUC, 0);

	if (adapter->hw.mac.ops.init_hw(&adapter->hw))
		dev_err(&adapter->pdev->dev, "Hardware Error\n");

	igb_update_mng_vlan(adapter);

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);

	igb_reset_adaptive(&adapter->hw);
	if (adapter->hw.phy.ops.get_phy_info)
		adapter->hw.phy.ops.get_phy_info(&adapter->hw);
}

/**
 * igb_is_need_ioport - determine whether an adapter needs ioport resources
 * @pdev: PCI device information struct
 *
 * Returns true if an adapter needs ioport resources
 **/
static int igb_is_need_ioport(struct pci_dev *pdev)
{
	switch (pdev->device) {
	/* Currently there are no adapters that need ioport resources */
	default:
		return false;
	}
}

/**
 * igb_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in igb_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * igb_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit igb_probe(struct pci_dev *pdev,
			       const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct igb_adapter *adapter;
	struct e1000_hw *hw;
	const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
	unsigned long mmio_start, mmio_len;
	int i, err, pci_using_dac;
	u16 eeprom_data = 0;
	u16 eeprom_apme_mask = IGB_EEPROM_APME;
	u32 part_num;
	int bars, need_ioport;

	/* do not allocate ioport bars when not needed */
	need_ioport = igb_is_need_ioport(pdev);
	if (need_ioport) {
		bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
		err = pci_enable_device(pdev);
	} else {
		bars = pci_select_bars(pdev, IORESOURCE_MEM);
		err = pci_enable_device_mem(pdev);
	}
	if (err)
		return err;

	pci_using_dac = 0;
	err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
	if (!err) {
		err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (!err)
			pci_using_dac = 1;
	} else {
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (err) {
				dev_err(&pdev->dev, "No usable DMA "
					"configuration, aborting\n");
				goto err_dma;
			}
		}
	}

	err = pci_request_selected_regions(pdev, bars, igb_driver_name);
	if (err)
		goto err_pci_reg;

	pci_set_master(pdev);
	pci_save_state(pdev);

	err = -ENOMEM;
	netdev = alloc_etherdev_mq(sizeof(struct igb_adapter), IGB_MAX_TX_QUEUES);
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE;
	adapter->bars = bars;
	adapter->need_ioport = need_ioport;

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);

	err = -EIO;
	adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
	if (!adapter->hw.hw_addr)
		goto err_ioremap;

	netdev->open = &igb_open;
	netdev->stop = &igb_close;
	netdev->get_stats = &igb_get_stats;
	netdev->set_multicast_list = &igb_set_multi;
	netdev->set_mac_address = &igb_set_mac;
	netdev->change_mtu = &igb_change_mtu;
	netdev->do_ioctl = &igb_ioctl;
	igb_set_ethtool_ops(netdev);
	netdev->tx_timeout = &igb_tx_timeout;
	netdev->watchdog_timeo = 5 * HZ;
	netdev->vlan_rx_register = igb_vlan_rx_register;
	netdev->vlan_rx_add_vid = igb_vlan_rx_add_vid;
	netdev->vlan_rx_kill_vid = igb_vlan_rx_kill_vid;
#ifdef CONFIG_NET_POLL_CONTROLLER
	netdev->poll_controller = igb_netpoll;
#endif
	netdev->hard_start_xmit = &igb_xmit_frame_adv;

	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	netdev->mem_start = mmio_start;
	netdev->mem_end = mmio_start + mmio_len;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	/* setup the private structure */
	hw->back = adapter;
	/* Copy the default MAC, PHY and NVM function pointers */
	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
	memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
	/* Initialize skew-specific constants */
	err = ei->get_invariants(hw);
	if (err)
		goto err_hw_init;

	err = igb_sw_init(adapter);
	if (err)
		goto err_sw_init;

	igb_get_bus_info_pcie(hw);

	/* set flags */
	switch (hw->mac.type) {
	case e1000_82576:
	case e1000_82575:
		adapter->flags |= IGB_FLAG_HAS_DCA;
		adapter->flags |= IGB_FLAG_NEED_CTX_IDX;
		break;
	default:
		break;
	}

	hw->phy.autoneg_wait_to_complete = false;
	hw->mac.adaptive_ifs = true;

	/* Copper options */
	if (hw->phy.media_type == e1000_media_type_copper) {
		hw->phy.mdix = AUTO_ALL_MODES;
		hw->phy.disable_polarity_correction = false;
		hw->phy.ms_type = e1000_ms_hw_default;
	}

	if (igb_check_reset_block(hw))
		dev_info(&pdev->dev,
			"PHY reset is blocked due to SOL/IDER session.\n");

	netdev->features = NETIF_F_SG |
			   NETIF_F_HW_CSUM |
			   NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_RX |
			   NETIF_F_HW_VLAN_FILTER;

	netdev->features |= NETIF_F_TSO;
	netdev->features |= NETIF_F_TSO6;

#ifdef CONFIG_IGB_LRO
	netdev->features |= NETIF_F_LRO;
#endif

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_HW_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->features |= NETIF_F_LLTX;
	adapter->en_mng_pt = igb_enable_mng_pass_thru(&adapter->hw);

	/* before reading the NVM, reset the controller to put the device in a
	 * known good starting state */
	hw->mac.ops.reset_hw(hw);

	/* make sure the NVM is good */
	if (igb_validate_nvm_checksum(hw) < 0) {
		dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
		err = -EIO;
		goto err_eeprom;
	}

	/* copy the MAC address out of the NVM */
	if (hw->mac.ops.read_mac_addr(hw))
		dev_err(&pdev->dev, "NVM Read Error\n");

	memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
	memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr)) {
		dev_err(&pdev->dev, "Invalid MAC Address\n");
		err = -EIO;
		goto err_eeprom;
	}

	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = &igb_watchdog;
	adapter->watchdog_timer.data = (unsigned long) adapter;

	init_timer(&adapter->phy_info_timer);
	adapter->phy_info_timer.function = &igb_update_phy_info;
	adapter->phy_info_timer.data = (unsigned long) adapter;

	INIT_WORK(&adapter->reset_task, igb_reset_task);
	INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);

	/* Initialize link & ring properties that are user-changeable */
	adapter->tx_ring->count = 256;
	for (i = 0; i < adapter->num_tx_queues; i++)
		adapter->tx_ring[i].count = adapter->tx_ring->count;
	adapter->rx_ring->count = 256;
	for (i = 0; i < adapter->num_rx_queues; i++)
		adapter->rx_ring[i].count = adapter->rx_ring->count;

	adapter->fc_autoneg = true;
	hw->mac.autoneg = true;
	hw->phy.autoneg_advertised = 0x2f;

	hw->fc.original_type = e1000_fc_default;
	hw->fc.type = e1000_fc_default;

	adapter->itr_setting = 3;
	adapter->itr = IGB_START_ITR;

	igb_validate_mdi_setting(hw);

	adapter->rx_csum = 1;

	/* Initial Wake on LAN setting.  If APM wake is enabled in the EEPROM,
	 * enable the ACPI Magic Packet filter
	 */

	if (hw->bus.func == 0 ||
	    hw->device_id == E1000_DEV_ID_82575EB_COPPER)
		hw->nvm.ops.read_nvm(hw, NVM_INIT_CONTROL3_PORT_A, 1,
				     &eeprom_data);

	if (eeprom_data & eeprom_apme_mask)
		adapter->eeprom_wol |= E1000_WUFC_MAG;

	/* now that we have the eeprom settings, apply the special cases where
	 * the eeprom may be wrong or the board simply won't support wake on
	 * lan on a particular port */
	switch (pdev->device) {
	case E1000_DEV_ID_82575GB_QUAD_COPPER:
		adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82575EB_FIBER_SERDES:
	case E1000_DEV_ID_82576_FIBER:
	case E1000_DEV_ID_82576_SERDES:
		/* Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting */
		if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
			adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82576_QUAD_COPPER:
		/* if quad port adapter, disable WoL on all but port A */
		if (global_quad_port_a != 0)
			adapter->eeprom_wol = 0;
		else
			adapter->flags |= IGB_FLAG_QUAD_PORT_A;
		/* Reset for multiple quad port adapters */
		if (++global_quad_port_a == 4)
			global_quad_port_a = 0;
		break;
	}

	/* initialize the wol settings based on the eeprom settings */
	adapter->wol = adapter->eeprom_wol;

	/* reset the hardware with the new settings */
	igb_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);

	/* tell the stack to leave us alone until igb_open() is called */
	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

#ifdef CONFIG_DCA
	if ((adapter->flags & IGB_FLAG_HAS_DCA) &&
	    (dca_add_requester(&pdev->dev) == 0)) {
		adapter->flags |= IGB_FLAG_DCA_ENABLED;
		dev_info(&pdev->dev, "DCA enabled\n");
		/* Always use CB2 mode, difference is masked
		 * in the CB driver. */
		wr32(E1000_DCA_CTRL, 2);
		igb_setup_dca(adapter);
	}
#endif

	dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
	/* print bus type/speed/width info */
	dev_info(&pdev->dev,
		 "%s: (PCIe:%s:%s) %02x:%02x:%02x:%02x:%02x:%02x\n",
		 netdev->name,
		 ((hw->bus.speed == e1000_bus_speed_2500)
		  ? "2.5Gb/s" : "unknown"),
		 ((hw->bus.width == e1000_bus_width_pcie_x4)
		  ? "Width x4" : (hw->bus.width == e1000_bus_width_pcie_x1)
		  ? "Width x1" : "unknown"),
		 netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
		 netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]);

	igb_read_part_num(hw, &part_num);
	dev_info(&pdev->dev, "%s: PBA No: %06x-%03x\n", netdev->name,
		 (part_num >> 8), (part_num & 0xff));

	dev_info(&pdev->dev,
		 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
		 adapter->msix_entries ? "MSI-X" :
		 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
		 adapter->num_rx_queues, adapter->num_tx_queues);

	return 0;

err_register:
	igb_release_hw_control(adapter);
err_eeprom:
	if (!igb_check_reset_block(hw))
		hw->phy.ops.reset_phy(hw);

	if (hw->flash_address)
		iounmap(hw->flash_address);

	igb_remove_device(hw);
	igb_free_queues(adapter);
err_sw_init:
err_hw_init:
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev, bars);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * igb_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * igb_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit igb_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
#ifdef CONFIG_DCA
	struct e1000_hw *hw = &adapter->hw;
#endif

	/* flush_scheduled_work() may reschedule our watchdog task, so
	 * explicitly disable watchdog tasks from being rescheduled */
	set_bit(__IGB_DOWN, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	flush_scheduled_work();

#ifdef CONFIG_DCA
	if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
		dev_info(&pdev->dev, "DCA disabled\n");
		dca_remove_requester(&pdev->dev);
		adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
		wr32(E1000_DCA_CTRL, 1);
	}
#endif

	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
	 * would have already happened in close and is redundant. */
	igb_release_hw_control(adapter);

	unregister_netdev(netdev);

	if (!igb_check_reset_block(&adapter->hw))
		adapter->hw.phy.ops.reset_phy(&adapter->hw);

	igb_remove_device(&adapter->hw);
	igb_reset_interrupt_capability(adapter);

	igb_free_queues(adapter);

	iounmap(adapter->hw.hw_addr);
	if (adapter->hw.flash_address)
		iounmap(adapter->hw.flash_address);
	pci_release_selected_regions(pdev, adapter->bars);

	free_netdev(netdev);

	pci_disable_device(pdev);
}

/**
 * igb_sw_init - Initialize general software structures (struct igb_adapter)
 * @adapter: board private structure to initialize
 *
 * igb_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int __devinit igb_sw_init(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);

	adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
	adapter->rx_ps_hdr_size = 0; /* disable packet split */
	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;

	/* Number of supported queues. */
	/* Having more queues than CPUs doesn't make sense. */
	adapter->num_rx_queues = min((u32)IGB_MAX_RX_QUEUES, (u32)num_online_cpus());
	adapter->num_tx_queues = min(IGB_MAX_TX_QUEUES, num_online_cpus());

	/* This call may decrease the number of queues depending on
	 * interrupt mode. */
	igb_set_interrupt_capability(adapter);

	if (igb_alloc_queues(adapter)) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	/* Explicitly disable IRQ since the NIC can be in any state. */
	igb_irq_disable(adapter);

	set_bit(__IGB_DOWN, &adapter->state);
	return 0;
}

/**
 * igb_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int igb_open(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;
	int i;

	/* disallow open during test */
	if (test_bit(__IGB_TESTING, &adapter->state))
		return -EBUSY;

	/* allocate transmit descriptors */
	err = igb_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = igb_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	/* e1000_power_up_phy(adapter); */

	adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
		igb_update_mng_vlan(adapter);

	/* before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call pci_request_irq, so we have to setup our
	 * clean_rx handler before we do so. */
	igb_configure(adapter);

	err = igb_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/* From here on the code is the same as igb_up() */
	clear_bit(__IGB_DOWN, &adapter->state);

	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_enable(&adapter->rx_ring[i].napi);

	/* Clear any pending interrupts. */
	rd32(E1000_ICR);

	igb_irq_enable(adapter);

	netif_tx_start_all_queues(netdev);

	/* Fire a link status change interrupt to start the watchdog. */
	wr32(E1000_ICS, E1000_ICS_LSC);

	return 0;

err_req_irq:
	igb_release_hw_control(adapter);
	/* e1000_power_down_phy(adapter); */
	igb_free_all_rx_resources(adapter);
err_setup_rx:
	igb_free_all_tx_resources(adapter);
err_setup_tx:
	igb_reset(adapter);

	return err;
}

/**
 * igb_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int igb_close(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
	igb_down(adapter);

	igb_free_irq(adapter);

	igb_free_all_tx_resources(adapter);
	igb_free_all_rx_resources(adapter);

	/* kill manageability vlan ID if supported, but not if a vlan with
	 * the same ID is registered on the host OS (let 8021q kill it) */
	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
	    !(adapter->vlgrp &&
	      vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id)))
		igb_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);

	return 0;
}

/**
 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int igb_setup_tx_resources(struct igb_adapter *adapter,
			   struct igb_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct igb_buffer) * tx_ring->count;
	tx_ring->buffer_info = vmalloc(size);
	if (!tx_ring->buffer_info)
		goto err;
	memset(tx_ring->buffer_info, 0, size);

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc)
			+ sizeof(u32);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
					     &tx_ring->dma);

	if (!tx_ring->desc)
		goto err;

	tx_ring->adapter = adapter;
	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	return 0;

err:
	vfree(tx_ring->buffer_info);
	dev_err(&adapter->pdev->dev,
		"Unable to allocate memory for the transmit descriptor ring\n");
	return -ENOMEM;
}

/**
 * igb_setup_all_tx_resources - wrapper to allocate Tx resources
 *				(Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
{
	int i, err = 0;
	int r_idx;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = igb_setup_tx_resources(adapter, &adapter->tx_ring[i]);
		if (err) {
			dev_err(&adapter->pdev->dev,
				"Allocation for Tx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
				igb_free_tx_resources(&adapter->tx_ring[i]);
			break;
		}
	}

	for (i = 0; i < IGB_MAX_TX_QUEUES; i++) {
		r_idx = i % adapter->num_tx_queues;
		adapter->multi_tx_table[i] = &adapter->tx_ring[r_idx];
	}
	return err;
}

/**
 * igb_configure_tx - Configure transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void igb_configure_tx(struct igb_adapter *adapter)
{
	u64 tdba, tdwba;
	struct e1000_hw *hw = &adapter->hw;
	u32 tctl;
	u32 txdctl, txctrl;
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *ring = &(adapter->tx_ring[i]);

		wr32(E1000_TDLEN(i),
				ring->count * sizeof(struct e1000_tx_desc));
		tdba = ring->dma;
		wr32(E1000_TDBAL(i),
				tdba & 0x00000000ffffffffULL);
		wr32(E1000_TDBAH(i), tdba >> 32);

		tdwba = ring->dma + ring->count * sizeof(struct e1000_tx_desc);
		tdwba |= 1; /* enable head wb */
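		/* (The write-back target is the spare u32 allocated just
		 * past the descriptors in igb_setup_tx_resources -- same
		 * offset as computed above -- and setting bit 0 of the
		 * address enables the head write-back itself.) */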
		wr32(E1000_TDWBAL(i),
				tdwba & 0x00000000ffffffffULL);
		wr32(E1000_TDWBAH(i), tdwba >> 32);

		ring->head = E1000_TDH(i);
		ring->tail = E1000_TDT(i);
		writel(0, hw->hw_addr + ring->tail);
		writel(0, hw->hw_addr + ring->head);
		txdctl = rd32(E1000_TXDCTL(i));
		txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
		wr32(E1000_TXDCTL(i), txdctl);

		/* Turn off Relaxed Ordering on head write-backs.  The
		 * writebacks MUST be delivered in order or it will
		 * completely screw up our bookkeeping.
		 */
		txctrl = rd32(E1000_DCA_TXCTRL(i));
		txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN;
		wr32(E1000_DCA_TXCTRL(i), txctrl);
	}

	/* Use the default values for the Tx Inter Packet Gap (IPG) timer */

	/* Program the Transmit Control Register */

	tctl = rd32(E1000_TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);

	igb_config_collision_dist(hw);

	/* Setup Transmit Descriptor Settings for eop descriptor */
	adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS;

	/* Enable transmits */
	tctl |= E1000_TCTL_EN;

	wr32(E1000_TCTL, tctl);
}

/**
 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int igb_setup_rx_resources(struct igb_adapter *adapter,
			   struct igb_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size, desc_len;

#ifdef CONFIG_IGB_LRO
	size = sizeof(struct net_lro_desc) * MAX_LRO_DESCRIPTORS;
	rx_ring->lro_mgr.lro_arr = vmalloc(size);
	if (!rx_ring->lro_mgr.lro_arr)
		goto err;
	memset(rx_ring->lro_mgr.lro_arr, 0, size);
#endif

	size = sizeof(struct igb_buffer) * rx_ring->count;
	rx_ring->buffer_info = vmalloc(size);
	if (!rx_ring->buffer_info)
		goto err;
	memset(rx_ring->buffer_info, 0, size);

	desc_len = sizeof(union e1000_adv_rx_desc);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * desc_len;
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
					     &rx_ring->dma);

	if (!rx_ring->desc)
		goto err;

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	rx_ring->adapter = adapter;

	return 0;

err:
#ifdef CONFIG_IGB_LRO
	vfree(rx_ring->lro_mgr.lro_arr);
	rx_ring->lro_mgr.lro_arr = NULL;
#endif
	vfree(rx_ring->buffer_info);
	dev_err(&adapter->pdev->dev, "Unable to allocate memory for "
		"the receive descriptor ring\n");
	return -ENOMEM;
}

1749/**
1750 * igb_setup_all_rx_resources - wrapper to allocate Rx resources
1751 * (Descriptors) for all queues
1752 * @adapter: board private structure
1753 *
 1754 * Returns 0 on success, negative on failure
1755 **/
1756static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
1757{
1758 int i, err = 0;
1759
1760 for (i = 0; i < adapter->num_rx_queues; i++) {
1761 err = igb_setup_rx_resources(adapter, &adapter->rx_ring[i]);
1762 if (err) {
1763 dev_err(&adapter->pdev->dev,
1764 "Allocation for Rx Queue %u failed\n", i);
1765 for (i--; i >= 0; i--)
 1766 igb_free_rx_resources(&adapter->rx_ring[i]);
1767 break;
1768 }
1769 }
1770
1771 return err;
1772}
1773
1774/**
1775 * igb_setup_rctl - configure the receive control registers
 1776 * @adapter: board private structure
1777 **/
1778static void igb_setup_rctl(struct igb_adapter *adapter)
1779{
1780 struct e1000_hw *hw = &adapter->hw;
1781 u32 rctl;
1782 u32 srrctl = 0;
1783 int i;
1784
1785 rctl = rd32(E1000_RCTL);
1786
1787 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
1788
1789 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
1790 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
1791 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
1792
 1793 /*
 1794 * enable stripping of CRC. It's unlikely this will break BMC
 1795 * redirection as it did with e1000. Newer features require
 1796 * that the HW strips the CRC.
 1797 */
 1798 rctl |= E1000_RCTL_SECRC;
1799
1800 rctl &= ~E1000_RCTL_SBP;
1801
1802 if (adapter->netdev->mtu <= ETH_DATA_LEN)
1803 rctl &= ~E1000_RCTL_LPE;
1804 else
1805 rctl |= E1000_RCTL_LPE;
1806 if (adapter->rx_buffer_len <= IGB_RXBUFFER_2048) {
1807 /* Setup buffer sizes */
1808 rctl &= ~E1000_RCTL_SZ_4096;
1809 rctl |= E1000_RCTL_BSEX;
1810 switch (adapter->rx_buffer_len) {
1811 case IGB_RXBUFFER_256:
1812 rctl |= E1000_RCTL_SZ_256;
1813 rctl &= ~E1000_RCTL_BSEX;
1814 break;
1815 case IGB_RXBUFFER_512:
1816 rctl |= E1000_RCTL_SZ_512;
1817 rctl &= ~E1000_RCTL_BSEX;
1818 break;
1819 case IGB_RXBUFFER_1024:
1820 rctl |= E1000_RCTL_SZ_1024;
1821 rctl &= ~E1000_RCTL_BSEX;
1822 break;
1823 case IGB_RXBUFFER_2048:
1824 default:
1825 rctl |= E1000_RCTL_SZ_2048;
1826 rctl &= ~E1000_RCTL_BSEX;
1827 break;
1828 }
1829 } else {
1830 rctl &= ~E1000_RCTL_BSEX;
1831 srrctl = adapter->rx_buffer_len >> E1000_SRRCTL_BSIZEPKT_SHIFT;
1832 }
1833
1834 /* 82575 and greater support packet-split where the protocol
1835 * header is placed in skb->data and the packet data is
1836 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
1837 * In the case of a non-split, skb->data is linearly filled,
1838 * followed by the page buffers. Therefore, skb->data is
1839 * sized to hold the largest protocol header.
1840 */
1841 /* allocations using alloc_page take too long for regular MTU
1842 * so only enable packet split for jumbo frames */
1843 if (rctl & E1000_RCTL_LPE) {
1844 adapter->rx_ps_hdr_size = IGB_RXBUFFER_128;
 1845 srrctl |= adapter->rx_ps_hdr_size <<
 1846 E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
1847 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
1848 } else {
1849 adapter->rx_ps_hdr_size = 0;
1850 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
1851 }
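/* Sketch of the SRRCTL encoding above, assuming the usual field units
 * (64-byte units for the header-size field, 1 KB units for BSIZEPKT):
 * a 128-byte rx_ps_hdr buffer is shifted into the header-size field,
 * and a 2048-byte non-split buffer encodes as 2048 >> 10 = 2.
 */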
1852
1853 for (i = 0; i < adapter->num_rx_queues; i++)
1854 wr32(E1000_SRRCTL(i), srrctl);
1855
1856 wr32(E1000_RCTL, rctl);
1857}
1858
1859/**
1860 * igb_configure_rx - Configure receive Unit after Reset
1861 * @adapter: board private structure
1862 *
1863 * Configure the Rx unit of the MAC after a reset.
1864 **/
1865static void igb_configure_rx(struct igb_adapter *adapter)
1866{
1867 u64 rdba;
1868 struct e1000_hw *hw = &adapter->hw;
1869 u32 rctl, rxcsum;
1870 u32 rxdctl;
1871 int i;
1872
1873 /* disable receives while setting up the descriptors */
1874 rctl = rd32(E1000_RCTL);
1875 wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
1876 wrfl();
1877 mdelay(10);
1878
1879 if (adapter->itr_setting > 3)
 1880 wr32(E1000_ITR, adapter->itr);
1881
1882 /* Setup the HW Rx Head and Tail Descriptor Pointers and
1883 * the Base and Length of the Rx Descriptor Ring */
1884 for (i = 0; i < adapter->num_rx_queues; i++) {
1885 struct igb_ring *ring = &(adapter->rx_ring[i]);
1886 rdba = ring->dma;
1887 wr32(E1000_RDBAL(i),
1888 rdba & 0x00000000ffffffffULL);
1889 wr32(E1000_RDBAH(i), rdba >> 32);
1890 wr32(E1000_RDLEN(i),
1891 ring->count * sizeof(union e1000_adv_rx_desc));
1892
1893 ring->head = E1000_RDH(i);
1894 ring->tail = E1000_RDT(i);
1895 writel(0, hw->hw_addr + ring->tail);
1896 writel(0, hw->hw_addr + ring->head);
1897
1898 rxdctl = rd32(E1000_RXDCTL(i));
1899 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
1900 rxdctl &= 0xFFF00000;
1901 rxdctl |= IGB_RX_PTHRESH;
1902 rxdctl |= IGB_RX_HTHRESH << 8;
1903 rxdctl |= IGB_RX_WTHRESH << 16;
1904 wr32(E1000_RXDCTL(i), rxdctl);
1905#ifdef CONFIG_IGB_LRO
 1906 /* Initial LRO Settings */
1907 ring->lro_mgr.max_aggr = MAX_LRO_AGGR;
1908 ring->lro_mgr.max_desc = MAX_LRO_DESCRIPTORS;
1909 ring->lro_mgr.get_skb_header = igb_get_skb_hdr;
1910 ring->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
1911 ring->lro_mgr.dev = adapter->netdev;
1912 ring->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
1913 ring->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
1914#endif
1915 }
1916
1917 if (adapter->num_rx_queues > 1) {
1918 u32 random[10];
1919 u32 mrqc;
1920 u32 j, shift;
1921 union e1000_reta {
1922 u32 dword;
1923 u8 bytes[4];
1924 } reta;
1925
1926 get_random_bytes(&random[0], 40);
1927
 1928 if (hw->mac.type >= e1000_82576)
 1929 shift = 0;
 1930 else
 1931 shift = 6;
1932 for (j = 0; j < (32 * 4); j++) {
1933 reta.bytes[j & 3] =
1934 (j % adapter->num_rx_queues) << shift;
1935 if ((j & 3) == 3)
1936 writel(reta.dword,
1937 hw->hw_addr + E1000_RETA(0) + (j & ~3));
1938 }
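/* Worked example of the RETA fill above: with two rx queues,
 * j % num_rx_queues alternates 0,1,0,1,...  On 82576 (shift = 0) the
 * redirection bytes are 0x00,0x01,...; on 82575 (shift = 6) they become
 * 0x00,0x40,...  Every fourth byte completes a dword, which is then
 * written out to the RETA register array.
 */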
1939 mrqc = E1000_MRQC_ENABLE_RSS_4Q;
1940
1941 /* Fill out hash function seeds */
1942 for (j = 0; j < 10; j++)
1943 array_wr32(E1000_RSSRK(0), j, random[j]);
1944
1945 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
1946 E1000_MRQC_RSS_FIELD_IPV4_TCP);
1947 mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 |
1948 E1000_MRQC_RSS_FIELD_IPV6_TCP);
1949 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4_UDP |
1950 E1000_MRQC_RSS_FIELD_IPV6_UDP);
1951 mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
1952 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
1953
1954
1955 wr32(E1000_MRQC, mrqc);
1956
1957 /* Multiqueue and raw packet checksumming are mutually
 1958 * exclusive. Note that this is not the same as TCP/IP
1959 * checksumming, which works fine. */
1960 rxcsum = rd32(E1000_RXCSUM);
1961 rxcsum |= E1000_RXCSUM_PCSD;
1962 wr32(E1000_RXCSUM, rxcsum);
1963 } else {
1964 /* Enable Receive Checksum Offload for TCP and UDP */
1965 rxcsum = rd32(E1000_RXCSUM);
1966 if (adapter->rx_csum) {
1967 rxcsum |= E1000_RXCSUM_TUOFL;
1968
1969 /* Enable IPv4 payload checksum for UDP fragments
1970 * Must be used in conjunction with packet-split. */
1971 if (adapter->rx_ps_hdr_size)
1972 rxcsum |= E1000_RXCSUM_IPPCSE;
1973 } else {
1974 rxcsum &= ~E1000_RXCSUM_TUOFL;
1975 /* don't need to clear IPPCSE as it defaults to 0 */
1976 }
1977 wr32(E1000_RXCSUM, rxcsum);
1978 }
1979
1980 if (adapter->vlgrp)
1981 wr32(E1000_RLPML,
1982 adapter->max_frame_size + VLAN_TAG_SIZE);
1983 else
1984 wr32(E1000_RLPML, adapter->max_frame_size);
1985
1986 /* Enable Receives */
1987 wr32(E1000_RCTL, rctl);
1988}
1989
1990/**
1991 * igb_free_tx_resources - Free Tx Resources per Queue
 1992 * @tx_ring: Tx descriptor ring for a specific queue
1994 *
1995 * Free all transmit software resources
1996 **/
 1997static void igb_free_tx_resources(struct igb_ring *tx_ring)
 1998{
 1999 struct pci_dev *pdev = tx_ring->adapter->pdev;
 2000
 2001 igb_clean_tx_ring(tx_ring);
2002
2003 vfree(tx_ring->buffer_info);
2004 tx_ring->buffer_info = NULL;
2005
2006 pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
2007
2008 tx_ring->desc = NULL;
2009}
2010
2011/**
2012 * igb_free_all_tx_resources - Free Tx Resources for All Queues
2013 * @adapter: board private structure
2014 *
2015 * Free all transmit software resources
2016 **/
2017static void igb_free_all_tx_resources(struct igb_adapter *adapter)
2018{
2019 int i;
2020
2021 for (i = 0; i < adapter->num_tx_queues; i++)
 2022 igb_free_tx_resources(&adapter->tx_ring[i]);
2023}
2024
2025static void igb_unmap_and_free_tx_resource(struct igb_adapter *adapter,
2026 struct igb_buffer *buffer_info)
2027{
2028 if (buffer_info->dma) {
2029 pci_unmap_page(adapter->pdev,
2030 buffer_info->dma,
2031 buffer_info->length,
2032 PCI_DMA_TODEVICE);
2033 buffer_info->dma = 0;
2034 }
2035 if (buffer_info->skb) {
2036 dev_kfree_skb_any(buffer_info->skb);
2037 buffer_info->skb = NULL;
2038 }
2039 buffer_info->time_stamp = 0;
2040 /* buffer_info must be completely set up in the transmit path */
2041}
2042
2043/**
2044 * igb_clean_tx_ring - Free Tx Buffers
 2045 * @tx_ring: ring to be cleaned
2047 **/
 2048static void igb_clean_tx_ring(struct igb_ring *tx_ring)
 2049{
 2050 struct igb_adapter *adapter = tx_ring->adapter;
2051 struct igb_buffer *buffer_info;
2052 unsigned long size;
2053 unsigned int i;
2054
2055 if (!tx_ring->buffer_info)
2056 return;
2057 /* Free all the Tx ring sk_buffs */
2058
2059 for (i = 0; i < tx_ring->count; i++) {
2060 buffer_info = &tx_ring->buffer_info[i];
2061 igb_unmap_and_free_tx_resource(adapter, buffer_info);
2062 }
2063
2064 size = sizeof(struct igb_buffer) * tx_ring->count;
2065 memset(tx_ring->buffer_info, 0, size);
2066
2067 /* Zero out the descriptor ring */
2068
2069 memset(tx_ring->desc, 0, tx_ring->size);
2070
2071 tx_ring->next_to_use = 0;
2072 tx_ring->next_to_clean = 0;
2073
2074 writel(0, adapter->hw.hw_addr + tx_ring->head);
2075 writel(0, adapter->hw.hw_addr + tx_ring->tail);
2076}
2077
2078/**
2079 * igb_clean_all_tx_rings - Free Tx Buffers for all queues
2080 * @adapter: board private structure
2081 **/
2082static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
2083{
2084 int i;
2085
2086 for (i = 0; i < adapter->num_tx_queues; i++)
 2087 igb_clean_tx_ring(&adapter->tx_ring[i]);
2088}
2089
2090/**
2091 * igb_free_rx_resources - Free Rx Resources
 2092 * @rx_ring: ring to clean the resources from
2094 *
2095 * Free all receive software resources
2096 **/
 2097static void igb_free_rx_resources(struct igb_ring *rx_ring)
 2098{
 2099 struct pci_dev *pdev = rx_ring->adapter->pdev;
 2100
 2101 igb_clean_rx_ring(rx_ring);
2102
2103 vfree(rx_ring->buffer_info);
2104 rx_ring->buffer_info = NULL;
2105
2106#ifdef CONFIG_IGB_LRO
2107 vfree(rx_ring->lro_mgr.lro_arr);
2108 rx_ring->lro_mgr.lro_arr = NULL;
2109#endif
2110
2111 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
2112
2113 rx_ring->desc = NULL;
2114}
2115
2116/**
2117 * igb_free_all_rx_resources - Free Rx Resources for All Queues
2118 * @adapter: board private structure
2119 *
2120 * Free all receive software resources
2121 **/
2122static void igb_free_all_rx_resources(struct igb_adapter *adapter)
2123{
2124 int i;
2125
2126 for (i = 0; i < adapter->num_rx_queues; i++)
 2127 igb_free_rx_resources(&adapter->rx_ring[i]);
2128}
2129
2130/**
2131 * igb_clean_rx_ring - Free Rx Buffers per Queue
 2132 * @rx_ring: ring to free buffers from
2134 **/
 2135static void igb_clean_rx_ring(struct igb_ring *rx_ring)
 2136{
 2137 struct igb_adapter *adapter = rx_ring->adapter;
2138 struct igb_buffer *buffer_info;
2139 struct pci_dev *pdev = adapter->pdev;
2140 unsigned long size;
2141 unsigned int i;
2142
2143 if (!rx_ring->buffer_info)
2144 return;
2145 /* Free all the Rx ring sk_buffs */
2146 for (i = 0; i < rx_ring->count; i++) {
2147 buffer_info = &rx_ring->buffer_info[i];
2148 if (buffer_info->dma) {
2149 if (adapter->rx_ps_hdr_size)
2150 pci_unmap_single(pdev, buffer_info->dma,
2151 adapter->rx_ps_hdr_size,
2152 PCI_DMA_FROMDEVICE);
2153 else
2154 pci_unmap_single(pdev, buffer_info->dma,
2155 adapter->rx_buffer_len,
2156 PCI_DMA_FROMDEVICE);
2157 buffer_info->dma = 0;
2158 }
2159
2160 if (buffer_info->skb) {
2161 dev_kfree_skb(buffer_info->skb);
2162 buffer_info->skb = NULL;
2163 }
2164 if (buffer_info->page) {
2165 if (buffer_info->page_dma)
2166 pci_unmap_page(pdev, buffer_info->page_dma,
2167 PAGE_SIZE / 2,
2168 PCI_DMA_FROMDEVICE);
2169 put_page(buffer_info->page);
2170 buffer_info->page = NULL;
2171 buffer_info->page_dma = 0;
 2172 buffer_info->page_offset = 0;
2173 }
2174 }
2175
2176 size = sizeof(struct igb_buffer) * rx_ring->count;
2177 memset(rx_ring->buffer_info, 0, size);
2178
2179 /* Zero out the descriptor ring */
2180 memset(rx_ring->desc, 0, rx_ring->size);
2181
2182 rx_ring->next_to_clean = 0;
2183 rx_ring->next_to_use = 0;
2184
2185 writel(0, adapter->hw.hw_addr + rx_ring->head);
2186 writel(0, adapter->hw.hw_addr + rx_ring->tail);
2187}
2188
2189/**
2190 * igb_clean_all_rx_rings - Free Rx Buffers for all queues
2191 * @adapter: board private structure
2192 **/
2193static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
2194{
2195 int i;
2196
2197 for (i = 0; i < adapter->num_rx_queues; i++)
 2198 igb_clean_rx_ring(&adapter->rx_ring[i]);
2199}
2200
2201/**
2202 * igb_set_mac - Change the Ethernet Address of the NIC
2203 * @netdev: network interface device structure
2204 * @p: pointer to an address structure
2205 *
2206 * Returns 0 on success, negative on failure
2207 **/
2208static int igb_set_mac(struct net_device *netdev, void *p)
2209{
2210 struct igb_adapter *adapter = netdev_priv(netdev);
2211 struct sockaddr *addr = p;
2212
2213 if (!is_valid_ether_addr(addr->sa_data))
2214 return -EADDRNOTAVAIL;
2215
2216 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2217 memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);
2218
2219 adapter->hw.mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
2220
2221 return 0;
2222}
2223
2224/**
2225 * igb_set_multi - Multicast and Promiscuous mode set
2226 * @netdev: network interface device structure
2227 *
2228 * The set_multi entry point is called whenever the multicast address
2229 * list or the network interface flags are updated. This routine is
2230 * responsible for configuring the hardware for proper multicast,
2231 * promiscuous mode, and all-multi behavior.
2232 **/
2233static void igb_set_multi(struct net_device *netdev)
2234{
2235 struct igb_adapter *adapter = netdev_priv(netdev);
2236 struct e1000_hw *hw = &adapter->hw;
2237 struct e1000_mac_info *mac = &hw->mac;
2238 struct dev_mc_list *mc_ptr;
2239 u8 *mta_list;
2240 u32 rctl;
2241 int i;
2242
2243 /* Check for Promiscuous and All Multicast modes */
2244
2245 rctl = rd32(E1000_RCTL);
2246
 2247 if (netdev->flags & IFF_PROMISC) {
 2248 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
 2249 rctl &= ~E1000_RCTL_VFE;
 2250 } else {
 2251 if (netdev->flags & IFF_ALLMULTI) {
 2252 rctl |= E1000_RCTL_MPE;
 2253 rctl &= ~E1000_RCTL_UPE;
 2254 } else
 2255 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
 2256 rctl |= E1000_RCTL_VFE;
 2257 }
2258 wr32(E1000_RCTL, rctl);
2259
2260 if (!netdev->mc_count) {
2261 /* nothing to program, so clear mc list */
 2262 igb_update_mc_addr_list_82575(hw, NULL, 0, 1,
2263 mac->rar_entry_count);
2264 return;
2265 }
2266
 2267 mta_list = kzalloc(netdev->mc_count * ETH_ALEN, GFP_ATOMIC);
2268 if (!mta_list)
2269 return;
2270
2271 /* The shared function expects a packed array of only addresses. */
2272 mc_ptr = netdev->mc_list;
2273
2274 for (i = 0; i < netdev->mc_count; i++) {
2275 if (!mc_ptr)
2276 break;
2277 memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
2278 mc_ptr = mc_ptr->next;
2279 }
 2280 igb_update_mc_addr_list_82575(hw, mta_list, i, 1,
 2281 mac->rar_entry_count);
2282 kfree(mta_list);
2283}
2284
2285/* Need to wait a few seconds after link up to get diagnostic information from
2286 * the phy */
2287static void igb_update_phy_info(unsigned long data)
2288{
2289 struct igb_adapter *adapter = (struct igb_adapter *) data;
 2290 if (adapter->hw.phy.ops.get_phy_info)
 2291 adapter->hw.phy.ops.get_phy_info(&adapter->hw);
2292}
2293
2294/**
2295 * igb_watchdog - Timer Call-back
2296 * @data: pointer to adapter cast into an unsigned long
2297 **/
2298static void igb_watchdog(unsigned long data)
2299{
2300 struct igb_adapter *adapter = (struct igb_adapter *)data;
2301 /* Do the rest outside of interrupt context */
2302 schedule_work(&adapter->watchdog_task);
2303}
2304
2305static void igb_watchdog_task(struct work_struct *work)
2306{
2307 struct igb_adapter *adapter = container_of(work,
2308 struct igb_adapter, watchdog_task);
2309 struct e1000_hw *hw = &adapter->hw;
2310
2311 struct net_device *netdev = adapter->netdev;
2312 struct igb_ring *tx_ring = adapter->tx_ring;
2313 struct e1000_mac_info *mac = &adapter->hw.mac;
2314 u32 link;
2315 s32 ret_val;
2316
2317 if ((netif_carrier_ok(netdev)) &&
2318 (rd32(E1000_STATUS) & E1000_STATUS_LU))
2319 goto link_up;
2320
2321 ret_val = hw->mac.ops.check_for_link(&adapter->hw);
2322 if ((ret_val == E1000_ERR_PHY) &&
2323 (hw->phy.type == e1000_phy_igp_3) &&
2324 (rd32(E1000_CTRL) &
2325 E1000_PHY_CTRL_GBE_DISABLE))
2326 dev_info(&adapter->pdev->dev,
2327 "Gigabit has been disabled, downgrading speed\n");
2328
2329 if ((hw->phy.media_type == e1000_media_type_internal_serdes) &&
2330 !(rd32(E1000_TXCW) & E1000_TXCW_ANE))
2331 link = mac->serdes_has_link;
2332 else
2333 link = rd32(E1000_STATUS) &
2334 E1000_STATUS_LU;
2335
2336 if (link) {
2337 if (!netif_carrier_ok(netdev)) {
2338 u32 ctrl;
2339 hw->mac.ops.get_speed_and_duplex(&adapter->hw,
2340 &adapter->link_speed,
2341 &adapter->link_duplex);
2342
2343 ctrl = rd32(E1000_CTRL);
2344 dev_info(&adapter->pdev->dev,
2345 "NIC Link is Up %d Mbps %s, "
2346 "Flow Control: %s\n",
2347 adapter->link_speed,
2348 adapter->link_duplex == FULL_DUPLEX ?
2349 "Full Duplex" : "Half Duplex",
2350 ((ctrl & E1000_CTRL_TFCE) && (ctrl &
2351 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
2352 E1000_CTRL_RFCE) ? "RX" : ((ctrl &
2353 E1000_CTRL_TFCE) ? "TX" : "None")));
2354
2355 /* tweak tx_queue_len according to speed/duplex and
2356 * adjust the timeout factor */
2357 netdev->tx_queue_len = adapter->tx_queue_len;
2358 adapter->tx_timeout_factor = 1;
2359 switch (adapter->link_speed) {
2360 case SPEED_10:
2361 netdev->tx_queue_len = 10;
2362 adapter->tx_timeout_factor = 14;
2363 break;
2364 case SPEED_100:
2365 netdev->tx_queue_len = 100;
2366 /* maybe add some timeout factor ? */
2367 break;
2368 }
2369
2370 netif_carrier_on(netdev);
 2371 netif_tx_wake_all_queues(netdev);
2372
2373 if (!test_bit(__IGB_DOWN, &adapter->state))
2374 mod_timer(&adapter->phy_info_timer,
2375 round_jiffies(jiffies + 2 * HZ));
2376 }
2377 } else {
2378 if (netif_carrier_ok(netdev)) {
2379 adapter->link_speed = 0;
2380 adapter->link_duplex = 0;
2381 dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
2382 netif_carrier_off(netdev);
 2383 netif_tx_stop_all_queues(netdev);
2384 if (!test_bit(__IGB_DOWN, &adapter->state))
2385 mod_timer(&adapter->phy_info_timer,
2386 round_jiffies(jiffies + 2 * HZ));
2387 }
2388 }
2389
2390link_up:
2391 igb_update_stats(adapter);
2392
2393 mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
2394 adapter->tpt_old = adapter->stats.tpt;
2395 mac->collision_delta = adapter->stats.colc - adapter->colc_old;
2396 adapter->colc_old = adapter->stats.colc;
2397
2398 adapter->gorc = adapter->stats.gorc - adapter->gorc_old;
2399 adapter->gorc_old = adapter->stats.gorc;
2400 adapter->gotc = adapter->stats.gotc - adapter->gotc_old;
2401 adapter->gotc_old = adapter->stats.gotc;
2402
2403 igb_update_adaptive(&adapter->hw);
2404
2405 if (!netif_carrier_ok(netdev)) {
2406 if (IGB_DESC_UNUSED(tx_ring) + 1 < tx_ring->count) {
2407 /* We've lost link, so the controller stops DMA,
2408 * but we've got queued Tx work that's never going
2409 * to get done, so reset controller to flush Tx.
2410 * (Do the reset outside of interrupt context). */
2411 adapter->tx_timeout_count++;
2412 schedule_work(&adapter->reset_task);
2413 }
2414 }
2415
2416 /* Cause software interrupt to ensure rx ring is cleaned */
2417 wr32(E1000_ICS, E1000_ICS_RXDMT0);
2418
2419 /* Force detection of hung controller every watchdog period */
2420 tx_ring->detect_tx_hung = true;
2421
2422 /* Reset the timer */
2423 if (!test_bit(__IGB_DOWN, &adapter->state))
2424 mod_timer(&adapter->watchdog_timer,
2425 round_jiffies(jiffies + 2 * HZ));
2426}
2427
2428enum latency_range {
2429 lowest_latency = 0,
2430 low_latency = 1,
2431 bulk_latency = 2,
2432 latency_invalid = 255
2433};
2434
2435
2436/**
2437 * igb_update_ring_itr - update the dynamic ITR value based on packet size
 2438 * @rx_ring: pointer to ring
 2439 *
 2440 * Stores a new ITR value based strictly on packet size.  This
 2441 * algorithm is less sophisticated than that used in igb_update_itr,
 2442 * due to the difficulty of synchronizing statistics across multiple
 2443 * receive rings.  The divisors and thresholds used by this function
 2444 * were determined based on theoretical maximum wire speed and testing
 2445 * data, in order to minimize response time while increasing bulk
 2446 * throughput.
 2447 * This functionality is controlled by the InterruptThrottleRate module
 2448 * parameter (see igb_param.c)
 2449 * NOTE: This function is called only when operating in a multiqueue
 2450 * receive environment.
 2451 **/
2452static void igb_update_ring_itr(struct igb_ring *rx_ring)
 2453{
2454 int new_val = rx_ring->itr_val;
2455 int avg_wire_size = 0;
2456 struct igb_adapter *adapter = rx_ring->adapter;
 2457
 2458 if (!rx_ring->total_packets)
 2459 goto clear_counts; /* no packets, so don't do anything */
 2460
2461 /* For non-gigabit speeds, just fix the interrupt rate at 4000
2462 * ints/sec - ITR timer value of 120 ticks.
2463 */
2464 if (adapter->link_speed != SPEED_1000) {
2465 new_val = 120;
2466 goto set_itr_val;
 2467 }
 2468 avg_wire_size = rx_ring->total_bytes / rx_ring->total_packets;
 2469
2470 /* Add 24 bytes to size to account for CRC, preamble, and gap */
2471 avg_wire_size += 24;
2472
2473 /* Don't starve jumbo frames */
2474 avg_wire_size = min(avg_wire_size, 3000);
 2475
2476 /* Give a little boost to mid-size frames */
2477 if ((avg_wire_size > 300) && (avg_wire_size < 1200))
2478 new_val = avg_wire_size / 3;
2479 else
2480 new_val = avg_wire_size / 2;
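/* Example with made-up counters: 100 packets totalling 60000 bytes give
 * avg_wire_size = 600 + 24 = 624; that lands in the mid-size band
 * (300..1200), so new_val = 624 / 3 = 208.
 */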
 2481
 2482set_itr_val:
2483 if (new_val != rx_ring->itr_val) {
2484 rx_ring->itr_val = new_val;
 2485 rx_ring->set_itr = 1;
 2486 }
2487clear_counts:
2488 rx_ring->total_bytes = 0;
2489 rx_ring->total_packets = 0;
2490}
2491
2492/**
 2493 * igb_update_itr - update the dynamic ITR value based on statistics
 2494 * @adapter: pointer to adapter
 2495 * @itr_setting: current adapter->itr
 2496 * @packets: the number of packets during this measurement interval
 2497 * @bytes: the number of bytes during this measurement interval
 2498 *
 2499 * Stores a new ITR value based on packets and byte counts during the
 2500 * last interrupt.  The advantage of per interrupt computation is faster
 2501 * updates and more accurate ITR for the current traffic pattern.
 2502 * Constants in this function were computed based on theoretical maximum
 2503 * wire speed and thresholds were set based on testing data as well as
 2504 * attempting to minimize response time while increasing bulk throughput.
 2505 * This functionality is controlled by the InterruptThrottleRate module
 2506 * parameter (see igb_param.c)
 2507 * NOTE: These calculations are only valid when operating in a single-
 2508 * queue environment.
 2509 **/
2510static unsigned int igb_update_itr(struct igb_adapter *adapter, u16 itr_setting,
2511 int packets, int bytes)
2512{
2513 unsigned int retval = itr_setting;
2514
2515 if (packets == 0)
2516 goto update_itr_done;
2517
2518 switch (itr_setting) {
2519 case lowest_latency:
2520 /* handle TSO and jumbo frames */
2521 if (bytes/packets > 8000)
2522 retval = bulk_latency;
2523 else if ((packets < 5) && (bytes > 512))
2524 retval = low_latency;
2525 break;
2526 case low_latency: /* 50 usec aka 20000 ints/s */
2527 if (bytes > 10000) {
2528 /* this if handles the TSO accounting */
2529 if (bytes/packets > 8000) {
2530 retval = bulk_latency;
2531 } else if ((packets < 10) || ((bytes/packets) > 1200)) {
2532 retval = bulk_latency;
 2533 } else if (packets > 35) {
2534 retval = lowest_latency;
2535 }
2536 } else if (bytes/packets > 2000) {
2537 retval = bulk_latency;
2538 } else if (packets <= 2 && bytes < 512) {
2539 retval = lowest_latency;
2540 }
2541 break;
2542 case bulk_latency: /* 250 usec aka 4000 ints/s */
2543 if (bytes > 25000) {
2544 if (packets > 35)
2545 retval = low_latency;
2546 } else if (bytes < 6000) {
2547 retval = low_latency;
2548 }
2549 break;
2550 }
2551
2552update_itr_done:
2553 return retval;
2554}
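/* Illustrative walk through the state machine above (numbers invented):
 * starting in low_latency with 40 packets / 12000 bytes in the interval,
 * bytes > 10000 and bytes/packets = 300, so neither TSO branch fires;
 * packets > 35, so the state drops to lowest_latency.  A later interval of
 * 2 packets / 400 bytes would keep it there, while a jumbo/TSO interval
 * with bytes/packets > 8000 would jump straight to bulk_latency.
 */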
2555
 2556static void igb_set_itr(struct igb_adapter *adapter)
2557{
2558 u16 current_itr;
2559 u32 new_itr = adapter->itr;
2560
2561 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2562 if (adapter->link_speed != SPEED_1000) {
2563 current_itr = 0;
2564 new_itr = 4000;
2565 goto set_itr_now;
2566 }
2567
2568 adapter->rx_itr = igb_update_itr(adapter,
2569 adapter->rx_itr,
2570 adapter->rx_ring->total_packets,
2571 adapter->rx_ring->total_bytes);
 2572
 2573 if (adapter->rx_ring->buddy) {
2574 adapter->tx_itr = igb_update_itr(adapter,
2575 adapter->tx_itr,
2576 adapter->tx_ring->total_packets,
2577 adapter->tx_ring->total_bytes);
2578
2579 current_itr = max(adapter->rx_itr, adapter->tx_itr);
2580 } else {
2581 current_itr = adapter->rx_itr;
2582 }
2583
2584 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2585 if (adapter->itr_setting == 3 &&
2586 current_itr == lowest_latency)
2587 current_itr = low_latency;
2588
2589 switch (current_itr) {
2590 /* counts and packets in update_itr are dependent on these numbers */
2591 case lowest_latency:
2592 new_itr = 70000;
2593 break;
2594 case low_latency:
2595 new_itr = 20000; /* aka hwitr = ~200 */
2596 break;
2597 case bulk_latency:
2598 new_itr = 4000;
2599 break;
2600 default:
2601 break;
2602 }
2603
2604set_itr_now:
2605 adapter->rx_ring->total_bytes = 0;
2606 adapter->rx_ring->total_packets = 0;
2607 if (adapter->rx_ring->buddy) {
2608 adapter->rx_ring->buddy->total_bytes = 0;
2609 adapter->rx_ring->buddy->total_packets = 0;
2610 }
2611
2612 if (new_itr != adapter->itr) {
2613 /* this attempts to bias the interrupt rate towards Bulk
2614 * by adding intermediate steps when interrupt rate is
2615 * increasing */
2616 new_itr = new_itr > adapter->itr ?
2617 min(adapter->itr + (new_itr >> 2), new_itr) :
2618 new_itr;
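/* e.g. (illustrative numbers): stepping from itr = 4000 up to
 * new_itr = 20000 first moves only to 4000 + (20000 >> 2) = 9000, so
 * the interrupt rate climbs toward low latency gradually instead of
 * jumping there in a single interrupt.
 */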
2619 /* Don't write the value here; it resets the adapter's
2620 * internal timer, and causes us to delay far longer than
2621 * we should between interrupts. Instead, we write the ITR
2622 * value at the beginning of the next interrupt so the timing
2623 * ends up being correct.
2624 */
2625 adapter->itr = new_itr;
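/* The conversion below assumes the hardware interval counts in 256 ns
 * units: 1e9 / (ints-per-sec * 256).  For example, new_itr = 20000
 * ints/s is a 50 us interval, i.e. 1000000000 / (20000 * 256) = 195.
 */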
2626 adapter->rx_ring->itr_val = 1000000000 / (new_itr * 256);
2627 adapter->rx_ring->set_itr = 1;
2628 }
2629
2630 return;
2631}
2632
2633
2634#define IGB_TX_FLAGS_CSUM 0x00000001
2635#define IGB_TX_FLAGS_VLAN 0x00000002
2636#define IGB_TX_FLAGS_TSO 0x00000004
2637#define IGB_TX_FLAGS_IPV4 0x00000008
2638#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000
2639#define IGB_TX_FLAGS_VLAN_SHIFT 16
2640
2641static inline int igb_tso_adv(struct igb_adapter *adapter,
2642 struct igb_ring *tx_ring,
2643 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
2644{
2645 struct e1000_adv_tx_context_desc *context_desc;
2646 unsigned int i;
2647 int err;
2648 struct igb_buffer *buffer_info;
2649 u32 info = 0, tu_cmd = 0;
2650 u32 mss_l4len_idx, l4len;
2651 *hdr_len = 0;
2652
2653 if (skb_header_cloned(skb)) {
2654 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2655 if (err)
2656 return err;
2657 }
2658
2659 l4len = tcp_hdrlen(skb);
2660 *hdr_len += l4len;
2661
2662 if (skb->protocol == htons(ETH_P_IP)) {
2663 struct iphdr *iph = ip_hdr(skb);
2664 iph->tot_len = 0;
2665 iph->check = 0;
2666 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2667 iph->daddr, 0,
2668 IPPROTO_TCP,
2669 0);
2670 } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
2671 ipv6_hdr(skb)->payload_len = 0;
2672 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2673 &ipv6_hdr(skb)->daddr,
2674 0, IPPROTO_TCP, 0);
2675 }
2676
2677 i = tx_ring->next_to_use;
2678
2679 buffer_info = &tx_ring->buffer_info[i];
2680 context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
2681 /* VLAN MACLEN IPLEN */
2682 if (tx_flags & IGB_TX_FLAGS_VLAN)
2683 info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
2684 info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
2685 *hdr_len += skb_network_offset(skb);
2686 info |= skb_network_header_len(skb);
2687 *hdr_len += skb_network_header_len(skb);
2688 context_desc->vlan_macip_lens = cpu_to_le32(info);
2689
2690 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
2691 tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
2692
2693 if (skb->protocol == htons(ETH_P_IP))
2694 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
2695 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
2696
2697 context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
2698
2699 /* MSS L4LEN IDX */
2700 mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
2701 mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);
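 /* Assumed field layout for illustration: with gso_size = 1448 and
 * l4len = 20, the MSS lands in the upper 16 bits and the L4 length in
 * bits 8..15, so mss_l4len_idx = (1448 << 16) | (20 << 8).
 */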
2702
 2703 /* Context index must be unique per ring. */
 2704 if (adapter->flags & IGB_FLAG_NEED_CTX_IDX)
 2705 mss_l4len_idx |= tx_ring->queue_index << 4;
2706
2707 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
2708 context_desc->seqnum_seed = 0;
2709
2710 buffer_info->time_stamp = jiffies;
2711 buffer_info->dma = 0;
2712 i++;
2713 if (i == tx_ring->count)
2714 i = 0;
2715
2716 tx_ring->next_to_use = i;
2717
2718 return true;
2719}
2720
2721static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
2722 struct igb_ring *tx_ring,
2723 struct sk_buff *skb, u32 tx_flags)
2724{
2725 struct e1000_adv_tx_context_desc *context_desc;
2726 unsigned int i;
2727 struct igb_buffer *buffer_info;
2728 u32 info = 0, tu_cmd = 0;
2729
2730 if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
2731 (tx_flags & IGB_TX_FLAGS_VLAN)) {
2732 i = tx_ring->next_to_use;
2733 buffer_info = &tx_ring->buffer_info[i];
2734 context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
2735
2736 if (tx_flags & IGB_TX_FLAGS_VLAN)
2737 info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
2738 info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
2739 if (skb->ip_summed == CHECKSUM_PARTIAL)
2740 info |= skb_network_header_len(skb);
2741
2742 context_desc->vlan_macip_lens = cpu_to_le32(info);
2743
2744 tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
2745
2746 if (skb->ip_summed == CHECKSUM_PARTIAL) {
 2747 switch (skb->protocol) {
 2748 case __constant_htons(ETH_P_IP):
 2749 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
2750 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2751 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
2752 break;
2753 case __constant_htons(ETH_P_IPV6):
2754 /* XXX what about other V6 headers?? */
2755 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2756 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
2757 break;
2758 default:
2759 if (unlikely(net_ratelimit()))
2760 dev_warn(&adapter->pdev->dev,
2761 "partial checksum but proto=%x!\n",
2762 skb->protocol);
2763 break;
2764 }
2765 }
2766
2767 context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
2768 context_desc->seqnum_seed = 0;
 2769 if (adapter->flags & IGB_FLAG_NEED_CTX_IDX)
 2770 context_desc->mss_l4len_idx =
 2771 cpu_to_le32(tx_ring->queue_index << 4);
2772
2773 buffer_info->time_stamp = jiffies;
2774 buffer_info->dma = 0;
2775
2776 i++;
2777 if (i == tx_ring->count)
2778 i = 0;
2779 tx_ring->next_to_use = i;
2780
2781 return true;
2782 }
2783
2784
2785 return false;
2786}
2787
2788#define IGB_MAX_TXD_PWR 16
2789#define IGB_MAX_DATA_PER_TXD (1<<IGB_MAX_TXD_PWR)
2790
2791static inline int igb_tx_map_adv(struct igb_adapter *adapter,
2792 struct igb_ring *tx_ring,
2793 struct sk_buff *skb)
2794{
2795 struct igb_buffer *buffer_info;
2796 unsigned int len = skb_headlen(skb);
2797 unsigned int count = 0, i;
2798 unsigned int f;
2799
2800 i = tx_ring->next_to_use;
2801
2802 buffer_info = &tx_ring->buffer_info[i];
2803 BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
2804 buffer_info->length = len;
2805 /* set time_stamp *before* dma to help avoid a possible race */
2806 buffer_info->time_stamp = jiffies;
2807 buffer_info->dma = pci_map_single(adapter->pdev, skb->data, len,
2808 PCI_DMA_TODEVICE);
2809 count++;
2810 i++;
2811 if (i == tx_ring->count)
2812 i = 0;
2813
2814 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
2815 struct skb_frag_struct *frag;
2816
2817 frag = &skb_shinfo(skb)->frags[f];
2818 len = frag->size;
2819
2820 buffer_info = &tx_ring->buffer_info[i];
2821 BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
2822 buffer_info->length = len;
2823 buffer_info->time_stamp = jiffies;
2824 buffer_info->dma = pci_map_page(adapter->pdev,
2825 frag->page,
2826 frag->page_offset,
2827 len,
2828 PCI_DMA_TODEVICE);
2829
2830 count++;
2831 i++;
2832 if (i == tx_ring->count)
2833 i = 0;
2834 }
2835
2836 i = (i == 0) ? tx_ring->count - 1 : i - 1;
2837 tx_ring->buffer_info[i].skb = skb;
2838
2839 return count;
2840}
2841
2842static inline void igb_tx_queue_adv(struct igb_adapter *adapter,
2843 struct igb_ring *tx_ring,
2844 int tx_flags, int count, u32 paylen,
2845 u8 hdr_len)
2846{
2847 union e1000_adv_tx_desc *tx_desc = NULL;
2848 struct igb_buffer *buffer_info;
2849 u32 olinfo_status = 0, cmd_type_len;
2850 unsigned int i;
2851
2852 cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
2853 E1000_ADVTXD_DCMD_DEXT);
2854
2855 if (tx_flags & IGB_TX_FLAGS_VLAN)
2856 cmd_type_len |= E1000_ADVTXD_DCMD_VLE;
2857
2858 if (tx_flags & IGB_TX_FLAGS_TSO) {
2859 cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
2860
2861 /* insert tcp checksum */
2862 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
2863
2864 /* insert ip checksum */
2865 if (tx_flags & IGB_TX_FLAGS_IPV4)
2866 olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
2867
2868 } else if (tx_flags & IGB_TX_FLAGS_CSUM) {
2869 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
2870 }
2871
 2872 if ((adapter->flags & IGB_FLAG_NEED_CTX_IDX) &&
 2873 (tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_TSO |
 2874 IGB_TX_FLAGS_VLAN)))
 2875 olinfo_status |= tx_ring->queue_index << 4;
2876
2877 olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);
2878
2879 i = tx_ring->next_to_use;
2880 while (count--) {
2881 buffer_info = &tx_ring->buffer_info[i];
2882 tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
2883 tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
2884 tx_desc->read.cmd_type_len =
2885 cpu_to_le32(cmd_type_len | buffer_info->length);
2886 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
2887 i++;
2888 if (i == tx_ring->count)
2889 i = 0;
2890 }
2891
2892 tx_desc->read.cmd_type_len |= cpu_to_le32(adapter->txd_cmd);
2893 /* Force memory writes to complete before letting h/w
2894 * know there are new descriptors to fetch. (Only
2895 * applicable for weak-ordered memory model archs,
2896 * such as IA-64). */
2897 wmb();
2898
2899 tx_ring->next_to_use = i;
2900 writel(i, adapter->hw.hw_addr + tx_ring->tail);
 2901 /* we need this if more than one processor can write to our tail
 2902 * at a time; it synchronizes IO on IA64/Altix systems */
2903 mmiowb();
2904}
2905
2906static int __igb_maybe_stop_tx(struct net_device *netdev,
2907 struct igb_ring *tx_ring, int size)
2908{
2909 struct igb_adapter *adapter = netdev_priv(netdev);
2910
 2911 netif_stop_subqueue(netdev, tx_ring->queue_index);
 2912
2913 /* Herbert's original patch had:
2914 * smp_mb__after_netif_stop_queue();
2915 * but since that doesn't exist yet, just open code it. */
2916 smp_mb();
2917
 2918 /* We need to check again in case another CPU has just
2919 * made room available. */
2920 if (IGB_DESC_UNUSED(tx_ring) < size)
2921 return -EBUSY;
2922
2923 /* A reprieve! */
 2924 netif_wake_subqueue(netdev, tx_ring->queue_index);
2925 ++adapter->restart_queue;
2926 return 0;
2927}
2928
2929static int igb_maybe_stop_tx(struct net_device *netdev,
2930 struct igb_ring *tx_ring, int size)
2931{
2932 if (IGB_DESC_UNUSED(tx_ring) >= size)
2933 return 0;
2934 return __igb_maybe_stop_tx(netdev, tx_ring, size);
2935}
2936
2937#define TXD_USE_COUNT(S) (((S) >> (IGB_MAX_TXD_PWR)) + 1)
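/* TXD_USE_COUNT gives the number of descriptors needed for a buffer of S
 * bytes at 2^16 = 64 KB max per descriptor: e.g. S = 1500 -> 1 descriptor,
 * S = 70000 -> (70000 >> 16) + 1 = 2.
 */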
2938
2939static int igb_xmit_frame_ring_adv(struct sk_buff *skb,
2940 struct net_device *netdev,
2941 struct igb_ring *tx_ring)
2942{
2943 struct igb_adapter *adapter = netdev_priv(netdev);
2944 unsigned int tx_flags = 0;
2945 unsigned int len;
2946 u8 hdr_len = 0;
2947 int tso = 0;
2948
2949 len = skb_headlen(skb);
2950
2951 if (test_bit(__IGB_DOWN, &adapter->state)) {
2952 dev_kfree_skb_any(skb);
2953 return NETDEV_TX_OK;
2954 }
2955
2956 if (skb->len <= 0) {
2957 dev_kfree_skb_any(skb);
2958 return NETDEV_TX_OK;
2959 }
2960
2961 /* need: 1 descriptor per page,
2962 * + 2 desc gap to keep tail from touching head,
2963 * + 1 desc for skb->data,
2964 * + 1 desc for context descriptor,
2965 * otherwise try next time */
2966 if (igb_maybe_stop_tx(netdev, tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
2967 /* this is a hard error */
2968 return NETDEV_TX_BUSY;
2969 }
 2970 skb_orphan(skb);
2971
2972 if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
2973 tx_flags |= IGB_TX_FLAGS_VLAN;
2974 tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
2975 }
2976
2977 if (skb->protocol == htons(ETH_P_IP))
2978 tx_flags |= IGB_TX_FLAGS_IPV4;
2979
2980 tso = skb_is_gso(skb) ? igb_tso_adv(adapter, tx_ring, skb, tx_flags,
2981 &hdr_len) : 0;
2982
2983 if (tso < 0) {
2984 dev_kfree_skb_any(skb);
2985 return NETDEV_TX_OK;
2986 }
2987
2988 if (tso)
2989 tx_flags |= IGB_TX_FLAGS_TSO;
2990 else if (igb_tx_csum_adv(adapter, tx_ring, skb, tx_flags))
2991 if (skb->ip_summed == CHECKSUM_PARTIAL)
2992 tx_flags |= IGB_TX_FLAGS_CSUM;
2993
2994 igb_tx_queue_adv(adapter, tx_ring, tx_flags,
2995 igb_tx_map_adv(adapter, tx_ring, skb),
2996 skb->len, hdr_len);
2997
2998 netdev->trans_start = jiffies;
2999
3000 /* Make sure there is space in the ring for the next send. */
3001 igb_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 4);
3002
3003 return NETDEV_TX_OK;
3004}
3005
3006static int igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *netdev)
3007{
3008 struct igb_adapter *adapter = netdev_priv(netdev);
3009 struct igb_ring *tx_ring;
3010
3011 int r_idx = 0;
3012 r_idx = skb->queue_mapping & (IGB_MAX_TX_QUEUES - 1);
3013 tx_ring = adapter->multi_tx_table[r_idx];
3014
3015 /* This goes back to the question of how to logically map a tx queue
3016 * to a flow. Right now, performance is impacted slightly negatively
3017 * if using multiple tx queues. If the stack breaks away from a
3018 * single qdisc implementation, we can look at this again. */
3019 return (igb_xmit_frame_ring_adv(skb, netdev, tx_ring));
3020}
3021
3022/**
3023 * igb_tx_timeout - Respond to a Tx Hang
3024 * @netdev: network interface device structure
3025 **/
3026static void igb_tx_timeout(struct net_device *netdev)
3027{
3028 struct igb_adapter *adapter = netdev_priv(netdev);
3029 struct e1000_hw *hw = &adapter->hw;
3030
3031 /* Do the reset outside of interrupt context */
3032 adapter->tx_timeout_count++;
3033 schedule_work(&adapter->reset_task);
3034 wr32(E1000_EICS, adapter->eims_enable_mask &
3035 ~(E1000_EIMS_TCP_TIMER | E1000_EIMS_OTHER));
3036}
3037
3038static void igb_reset_task(struct work_struct *work)
3039{
3040 struct igb_adapter *adapter;
3041 adapter = container_of(work, struct igb_adapter, reset_task);
3042
3043 igb_reinit_locked(adapter);
3044}
3045
3046/**
3047 * igb_get_stats - Get System Network Statistics
3048 * @netdev: network interface device structure
3049 *
3050 * Returns the address of the device statistics structure.
3051 * The statistics are actually updated from the timer callback.
3052 **/
3053static struct net_device_stats *
3054igb_get_stats(struct net_device *netdev)
3055{
3056 struct igb_adapter *adapter = netdev_priv(netdev);
3057
3058 /* only return the current stats */
3059 return &adapter->net_stats;
3060}
3061
3062/**
3063 * igb_change_mtu - Change the Maximum Transfer Unit
3064 * @netdev: network interface device structure
3065 * @new_mtu: new value for maximum frame size
3066 *
3067 * Returns 0 on success, negative on failure
3068 **/
3069static int igb_change_mtu(struct net_device *netdev, int new_mtu)
3070{
3071 struct igb_adapter *adapter = netdev_priv(netdev);
3072 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
3073
3074 if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
3075 (max_frame > MAX_JUMBO_FRAME_SIZE)) {
3076 dev_err(&adapter->pdev->dev, "Invalid MTU setting\n");
3077 return -EINVAL;
3078 }
3079
3080#define MAX_STD_JUMBO_FRAME_SIZE 9234
3081 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
3082 dev_err(&adapter->pdev->dev, "MTU > 9216 not supported.\n");
3083 return -EINVAL;
3084 }
3085
3086 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
3087 msleep(1);
3088 /* igb_down has a dependency on max_frame_size */
3089 adapter->max_frame_size = max_frame;
3090 if (netif_running(netdev))
3091 igb_down(adapter);
3092
3093 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3094 * means we reserve 2 more, this pushes us to allocate from the next
3095 * larger slab size.
3096 * i.e. RXBUFFER_2048 --> size-4096 slab
3097 */
3098
3099 if (max_frame <= IGB_RXBUFFER_256)
3100 adapter->rx_buffer_len = IGB_RXBUFFER_256;
3101 else if (max_frame <= IGB_RXBUFFER_512)
3102 adapter->rx_buffer_len = IGB_RXBUFFER_512;
3103 else if (max_frame <= IGB_RXBUFFER_1024)
3104 adapter->rx_buffer_len = IGB_RXBUFFER_1024;
3105 else if (max_frame <= IGB_RXBUFFER_2048)
3106 adapter->rx_buffer_len = IGB_RXBUFFER_2048;
3107 else
3108#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
3109 adapter->rx_buffer_len = IGB_RXBUFFER_16384;
3110#else
3111 adapter->rx_buffer_len = PAGE_SIZE / 2;
3112#endif
3113 /* adjust allocation if LPE protects us, and we aren't using SBP */
3114 if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
3115 (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))
3116 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
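 /* Worked example (standard MTU): new_mtu = 1500 gives max_frame = 1518,
 * which first selects IGB_RXBUFFER_2048; the check above then notices
 * 1518 == ETH_FRAME_LEN + ETH_FCS_LEN and trims the allocation down to
 * MAXIMUM_ETHERNET_VLAN_SIZE instead.
 */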
3117
3118 dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n",
3119 netdev->mtu, new_mtu);
3120 netdev->mtu = new_mtu;
3121
3122 if (netif_running(netdev))
3123 igb_up(adapter);
3124 else
3125 igb_reset(adapter);
3126
3127 clear_bit(__IGB_RESETTING, &adapter->state);
3128
3129 return 0;
3130}
3131
3132/**
3133 * igb_update_stats - Update the board statistics counters
3134 * @adapter: board private structure
3135 **/
3136
3137void igb_update_stats(struct igb_adapter *adapter)
3138{
3139 struct e1000_hw *hw = &adapter->hw;
3140 struct pci_dev *pdev = adapter->pdev;
3141 u16 phy_tmp;
3142
3143#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
3144
3145 /*
3146 * Prevent stats update while adapter is being reset, or if the pci
3147 * connection is down.
3148 */
3149 if (adapter->link_speed == 0)
3150 return;
3151 if (pci_channel_offline(pdev))
3152 return;
3153
3154 adapter->stats.crcerrs += rd32(E1000_CRCERRS);
3155 adapter->stats.gprc += rd32(E1000_GPRC);
3156 adapter->stats.gorc += rd32(E1000_GORCL);
3157 rd32(E1000_GORCH); /* clear GORCL */
3158 adapter->stats.bprc += rd32(E1000_BPRC);
3159 adapter->stats.mprc += rd32(E1000_MPRC);
3160 adapter->stats.roc += rd32(E1000_ROC);
3161
3162 adapter->stats.prc64 += rd32(E1000_PRC64);
3163 adapter->stats.prc127 += rd32(E1000_PRC127);
3164 adapter->stats.prc255 += rd32(E1000_PRC255);
3165 adapter->stats.prc511 += rd32(E1000_PRC511);
3166 adapter->stats.prc1023 += rd32(E1000_PRC1023);
3167 adapter->stats.prc1522 += rd32(E1000_PRC1522);
3168 adapter->stats.symerrs += rd32(E1000_SYMERRS);
3169 adapter->stats.sec += rd32(E1000_SEC);
3170
3171 adapter->stats.mpc += rd32(E1000_MPC);
3172 adapter->stats.scc += rd32(E1000_SCC);
3173 adapter->stats.ecol += rd32(E1000_ECOL);
3174 adapter->stats.mcc += rd32(E1000_MCC);
3175 adapter->stats.latecol += rd32(E1000_LATECOL);
3176 adapter->stats.dc += rd32(E1000_DC);
3177 adapter->stats.rlec += rd32(E1000_RLEC);
3178 adapter->stats.xonrxc += rd32(E1000_XONRXC);
3179 adapter->stats.xontxc += rd32(E1000_XONTXC);
3180 adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
3181 adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
3182 adapter->stats.fcruc += rd32(E1000_FCRUC);
3183 adapter->stats.gptc += rd32(E1000_GPTC);
3184 adapter->stats.gotc += rd32(E1000_GOTCL);
3185 rd32(E1000_GOTCH); /* clear GOTCL */
3186 adapter->stats.rnbc += rd32(E1000_RNBC);
3187 adapter->stats.ruc += rd32(E1000_RUC);
3188 adapter->stats.rfc += rd32(E1000_RFC);
3189 adapter->stats.rjc += rd32(E1000_RJC);
3190 adapter->stats.tor += rd32(E1000_TORH);
3191 adapter->stats.tot += rd32(E1000_TOTH);
3192 adapter->stats.tpr += rd32(E1000_TPR);
3193
3194 adapter->stats.ptc64 += rd32(E1000_PTC64);
3195 adapter->stats.ptc127 += rd32(E1000_PTC127);
3196 adapter->stats.ptc255 += rd32(E1000_PTC255);
3197 adapter->stats.ptc511 += rd32(E1000_PTC511);
3198 adapter->stats.ptc1023 += rd32(E1000_PTC1023);
3199 adapter->stats.ptc1522 += rd32(E1000_PTC1522);
3200
3201 adapter->stats.mptc += rd32(E1000_MPTC);
3202 adapter->stats.bptc += rd32(E1000_BPTC);
3203
3204 /* used for adaptive IFS */
3205
3206 hw->mac.tx_packet_delta = rd32(E1000_TPT);
3207 adapter->stats.tpt += hw->mac.tx_packet_delta;
3208 hw->mac.collision_delta = rd32(E1000_COLC);
3209 adapter->stats.colc += hw->mac.collision_delta;
3210
3211 adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
3212 adapter->stats.rxerrc += rd32(E1000_RXERRC);
3213 adapter->stats.tncrs += rd32(E1000_TNCRS);
3214 adapter->stats.tsctc += rd32(E1000_TSCTC);
3215 adapter->stats.tsctfc += rd32(E1000_TSCTFC);
3216
3217 adapter->stats.iac += rd32(E1000_IAC);
3218 adapter->stats.icrxoc += rd32(E1000_ICRXOC);
3219 adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
3220 adapter->stats.icrxatc += rd32(E1000_ICRXATC);
3221 adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
3222 adapter->stats.ictxatc += rd32(E1000_ICTXATC);
3223 adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
3224 adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
3225 adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);
3226
3227 /* Fill out the OS statistics structure */
3228 adapter->net_stats.multicast = adapter->stats.mprc;
3229 adapter->net_stats.collisions = adapter->stats.colc;
3230
3231 /* Rx Errors */
3232
3233 /* RLEC on some newer hardware can be incorrect so build
3234 * our own version based on RUC and ROC */
3235 adapter->net_stats.rx_errors = adapter->stats.rxerrc +
3236 adapter->stats.crcerrs + adapter->stats.algnerrc +
3237 adapter->stats.ruc + adapter->stats.roc +
3238 adapter->stats.cexterr;
3239 adapter->net_stats.rx_length_errors = adapter->stats.ruc +
3240 adapter->stats.roc;
3241 adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
3242 adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
3243 adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
3244
3245 /* Tx Errors */
3246 adapter->net_stats.tx_errors = adapter->stats.ecol +
3247 adapter->stats.latecol;
3248 adapter->net_stats.tx_aborted_errors = adapter->stats.ecol;
3249 adapter->net_stats.tx_window_errors = adapter->stats.latecol;
3250 adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs;
3251
3252 /* Tx Dropped needs to be maintained elsewhere */
3253
3254 /* Phy Stats */
3255 if (hw->phy.media_type == e1000_media_type_copper) {
3256 if ((adapter->link_speed == SPEED_1000) &&
3257 (!hw->phy.ops.read_phy_reg(hw, PHY_1000T_STATUS,
3258 &phy_tmp))) {
3259 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
3260 adapter->phy_stats.idle_errors += phy_tmp;
3261 }
3262 }
3263
3264 /* Management Stats */
3265 adapter->stats.mgptc += rd32(E1000_MGTPTC);
3266 adapter->stats.mgprc += rd32(E1000_MGTPRC);
3267 adapter->stats.mgpdc += rd32(E1000_MGTPDC);
3268}
3269
3270
3271static irqreturn_t igb_msix_other(int irq, void *data)
3272{
3273 struct net_device *netdev = data;
3274 struct igb_adapter *adapter = netdev_priv(netdev);
3275 struct e1000_hw *hw = &adapter->hw;
 3276 u32 icr = rd32(E1000_ICR);
 3277
3278 /* reading ICR causes bit 31 of EICR to be cleared */
3279 if (!(icr & E1000_ICR_LSC))
3280 goto no_link_interrupt;
3281 hw->mac.get_link_status = 1;
3282 /* guard against interrupt when we're going down */
3283 if (!test_bit(__IGB_DOWN, &adapter->state))
3284 mod_timer(&adapter->watchdog_timer, jiffies + 1);
 3285
3286no_link_interrupt:
3287 wr32(E1000_IMS, E1000_IMS_LSC);
 3288 wr32(E1000_EIMS, adapter->eims_other);
3289
3290 return IRQ_HANDLED;
3291}
3292
3293static irqreturn_t igb_msix_tx(int irq, void *data)
3294{
3295 struct igb_ring *tx_ring = data;
3296 struct igb_adapter *adapter = tx_ring->adapter;
3297 struct e1000_hw *hw = &adapter->hw;
3298
 3299#ifdef CONFIG_DCA
 3300 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
 3301 igb_update_tx_dca(tx_ring);
 3302#endif
3303 tx_ring->total_bytes = 0;
3304 tx_ring->total_packets = 0;
3305
3306 /* auto mask will automatically reenable the interrupt when we write
3307 * EICS */
 3308 if (!igb_clean_tx_irq(tx_ring))
 3309 /* Ring was not completely cleaned, so fire another interrupt */
 3310 wr32(E1000_EICS, tx_ring->eims_value);
 3311 else
 3312 wr32(E1000_EIMS, tx_ring->eims_value);
 3313
3314 return IRQ_HANDLED;
3315}
3316
3317static void igb_write_itr(struct igb_ring *ring)
3318{
3319 struct e1000_hw *hw = &ring->adapter->hw;
3320 if ((ring->adapter->itr_setting & 3) && ring->set_itr) {
3321 switch (hw->mac.type) {
3322 case e1000_82576:
3323 wr32(ring->itr_register,
3324 ring->itr_val |
3325 0x80000000);
3326 break;
3327 default:
3328 wr32(ring->itr_register,
3329 ring->itr_val |
3330 (ring->itr_val << 16));
3331 break;
3332 }
3333 ring->set_itr = 0;
3334 }
3335}
3336
3337static irqreturn_t igb_msix_rx(int irq, void *data)
3338{
3339 struct igb_ring *rx_ring = data;
3340 struct igb_adapter *adapter = rx_ring->adapter;
 3341
 3342 /* Write the ITR value calculated at the end of the
 3343 * previous interrupt.
 3344 */
 3345
 3346 igb_write_itr(rx_ring);
 3347
3348 if (netif_rx_schedule_prep(adapter->netdev, &rx_ring->napi))
3349 __netif_rx_schedule(adapter->netdev, &rx_ring->napi);
3350
 3351#ifdef CONFIG_DCA
 3352 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
 3353 igb_update_rx_dca(rx_ring);
3354#endif
3355 return IRQ_HANDLED;
3356}
3357
3358#ifdef CONFIG_DCA
3359static void igb_update_rx_dca(struct igb_ring *rx_ring)
3360{
3361 u32 dca_rxctrl;
3362 struct igb_adapter *adapter = rx_ring->adapter;
3363 struct e1000_hw *hw = &adapter->hw;
3364 int cpu = get_cpu();
3365 int q = rx_ring - adapter->rx_ring;
3366
3367 if (rx_ring->cpu != cpu) {
3368 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
3369 if (hw->mac.type == e1000_82576) {
3370 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
3371 dca_rxctrl |= dca_get_tag(cpu) <<
3372 E1000_DCA_RXCTRL_CPUID_SHIFT;
3373 } else {
3374 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
3375 dca_rxctrl |= dca_get_tag(cpu);
3376 }
3377 dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
3378 dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
3379 dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
3380 wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
3381 rx_ring->cpu = cpu;
3382 }
3383 put_cpu();
3384}
3385
3386static void igb_update_tx_dca(struct igb_ring *tx_ring)
3387{
3388 u32 dca_txctrl;
3389 struct igb_adapter *adapter = tx_ring->adapter;
3390 struct e1000_hw *hw = &adapter->hw;
3391 int cpu = get_cpu();
3392 int q = tx_ring - adapter->tx_ring;
3393
3394 if (tx_ring->cpu != cpu) {
3395 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
3396 if (hw->mac.type == e1000_82576) {
3397 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
3398 dca_txctrl |= dca_get_tag(cpu) <<
3399 E1000_DCA_TXCTRL_CPUID_SHIFT;
3400 } else {
3401 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
3402 dca_txctrl |= dca_get_tag(cpu);
3403 }
3404 dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
3405 wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
3406 tx_ring->cpu = cpu;
3407 }
3408 put_cpu();
3409}
3410
3411static void igb_setup_dca(struct igb_adapter *adapter)
3412{
3413 int i;
3414
 3415 if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
3416 return;
3417
3418 for (i = 0; i < adapter->num_tx_queues; i++) {
3419 adapter->tx_ring[i].cpu = -1;
3420 igb_update_tx_dca(&adapter->tx_ring[i]);
3421 }
3422 for (i = 0; i < adapter->num_rx_queues; i++) {
3423 adapter->rx_ring[i].cpu = -1;
3424 igb_update_rx_dca(&adapter->rx_ring[i]);
3425 }
3426}
3427
3428static int __igb_notify_dca(struct device *dev, void *data)
3429{
3430 struct net_device *netdev = dev_get_drvdata(dev);
3431 struct igb_adapter *adapter = netdev_priv(netdev);
3432 struct e1000_hw *hw = &adapter->hw;
3433 unsigned long event = *(unsigned long *)data;
3434
3435 if (!(adapter->flags & IGB_FLAG_HAS_DCA))
3436 goto out;
3437
3438 switch (event) {
3439 case DCA_PROVIDER_ADD:
3440 /* if already enabled, don't do it again */
 3441 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
 3442 break;
 3443 adapter->flags |= IGB_FLAG_DCA_ENABLED;
3444 /* Always use CB2 mode, difference is masked
3445 * in the CB driver. */
3446 wr32(E1000_DCA_CTRL, 2);
3447 if (dca_add_requester(dev) == 0) {
3448 dev_info(&adapter->pdev->dev, "DCA enabled\n");
3449 igb_setup_dca(adapter);
3450 break;
3451 }
3452 /* Fall Through since DCA is disabled. */
3453 case DCA_PROVIDER_REMOVE:
 3454 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
3455 /* without this a class_device is left
3456 * hanging around in the sysfs model */
3457 dca_remove_requester(dev);
3458 dev_info(&adapter->pdev->dev, "DCA disabled\n");
 3459 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
3460 wr32(E1000_DCA_CTRL, 1);
3461 }
3462 break;
3463 }
 3464out:
 3465 return 0;
3466}
3467
3468static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
3469 void *p)
3470{
3471 int ret_val;
3472
3473 ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
3474 __igb_notify_dca);
3475
3476 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
3477}
3478#endif /* CONFIG_DCA */
3479
3480/**
3481 * igb_intr_msi - Interrupt Handler
3482 * @irq: interrupt number
3483 * @data: pointer to a network interface device structure
3484 **/
3485static irqreturn_t igb_intr_msi(int irq, void *data)
3486{
3487 struct net_device *netdev = data;
3488 struct igb_adapter *adapter = netdev_priv(netdev);
3489 struct e1000_hw *hw = &adapter->hw;
3490 /* read ICR disables interrupts using IAM */
3491 u32 icr = rd32(E1000_ICR);
3492
 3493 igb_write_itr(adapter->rx_ring);
 3494
3495 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
3496 hw->mac.get_link_status = 1;
3497 if (!test_bit(__IGB_DOWN, &adapter->state))
3498 mod_timer(&adapter->watchdog_timer, jiffies + 1);
3499 }
3500
 3501 netif_rx_schedule(netdev, &adapter->rx_ring[0].napi);
3502
3503 return IRQ_HANDLED;
3504}
3505
3506/**
3507 * igb_intr - Interrupt Handler
3508 * @irq: interrupt number
3509 * @data: pointer to a network interface device structure
3510 **/
3511static irqreturn_t igb_intr(int irq, void *data)
3512{
3513 struct net_device *netdev = data;
3514 struct igb_adapter *adapter = netdev_priv(netdev);
3515 struct e1000_hw *hw = &adapter->hw;
3516 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
3517 * need for the IMC write */
3518 u32 icr = rd32(E1000_ICR);
3519 u32 eicr = 0;
3520 if (!icr)
3521 return IRQ_NONE; /* Not our interrupt */
3522
6eb5a7f1 3523 igb_write_itr(adapter->rx_ring);
3524
3525 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
3526 * not set, then the adapter didn't send an interrupt */
3527 if (!(icr & E1000_ICR_INT_ASSERTED))
3528 return IRQ_NONE;
3529
3530 eicr = rd32(E1000_EICR);
3531
3532 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
3533 hw->mac.get_link_status = 1;
3534 /* guard against interrupt when we're going down */
3535 if (!test_bit(__IGB_DOWN, &adapter->state))
3536 mod_timer(&adapter->watchdog_timer, jiffies + 1);
3537 }
3538
844290e5 3539 netif_rx_schedule(netdev, &adapter->rx_ring[0].napi);
3540
3541 return IRQ_HANDLED;
3542}
3543
3544/**
3545 * igb_poll - NAPI Rx polling callback
3546 * @napi: napi polling structure
3547 * @budget: count of how many packets we should handle
9d5c8243 3548 **/
661086df 3549static int igb_poll(struct napi_struct *napi, int budget)
9d5c8243 3550{
3551 struct igb_ring *rx_ring = container_of(napi, struct igb_ring, napi);
3552 struct igb_adapter *adapter = rx_ring->adapter;
9d5c8243 3553 struct net_device *netdev = adapter->netdev;
661086df 3554 int tx_clean_complete, work_done = 0;
9d5c8243 3555
661086df 3556 /* this poll routine only supports one tx and one rx queue */
fe4506b6 3557#ifdef CONFIG_DCA
7dfc16fa 3558 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
3559 igb_update_tx_dca(&adapter->tx_ring[0]);
3560#endif
661086df 3561 tx_clean_complete = igb_clean_tx_irq(&adapter->tx_ring[0]);
3562
3563#ifdef CONFIG_DCA
7dfc16fa 3564 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
3565 igb_update_rx_dca(&adapter->rx_ring[0]);
3566#endif
661086df 3567 igb_clean_rx_irq_adv(&adapter->rx_ring[0], &work_done, budget);
3568
3569 /* If no Tx and not enough Rx work done, exit the polling mode */
3570 if ((tx_clean_complete && (work_done < budget)) ||
3571 !netif_running(netdev)) {
9d5c8243 3572 if (adapter->itr_setting & 3)
6eb5a7f1 3573 igb_set_itr(adapter);
3574 netif_rx_complete(netdev, napi);
3575 if (!test_bit(__IGB_DOWN, &adapter->state))
3576 igb_irq_enable(adapter);
3577 return 0;
3578 }
3579
3580 return 1;
3581}
3582
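/**
 * igb_clean_rx_ring_msix - NAPI Rx polling callback for MSI-X rx vectors
 * @napi: napi structure embedded in the ring being serviced
 * @budget: maximum number of packets this poll may clean
 *
 * Returns 0 once the ring is quiet, after re-arming this ring's EIMS
 * bit; a non-zero return asks the NAPI core to poll the ring again.
 **/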
3583static int igb_clean_rx_ring_msix(struct napi_struct *napi, int budget)
3584{
3585 struct igb_ring *rx_ring = container_of(napi, struct igb_ring, napi);
3586 struct igb_adapter *adapter = rx_ring->adapter;
3587 struct e1000_hw *hw = &adapter->hw;
3588 struct net_device *netdev = adapter->netdev;
3589 int work_done = 0;
3590
3591 /* Keep link state information with original netdev */
3592 if (!netif_carrier_ok(netdev))
3593 goto quit_polling;
3594
fe4506b6 3595#ifdef CONFIG_DCA
7dfc16fa 3596 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
3597 igb_update_rx_dca(rx_ring);
3598#endif
3b644cf6 3599 igb_clean_rx_irq_adv(rx_ring, &work_done, budget);
3600
3601
3602 /* If not enough Rx work done, exit the polling mode */
3603 if ((work_done == 0) || !netif_running(netdev)) {
3604quit_polling:
3605 netif_rx_complete(netdev, napi);
3606
3607 if (adapter->itr_setting & 3) {
3608 if (adapter->num_rx_queues == 1)
3609 igb_set_itr(adapter);
3610 else
3611 igb_update_ring_itr(rx_ring);
9d5c8243 3612 }
3613
3614 if (!test_bit(__IGB_DOWN, &adapter->state))
3615 wr32(E1000_EIMS, rx_ring->eims_value);
3616
3617 return 0;
3618 }
3619
3620 return 1;
3621}
3622
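/* The 82575 reports transmit progress by DMA-writing its head pointer
 * into host memory (head write-back). The write-back word lives just
 * past the last descriptor of the ring, which is where get_head()
 * peeks; the volatile cast forces a fresh load on every call.
 */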
3623static inline u32 get_head(struct igb_ring *tx_ring)
3624{
3625 void *end = (struct e1000_tx_desc *)tx_ring->desc + tx_ring->count;
3626 return le32_to_cpu(*(volatile __le32 *)end);
3627}
3628
3629/**
3630 * igb_clean_tx_irq - Reclaim resources after transmit completes
 3631 * @tx_ring: pointer to the tx ring to be cleaned
3632 * returns true if ring is completely cleaned
3633 **/
3b644cf6 3634static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
9d5c8243 3635{
3b644cf6 3636 struct igb_adapter *adapter = tx_ring->adapter;
9d5c8243 3637 struct e1000_hw *hw = &adapter->hw;
3b644cf6 3638 struct net_device *netdev = adapter->netdev;
3639 struct e1000_tx_desc *tx_desc;
3640 struct igb_buffer *buffer_info;
3641 struct sk_buff *skb;
3642 unsigned int i;
3643 u32 head, oldhead;
3644 unsigned int count = 0;
3645 bool cleaned = false;
3646 bool retval = true;
3647 unsigned int total_bytes = 0, total_packets = 0;
3648
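/* Reclaim loop: fetch the written-back head, free every buffer up to
 * it, then re-read the head and go around again if hardware advanced
 * it while we were cleaning. The rmb()s keep the head reads ordered
 * with respect to the surrounding descriptor accesses.
 */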
3649 rmb();
6d8126f9 3650 head = get_head(tx_ring);
3651 i = tx_ring->next_to_clean;
3652 while (1) {
3653 while (i != head) {
3654 cleaned = true;
3655 tx_desc = E1000_TX_DESC(*tx_ring, i);
3656 buffer_info = &tx_ring->buffer_info[i];
3657 skb = buffer_info->skb;
3658
3659 if (skb) {
3660 unsigned int segs, bytecount;
3661 /* gso_segs is currently only valid for tcp */
3662 segs = skb_shinfo(skb)->gso_segs ?: 1;
3663 /* multiply data chunks by size of headers */
3664 bytecount = ((segs - 1) * skb_headlen(skb)) +
3665 skb->len;
3666 total_packets += segs;
3667 total_bytes += bytecount;
3668 }
3669
3670 igb_unmap_and_free_tx_resource(adapter, buffer_info);
3671 tx_desc->upper.data = 0;
3672
3673 i++;
3674 if (i == tx_ring->count)
3675 i = 0;
3676
3677 count++;
3678 if (count == IGB_MAX_TX_CLEAN) {
3679 retval = false;
3680 goto done_cleaning;
3681 }
3682 }
3683 oldhead = head;
3684 rmb();
6d8126f9 3685 head = get_head(tx_ring);
3686 if (head == oldhead)
3687 goto done_cleaning;
3688 } /* while (1) */
3689
3690done_cleaning:
3691 tx_ring->next_to_clean = i;
3692
3693 if (unlikely(cleaned &&
3694 netif_carrier_ok(netdev) &&
3695 IGB_DESC_UNUSED(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
3696 /* Make sure that anybody stopping the queue after this
3697 * sees the new next_to_clean.
3698 */
3699 smp_mb();
3700 if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
3701 !(test_bit(__IGB_DOWN, &adapter->state))) {
3702 netif_wake_subqueue(netdev, tx_ring->queue_index);
3703 ++adapter->restart_queue;
3704 }
3705 }
3706
3707 if (tx_ring->detect_tx_hung) {
3708 /* Detect a transmit hang in hardware, this serializes the
3709 * check with the clearing of time_stamp and movement of i */
3710 tx_ring->detect_tx_hung = false;
3711 if (tx_ring->buffer_info[i].time_stamp &&
3712 time_after(jiffies, tx_ring->buffer_info[i].time_stamp +
3713 (adapter->tx_timeout_factor * HZ))
3714 && !(rd32(E1000_STATUS) &
3715 E1000_STATUS_TXOFF)) {
3716
3717 tx_desc = E1000_TX_DESC(*tx_ring, i);
3718 /* detected Tx unit hang */
3719 dev_err(&adapter->pdev->dev,
3720 "Detected Tx Unit Hang\n"
2d064c06 3721 " Tx Queue <%d>\n"
3722 " TDH <%x>\n"
3723 " TDT <%x>\n"
3724 " next_to_use <%x>\n"
3725 " next_to_clean <%x>\n"
3726 " head (WB) <%x>\n"
3727 "buffer_info[next_to_clean]\n"
3728 " time_stamp <%lx>\n"
3729 " jiffies <%lx>\n"
3730 " desc.status <%x>\n",
2d064c06 3731 tx_ring->queue_index,
3732 readl(adapter->hw.hw_addr + tx_ring->head),
3733 readl(adapter->hw.hw_addr + tx_ring->tail),
3734 tx_ring->next_to_use,
3735 tx_ring->next_to_clean,
3736 head,
3737 tx_ring->buffer_info[i].time_stamp,
3738 jiffies,
3739 tx_desc->upper.fields.status);
661086df 3740 netif_stop_subqueue(netdev, tx_ring->queue_index);
3741 }
3742 }
3743 tx_ring->total_bytes += total_bytes;
3744 tx_ring->total_packets += total_packets;
3745 tx_ring->tx_stats.bytes += total_bytes;
3746 tx_ring->tx_stats.packets += total_packets;
3747 adapter->net_stats.tx_bytes += total_bytes;
3748 adapter->net_stats.tx_packets += total_packets;
3749 return retval;
3750}
3751
3752#ifdef CONFIG_IGB_LRO
3753 /**
3754 * igb_get_skb_hdr - helper function for LRO header processing
3755 * @skb: pointer to sk_buff to be added to LRO packet
3756 * @iphdr: pointer to ip header structure
3757 * @tcph: pointer to tcp header structure
3758 * @hdr_flags: pointer to header flags
3759 * @priv: pointer to the receive descriptor for the current sk_buff
3760 **/
3761static int igb_get_skb_hdr(struct sk_buff *skb, void **iphdr, void **tcph,
3762 u64 *hdr_flags, void *priv)
3763{
3764 union e1000_adv_rx_desc *rx_desc = priv;
3765 u16 pkt_type = rx_desc->wb.lower.lo_dword.pkt_info &
3766 (E1000_RXDADV_PKTTYPE_IPV4 | E1000_RXDADV_PKTTYPE_TCP);
3767
3768 /* Verify that this is a valid IPv4 TCP packet */
3769 if (pkt_type != (E1000_RXDADV_PKTTYPE_IPV4 |
3770 E1000_RXDADV_PKTTYPE_TCP))
3771 return -1;
3772
3773 /* Set network headers */
3774 skb_reset_network_header(skb);
3775 skb_set_transport_header(skb, ip_hdrlen(skb));
3776 *iphdr = ip_hdr(skb);
3777 *tcph = tcp_hdr(skb);
3778 *hdr_flags = LRO_IPV4 | LRO_TCP;
3779
3780 return 0;
3781
3782}
3783#endif /* CONFIG_IGB_LRO */
3784
3785/**
3786 * igb_receive_skb - helper function to handle rx indications
d3352520 3787 * @ring: pointer to receive ring receiving this packet
3788 * @status: descriptor status field as written by hardware
 3789 * @rx_desc: receive descriptor containing vlan and type information
3790 * @skb: pointer to sk_buff to be indicated to stack
3791 **/
3792static void igb_receive_skb(struct igb_ring *ring, u8 status,
 3793 union e1000_adv_rx_desc *rx_desc,
3794 struct sk_buff *skb)
3795{
 3796 struct igb_adapter *adapter = ring->adapter;
3797 bool vlan_extracted = (adapter->vlgrp && (status & E1000_RXD_STAT_VP));
3798
3799#ifdef CONFIG_IGB_LRO
3800 if (adapter->netdev->features & NETIF_F_LRO &&
3801 skb->ip_summed == CHECKSUM_UNNECESSARY) {
3802 if (vlan_extracted)
3803 lro_vlan_hwaccel_receive_skb(&ring->lro_mgr, skb,
3804 adapter->vlgrp,
3805 le16_to_cpu(rx_desc->wb.upper.vlan),
3806 rx_desc);
3807 else
 3808 lro_receive_skb(&ring->lro_mgr, skb, rx_desc);
3809 ring->lro_used = 1;
3810 } else {
3811#endif
3812 if (vlan_extracted)
3813 vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
3814 le16_to_cpu(rx_desc->wb.upper.vlan));
3815 else
 3817 netif_receive_skb(skb);
3818#ifdef CONFIG_IGB_LRO
3819 }
3820#endif
3821}
3822
3823
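/**
 * igb_rx_checksum_adv - indicate in skb if hw indicated a good cksum
 * @adapter: board private structure
 * @status_err: rx descriptor status and error bits as written by hardware
 * @skb: skb currently being received and modified
 **/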
3824static inline void igb_rx_checksum_adv(struct igb_adapter *adapter,
3825 u32 status_err, struct sk_buff *skb)
3826{
3827 skb->ip_summed = CHECKSUM_NONE;
3828
3829 /* Ignore Checksum bit is set or checksum is disabled through ethtool */
3830 if ((status_err & E1000_RXD_STAT_IXSM) || !adapter->rx_csum)
3831 return;
3832 /* TCP/UDP checksum error bit is set */
3833 if (status_err &
3834 (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
3835 /* let the stack verify checksum errors */
3836 adapter->hw_csum_err++;
3837 return;
3838 }
3839 /* It must be a TCP or UDP packet with a valid checksum */
3840 if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
3841 skb->ip_summed = CHECKSUM_UNNECESSARY;
3842
3843 adapter->hw_csum_good++;
3844}
3845
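/**
 * igb_clean_rx_irq_adv - Send received data up the network stack; adv version
 * @rx_ring: rx ring being cleaned
 * @work_done: incremented for each packet handed to the stack
 * @budget: ceiling on the amount of work allowed in this call
 **/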
3846static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
3847 int *work_done, int budget)
9d5c8243 3848{
3b644cf6 3849 struct igb_adapter *adapter = rx_ring->adapter;
3850 struct net_device *netdev = adapter->netdev;
3851 struct pci_dev *pdev = adapter->pdev;
 3852 union e1000_adv_rx_desc *rx_desc, *next_rxd;
 3853 struct igb_buffer *buffer_info, *next_buffer;
3854 struct sk_buff *skb;
bf36c1a0 3855 unsigned int i;
3856 u32 length, hlen, staterr;
3857 bool cleaned = false;
3858 int cleaned_count = 0;
3859 unsigned int total_bytes = 0, total_packets = 0;
3860
3861 i = rx_ring->next_to_clean;
3862 rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
3863 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
3864
3865 while (staterr & E1000_RXD_STAT_DD) {
3866 if (*work_done >= budget)
3867 break;
3868 (*work_done)++;
3869 buffer_info = &rx_ring->buffer_info[i];
3870
3871 /* HW will not DMA in data larger than the given buffer, even
3872 * if it parses the (NFS, of course) header to be larger. In
3873 * that case, it fills the header buffer and spills the rest
3874 * into the page.
3875 */
3876 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
3877 E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
3878 if (hlen > adapter->rx_ps_hdr_size)
3879 hlen = adapter->rx_ps_hdr_size;
3880
3881 length = le16_to_cpu(rx_desc->wb.upper.length);
3882 cleaned = true;
3883 cleaned_count++;
3884
3885 skb = buffer_info->skb;
3886 prefetch(skb->data - NET_IP_ALIGN);
3887 buffer_info->skb = NULL;
3888 if (!adapter->rx_ps_hdr_size) {
3889 pci_unmap_single(pdev, buffer_info->dma,
3890 adapter->rx_buffer_len +
3891 NET_IP_ALIGN,
3892 PCI_DMA_FROMDEVICE);
3893 skb_put(skb, length);
3894 goto send_up;
3895 }
3896
3897 if (!skb_shinfo(skb)->nr_frags) {
3898 pci_unmap_single(pdev, buffer_info->dma,
3899 adapter->rx_ps_hdr_size +
3900 NET_IP_ALIGN,
3901 PCI_DMA_FROMDEVICE);
3902 skb_put(skb, hlen);
3903 }
3904
3905 if (length) {
9d5c8243 3906 pci_unmap_page(pdev, buffer_info->page_dma,
bf36c1a0 3907 PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
9d5c8243 3908 buffer_info->page_dma = 0;
3909
3910 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
3911 buffer_info->page,
3912 buffer_info->page_offset,
3913 length);
3914
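/* Half-page buffer recycling: if our half of the page is small enough
 * and the skb frag now holds the only reference, take an extra
 * reference and keep the page so the refill path can flip page_offset
 * and hand out the other half; otherwise forget it and allocate a
 * fresh page next time.
 */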
3915 if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) ||
3916 (page_count(buffer_info->page) != 1))
3917 buffer_info->page = NULL;
3918 else
3919 get_page(buffer_info->page);
3920
3921 skb->len += length;
3922 skb->data_len += length;
9d5c8243 3923
bf36c1a0 3924 skb->truesize += length;
3925 }
3926send_up:
3927 i++;
3928 if (i == rx_ring->count)
3929 i = 0;
3930 next_rxd = E1000_RX_DESC_ADV(*rx_ring, i);
3931 prefetch(next_rxd);
3932 next_buffer = &rx_ring->buffer_info[i];
3933
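/* Not end-of-packet: the frame continues in the next buffer. Park
 * the in-progress skb in the next buffer_info slot, swapping out the
 * skb that was staged there, so the next pass keeps appending to it.
 */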
3934 if (!(staterr & E1000_RXD_STAT_EOP)) {
3935 buffer_info->skb = xchg(&next_buffer->skb, skb);
3936 buffer_info->dma = xchg(&next_buffer->dma, 0);
3937 goto next_desc;
3938 }
3939
3940 if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
3941 dev_kfree_skb_irq(skb);
3942 goto next_desc;
3943 }
3944
3945 total_bytes += skb->len;
3946 total_packets++;
3947
3948 igb_rx_checksum_adv(adapter, staterr, skb);
3949
3950 skb->protocol = eth_type_trans(skb, netdev);
3951
d3352520 3952 igb_receive_skb(rx_ring, staterr, rx_desc, skb);
3953
3954 netdev->last_rx = jiffies;
3955
3956next_desc:
3957 rx_desc->wb.upper.status_error = 0;
3958
3959 /* return some buffers to hardware, one at a time is too slow */
3960 if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
3b644cf6 3961 igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);
3962 cleaned_count = 0;
3963 }
3964
3965 /* use prefetched values */
3966 rx_desc = next_rxd;
3967 buffer_info = next_buffer;
3968
3969 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
3970 }
bf36c1a0 3971
3972 rx_ring->next_to_clean = i;
3973 cleaned_count = IGB_DESC_UNUSED(rx_ring);
3974
3975#ifdef CONFIG_IGB_LRO
3976 if (rx_ring->lro_used) {
3977 lro_flush_all(&rx_ring->lro_mgr);
3978 rx_ring->lro_used = 0;
3979 }
3980#endif
3981
9d5c8243 3982 if (cleaned_count)
3b644cf6 3983 igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);
3984
3985 rx_ring->total_packets += total_packets;
3986 rx_ring->total_bytes += total_bytes;
3987 rx_ring->rx_stats.packets += total_packets;
3988 rx_ring->rx_stats.bytes += total_bytes;
3989 adapter->net_stats.rx_bytes += total_bytes;
3990 adapter->net_stats.rx_packets += total_packets;
3991 return cleaned;
3992}
3993
3994
3995/**
3996 * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split
 3997 * @rx_ring: pointer to rx ring to place buffers on
 * @cleaned_count: number of buffers to replace
3998 **/
3b644cf6 3999static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
4000 int cleaned_count)
4001{
3b644cf6 4002 struct igb_adapter *adapter = rx_ring->adapter;
4003 struct net_device *netdev = adapter->netdev;
4004 struct pci_dev *pdev = adapter->pdev;
4005 union e1000_adv_rx_desc *rx_desc;
4006 struct igb_buffer *buffer_info;
4007 struct sk_buff *skb;
4008 unsigned int i;
4009
4010 i = rx_ring->next_to_use;
4011 buffer_info = &rx_ring->buffer_info[i];
4012
4013 while (cleaned_count--) {
4014 rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
4015
bf36c1a0 4016 if (adapter->rx_ps_hdr_size && !buffer_info->page_dma) {
9d5c8243 4017 if (!buffer_info->page) {
4018 buffer_info->page = alloc_page(GFP_ATOMIC);
4019 if (!buffer_info->page) {
4020 adapter->alloc_rx_buff_failed++;
4021 goto no_buffers;
4022 }
4023 buffer_info->page_offset = 0;
4024 } else {
4025 buffer_info->page_offset ^= PAGE_SIZE / 2;
4026 }
4027 buffer_info->page_dma =
4028 pci_map_page(pdev,
4029 buffer_info->page,
4030 buffer_info->page_offset,
4031 PAGE_SIZE / 2,
4032 PCI_DMA_FROMDEVICE);
4033 }
4034
4035 if (!buffer_info->skb) {
4036 int bufsz;
4037
4038 if (adapter->rx_ps_hdr_size)
4039 bufsz = adapter->rx_ps_hdr_size;
4040 else
4041 bufsz = adapter->rx_buffer_len;
4042 bufsz += NET_IP_ALIGN;
4043 skb = netdev_alloc_skb(netdev, bufsz);
4044
4045 if (!skb) {
4046 adapter->alloc_rx_buff_failed++;
4047 goto no_buffers;
4048 }
4049
4050 /* Make buffer alignment 2 beyond a 16 byte boundary
4051 * this will result in a 16 byte aligned IP header after
4052 * the 14 byte MAC header is removed
4053 */
4054 skb_reserve(skb, NET_IP_ALIGN);
4055
4056 buffer_info->skb = skb;
4057 buffer_info->dma = pci_map_single(pdev, skb->data,
4058 bufsz,
4059 PCI_DMA_FROMDEVICE);
4060
4061 }
4062 /* Refresh the desc even if buffer_addrs didn't change because
4063 * each write-back erases this info. */
4064 if (adapter->rx_ps_hdr_size) {
4065 rx_desc->read.pkt_addr =
4066 cpu_to_le64(buffer_info->page_dma);
4067 rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
4068 } else {
4069 rx_desc->read.pkt_addr =
4070 cpu_to_le64(buffer_info->dma);
4071 rx_desc->read.hdr_addr = 0;
4072 }
4073
4074 i++;
4075 if (i == rx_ring->count)
4076 i = 0;
4077 buffer_info = &rx_ring->buffer_info[i];
4078 }
4079
4080no_buffers:
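/* Publish the new buffers. next_to_use records where software will
 * continue filling, but the tail register is written one descriptor
 * behind it; leaving that one slot unused presumably keeps a full
 * ring (head == tail) distinguishable from an empty one.
 */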
4081 if (rx_ring->next_to_use != i) {
4082 rx_ring->next_to_use = i;
4083 if (i == 0)
4084 i = (rx_ring->count - 1);
4085 else
4086 i--;
4087
4088 /* Force memory writes to complete before letting h/w
4089 * know there are new descriptors to fetch. (Only
4090 * applicable for weak-ordered memory model archs,
4091 * such as IA-64). */
4092 wmb();
4093 writel(i, adapter->hw.hw_addr + rx_ring->tail);
4094 }
4095}
4096
4097/**
 4098 * igb_mii_ioctl - handle MII related ioctls
 4099 * @netdev: network interface device structure
 4100 * @ifr: pointer to interface request structure
 4101 * @cmd: ioctl command to execute
4102 **/
4103static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4104{
4105 struct igb_adapter *adapter = netdev_priv(netdev);
4106 struct mii_ioctl_data *data = if_mii(ifr);
4107
4108 if (adapter->hw.phy.media_type != e1000_media_type_copper)
4109 return -EOPNOTSUPP;
4110
4111 switch (cmd) {
4112 case SIOCGMIIPHY:
4113 data->phy_id = adapter->hw.phy.addr;
4114 break;
4115 case SIOCGMIIREG:
4116 if (!capable(CAP_NET_ADMIN))
4117 return -EPERM;
4118 if (adapter->hw.phy.ops.read_phy_reg(&adapter->hw,
 4119 data->reg_num & 0x1F,
 4120 &data->val_out))
4121 return -EIO;
4122 break;
4123 case SIOCSMIIREG:
4124 default:
4125 return -EOPNOTSUPP;
4126 }
4127 return 0;
4128}
4129
4130/**
 4131 * igb_ioctl - dispatch ioctl commands (MII requests go to igb_mii_ioctl)
 4132 * @netdev: network interface device structure
 4133 * @ifr: pointer to interface request structure
 4134 * @cmd: ioctl command to execute
4135 **/
4136static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4137{
4138 switch (cmd) {
4139 case SIOCGMIIPHY:
4140 case SIOCGMIIREG:
4141 case SIOCSMIIREG:
4142 return igb_mii_ioctl(netdev, ifr, cmd);
4143 default:
4144 return -EOPNOTSUPP;
4145 }
4146}
4147
4148static void igb_vlan_rx_register(struct net_device *netdev,
4149 struct vlan_group *grp)
4150{
4151 struct igb_adapter *adapter = netdev_priv(netdev);
4152 struct e1000_hw *hw = &adapter->hw;
4153 u32 ctrl, rctl;
4154
4155 igb_irq_disable(adapter);
4156 adapter->vlgrp = grp;
4157
4158 if (grp) {
4159 /* enable VLAN tag insert/strip */
4160 ctrl = rd32(E1000_CTRL);
4161 ctrl |= E1000_CTRL_VME;
4162 wr32(E1000_CTRL, ctrl);
4163
4164 /* enable VLAN receive filtering */
4165 rctl = rd32(E1000_RCTL);
4166 rctl &= ~E1000_RCTL_CFIEN;
4167 wr32(E1000_RCTL, rctl);
4168 igb_update_mng_vlan(adapter);
4169 wr32(E1000_RLPML,
4170 adapter->max_frame_size + VLAN_TAG_SIZE);
4171 } else {
4172 /* disable VLAN tag insert/strip */
4173 ctrl = rd32(E1000_CTRL);
4174 ctrl &= ~E1000_CTRL_VME;
4175 wr32(E1000_CTRL, ctrl);
4176
4177 if (adapter->mng_vlan_id != (u16)IGB_MNG_VLAN_NONE) {
4178 igb_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
4179 adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
4180 }
4181 wr32(E1000_RLPML,
4182 adapter->max_frame_size);
4183 }
4184
4185 if (!test_bit(__IGB_DOWN, &adapter->state))
4186 igb_irq_enable(adapter);
4187}
4188
4189static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
4190{
4191 struct igb_adapter *adapter = netdev_priv(netdev);
4192 struct e1000_hw *hw = &adapter->hw;
4193 u32 vfta, index;
4194
4195 if ((adapter->hw.mng_cookie.status &
4196 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
4197 (vid == adapter->mng_vlan_id))
4198 return;
4199 /* add VID to filter table */
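/* The VLAN filter table is 128 32-bit registers: bits 11:5 of the
 * VID select the register, bits 4:0 select the bit within it.
 * For example, vid 100 lands at index 100 >> 5 = 3, bit 100 & 0x1F = 4.
 */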
4200 index = (vid >> 5) & 0x7F;
4201 vfta = array_rd32(E1000_VFTA, index);
4202 vfta |= (1 << (vid & 0x1F));
4203 igb_write_vfta(&adapter->hw, index, vfta);
4204}
4205
4206static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
4207{
4208 struct igb_adapter *adapter = netdev_priv(netdev);
4209 struct e1000_hw *hw = &adapter->hw;
4210 u32 vfta, index;
4211
4212 igb_irq_disable(adapter);
4213 vlan_group_set_device(adapter->vlgrp, vid, NULL);
4214
4215 if (!test_bit(__IGB_DOWN, &adapter->state))
4216 igb_irq_enable(adapter);
4217
4218 if ((adapter->hw.mng_cookie.status &
4219 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
4220 (vid == adapter->mng_vlan_id)) {
4221 /* release control to f/w */
4222 igb_release_hw_control(adapter);
4223 return;
4224 }
4225
4226 /* remove VID from filter table */
4227 index = (vid >> 5) & 0x7F;
4228 vfta = array_rd32(E1000_VFTA, index);
4229 vfta &= ~(1 << (vid & 0x1F));
4230 igb_write_vfta(&adapter->hw, index, vfta);
4231}
4232
4233static void igb_restore_vlan(struct igb_adapter *adapter)
4234{
4235 igb_vlan_rx_register(adapter->netdev, adapter->vlgrp);
4236
4237 if (adapter->vlgrp) {
4238 u16 vid;
4239 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
4240 if (!vlan_group_get_device(adapter->vlgrp, vid))
4241 continue;
4242 igb_vlan_rx_add_vid(adapter->netdev, vid);
4243 }
4244 }
4245}
4246
4247int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
4248{
4249 struct e1000_mac_info *mac = &adapter->hw.mac;
4250
4251 mac->autoneg = 0;
4252
 4253 /* Fiber NICs only allow 1000 Mbps Full duplex */
4254 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) &&
4255 spddplx != (SPEED_1000 + DUPLEX_FULL)) {
4256 dev_err(&adapter->pdev->dev,
4257 "Unsupported Speed/Duplex configuration\n");
4258 return -EINVAL;
4259 }
4260
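/* SPEED_10/100/1000 have the literal values 10, 100 and 1000, and
 * DUPLEX_HALF/DUPLEX_FULL are 0 and 1, so speed + duplex collapses
 * into the distinct case values 10, 11, 100, 101 and 1001 below.
 */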
4261 switch (spddplx) {
4262 case SPEED_10 + DUPLEX_HALF:
4263 mac->forced_speed_duplex = ADVERTISE_10_HALF;
4264 break;
4265 case SPEED_10 + DUPLEX_FULL:
4266 mac->forced_speed_duplex = ADVERTISE_10_FULL;
4267 break;
4268 case SPEED_100 + DUPLEX_HALF:
4269 mac->forced_speed_duplex = ADVERTISE_100_HALF;
4270 break;
4271 case SPEED_100 + DUPLEX_FULL:
4272 mac->forced_speed_duplex = ADVERTISE_100_FULL;
4273 break;
4274 case SPEED_1000 + DUPLEX_FULL:
4275 mac->autoneg = 1;
4276 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
4277 break;
4278 case SPEED_1000 + DUPLEX_HALF: /* not supported */
4279 default:
4280 dev_err(&adapter->pdev->dev,
4281 "Unsupported Speed/Duplex configuration\n");
4282 return -EINVAL;
4283 }
4284 return 0;
4285}
4286
4287
4288static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
4289{
4290 struct net_device *netdev = pci_get_drvdata(pdev);
4291 struct igb_adapter *adapter = netdev_priv(netdev);
4292 struct e1000_hw *hw = &adapter->hw;
2d064c06 4293 u32 ctrl, rctl, status;
4294 u32 wufc = adapter->wol;
4295#ifdef CONFIG_PM
4296 int retval = 0;
4297#endif
4298
4299 netif_device_detach(netdev);
4300
4301 if (netif_running(netdev))
4302 igb_close(netdev);
4303
4304 igb_reset_interrupt_capability(adapter);
4305
4306 igb_free_queues(adapter);
4307
4308#ifdef CONFIG_PM
4309 retval = pci_save_state(pdev);
4310 if (retval)
4311 return retval;
4312#endif
4313
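/* If we still have link, don't arm the link-change wake-up filter;
 * presumably the link dropping as the port powers down would
 * otherwise wake the system straight back up.
 */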
4314 status = rd32(E1000_STATUS);
4315 if (status & E1000_STATUS_LU)
4316 wufc &= ~E1000_WUFC_LNKC;
4317
4318 if (wufc) {
4319 igb_setup_rctl(adapter);
4320 igb_set_multi(netdev);
4321
4322 /* turn on all-multi mode if wake on multicast is enabled */
4323 if (wufc & E1000_WUFC_MC) {
4324 rctl = rd32(E1000_RCTL);
4325 rctl |= E1000_RCTL_MPE;
4326 wr32(E1000_RCTL, rctl);
4327 }
4328
4329 ctrl = rd32(E1000_CTRL);
4330 /* advertise wake from D3Cold */
4331 #define E1000_CTRL_ADVD3WUC 0x00100000
4332 /* phy power management enable */
4333 #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
4334 ctrl |= E1000_CTRL_ADVD3WUC;
4335 wr32(E1000_CTRL, ctrl);
4336
4337 /* Allow time for pending master requests to run */
4338 igb_disable_pcie_master(&adapter->hw);
4339
4340 wr32(E1000_WUC, E1000_WUC_PME_EN);
4341 wr32(E1000_WUFC, wufc);
4342 } else {
4343 wr32(E1000_WUC, 0);
4344 wr32(E1000_WUFC, 0);
4345 }
4346
4347 /* make sure adapter isn't asleep if manageability/wol is enabled */
4348 if (wufc || adapter->en_mng_pt) {
4349 pci_enable_wake(pdev, PCI_D3hot, 1);
4350 pci_enable_wake(pdev, PCI_D3cold, 1);
4351 } else {
4352 igb_shutdown_fiber_serdes_link_82575(hw);
4353 pci_enable_wake(pdev, PCI_D3hot, 0);
4354 pci_enable_wake(pdev, PCI_D3cold, 0);
4355 }
4356
4357 /* Release control of h/w to f/w. If f/w is AMT enabled, this
4358 * would have already happened in close and is redundant. */
4359 igb_release_hw_control(adapter);
4360
4361 pci_disable_device(pdev);
4362
4363 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4364
4365 return 0;
4366}
4367
4368#ifdef CONFIG_PM
4369static int igb_resume(struct pci_dev *pdev)
4370{
4371 struct net_device *netdev = pci_get_drvdata(pdev);
4372 struct igb_adapter *adapter = netdev_priv(netdev);
4373 struct e1000_hw *hw = &adapter->hw;
 4374 int err;
4375
4376 pci_set_power_state(pdev, PCI_D0);
4377 pci_restore_state(pdev);
4378
4379 if (adapter->need_ioport)
4380 err = pci_enable_device(pdev);
4381 else
4382 err = pci_enable_device_mem(pdev);
4383 if (err) {
4384 dev_err(&pdev->dev,
4385 "igb: Cannot enable PCI device from suspend\n");
4386 return err;
4387 }
4388 pci_set_master(pdev);
4389
4390 pci_enable_wake(pdev, PCI_D3hot, 0);
4391 pci_enable_wake(pdev, PCI_D3cold, 0);
4392
4393 igb_set_interrupt_capability(adapter);
4394
4395 if (igb_alloc_queues(adapter)) {
4396 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
4397 return -ENOMEM;
4398 }
4399
4400 /* e1000_power_up_phy(adapter); */
4401
4402 igb_reset(adapter);
4403 wr32(E1000_WUS, ~0);
4404
4405 if (netif_running(netdev)) {
4406 err = igb_open(netdev);
4407 if (err)
4408 return err;
4409 }
4410
4411 netif_device_attach(netdev);
4412
4413 /* let the f/w know that the h/w is now under the control of the
4414 * driver. */
4415 igb_get_hw_control(adapter);
4416
4417 return 0;
4418}
4419#endif
4420
4421static void igb_shutdown(struct pci_dev *pdev)
4422{
4423 igb_suspend(pdev, PMSG_SUSPEND);
4424}
4425
4426#ifdef CONFIG_NET_POLL_CONTROLLER
4427/*
4428 * Polling 'interrupt' - used by things like netconsole to send skbs
4429 * without having to re-enable interrupts. It's not called while
4430 * the interrupt routine is executing.
4431 */
4432static void igb_netpoll(struct net_device *netdev)
4433{
4434 struct igb_adapter *adapter = netdev_priv(netdev);
4435 int i;
4436 int work_done = 0;
4437
4438 igb_irq_disable(adapter);
4439 adapter->flags |= IGB_FLAG_IN_NETPOLL;
4440
9d5c8243 4441 for (i = 0; i < adapter->num_tx_queues; i++)
3b644cf6 4442 igb_clean_tx_irq(&adapter->tx_ring[i]);
4443
4444 for (i = 0; i < adapter->num_rx_queues; i++)
3b644cf6 4445 igb_clean_rx_irq_adv(&adapter->rx_ring[i],
4446 &work_done,
4447 adapter->rx_ring[i].napi.weight);
4448
7dfc16fa 4449 adapter->flags &= ~IGB_FLAG_IN_NETPOLL;
4450 igb_irq_enable(adapter);
4451}
4452#endif /* CONFIG_NET_POLL_CONTROLLER */
4453
4454/**
4455 * igb_io_error_detected - called when PCI error is detected
4456 * @pdev: Pointer to PCI device
4457 * @state: The current pci connection state
4458 *
4459 * This function is called after a PCI bus error affecting
4460 * this device has been detected.
4461 */
4462static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
4463 pci_channel_state_t state)
4464{
4465 struct net_device *netdev = pci_get_drvdata(pdev);
4466 struct igb_adapter *adapter = netdev_priv(netdev);
4467
4468 netif_device_detach(netdev);
4469
4470 if (netif_running(netdev))
4471 igb_down(adapter);
4472 pci_disable_device(pdev);
4473
 4474 /* Request a slot reset. */
4475 return PCI_ERS_RESULT_NEED_RESET;
4476}
4477
4478/**
4479 * igb_io_slot_reset - called after the pci bus has been reset.
4480 * @pdev: Pointer to PCI device
4481 *
4482 * Restart the card from scratch, as if from a cold-boot. Implementation
4483 * resembles the first-half of the igb_resume routine.
4484 */
4485static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
4486{
4487 struct net_device *netdev = pci_get_drvdata(pdev);
4488 struct igb_adapter *adapter = netdev_priv(netdev);
4489 struct e1000_hw *hw = &adapter->hw;
42bfd33a 4490 int err;
9d5c8243 4491
4492 if (adapter->need_ioport)
4493 err = pci_enable_device(pdev);
4494 else
4495 err = pci_enable_device_mem(pdev);
4496 if (err) {
4497 dev_err(&pdev->dev,
4498 "Cannot re-enable PCI device after reset.\n");
4499 return PCI_ERS_RESULT_DISCONNECT;
4500 }
4501 pci_set_master(pdev);
c682fc23 4502 pci_restore_state(pdev);
4503
4504 pci_enable_wake(pdev, PCI_D3hot, 0);
4505 pci_enable_wake(pdev, PCI_D3cold, 0);
4506
4507 igb_reset(adapter);
4508 wr32(E1000_WUS, ~0);
4509
4510 return PCI_ERS_RESULT_RECOVERED;
4511}
4512
4513/**
4514 * igb_io_resume - called when traffic can start flowing again.
4515 * @pdev: Pointer to PCI device
4516 *
4517 * This callback is called when the error recovery driver tells us that
 4518 * it's OK to resume normal operation. Implementation resembles the
4519 * second-half of the igb_resume routine.
4520 */
4521static void igb_io_resume(struct pci_dev *pdev)
4522{
4523 struct net_device *netdev = pci_get_drvdata(pdev);
4524 struct igb_adapter *adapter = netdev_priv(netdev);
4525
4526 igb_init_manageability(adapter);
4527
4528 if (netif_running(netdev)) {
4529 if (igb_up(adapter)) {
4530 dev_err(&pdev->dev, "igb_up failed after reset\n");
4531 return;
4532 }
4533 }
4534
4535 netif_device_attach(netdev);
4536
4537 /* let the f/w know that the h/w is now under the control of the
4538 * driver. */
4539 igb_get_hw_control(adapter);
4540
4541}
4542
4543/* igb_main.c */