/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#ifdef CONFIG_DCA
#include <linux/dca.h>
#endif
#include "igb.h"

#define DRV_VERSION "1.2.45-k2"
char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
				"Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] = "Copyright (c) 2007 Intel Corporation.";

static const struct e1000_info *igb_info_tbl[] = {
	[board_82575] = &e1000_82575_info,
};

static struct pci_device_id igb_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, igb_pci_tbl);

void igb_reset(struct igb_adapter *);
static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
static void igb_free_tx_resources(struct igb_ring *);
static void igb_free_rx_resources(struct igb_ring *);
void igb_update_stats(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void __devexit igb_remove(struct pci_dev *pdev);
static int igb_sw_init(struct igb_adapter *);
static int igb_open(struct net_device *);
static int igb_close(struct net_device *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_setup_rctl(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
static void igb_clean_tx_ring(struct igb_ring *);
static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_multi(struct net_device *);
static void igb_update_phy_info(unsigned long);
static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
static int igb_xmit_frame_ring_adv(struct sk_buff *, struct net_device *,
				   struct igb_ring *);
static int igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *);
static struct net_device_stats *igb_get_stats(struct net_device *);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
static irqreturn_t igb_msix_rx(int irq, void *);
static irqreturn_t igb_msix_tx(int irq, void *);
static int igb_clean_rx_ring_msix(struct napi_struct *, int);
#ifdef CONFIG_DCA
static void igb_update_rx_dca(struct igb_ring *);
static void igb_update_tx_dca(struct igb_ring *);
static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_DCA */
static bool igb_clean_tx_irq(struct igb_ring *);
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_rx_irq_adv(struct igb_ring *, int *, int);
static void igb_alloc_rx_buffers_adv(struct igb_ring *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_rx_register(struct net_device *, struct vlan_group *);
static void igb_vlan_rx_add_vid(struct net_device *, u16);
static void igb_vlan_rx_kill_vid(struct net_device *, u16);
static void igb_restore_vlan(struct igb_adapter *);

static int igb_suspend(struct pci_dev *, pm_message_t);
#ifdef CONFIG_PM
static int igb_resume(struct pci_dev *);
#endif
static void igb_shutdown(struct pci_dev *);
#ifdef CONFIG_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
	.notifier_call	= igb_notify_dca,
	.next		= NULL,
	.priority	= 0
};
#endif

#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void igb_netpoll(struct net_device *);
#endif

static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
		     pci_channel_state_t);
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
static void igb_io_resume(struct pci_dev *);

static struct pci_error_handlers igb_err_handler = {
	.error_detected = igb_io_error_detected,
	.slot_reset = igb_io_slot_reset,
	.resume = igb_io_resume,
};

static struct pci_driver igb_driver = {
	.name     = igb_driver_name,
	.id_table = igb_pci_tbl,
	.probe    = igb_probe,
	.remove   = __devexit_p(igb_remove),
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend  = igb_suspend,
	.resume   = igb_resume,
#endif
	.shutdown = igb_shutdown,
	.err_handler = &igb_err_handler
};

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/**
 * igb_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *igb_get_hw_dev_name(struct e1000_hw *hw)
{
	struct igb_adapter *adapter = hw->back;
	return adapter->netdev->name;
}

/**
 * igb_init_module - Driver Registration Routine
 *
 * igb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init igb_init_module(void)
{
	int ret;

	printk(KERN_INFO "%s - version %s\n",
	       igb_driver_string, igb_driver_version);

	printk(KERN_INFO "%s\n", igb_copyright);

	ret = pci_register_driver(&igb_driver);
#ifdef CONFIG_DCA
	dca_register_notify(&dca_notifier);
#endif
	return ret;
}

module_init(igb_init_module);

/**
 * igb_exit_module - Driver Exit Cleanup Routine
 *
 * igb_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit igb_exit_module(void)
{
#ifdef CONFIG_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&igb_driver);
}

module_exit(igb_exit_module);

/**
 * igb_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int igb_alloc_queues(struct igb_adapter *adapter)
{
	int i;

	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
				   sizeof(struct igb_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		return -ENOMEM;

	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
				   sizeof(struct igb_ring), GFP_KERNEL);
	if (!adapter->rx_ring) {
		kfree(adapter->tx_ring);
		return -ENOMEM;
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *ring = &(adapter->tx_ring[i]);
		ring->adapter = adapter;
		ring->queue_index = i;
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = &(adapter->rx_ring[i]);
		ring->adapter = adapter;
		ring->queue_index = i;
		ring->itr_register = E1000_ITR;

		/* set a default napi handler for each rx_ring */
		netif_napi_add(adapter->netdev, &ring->napi, igb_poll, 64);
	}
	return 0;
}

#define IGB_N0_QUEUE -1
static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue,
			      int tx_queue, int msix_vector)
{
	u32 msixbm = 0;
	struct e1000_hw *hw = &adapter->hw;
	/* The 82575 assigns vectors using a bitmask, which matches the
	   bitmask for the EICR/EIMS/EIMC registers.  To assign one
	   or more queues to a vector, we write the appropriate bits
	   into the MSIXBM register for that vector. */
	if (rx_queue > IGB_N0_QUEUE) {
		msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
		adapter->rx_ring[rx_queue].eims_value = msixbm;
	}
	if (tx_queue > IGB_N0_QUEUE) {
		msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
		adapter->tx_ring[tx_queue].eims_value =
			E1000_EICR_TX_QUEUE0 << tx_queue;
	}
	array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
}

/**
 * igb_configure_msix - Configure MSI-X hardware
 *
 * igb_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void igb_configure_msix(struct igb_adapter *adapter)
{
	u32 tmp;
	int i, vector = 0;
	struct e1000_hw *hw = &adapter->hw;

	adapter->eims_enable_mask = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *tx_ring = &adapter->tx_ring[i];
		igb_assign_vector(adapter, IGB_N0_QUEUE, i, vector++);
		adapter->eims_enable_mask |= tx_ring->eims_value;
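		/* the EITR interval field is in 256 ns units while itr_val
		 * is in interrupts/sec, hence 10^9 / (itr_val * 256); an
		 * itr_val of 0 gets the smallest non-zero interval instead */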
		if (tx_ring->itr_val)
			writel(1000000000 / (tx_ring->itr_val * 256),
			       hw->hw_addr + tx_ring->itr_register);
		else
			writel(1, hw->hw_addr + tx_ring->itr_register);
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *rx_ring = &adapter->rx_ring[i];
		igb_assign_vector(adapter, i, IGB_N0_QUEUE, vector++);
		adapter->eims_enable_mask |= rx_ring->eims_value;
		if (rx_ring->itr_val)
			writel(1000000000 / (rx_ring->itr_val * 256),
			       hw->hw_addr + rx_ring->itr_register);
		else
			writel(1, hw->hw_addr + rx_ring->itr_register);
	}

	/* set vector for other causes, i.e. link changes */
	array_wr32(E1000_MSIXBM(0), vector++,
		   E1000_EIMS_OTHER);

	tmp = rd32(E1000_CTRL_EXT);
	/* enable MSI-X PBA support*/
	tmp |= E1000_CTRL_EXT_PBA_CLR;

	/* Auto-Mask interrupts upon ICR read. */
	tmp |= E1000_CTRL_EXT_EIAME;
	tmp |= E1000_CTRL_EXT_IRCA;

	wr32(E1000_CTRL_EXT, tmp);
	adapter->eims_enable_mask |= E1000_EIMS_OTHER;
	adapter->eims_other = E1000_EIMS_OTHER;

	wrfl();
}

/**
 * igb_request_msix - Initialize MSI-X interrupts
 *
 * igb_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int igb_request_msix(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i, err = 0, vector = 0;
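
	/* vectors are requested in the same order igb_configure_msix()
	 * assigned them: one per Tx ring, then one per Rx ring, then a
	 * final vector for link and other causes */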
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *ring = &(adapter->tx_ring[i]);
		sprintf(ring->name, "%s-tx%d", netdev->name, i);
		err = request_irq(adapter->msix_entries[vector].vector,
				  &igb_msix_tx, 0, ring->name,
				  &(adapter->tx_ring[i]));
		if (err)
			goto out;
		ring->itr_register = E1000_EITR(0) + (vector << 2);
		ring->itr_val = adapter->itr;
		vector++;
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = &(adapter->rx_ring[i]);
		if (strlen(netdev->name) < (IFNAMSIZ - 5))
			sprintf(ring->name, "%s-rx%d", netdev->name, i);
		else
			memcpy(ring->name, netdev->name, IFNAMSIZ);
		err = request_irq(adapter->msix_entries[vector].vector,
				  &igb_msix_rx, 0, ring->name,
				  &(adapter->rx_ring[i]));
		if (err)
			goto out;
		ring->itr_register = E1000_EITR(0) + (vector << 2);
		ring->itr_val = adapter->itr;
		/* overwrite the poll routine for MSIX, we've already done
		 * netif_napi_add */
		ring->napi.poll = &igb_clean_rx_ring_msix;
		vector++;
	}

	err = request_irq(adapter->msix_entries[vector].vector,
			  &igb_msix_other, 0, netdev->name, netdev);
	if (err)
		goto out;

	igb_configure_msix(adapter);
	return 0;
out:
	return err;
}

static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->msi_enabled)
		pci_disable_msi(adapter->pdev);

	return;
}

/**
 * igb_set_interrupt_capability - set MSI or MSI-X if supported
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static void igb_set_interrupt_capability(struct igb_adapter *adapter)
{
	int err;
	int numvecs, i;
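
	/* one vector per Tx ring and per Rx ring, plus one more for
	 * "other" causes such as link state changes */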
	numvecs = adapter->num_tx_queues + adapter->num_rx_queues + 1;
	adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
					GFP_KERNEL);
	if (!adapter->msix_entries)
		goto msi_only;

	for (i = 0; i < numvecs; i++)
		adapter->msix_entries[i].entry = i;

	err = pci_enable_msix(adapter->pdev,
			      adapter->msix_entries,
			      numvecs);
	if (err == 0)
		return;

	igb_reset_interrupt_capability(adapter);

	/* If we can't do MSI-X, try MSI */
msi_only:
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	if (!pci_enable_msi(adapter->pdev))
		adapter->msi_enabled = 1;

#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	/* Notify the stack of the (possibly) reduced Tx Queue count. */
	adapter->netdev->egress_subqueue_count = adapter->num_tx_queues;
#endif
	return;
}

/**
 * igb_request_irq - initialize interrupts
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_request_irq(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	int err = 0;

	if (adapter->msix_entries) {
		err = igb_request_msix(adapter);
		if (!err)
			goto request_done;
		/* fall back to MSI */
		igb_reset_interrupt_capability(adapter);
		if (!pci_enable_msi(adapter->pdev))
			adapter->msi_enabled = 1;
		igb_free_all_tx_resources(adapter);
		igb_free_all_rx_resources(adapter);
		adapter->num_rx_queues = 1;
		igb_alloc_queues(adapter);
	} else {
		wr32(E1000_MSIXBM(0), (E1000_EICR_RX_QUEUE0 |
				       E1000_EIMS_OTHER));
	}

	if (adapter->msi_enabled) {
		err = request_irq(adapter->pdev->irq, &igb_intr_msi, 0,
				  netdev->name, netdev);
		if (!err)
			goto request_done;
		/* fall back to legacy interrupts */
		igb_reset_interrupt_capability(adapter);
		adapter->msi_enabled = 0;
	}

	err = request_irq(adapter->pdev->irq, &igb_intr, IRQF_SHARED,
			  netdev->name, netdev);

	if (err)
		dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n",
			err);

request_done:
	return err;
}

static void igb_free_irq(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->msix_entries) {
		int vector = 0, i;

		for (i = 0; i < adapter->num_tx_queues; i++)
			free_irq(adapter->msix_entries[vector++].vector,
				 &(adapter->tx_ring[i]));
		for (i = 0; i < adapter->num_rx_queues; i++)
			free_irq(adapter->msix_entries[vector++].vector,
				 &(adapter->rx_ring[i]));

		free_irq(adapter->msix_entries[vector++].vector, netdev);
		return;
	}

	free_irq(adapter->pdev->irq, netdev);
}

/**
 * igb_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void igb_irq_disable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->msix_entries) {
		wr32(E1000_EIAM, 0);
		wr32(E1000_EIMC, ~0);
		wr32(E1000_EIAC, 0);
	}
	wr32(E1000_IAM, 0);
	wr32(E1000_IMC, ~0);
	wrfl();
	synchronize_irq(adapter->pdev->irq);
}

/**
 * igb_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void igb_irq_enable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->msix_entries) {
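		/* causes written to EIAC auto-clear in EICR when their MSI-X
		 * vector fires, and EIAM auto-masks them, so the per-ring
		 * handlers never have to acknowledge these bits by hand;
		 * only link changes (LSC) use the legacy IMS path */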
		wr32(E1000_EIAC, adapter->eims_enable_mask);
		wr32(E1000_EIAM, adapter->eims_enable_mask);
		wr32(E1000_EIMS, adapter->eims_enable_mask);
		wr32(E1000_IMS, E1000_IMS_LSC);
	} else {
		wr32(E1000_IMS, IMS_ENABLE_MASK);
		wr32(E1000_IAM, IMS_ENABLE_MASK);
	}
}

static void igb_update_mng_vlan(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u16 vid = adapter->hw.mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;
	if (adapter->vlgrp) {
		if (!vlan_group_get_device(adapter->vlgrp, vid)) {
			if (adapter->hw.mng_cookie.status &
			    E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
				igb_vlan_rx_add_vid(netdev, vid);
				adapter->mng_vlan_id = vid;
			} else
				adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;

			if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
			    (vid != old_vid) &&
			    !vlan_group_get_device(adapter->vlgrp, old_vid))
				igb_vlan_rx_kill_vid(netdev, old_vid);
		} else
			adapter->mng_vlan_id = vid;
	}
}

/**
 * igb_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 **/
static void igb_release_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
	     ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 **/
static void igb_get_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
	     ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
}

static void igb_init_manageability(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->en_mng_pt) {
		u32 manc2h = rd32(E1000_MANC2H);
		u32 manc = rd32(E1000_MANC);

		/* enable receiving management packets to the host */
		/* this will probably generate destination unreachable messages
		 * from the host OS, but the packets will be handled on SMBUS */
		manc |= E1000_MANC_EN_MNG2HOST;
#define E1000_MNG2HOST_PORT_623 (1 << 5)
#define E1000_MNG2HOST_PORT_664 (1 << 6)
		manc2h |= E1000_MNG2HOST_PORT_623;
		manc2h |= E1000_MNG2HOST_PORT_664;
		wr32(E1000_MANC2H, manc2h);

		wr32(E1000_MANC, manc);
	}
}

/**
 * igb_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 **/
static void igb_configure(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	igb_get_hw_control(adapter);
	igb_set_multi(netdev);

	igb_restore_vlan(adapter);
	igb_init_manageability(adapter);

	igb_configure_tx(adapter);
	igb_setup_rctl(adapter);
	igb_configure_rx(adapter);

	igb_rx_fifo_flush_82575(&adapter->hw);

	/* call IGB_DESC_UNUSED which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = &adapter->rx_ring[i];
		igb_alloc_rx_buffers_adv(ring, IGB_DESC_UNUSED(ring));
	}

	adapter->tx_queue_len = netdev->tx_queue_len;
}

/**
 * igb_up - Open the interface and prepare it to handle traffic
 * @adapter: board private structure
 **/
int igb_up(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	/* hardware has been reset, we need to reload some things */
	igb_configure(adapter);

	clear_bit(__IGB_DOWN, &adapter->state);

	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_enable(&adapter->rx_ring[i].napi);
	if (adapter->msix_entries)
		igb_configure_msix(adapter);

	/* Clear any pending interrupts. */
	rd32(E1000_ICR);
	igb_irq_enable(adapter);

	/* Fire a link change interrupt to start the watchdog. */
	wr32(E1000_ICS, E1000_ICS_LSC);
	return 0;
}

void igb_down(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 tctl, rctl;
	int i;

	/* signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer */
	set_bit(__IGB_DOWN, &adapter->state);

	/* disable receives in the hardware */
	rctl = rd32(E1000_RCTL);
	wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_stop_queue(netdev);
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	for (i = 0; i < adapter->num_tx_queues; i++)
		netif_stop_subqueue(netdev, i);
#endif

	/* disable transmits in the hardware */
	tctl = rd32(E1000_TCTL);
	tctl &= ~E1000_TCTL_EN;
	wr32(E1000_TCTL, tctl);
	/* flush both disables and wait for them to finish */
	wrfl();
	msleep(10);

	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_disable(&adapter->rx_ring[i].napi);

	igb_irq_disable(adapter);

	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	netdev->tx_queue_len = adapter->tx_queue_len;
	netif_carrier_off(netdev);
	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	if (!pci_channel_offline(adapter->pdev))
		igb_reset(adapter);
	igb_clean_all_tx_rings(adapter);
	igb_clean_all_rx_rings(adapter);
}

void igb_reinit_locked(struct igb_adapter *adapter)
{
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		msleep(1);
	igb_down(adapter);
	igb_up(adapter);
	clear_bit(__IGB_RESETTING, &adapter->state);
}

void igb_reset(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_fc_info *fc = &adapter->hw.fc;
	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
	u16 hwm;

	/* Repartition Pba for greater than 9k mtu
	 * To take effect CTRL.RST is required.
	 */
	pba = E1000_PBA_34K;

	if (adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
		/* adjust PBA for jumbo frames */
		wr32(E1000_PBA, pba);

		/* To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB.  Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB. */
		pba = rd32(E1000_PBA);
		/* upper 16 bits has Tx packet buffer allocation size in KB */
		tx_space = pba >> 16;
		/* lower 16 bits has Rx packet buffer allocation size in KB */
		pba &= 0xffff;
		/* the tx fifo also stores 16 bytes of information about the tx
		 * but don't include ethernet FCS because hardware appends it */
		min_tx_space = (adapter->max_frame_size +
				sizeof(struct e1000_tx_desc) -
				ETH_FCS_LEN) * 2;
		min_tx_space = ALIGN(min_tx_space, 1024);
		min_tx_space >>= 10;
		/* software strips receive CRC, so leave room for it */
		min_rx_space = adapter->max_frame_size;
		min_rx_space = ALIGN(min_rx_space, 1024);
		min_rx_space >>= 10;

		/* If current Tx allocation is less than the min Tx FIFO size,
		 * and the min Tx FIFO size is less than the current Rx FIFO
		 * allocation, take space away from current Rx allocation */
		if (tx_space < min_tx_space &&
		    ((min_tx_space - tx_space) < pba)) {
			pba = pba - (min_tx_space - tx_space);

			/* if short on rx space, rx wins and must trump tx
			 * adjustment */
			if (pba < min_rx_space)
				pba = min_rx_space;
		}
		wr32(E1000_PBA, pba);
	}

	/* flow control settings */
	/* The high water mark must be low enough to fit one full frame
	 * (or the size used for early receive) above it in the Rx FIFO.
	 * Set it to the lower of:
	 * - 90% of the Rx FIFO size, or
	 * - the full Rx FIFO size minus one full frame */
	hwm = min(((pba << 10) * 9 / 10),
		  ((pba << 10) - adapter->max_frame_size));

	fc->high_water = hwm & 0xFFF8;	/* 8-byte granularity */
	fc->low_water = fc->high_water - 8;
	fc->pause_time = 0xFFFF;
	fc->send_xon = 1;
	fc->type = fc->original_type;

	/* Allow time for pending master requests to run */
	adapter->hw.mac.ops.reset_hw(&adapter->hw);
	wr32(E1000_WUC, 0);

	if (adapter->hw.mac.ops.init_hw(&adapter->hw))
		dev_err(&adapter->pdev->dev, "Hardware Error\n");

	igb_update_mng_vlan(adapter);

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);

	igb_reset_adaptive(&adapter->hw);
	if (adapter->hw.phy.ops.get_phy_info)
		adapter->hw.phy.ops.get_phy_info(&adapter->hw);
}

/**
 * igb_is_need_ioport - determine if an adapter needs ioport resources or not
 * @pdev: PCI device information struct
 *
 * Returns true if an adapter needs ioport resources
 **/
static int igb_is_need_ioport(struct pci_dev *pdev)
{
	switch (pdev->device) {
	/* Currently there are no adapters that need ioport resources */
	default:
		return false;
	}
}

/**
 * igb_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in igb_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * igb_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit igb_probe(struct pci_dev *pdev,
			       const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct igb_adapter *adapter;
	struct e1000_hw *hw;
	const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
	unsigned long mmio_start, mmio_len;
	int i, err, pci_using_dac;
	u16 eeprom_data = 0;
	u16 eeprom_apme_mask = IGB_EEPROM_APME;
	u32 part_num;
	int bars, need_ioport;

	/* do not allocate ioport bars when not needed */
	need_ioport = igb_is_need_ioport(pdev);
	if (need_ioport) {
		bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
		err = pci_enable_device(pdev);
	} else {
		bars = pci_select_bars(pdev, IORESOURCE_MEM);
		err = pci_enable_device_mem(pdev);
	}
	if (err)
		return err;
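
	/* prefer 64-bit DMA; if the platform can't provide it, fall back
	 * to a 32-bit mask before giving up */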
	pci_using_dac = 0;
	err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
	if (!err) {
		err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (!err)
			pci_using_dac = 1;
	} else {
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (err) {
				dev_err(&pdev->dev, "No usable DMA "
					"configuration, aborting\n");
				goto err_dma;
			}
		}
	}

	err = pci_request_selected_regions(pdev, bars, igb_driver_name);
	if (err)
		goto err_pci_reg;

	pci_set_master(pdev);
	pci_save_state(pdev);

	err = -ENOMEM;
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	netdev = alloc_etherdev_mq(sizeof(struct igb_adapter), IGB_MAX_TX_QUEUES);
#else
	netdev = alloc_etherdev(sizeof(struct igb_adapter));
#endif /* CONFIG_NETDEVICES_MULTIQUEUE */
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE;
	adapter->bars = bars;
	adapter->need_ioport = need_ioport;

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);

	err = -EIO;
	adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
	if (!adapter->hw.hw_addr)
		goto err_ioremap;

	netdev->open = &igb_open;
	netdev->stop = &igb_close;
	netdev->get_stats = &igb_get_stats;
	netdev->set_multicast_list = &igb_set_multi;
	netdev->set_mac_address = &igb_set_mac;
	netdev->change_mtu = &igb_change_mtu;
	netdev->do_ioctl = &igb_ioctl;
	igb_set_ethtool_ops(netdev);
	netdev->tx_timeout = &igb_tx_timeout;
	netdev->watchdog_timeo = 5 * HZ;
	netdev->vlan_rx_register = igb_vlan_rx_register;
	netdev->vlan_rx_add_vid = igb_vlan_rx_add_vid;
	netdev->vlan_rx_kill_vid = igb_vlan_rx_kill_vid;
#ifdef CONFIG_NET_POLL_CONTROLLER
	netdev->poll_controller = igb_netpoll;
#endif
	netdev->hard_start_xmit = &igb_xmit_frame_adv;

	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	netdev->mem_start = mmio_start;
	netdev->mem_end = mmio_start + mmio_len;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	/* setup the private structure */
	/* Copy the default MAC, PHY and NVM function pointers */
	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
	memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
	/* Initialize skew-specific constants */
	err = ei->get_invariants(hw);
	if (err)
		goto err_hw_init;

	err = igb_sw_init(adapter);
	if (err)
		goto err_sw_init;

	igb_get_bus_info_pcie(hw);

	hw->phy.autoneg_wait_to_complete = false;
	hw->mac.adaptive_ifs = true;

	/* Copper options */
	if (hw->phy.media_type == e1000_media_type_copper) {
		hw->phy.mdix = AUTO_ALL_MODES;
		hw->phy.disable_polarity_correction = false;
		hw->phy.ms_type = e1000_ms_hw_default;
	}

	if (igb_check_reset_block(hw))
		dev_info(&pdev->dev,
			 "PHY reset is blocked due to SOL/IDER session.\n");

	netdev->features = NETIF_F_SG |
			   NETIF_F_HW_CSUM |
			   NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_RX |
			   NETIF_F_HW_VLAN_FILTER;

	netdev->features |= NETIF_F_TSO;
	netdev->features |= NETIF_F_TSO6;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_HW_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	netdev->features |= NETIF_F_MULTI_QUEUE;
#endif

	netdev->features |= NETIF_F_LLTX;
	adapter->en_mng_pt = igb_enable_mng_pass_thru(&adapter->hw);

	/* before reading the NVM, reset the controller to put the device in a
	 * known good starting state */
	hw->mac.ops.reset_hw(hw);

	/* make sure the NVM is good */
	if (igb_validate_nvm_checksum(hw) < 0) {
		dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
		err = -EIO;
		goto err_eeprom;
	}

	/* copy the MAC address out of the NVM */
	if (hw->mac.ops.read_mac_addr(hw))
		dev_err(&pdev->dev, "NVM Read Error\n");

	memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
	memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr)) {
		dev_err(&pdev->dev, "Invalid MAC Address\n");
		err = -EIO;
		goto err_eeprom;
	}

	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = &igb_watchdog;
	adapter->watchdog_timer.data = (unsigned long) adapter;

	init_timer(&adapter->phy_info_timer);
	adapter->phy_info_timer.function = &igb_update_phy_info;
	adapter->phy_info_timer.data = (unsigned long) adapter;

	INIT_WORK(&adapter->reset_task, igb_reset_task);
	INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);

	/* Initialize link & ring properties that are user-changeable */
	adapter->tx_ring->count = 256;
	for (i = 0; i < adapter->num_tx_queues; i++)
		adapter->tx_ring[i].count = adapter->tx_ring->count;
	adapter->rx_ring->count = 256;
	for (i = 0; i < adapter->num_rx_queues; i++)
		adapter->rx_ring[i].count = adapter->rx_ring->count;

	adapter->fc_autoneg = true;
	hw->mac.autoneg = true;
	hw->phy.autoneg_advertised = 0x2f;

	hw->fc.original_type = e1000_fc_default;
	hw->fc.type = e1000_fc_default;

	adapter->itr_setting = 3;
	adapter->itr = IGB_START_ITR;

	igb_validate_mdi_setting(hw);

	adapter->rx_csum = 1;

	/* Initial Wake on LAN setting If APM wake is enabled in the EEPROM,
	 * enable the ACPI Magic Packet filter
	 */

	if (hw->bus.func == 0 ||
	    hw->device_id == E1000_DEV_ID_82575EB_COPPER)
		hw->nvm.ops.read_nvm(hw, NVM_INIT_CONTROL3_PORT_A, 1,
				     &eeprom_data);

	if (eeprom_data & eeprom_apme_mask)
		adapter->eeprom_wol |= E1000_WUFC_MAG;

	/* now that we have the eeprom settings, apply the special cases where
	 * the eeprom may be wrong or the board simply won't support wake on
	 * lan on a particular port */
	switch (pdev->device) {
	case E1000_DEV_ID_82575GB_QUAD_COPPER:
		adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82575EB_FIBER_SERDES:
		/* Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting */
		if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
			adapter->eeprom_wol = 0;
		break;
	}

	/* initialize the wol settings based on the eeprom settings */
	adapter->wol = adapter->eeprom_wol;

	/* reset the hardware with the new settings */
	igb_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);

	/* tell the stack to leave us alone until igb_open() is called */
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	for (i = 0; i < adapter->num_tx_queues; i++)
		netif_stop_subqueue(netdev, i);
#endif

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

#ifdef CONFIG_DCA
	if (dca_add_requester(&pdev->dev) == 0) {
		adapter->dca_enabled = true;
		dev_info(&pdev->dev, "DCA enabled\n");
		/* Always use CB2 mode, difference is masked
		 * in the CB driver. */
		wr32(E1000_DCA_CTRL, 2);
		igb_setup_dca(adapter);
	}
#endif

	dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
	/* print bus type/speed/width info */
	dev_info(&pdev->dev,
		 "%s: (PCIe:%s:%s) %02x:%02x:%02x:%02x:%02x:%02x\n",
		 netdev->name,
		 ((hw->bus.speed == e1000_bus_speed_2500)
		  ? "2.5Gb/s" : "unknown"),
		 ((hw->bus.width == e1000_bus_width_pcie_x4)
		  ? "Width x4" : (hw->bus.width == e1000_bus_width_pcie_x1)
		  ? "Width x1" : "unknown"),
		 netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
		 netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]);

	igb_read_part_num(hw, &part_num);
	dev_info(&pdev->dev, "%s: PBA No: %06x-%03x\n", netdev->name,
		 (part_num >> 8), (part_num & 0xff));

	dev_info(&pdev->dev,
		 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
		 adapter->msix_entries ? "MSI-X" :
		 adapter->msi_enabled ? "MSI" : "legacy",
		 adapter->num_rx_queues, adapter->num_tx_queues);

	return 0;

err_register:
	igb_release_hw_control(adapter);
err_eeprom:
	if (!igb_check_reset_block(hw))
		hw->phy.ops.reset_phy(hw);

	if (hw->flash_address)
		iounmap(hw->flash_address);

	igb_remove_device(hw);
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
err_sw_init:
err_hw_init:
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev, bars);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * igb_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * igb_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit igb_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/* flush_scheduled work may reschedule our watchdog task, so
	 * explicitly disable watchdog tasks from being rescheduled */
	set_bit(__IGB_DOWN, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	flush_scheduled_work();

#ifdef CONFIG_DCA
	if (adapter->dca_enabled) {
		dev_info(&pdev->dev, "DCA disabled\n");
		dca_remove_requester(&pdev->dev);
		adapter->dca_enabled = false;
		wr32(E1000_DCA_CTRL, 1);
	}
#endif

	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
	 * would have already happened in close and is redundant. */
	igb_release_hw_control(adapter);

	unregister_netdev(netdev);

	if (!igb_check_reset_block(&adapter->hw))
		adapter->hw.phy.ops.reset_phy(&adapter->hw);

	igb_remove_device(&adapter->hw);
	igb_reset_interrupt_capability(adapter);

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	iounmap(adapter->hw.hw_addr);
	if (adapter->hw.flash_address)
		iounmap(adapter->hw.flash_address);
	pci_release_selected_regions(pdev, adapter->bars);

	free_netdev(netdev);

	pci_disable_device(pdev);
}

/**
 * igb_sw_init - Initialize general software structures (struct igb_adapter)
 * @adapter: board private structure to initialize
 *
 * igb_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int __devinit igb_sw_init(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);

	adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
	adapter->rx_ps_hdr_size = 0; /* disable packet split */
	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;

	/* Number of supported queues. */
	/* Having more queues than CPUs doesn't make sense. */
	adapter->num_rx_queues = min((u32)IGB_MAX_RX_QUEUES, (u32)num_online_cpus());
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	adapter->num_tx_queues = min(IGB_MAX_TX_QUEUES, num_online_cpus());
#else
	adapter->num_tx_queues = 1;
#endif /* CONFIG_NETDEVICES_MULTIQUEUE */

	/* This call may decrease the number of queues depending on
	 * interrupt mode. */
	igb_set_interrupt_capability(adapter);

	if (igb_alloc_queues(adapter)) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	/* Explicitly disable IRQ since the NIC can be in any state. */
	igb_irq_disable(adapter);

	set_bit(__IGB_DOWN, &adapter->state);
	return 0;
}

/**
 * igb_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int igb_open(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;
	int i;

	/* disallow open during test */
	if (test_bit(__IGB_TESTING, &adapter->state))
		return -EBUSY;

	/* allocate transmit descriptors */
	err = igb_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = igb_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	/* e1000_power_up_phy(adapter); */

	adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
		igb_update_mng_vlan(adapter);

	/* before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call pci_request_irq, so we have to setup our
	 * clean_rx handler before we do so. */
	igb_configure(adapter);

	err = igb_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/* From here on the code is the same as igb_up() */
	clear_bit(__IGB_DOWN, &adapter->state);

	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_enable(&adapter->rx_ring[i].napi);

	/* Clear any pending interrupts. */
	rd32(E1000_ICR);
	igb_irq_enable(adapter);

	/* Fire a link status change interrupt to start the watchdog. */
	wr32(E1000_ICS, E1000_ICS_LSC);

	return 0;

err_req_irq:
	igb_release_hw_control(adapter);
	/* e1000_power_down_phy(adapter); */
	igb_free_all_rx_resources(adapter);
err_setup_rx:
	igb_free_all_tx_resources(adapter);
err_setup_tx:
	igb_reset(adapter);

	return err;
}

/**
 * igb_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int igb_close(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
	igb_down(adapter);

	igb_free_irq(adapter);

	igb_free_all_tx_resources(adapter);
	igb_free_all_rx_resources(adapter);

	/* kill manageability vlan ID if supported, but not if a vlan with
	 * the same ID is registered on the host OS (let 8021q kill it) */
	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
	    !(adapter->vlgrp &&
	      vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id)))
		igb_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);

	return 0;
}

/**
 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int igb_setup_tx_resources(struct igb_adapter *adapter,
			   struct igb_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct igb_buffer) * tx_ring->count;
	tx_ring->buffer_info = vmalloc(size);
	if (!tx_ring->buffer_info)
		goto err;
	memset(tx_ring->buffer_info, 0, size);

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc)
			+ sizeof(u32);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
					     &tx_ring->dma);

	if (!tx_ring->desc)
		goto err;

	tx_ring->adapter = adapter;
	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	return 0;

err:
	vfree(tx_ring->buffer_info);
	dev_err(&adapter->pdev->dev,
		"Unable to allocate memory for the transmit descriptor ring\n");
	return -ENOMEM;
}

/**
 * igb_setup_all_tx_resources - wrapper to allocate Tx resources
 *				(Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
{
	int i, err = 0;
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	int r_idx;
#endif

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = igb_setup_tx_resources(adapter, &adapter->tx_ring[i]);
		if (err) {
			dev_err(&adapter->pdev->dev,
				"Allocation for Tx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
				igb_free_tx_resources(&adapter->tx_ring[i]);
			break;
		}
	}

#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	for (i = 0; i < IGB_MAX_TX_QUEUES; i++) {
		r_idx = i % adapter->num_tx_queues;
		adapter->multi_tx_table[i] = &adapter->tx_ring[r_idx];
	}
#endif
	return err;
}

/**
 * igb_configure_tx - Configure transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void igb_configure_tx(struct igb_adapter *adapter)
{
	u64 tdba, tdwba;
	struct e1000_hw *hw = &adapter->hw;
	u32 tctl;
	u32 txdctl, txctrl;
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *ring = &(adapter->tx_ring[i]);

		wr32(E1000_TDLEN(i),
		     ring->count * sizeof(struct e1000_tx_desc));
		tdba = ring->dma;
		wr32(E1000_TDBAL(i),
		     tdba & 0x00000000ffffffffULL);
		wr32(E1000_TDBAH(i), tdba >> 32);

		tdwba = ring->dma + ring->count * sizeof(struct e1000_tx_desc);
		tdwba |= 1; /* enable head wb */
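		/* with head write-back the controller DMA-writes the consumed
		 * head index to this address, letting the Tx cleanup path read
		 * it from memory instead of the TDH register */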
		wr32(E1000_TDWBAL(i),
		     tdwba & 0x00000000ffffffffULL);
		wr32(E1000_TDWBAH(i), tdwba >> 32);

		ring->head = E1000_TDH(i);
		ring->tail = E1000_TDT(i);
		writel(0, hw->hw_addr + ring->tail);
		writel(0, hw->hw_addr + ring->head);
		txdctl = rd32(E1000_TXDCTL(i));
		txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
		wr32(E1000_TXDCTL(i), txdctl);

		/* Turn off Relaxed Ordering on head write-backs.  The
		 * writebacks MUST be delivered in order or it will
		 * completely screw up our bookkeeping.
		 */
		txctrl = rd32(E1000_DCA_TXCTRL(i));
		txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN;
		wr32(E1000_DCA_TXCTRL(i), txctrl);
	}

	/* Use the default values for the Tx Inter Packet Gap (IPG) timer */

	/* Program the Transmit Control Register */

	tctl = rd32(E1000_TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);

	igb_config_collision_dist(hw);

	/* Setup Transmit Descriptor Settings for eop descriptor */
	adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS;

	/* Enable transmits */
	tctl |= E1000_TCTL_EN;

	wr32(E1000_TCTL, tctl);
}

/**
 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int igb_setup_rx_resources(struct igb_adapter *adapter,
			   struct igb_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size, desc_len;

	size = sizeof(struct igb_buffer) * rx_ring->count;
	rx_ring->buffer_info = vmalloc(size);
	if (!rx_ring->buffer_info)
		goto err;
	memset(rx_ring->buffer_info, 0, size);

	desc_len = sizeof(union e1000_adv_rx_desc);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * desc_len;
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
					     &rx_ring->dma);

	if (!rx_ring->desc)
		goto err;

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	rx_ring->pending_skb = NULL;

	rx_ring->adapter = adapter;

	return 0;

err:
	vfree(rx_ring->buffer_info);
	dev_err(&adapter->pdev->dev, "Unable to allocate memory for "
		"the receive descriptor ring\n");
	return -ENOMEM;
}

/**
 * igb_setup_all_rx_resources - wrapper to allocate Rx resources
 *				(Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = igb_setup_rx_resources(adapter, &adapter->rx_ring[i]);
		if (err) {
			dev_err(&adapter->pdev->dev,
				"Allocation for Rx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
				igb_free_rx_resources(&adapter->rx_ring[i]);
			break;
		}
	}

	return err;
}

/**
 * igb_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 **/
static void igb_setup_rctl(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;
	u32 srrctl = 0;
	int i;

	rctl = rd32(E1000_RCTL);

	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);

	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
		E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
		(adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/* enable stripping of CRC. It's unlikely this will break BMC
	 * redirection as it did with e1000. Newer features require
	 * that the HW strips the CRC.
	 */
	rctl |= E1000_RCTL_SECRC;

	rctl &= ~E1000_RCTL_SBP;

	if (adapter->netdev->mtu <= ETH_DATA_LEN)
		rctl &= ~E1000_RCTL_LPE;
	else
		rctl |= E1000_RCTL_LPE;
	if (adapter->rx_buffer_len <= IGB_RXBUFFER_2048) {
		/* Setup buffer sizes */
		rctl &= ~E1000_RCTL_SZ_4096;
		rctl |= E1000_RCTL_BSEX;
		switch (adapter->rx_buffer_len) {
		case IGB_RXBUFFER_256:
			rctl |= E1000_RCTL_SZ_256;
			rctl &= ~E1000_RCTL_BSEX;
			break;
		case IGB_RXBUFFER_512:
			rctl |= E1000_RCTL_SZ_512;
			rctl &= ~E1000_RCTL_BSEX;
			break;
		case IGB_RXBUFFER_1024:
			rctl |= E1000_RCTL_SZ_1024;
			rctl &= ~E1000_RCTL_BSEX;
			break;
		case IGB_RXBUFFER_2048:
		default:
			rctl |= E1000_RCTL_SZ_2048;
			rctl &= ~E1000_RCTL_BSEX;
			break;
		case IGB_RXBUFFER_4096:
			rctl |= E1000_RCTL_SZ_4096;
			break;
		case IGB_RXBUFFER_8192:
			rctl |= E1000_RCTL_SZ_8192;
			break;
		case IGB_RXBUFFER_16384:
			rctl |= E1000_RCTL_SZ_16384;
			break;
		}
	} else {
		rctl &= ~E1000_RCTL_BSEX;
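		/* above 2 KB the size is programmed through SRRCTL instead;
		 * the BSIZEPKT field is expressed in 1 KB units, hence the
		 * shift */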
		srrctl = adapter->rx_buffer_len >> E1000_SRRCTL_BSIZEPKT_SHIFT;
	}

	/* 82575 and greater support packet-split where the protocol
	 * header is placed in skb->data and the packet data is
	 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
	 * In the case of a non-split, skb->data is linearly filled,
	 * followed by the page buffers.  Therefore, skb->data is
	 * sized to hold the largest protocol header.
	 */
	/* allocations using alloc_page take too long for regular MTU
	 * so only enable packet split for jumbo frames */
	if (rctl & E1000_RCTL_LPE) {
		adapter->rx_ps_hdr_size = IGB_RXBUFFER_128;
		srrctl = adapter->rx_ps_hdr_size <<
			 E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
		/* buffer size is ALWAYS one page */
		srrctl |= PAGE_SIZE >> E1000_SRRCTL_BSIZEPKT_SHIFT;
		srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
	} else {
		adapter->rx_ps_hdr_size = 0;
		srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
	}

	for (i = 0; i < adapter->num_rx_queues; i++)
		wr32(E1000_SRRCTL(i), srrctl);

	wr32(E1000_RCTL, rctl);
}

/**
 * igb_configure_rx - Configure receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void igb_configure_rx(struct igb_adapter *adapter)
{
	u64 rdba;
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl, rxcsum;
	u32 rxdctl;
	int i;

	/* disable receives while setting up the descriptors */
	rctl = rd32(E1000_RCTL);
	wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
	wrfl();
	mdelay(10);

	if (adapter->itr_setting > 3)
		wr32(E1000_ITR,
		     1000000000 / (adapter->itr * 256));

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = &(adapter->rx_ring[i]);
		rdba = ring->dma;
		wr32(E1000_RDBAL(i),
		     rdba & 0x00000000ffffffffULL);
		wr32(E1000_RDBAH(i), rdba >> 32);
		wr32(E1000_RDLEN(i),
		     ring->count * sizeof(union e1000_adv_rx_desc));

		ring->head = E1000_RDH(i);
		ring->tail = E1000_RDT(i);
		writel(0, hw->hw_addr + ring->tail);
		writel(0, hw->hw_addr + ring->head);

		rxdctl = rd32(E1000_RXDCTL(i));
		rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
		rxdctl &= 0xFFF00000;
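		/* the low 20 bits hold the descriptor prefetch, host and
		 * write-back thresholds (PTHRESH, HTHRESH, WTHRESH),
		 * rebuilt below */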
		rxdctl |= IGB_RX_PTHRESH;
		rxdctl |= IGB_RX_HTHRESH << 8;
		rxdctl |= IGB_RX_WTHRESH << 16;
		wr32(E1000_RXDCTL(i), rxdctl);
	}

	if (adapter->num_rx_queues > 1) {
		u32 random[10];
		u32 mrqc;
		u32 j, shift;
		union e1000_reta {
			u32 dword;
			u8  bytes[4];
		} reta;

		get_random_bytes(&random[0], 40);
= 0; j
< (32 * 4); j
++) {
1807 (j
% adapter
->num_rx_queues
) << shift
;
1810 hw
->hw_addr
+ E1000_RETA(0) + (j
& ~3));
1812 mrqc
= E1000_MRQC_ENABLE_RSS_4Q
;
1814 /* Fill out hash function seeds */
1815 for (j
= 0; j
< 10; j
++)
1816 array_wr32(E1000_RSSRK(0), j
, random
[j
]);
1818 mrqc
|= (E1000_MRQC_RSS_FIELD_IPV4
|
1819 E1000_MRQC_RSS_FIELD_IPV4_TCP
);
1820 mrqc
|= (E1000_MRQC_RSS_FIELD_IPV6
|
1821 E1000_MRQC_RSS_FIELD_IPV6_TCP
);
1822 mrqc
|= (E1000_MRQC_RSS_FIELD_IPV4_UDP
|
1823 E1000_MRQC_RSS_FIELD_IPV6_UDP
);
1824 mrqc
|= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX
|
1825 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX
);
1828 wr32(E1000_MRQC
, mrqc
);
		/* Multiqueue and raw packet checksumming are mutually
		 * exclusive.  Note that this is not the same as TCP/IP
		 * checksumming, which works fine. */
		rxcsum = rd32(E1000_RXCSUM);
		rxcsum |= E1000_RXCSUM_PCSD;
		wr32(E1000_RXCSUM, rxcsum);
	} else {
		/* Enable Receive Checksum Offload for TCP and UDP */
		rxcsum = rd32(E1000_RXCSUM);
		if (adapter->rx_csum) {
			rxcsum |= E1000_RXCSUM_TUOFL;

			/* Enable IPv4 payload checksum for UDP fragments
			 * Must be used in conjunction with packet-split. */
			if (adapter->rx_ps_hdr_size)
				rxcsum |= E1000_RXCSUM_IPPCSE;
		} else {
			rxcsum &= ~E1000_RXCSUM_TUOFL;
			/* don't need to clear IPPCSE as it defaults to 0 */
		}
		wr32(E1000_RXCSUM, rxcsum);
	}

	if (adapter->vlgrp)
		wr32(E1000_RLPML,
		     adapter->max_frame_size + VLAN_TAG_SIZE);
	else
		wr32(E1000_RLPML, adapter->max_frame_size);

	/* Enable Receives */
	wr32(E1000_RCTL, rctl);
}

/**
 * igb_free_tx_resources - Free Tx Resources per Queue
 * @adapter: board private structure
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
static void igb_free_tx_resources(struct igb_ring *tx_ring)
{
	struct pci_dev *pdev = tx_ring->adapter->pdev;

	igb_clean_tx_ring(tx_ring);

	vfree(tx_ring->buffer_info);
	tx_ring->buffer_info = NULL;

	pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * igb_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
static void igb_free_all_tx_resources(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igb_free_tx_resources(&adapter->tx_ring[i]);
}

static void igb_unmap_and_free_tx_resource(struct igb_adapter *adapter,
					   struct igb_buffer *buffer_info)
{
	if (buffer_info->dma) {
		pci_unmap_page(adapter->pdev,
			       buffer_info->dma,
			       buffer_info->length,
			       PCI_DMA_TODEVICE);
		buffer_info->dma = 0;
	}
	if (buffer_info->skb) {
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
	buffer_info->time_stamp = 0;
	/* buffer_info must be completely set up in the transmit path */
}

/**
 * igb_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 * @tx_ring: ring to be cleaned
 **/
static void igb_clean_tx_ring(struct igb_ring *tx_ring)
{
	struct igb_adapter *adapter = tx_ring->adapter;
	struct igb_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	if (!tx_ring->buffer_info)
		return;
	/* Free all the Tx ring sk_buffs */

	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		igb_unmap_and_free_tx_resource(adapter, buffer_info);
	}

	size = sizeof(struct igb_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */

	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	writel(0, adapter->hw.hw_addr + tx_ring->head);
	writel(0, adapter->hw.hw_addr + tx_ring->tail);
}

/**
 * igb_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igb_clean_tx_ring(&adapter->tx_ring[i]);
}

/**
 * igb_free_rx_resources - Free Rx Resources
 * @adapter: board private structure
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
static void igb_free_rx_resources(struct igb_ring *rx_ring)
{
	struct pci_dev *pdev = rx_ring->adapter->pdev;

	igb_clean_rx_ring(rx_ring);

	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;

	pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * igb_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
static void igb_free_all_rx_resources(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		igb_free_rx_resources(&adapter->rx_ring[i]);
}

/**
 * igb_clean_rx_ring - Free Rx Buffers per Queue
 * @adapter: board private structure
 * @rx_ring: ring to free buffers from
 **/
static void igb_clean_rx_ring(struct igb_ring *rx_ring)
{
	struct igb_adapter *adapter = rx_ring->adapter;
	struct igb_buffer *buffer_info;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	if (!rx_ring->buffer_info)
		return;
	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if (buffer_info->dma) {
			if (adapter->rx_ps_hdr_size)
				pci_unmap_single(pdev, buffer_info->dma,
						 adapter->rx_ps_hdr_size,
						 PCI_DMA_FROMDEVICE);
			else
				pci_unmap_single(pdev, buffer_info->dma,
						 adapter->rx_buffer_len,
						 PCI_DMA_FROMDEVICE);
			buffer_info->dma = 0;
		}

		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}
		if (buffer_info->page) {
			pci_unmap_page(pdev, buffer_info->page_dma,
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);
			put_page(buffer_info->page);
			buffer_info->page = NULL;
			buffer_info->page_dma = 0;
		}
	}

	/* there also may be some cached data from a chained receive */
	if (rx_ring->pending_skb) {
		dev_kfree_skb(rx_ring->pending_skb);
		rx_ring->pending_skb = NULL;
	}

	size = sizeof(struct igb_buffer) * rx_ring->count;
	memset(rx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	writel(0, adapter->hw.hw_addr + rx_ring->head);
	writel(0, adapter->hw.hw_addr + rx_ring->tail);
}
/**
 * igb_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		igb_clean_rx_ring(&adapter->rx_ring[i]);
}
/**
 * igb_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int igb_set_mac(struct net_device *netdev, void *p)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);

	adapter->hw.mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr, 0);

	return 0;
}
/**
 * igb_set_multi - Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_multi entry point is called whenever the multicast address
 * list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper multicast,
 * promiscuous mode, and all-multi behavior.
 **/
static void igb_set_multi(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_mac_info *mac = &hw->mac;
	struct dev_mc_list *mc_ptr;
	u8 *mta_list;
	u32 rctl;
	int i;

	/* Check for Promiscuous and All Multicast modes */

	rctl = rd32(E1000_RCTL);

	if (netdev->flags & IFF_PROMISC)
		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
	else if (netdev->flags & IFF_ALLMULTI) {
		rctl |= E1000_RCTL_MPE;
		rctl &= ~E1000_RCTL_UPE;
	} else
		rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);

	wr32(E1000_RCTL, rctl);

	if (!netdev->mc_count) {
		/* nothing to program, so clear mc list */
		igb_update_mc_addr_list(hw, NULL, 0, 1,
					mac->rar_entry_count);
		return;
	}

	mta_list = kzalloc(netdev->mc_count * 6, GFP_ATOMIC);
	if (!mta_list)
		return;

	/* The shared function expects a packed array of only addresses. */
	mc_ptr = netdev->mc_list;

	for (i = 0; i < netdev->mc_count; i++) {
		if (!mc_ptr)
			break;
		memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
		mc_ptr = mc_ptr->next;
	}
	igb_update_mc_addr_list(hw, mta_list, i, 1, mac->rar_entry_count);
	kfree(mta_list);
}
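/* Example of the packing the shared function expects: with
 * netdev->mc_count == 3 the kzalloc() above yields an 18-byte scratch
 * buffer (the literal 6 is ETH_ALEN), and entry i is copied to offset
 * i * ETH_ALEN, so igb_update_mc_addr_list() receives a flat run of
 * back-to-back 6-byte MAC addresses with no per-entry padding or list
 * linkage. */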
/* Need to wait a few seconds after link up to get diagnostic information from
 * the phy */
static void igb_update_phy_info(unsigned long data)
{
	struct igb_adapter *adapter = (struct igb_adapter *) data;
	if (adapter->hw.phy.ops.get_phy_info)
		adapter->hw.phy.ops.get_phy_info(&adapter->hw);
}
/**
 * igb_watchdog - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void igb_watchdog(unsigned long data)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	/* Do the rest outside of interrupt context */
	schedule_work(&adapter->watchdog_task);
}
static void igb_watchdog_task(struct work_struct *work)
{
	struct igb_adapter *adapter = container_of(work,
					struct igb_adapter, watchdog_task);
	struct e1000_hw *hw = &adapter->hw;

	struct net_device *netdev = adapter->netdev;
	struct igb_ring *tx_ring = adapter->tx_ring;
	struct e1000_mac_info *mac = &adapter->hw.mac;
	u32 link;
	s32 ret_val;
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	int i;
#endif

	if ((netif_carrier_ok(netdev)) &&
	    (rd32(E1000_STATUS) & E1000_STATUS_LU))
		goto link_up;

	ret_val = hw->mac.ops.check_for_link(&adapter->hw);
	if ((ret_val == E1000_ERR_PHY) &&
	    (hw->phy.type == e1000_phy_igp_3) &&
	    (rd32(E1000_CTRL) &
	     E1000_PHY_CTRL_GBE_DISABLE))
		dev_info(&adapter->pdev->dev,
			 "Gigabit has been disabled, downgrading speed\n");

	if ((hw->phy.media_type == e1000_media_type_internal_serdes) &&
	    !(rd32(E1000_TXCW) & E1000_TXCW_ANE))
		link = mac->serdes_has_link;
	else
		link = rd32(E1000_STATUS) &
		       E1000_STATUS_LU;

	if (link) {
		if (!netif_carrier_ok(netdev)) {
			u32 ctrl;
			hw->mac.ops.get_speed_and_duplex(&adapter->hw,
						   &adapter->link_speed,
						   &adapter->link_duplex);

			ctrl = rd32(E1000_CTRL);
			dev_info(&adapter->pdev->dev,
				 "NIC Link is Up %d Mbps %s, "
				 "Flow Control: %s\n",
				 adapter->link_speed,
				 adapter->link_duplex == FULL_DUPLEX ?
				 "Full Duplex" : "Half Duplex",
				 ((ctrl & E1000_CTRL_TFCE) && (ctrl &
				 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
				 E1000_CTRL_RFCE) ? "RX" : ((ctrl &
				 E1000_CTRL_TFCE) ? "TX" : "None")));

			/* tweak tx_queue_len according to speed/duplex and
			 * adjust the timeout factor */
			netdev->tx_queue_len = adapter->tx_queue_len;
			adapter->tx_timeout_factor = 1;
			switch (adapter->link_speed) {
			case SPEED_10:
				netdev->tx_queue_len = 10;
				adapter->tx_timeout_factor = 14;
				break;
			case SPEED_100:
				netdev->tx_queue_len = 100;
				/* maybe add some timeout factor ? */
				break;
			}

			netif_carrier_on(netdev);
			netif_wake_queue(netdev);
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
			for (i = 0; i < adapter->num_tx_queues; i++)
				netif_wake_subqueue(netdev, i);
#endif

			if (!test_bit(__IGB_DOWN, &adapter->state))
				mod_timer(&adapter->phy_info_timer,
					  round_jiffies(jiffies + 2 * HZ));
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			adapter->link_speed = 0;
			adapter->link_duplex = 0;
			dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
			netif_carrier_off(netdev);
			netif_stop_queue(netdev);
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
			for (i = 0; i < adapter->num_tx_queues; i++)
				netif_stop_subqueue(netdev, i);
#endif
			if (!test_bit(__IGB_DOWN, &adapter->state))
				mod_timer(&adapter->phy_info_timer,
					  round_jiffies(jiffies + 2 * HZ));
		}
	}

link_up:
	igb_update_stats(adapter);

	mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
	adapter->tpt_old = adapter->stats.tpt;
	mac->collision_delta = adapter->stats.colc - adapter->colc_old;
	adapter->colc_old = adapter->stats.colc;

	adapter->gorc = adapter->stats.gorc - adapter->gorc_old;
	adapter->gorc_old = adapter->stats.gorc;
	adapter->gotc = adapter->stats.gotc - adapter->gotc_old;
	adapter->gotc_old = adapter->stats.gotc;

	igb_update_adaptive(&adapter->hw);

	if (!netif_carrier_ok(netdev)) {
		if (IGB_DESC_UNUSED(tx_ring) + 1 < tx_ring->count) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context). */
			adapter->tx_timeout_count++;
			schedule_work(&adapter->reset_task);
		}
	}

	/* Cause software interrupt to ensure rx ring is cleaned */
	wr32(E1000_ICS, E1000_ICS_RXDMT0);

	/* Force detection of hung controller every watchdog period */
	tx_ring->detect_tx_hung = true;

	/* Reset the timer */
	if (!test_bit(__IGB_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer,
			  round_jiffies(jiffies + 2 * HZ));
}
enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};
static void igb_lower_rx_eitr(struct igb_adapter *adapter,
			      struct igb_ring *rx_ring)
{
	struct e1000_hw *hw = &adapter->hw;
	int new_val;

	new_val = rx_ring->itr_val / 2;
	if (new_val < IGB_MIN_DYN_ITR)
		new_val = IGB_MIN_DYN_ITR;

	if (new_val != rx_ring->itr_val) {
		rx_ring->itr_val = new_val;
		wr32(rx_ring->itr_register,
		     1000000000 / (new_val * 256));
	}
}
static void igb_raise_rx_eitr(struct igb_adapter *adapter,
			      struct igb_ring *rx_ring)
{
	struct e1000_hw *hw = &adapter->hw;
	int new_val;

	new_val = rx_ring->itr_val * 2;
	if (new_val > IGB_MAX_DYN_ITR)
		new_val = IGB_MAX_DYN_ITR;

	if (new_val != rx_ring->itr_val) {
		rx_ring->itr_val = new_val;
		wr32(rx_ring->itr_register,
		     1000000000 / (new_val * 256));
	}
}
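/* Both helpers above program EITR with 1000000000 / (new_val * 256):
 * new_val is an interrupts-per-second target, so the quotient is the
 * interrupt interval expressed in the register's (presumably 256 ns)
 * units.  For example, new_val == 20000 ints/s is a 50 us interval and
 * is written as roughly 195 counts; halving or doubling itr_val thus
 * doubles or halves the interrupt rate within the IGB_MIN_DYN_ITR /
 * IGB_MAX_DYN_ITR clamps. */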
/**
 * igb_update_itr - update the dynamic ITR value based on statistics
 *      Stores a new ITR value based on packets and byte
 *      counts during the last interrupt.  The advantage of per interrupt
 *      computation is faster updates and more accurate ITR for the current
 *      traffic pattern.  Constants in this function were computed
 *      based on theoretical maximum wire speed and thresholds were set based
 *      on testing data as well as attempting to minimize response time
 *      while increasing bulk throughput.
 *      this functionality is controlled by the InterruptThrottleRate module
 *      parameter (see igb_param.c)
 *      NOTE:  These calculations are only valid when operating in a single-
 *             queue environment.
 * @adapter: pointer to adapter
 * @itr_setting: current adapter->itr
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 **/
static unsigned int igb_update_itr(struct igb_adapter *adapter, u16 itr_setting,
				   int packets, int bytes)
{
	unsigned int retval = itr_setting;

	if (packets == 0)
		goto update_itr_done;

	switch (itr_setting) {
	case lowest_latency:
		/* handle TSO and jumbo frames */
		if (bytes/packets > 8000)
			retval = bulk_latency;
		else if ((packets < 5) && (bytes > 512))
			retval = low_latency;
		break;
	case low_latency:  /* 50 usec aka 20000 ints/s */
		if (bytes > 10000) {
			/* this if handles the TSO accounting */
			if (bytes/packets > 8000) {
				retval = bulk_latency;
			} else if ((packets < 10) || ((bytes/packets) > 1200)) {
				retval = bulk_latency;
			} else if ((packets > 35)) {
				retval = lowest_latency;
			}
		} else if (bytes/packets > 2000) {
			retval = bulk_latency;
		} else if (packets <= 2 && bytes < 512) {
			retval = lowest_latency;
		}
		break;
	case bulk_latency: /* 250 usec aka 4000 ints/s */
		if (bytes > 25000) {
			if (packets > 35)
				retval = low_latency;
		} else if (bytes < 6000) {
			retval = low_latency;
		}
		break;
	}

update_itr_done:
	return retval;
}
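/* Concrete readings of the thresholds above: while in low_latency, an
 * interval of 40 packets / 60000 bytes has bytes > 10000 and
 * bytes/packets == 1500 > 1200, so the ring demotes to bulk_latency,
 * whereas 2 packets / 128 bytes promotes it to lowest_latency.  Streams
 * of large sends (TSO) are pushed toward bulk_latency by the
 * bytes/packets > 8000 checks regardless of packet count. */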
static void igb_set_itr(struct igb_adapter *adapter, u16 itr_register,
			int rx_only)
{
	u16 current_itr;
	u32 new_itr = adapter->itr;

	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
	if (adapter->link_speed != SPEED_1000) {
		current_itr = 0;
		new_itr = 4000;
		goto set_itr_now;
	}

	adapter->rx_itr = igb_update_itr(adapter,
				    adapter->rx_itr,
				    adapter->rx_ring->total_packets,
				    adapter->rx_ring->total_bytes);
	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
		adapter->rx_itr = low_latency;

	if (!rx_only) {
		adapter->tx_itr = igb_update_itr(adapter,
					    adapter->tx_itr,
					    adapter->tx_ring->total_packets,
					    adapter->tx_ring->total_bytes);
		/* conservative mode (itr 3) eliminates the
		 * lowest_latency setting */
		if (adapter->itr_setting == 3 &&
		    adapter->tx_itr == lowest_latency)
			adapter->tx_itr = low_latency;

		current_itr = max(adapter->rx_itr, adapter->tx_itr);
	} else {
		current_itr = adapter->rx_itr;
	}

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = 70000;
		break;
	case low_latency:
		new_itr = 20000; /* aka hwitr = ~200 */
		break;
	case bulk_latency:
		new_itr = 4000;
		break;
	default:
		break;
	}

set_itr_now:
	if (new_itr != adapter->itr) {
		/* this attempts to bias the interrupt rate towards Bulk
		 * by adding intermediate steps when interrupt rate is
		 * increasing */
		new_itr = new_itr > adapter->itr ?
			     min(adapter->itr + (new_itr >> 2), new_itr) :
			     new_itr;
		/* Don't write the value here; it resets the adapter's
		 * internal timer, and causes us to delay far longer than
		 * we should between interrupts.  Instead, we write the ITR
		 * value at the beginning of the next interrupt so the timing
		 * ends up being correct.
		 */
		adapter->itr = new_itr;
		adapter->set_itr = 1;
	}
}
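/* The ramp above is intentionally asymmetric: with adapter->itr == 4000
 * and a computed target of 20000, min(4000 + (20000 >> 2), 20000) == 9000,
 * so the rate climbs over several intervals, while a move back toward the
 * bulk_latency rate is taken in a single step.  The deferred write
 * (set_itr flag) is picked up at the top of the next interrupt handler. */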
#define IGB_TX_FLAGS_CSUM		0x00000001
#define IGB_TX_FLAGS_VLAN		0x00000002
#define IGB_TX_FLAGS_TSO		0x00000004
#define IGB_TX_FLAGS_IPV4		0x00000008
#define IGB_TX_FLAGS_VLAN_MASK	0xffff0000
#define IGB_TX_FLAGS_VLAN_SHIFT	16
static inline int igb_tso_adv(struct igb_adapter *adapter,
			      struct igb_ring *tx_ring,
			      struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
{
	struct e1000_adv_tx_context_desc *context_desc;
	unsigned int i;
	int err;
	struct igb_buffer *buffer_info;
	u32 info = 0, tu_cmd = 0;
	u32 mss_l4len_idx, l4len;

	if (skb_header_cloned(skb)) {
		err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (err)
			return err;
	}

	l4len = tcp_hdrlen(skb);
	*hdr_len += l4len;

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);
		iph->tot_len = 0;
		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
							 iph->daddr, 0,
							 IPPROTO_TCP, 0);
	} else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						       &ipv6_hdr(skb)->daddr,
						       0, IPPROTO_TCP, 0);
	}

	i = tx_ring->next_to_use;

	buffer_info = &tx_ring->buffer_info[i];
	context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
	/* VLAN MACLEN IPLEN */
	if (tx_flags & IGB_TX_FLAGS_VLAN)
		info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
	info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
	*hdr_len += skb_network_offset(skb);
	info |= skb_network_header_len(skb);
	*hdr_len += skb_network_header_len(skb);
	context_desc->vlan_macip_lens = cpu_to_le32(info);

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);

	if (skb->protocol == htons(ETH_P_IP))
		tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
	tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;

	context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);

	/* MSS L4LEN IDX */
	mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
	mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);

	/* Context index must be unique per ring.  Luckily, so is the interrupt
	 * mask value. */
	mss_l4len_idx |= tx_ring->eims_value >> 4;

	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
	context_desc->seqnum_seed = 0;

	buffer_info->time_stamp = jiffies;
	buffer_info->dma = 0;
	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	return true;
}
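/* The *hdr_len accumulated above is l4len + skb_network_offset() +
 * skb_network_header_len(); for an untagged IPv4/TCP frame with no TCP
 * options that is 20 + 14 + 20 = 54 bytes.  igb_tx_queue_adv() later
 * subtracts it from paylen, so olinfo_status carries the TSO payload
 * length excluding headers. */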
static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
				   struct igb_ring *tx_ring,
				   struct sk_buff *skb, u32 tx_flags)
{
	struct e1000_adv_tx_context_desc *context_desc;
	unsigned int i;
	struct igb_buffer *buffer_info;
	u32 info = 0, tu_cmd = 0;

	if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
	    (tx_flags & IGB_TX_FLAGS_VLAN)) {
		i = tx_ring->next_to_use;
		buffer_info = &tx_ring->buffer_info[i];
		context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);

		if (tx_flags & IGB_TX_FLAGS_VLAN)
			info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
		info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			info |= skb_network_header_len(skb);

		context_desc->vlan_macip_lens = cpu_to_le32(info);

		tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			switch (skb->protocol) {
			case __constant_htons(ETH_P_IP):
				tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
				break;
			case __constant_htons(ETH_P_IPV6):
				/* XXX what about other V6 headers?? */
				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
				break;
			default:
				if (unlikely(net_ratelimit()))
					dev_warn(&adapter->pdev->dev,
					    "partial checksum but proto=%x!\n",
					    skb->protocol);
				break;
			}
		}

		context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
		context_desc->seqnum_seed = 0;
		context_desc->mss_l4len_idx =
					cpu_to_le32(tx_ring->queue_index << 4);

		buffer_info->time_stamp = jiffies;
		buffer_info->dma = 0;

		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}

	return false;
}
#define IGB_MAX_TXD_PWR	16
#define IGB_MAX_DATA_PER_TXD	(1<<IGB_MAX_TXD_PWR)

static inline int igb_tx_map_adv(struct igb_adapter *adapter,
				 struct igb_ring *tx_ring,
				 struct sk_buff *skb)
{
	struct igb_buffer *buffer_info;
	unsigned int len = skb_headlen(skb);
	unsigned int count = 0, i;
	unsigned int f;

	i = tx_ring->next_to_use;

	buffer_info = &tx_ring->buffer_info[i];
	BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
	buffer_info->length = len;
	/* set time_stamp *before* dma to help avoid a possible race */
	buffer_info->time_stamp = jiffies;
	buffer_info->dma = pci_map_single(adapter->pdev, skb->data, len,
					  PCI_DMA_TODEVICE);
	count++;
	i++;
	if (i == tx_ring->count)
		i = 0;

	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
		struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];
		len = frag->size;

		buffer_info = &tx_ring->buffer_info[i];
		BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
		buffer_info->length = len;
		buffer_info->time_stamp = jiffies;
		buffer_info->dma = pci_map_page(adapter->pdev,
						frag->page,
						frag->page_offset,
						len,
						PCI_DMA_TODEVICE);

		count++;
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	i = (i == 0) ? tx_ring->count - 1 : i - 1;
	tx_ring->buffer_info[i].skb = skb;

	return count;
}
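/* The skb pointer is stored only on the last buffer of the chain (the
 * backed-up index above), so the skb is freed exactly once, after all of
 * its fragment mappings have been unmapped on cleanup.  The return value
 * is the number of descriptors consumed: one for skb->data plus one per
 * page fragment. */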
static inline void igb_tx_queue_adv(struct igb_adapter *adapter,
				    struct igb_ring *tx_ring,
				    int tx_flags, int count, u32 paylen,
				    u8 hdr_len)
{
	union e1000_adv_tx_desc *tx_desc = NULL;
	struct igb_buffer *buffer_info;
	u32 olinfo_status = 0, cmd_type_len;
	unsigned int i;

	cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
			E1000_ADVTXD_DCMD_DEXT);

	if (tx_flags & IGB_TX_FLAGS_VLAN)
		cmd_type_len |= E1000_ADVTXD_DCMD_VLE;

	if (tx_flags & IGB_TX_FLAGS_TSO) {
		cmd_type_len |= E1000_ADVTXD_DCMD_TSE;

		/* insert tcp checksum */
		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;

		/* insert ip checksum */
		if (tx_flags & IGB_TX_FLAGS_IPV4)
			olinfo_status |= E1000_TXD_POPTS_IXSM << 8;

	} else if (tx_flags & IGB_TX_FLAGS_CSUM) {
		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
	}

	if (tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_TSO |
			IGB_TX_FLAGS_VLAN))
		olinfo_status |= tx_ring->queue_index << 4;

	olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);

	i = tx_ring->next_to_use;
	while (count--) {
		buffer_info = &tx_ring->buffer_info[i];
		tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
		tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
		tx_desc->read.cmd_type_len =
			cpu_to_le32(cmd_type_len | buffer_info->length);
		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	tx_desc->read.cmd_type_len |= cpu_to_le32(adapter->txd_cmd);
	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64). */
	wmb();

	tx_ring->next_to_use = i;
	writel(i, adapter->hw.hw_addr + tx_ring->tail);
	/* we need this if more than one processor can write to our tail
	 * at a time, it synchronizes IO on IA64/Altix systems */
	mmiowb();
}
static int __igb_maybe_stop_tx(struct net_device *netdev,
			       struct igb_ring *tx_ring, int size)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	netif_stop_subqueue(netdev, tx_ring->queue_index);
#else
	netif_stop_queue(netdev);
#endif

	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available. */
	if (IGB_DESC_UNUSED(tx_ring) < size)
		return -EBUSY;

	/* A reprieve! */
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	netif_wake_subqueue(netdev, tx_ring->queue_index);
#else
	netif_wake_queue(netdev);
#endif
	++adapter->restart_queue;
	return 0;
}
static int igb_maybe_stop_tx(struct net_device *netdev,
			     struct igb_ring *tx_ring, int size)
{
	if (IGB_DESC_UNUSED(tx_ring) >= size)
		return 0;
	return __igb_maybe_stop_tx(netdev, tx_ring, size);
}
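/* Stop-then-recheck avoids a lost wakeup: the slow path stops the queue,
 * orders that store with smp_mb(), and only then re-reads
 * IGB_DESC_UNUSED().  If a concurrent igb_clean_tx_irq() freed
 * descriptors in that window, the recheck sees the room and re-wakes the
 * queue itself rather than leaving it stopped with nobody left to wake
 * it. */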
#define TXD_USE_COUNT(S) (((S) >> (IGB_MAX_TXD_PWR)) + 1)
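/* TXD_USE_COUNT(S) is a ceiling divide by the 64 KB-per-descriptor data
 * limit (1 << IGB_MAX_TXD_PWR): a 4096-byte buffer yields
 * (4096 >> 16) + 1 == 1 descriptor, while a 70000-byte one yields 2. */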
static int igb_xmit_frame_ring_adv(struct sk_buff *skb,
				   struct net_device *netdev,
				   struct igb_ring *tx_ring)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	unsigned int tx_flags = 0;
	unsigned int len;
	u8 hdr_len = 0;
	int tso = 0;

	len = skb_headlen(skb);

	if (test_bit(__IGB_DOWN, &adapter->state)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* need: 1 descriptor per page,
	 *       + 2 desc gap to keep tail from touching head,
	 *       + 1 desc for skb->data,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time */
	if (igb_maybe_stop_tx(netdev, tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
		/* this is a hard error */
		return NETDEV_TX_BUSY;
	}

	if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
		tx_flags |= IGB_TX_FLAGS_VLAN;
		tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
	}

	if (skb->protocol == htons(ETH_P_IP))
		tx_flags |= IGB_TX_FLAGS_IPV4;

	tso = skb_is_gso(skb) ? igb_tso_adv(adapter, tx_ring, skb, tx_flags,
					    &hdr_len) : 0;

	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (tso)
		tx_flags |= IGB_TX_FLAGS_TSO;
	else if (igb_tx_csum_adv(adapter, tx_ring, skb, tx_flags))
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			tx_flags |= IGB_TX_FLAGS_CSUM;

	igb_tx_queue_adv(adapter, tx_ring, tx_flags,
			 igb_tx_map_adv(adapter, tx_ring, skb),
			 skb->len, hdr_len);

	netdev->trans_start = jiffies;

	/* Make sure there is space in the ring for the next send. */
	igb_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 4);

	return NETDEV_TX_OK;
}
static int igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct igb_ring *tx_ring;
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	int r_idx = 0;
	r_idx = skb->queue_mapping & (IGB_MAX_TX_QUEUES - 1);
	tx_ring = adapter->multi_tx_table[r_idx];
#else
	tx_ring = &adapter->tx_ring[0];
#endif

	/* This goes back to the question of how to logically map a tx queue
	 * to a flow.  Right now, performance is impacted slightly negatively
	 * if using multiple tx queues.  If the stack breaks away from a
	 * single qdisc implementation, we can look at this again. */
	return (igb_xmit_frame_ring_adv(skb, netdev, tx_ring));
}
/**
 * igb_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void igb_tx_timeout(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/* Do the reset outside of interrupt context */
	adapter->tx_timeout_count++;
	schedule_work(&adapter->reset_task);
	wr32(E1000_EICS, adapter->eims_enable_mask &
		~(E1000_EIMS_TCP_TIMER | E1000_EIMS_OTHER));
}
static void igb_reset_task(struct work_struct *work)
{
	struct igb_adapter *adapter;
	adapter = container_of(work, struct igb_adapter, reset_task);

	igb_reinit_locked(adapter);
}
/**
 * igb_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the timer callback.
 **/
static struct net_device_stats *
igb_get_stats(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	/* only return the current stats */
	return &adapter->net_stats;
}
/**
 * igb_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int igb_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;

	if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
	    (max_frame > MAX_JUMBO_FRAME_SIZE)) {
		dev_err(&adapter->pdev->dev, "Invalid MTU setting\n");
		return -EINVAL;
	}

#define MAX_STD_JUMBO_FRAME_SIZE 9234
	if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
		dev_err(&adapter->pdev->dev, "MTU > 9216 not supported.\n");
		return -EINVAL;
	}

	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		msleep(1);
	/* igb_down has a dependency on max_frame_size */
	adapter->max_frame_size = max_frame;
	if (netif_running(netdev))
		igb_down(adapter);

	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
	 * means we reserve 2 more, this pushes us to allocate from the next
	 * larger slab size.
	 * i.e. RXBUFFER_2048 --> size-4096 slab
	 */

	if (max_frame <= IGB_RXBUFFER_256)
		adapter->rx_buffer_len = IGB_RXBUFFER_256;
	else if (max_frame <= IGB_RXBUFFER_512)
		adapter->rx_buffer_len = IGB_RXBUFFER_512;
	else if (max_frame <= IGB_RXBUFFER_1024)
		adapter->rx_buffer_len = IGB_RXBUFFER_1024;
	else if (max_frame <= IGB_RXBUFFER_2048)
		adapter->rx_buffer_len = IGB_RXBUFFER_2048;
	else
		adapter->rx_buffer_len = IGB_RXBUFFER_4096;
	/* adjust allocation if LPE protects us, and we aren't using SBP */
	if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
	    (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))
		adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;

	dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		igb_up(adapter);
	else
		igb_reset(adapter);

	clear_bit(__IGB_RESETTING, &adapter->state);

	return 0;
}
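/* Sizing example for the ladder above: new_mtu == 1500 gives max_frame ==
 * 1518 (MTU + 14-byte Ethernet header + 4-byte FCS), which first selects
 * IGB_RXBUFFER_2048 and is then trimmed to MAXIMUM_ETHERNET_VLAN_SIZE by
 * the LPE adjustment, since 1518 == ETH_FRAME_LEN + ETH_FCS_LEN. */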
/**
 * igb_update_stats - Update the board statistics counters
 * @adapter: board private structure
 **/

void igb_update_stats(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	u16 phy_tmp;

#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF

	/*
	 * Prevent stats update while adapter is being reset, or if the pci
	 * connection is down.
	 */
	if (adapter->link_speed == 0)
		return;
	if (pci_channel_offline(pdev))
		return;

	adapter->stats.crcerrs += rd32(E1000_CRCERRS);
	adapter->stats.gprc += rd32(E1000_GPRC);
	adapter->stats.gorc += rd32(E1000_GORCL);
	rd32(E1000_GORCH); /* clear GORCL */
	adapter->stats.bprc += rd32(E1000_BPRC);
	adapter->stats.mprc += rd32(E1000_MPRC);
	adapter->stats.roc += rd32(E1000_ROC);

	adapter->stats.prc64 += rd32(E1000_PRC64);
	adapter->stats.prc127 += rd32(E1000_PRC127);
	adapter->stats.prc255 += rd32(E1000_PRC255);
	adapter->stats.prc511 += rd32(E1000_PRC511);
	adapter->stats.prc1023 += rd32(E1000_PRC1023);
	adapter->stats.prc1522 += rd32(E1000_PRC1522);
	adapter->stats.symerrs += rd32(E1000_SYMERRS);
	adapter->stats.sec += rd32(E1000_SEC);

	adapter->stats.mpc += rd32(E1000_MPC);
	adapter->stats.scc += rd32(E1000_SCC);
	adapter->stats.ecol += rd32(E1000_ECOL);
	adapter->stats.mcc += rd32(E1000_MCC);
	adapter->stats.latecol += rd32(E1000_LATECOL);
	adapter->stats.dc += rd32(E1000_DC);
	adapter->stats.rlec += rd32(E1000_RLEC);
	adapter->stats.xonrxc += rd32(E1000_XONRXC);
	adapter->stats.xontxc += rd32(E1000_XONTXC);
	adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
	adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
	adapter->stats.fcruc += rd32(E1000_FCRUC);
	adapter->stats.gptc += rd32(E1000_GPTC);
	adapter->stats.gotc += rd32(E1000_GOTCL);
	rd32(E1000_GOTCH); /* clear GOTCL */
	adapter->stats.rnbc += rd32(E1000_RNBC);
	adapter->stats.ruc += rd32(E1000_RUC);
	adapter->stats.rfc += rd32(E1000_RFC);
	adapter->stats.rjc += rd32(E1000_RJC);
	adapter->stats.tor += rd32(E1000_TORH);
	adapter->stats.tot += rd32(E1000_TOTH);
	adapter->stats.tpr += rd32(E1000_TPR);

	adapter->stats.ptc64 += rd32(E1000_PTC64);
	adapter->stats.ptc127 += rd32(E1000_PTC127);
	adapter->stats.ptc255 += rd32(E1000_PTC255);
	adapter->stats.ptc511 += rd32(E1000_PTC511);
	adapter->stats.ptc1023 += rd32(E1000_PTC1023);
	adapter->stats.ptc1522 += rd32(E1000_PTC1522);

	adapter->stats.mptc += rd32(E1000_MPTC);
	adapter->stats.bptc += rd32(E1000_BPTC);

	/* used for adaptive IFS */

	hw->mac.tx_packet_delta = rd32(E1000_TPT);
	adapter->stats.tpt += hw->mac.tx_packet_delta;
	hw->mac.collision_delta = rd32(E1000_COLC);
	adapter->stats.colc += hw->mac.collision_delta;

	adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
	adapter->stats.rxerrc += rd32(E1000_RXERRC);
	adapter->stats.tncrs += rd32(E1000_TNCRS);
	adapter->stats.tsctc += rd32(E1000_TSCTC);
	adapter->stats.tsctfc += rd32(E1000_TSCTFC);

	adapter->stats.iac += rd32(E1000_IAC);
	adapter->stats.icrxoc += rd32(E1000_ICRXOC);
	adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
	adapter->stats.icrxatc += rd32(E1000_ICRXATC);
	adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
	adapter->stats.ictxatc += rd32(E1000_ICTXATC);
	adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
	adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
	adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);

	/* Fill out the OS statistics structure */
	adapter->net_stats.multicast = adapter->stats.mprc;
	adapter->net_stats.collisions = adapter->stats.colc;

	/* Rx Errors */

	/* RLEC on some newer hardware can be incorrect so build
	 * our own version based on RUC and ROC */
	adapter->net_stats.rx_errors = adapter->stats.rxerrc +
		adapter->stats.crcerrs + adapter->stats.algnerrc +
		adapter->stats.ruc + adapter->stats.roc +
		adapter->stats.cexterr;
	adapter->net_stats.rx_length_errors = adapter->stats.ruc +
					      adapter->stats.roc;
	adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
	adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
	adapter->net_stats.rx_missed_errors = adapter->stats.mpc;

	/* Tx Errors */
	adapter->net_stats.tx_errors = adapter->stats.ecol +
				       adapter->stats.latecol;
	adapter->net_stats.tx_aborted_errors = adapter->stats.ecol;
	adapter->net_stats.tx_window_errors = adapter->stats.latecol;
	adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs;

	/* Tx Dropped needs to be maintained elsewhere */

	/* Phy Stats */
	if (hw->phy.media_type == e1000_media_type_copper) {
		if ((adapter->link_speed == SPEED_1000) &&
		    (!hw->phy.ops.read_phy_reg(hw, PHY_1000T_STATUS,
					       &phy_tmp))) {
			phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
			adapter->phy_stats.idle_errors += phy_tmp;
		}
	}

	/* Management Stats */
	adapter->stats.mgptc += rd32(E1000_MGTPTC);
	adapter->stats.mgprc += rd32(E1000_MGTPRC);
	adapter->stats.mgpdc += rd32(E1000_MGTPDC);
}
static irqreturn_t igb_msix_other(int irq, void *data)
{
	struct net_device *netdev = data;
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = rd32(E1000_ICR);

	/* reading ICR causes bit 31 of EICR to be cleared */
	if (!(icr & E1000_ICR_LSC))
		goto no_link_interrupt;
	hw->mac.get_link_status = 1;
	/* guard against interrupt when we're going down */
	if (!test_bit(__IGB_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer, jiffies + 1);

no_link_interrupt:
	wr32(E1000_IMS, E1000_IMS_LSC);
	wr32(E1000_EIMS, adapter->eims_other);

	return IRQ_HANDLED;
}
static irqreturn_t igb_msix_tx(int irq, void *data)
{
	struct igb_ring *tx_ring = data;
	struct igb_adapter *adapter = tx_ring->adapter;
	struct e1000_hw *hw = &adapter->hw;

	if (!tx_ring->itr_val)
		wr32(E1000_EIMC, tx_ring->eims_value);

#ifdef CONFIG_DCA
	if (adapter->dca_enabled)
		igb_update_tx_dca(tx_ring);
#endif
	tx_ring->total_bytes = 0;
	tx_ring->total_packets = 0;

	/* auto mask will automatically reenable the interrupt when we write
	 * EICS */
	if (!igb_clean_tx_irq(tx_ring))
		/* Ring was not completely cleaned, so fire another interrupt */
		wr32(E1000_EICS, tx_ring->eims_value);
	else
		wr32(E1000_EIMS, tx_ring->eims_value);

	return IRQ_HANDLED;
}
static irqreturn_t igb_msix_rx(int irq, void *data)
{
	struct igb_ring *rx_ring = data;
	struct igb_adapter *adapter = rx_ring->adapter;
	struct e1000_hw *hw = &adapter->hw;

	/* Write the ITR value calculated at the end of the
	 * previous interrupt.
	 */

	if (adapter->set_itr) {
		wr32(rx_ring->itr_register,
		     1000000000 / (rx_ring->itr_val * 256));
		adapter->set_itr = 0;
	}

	if (netif_rx_schedule_prep(adapter->netdev, &rx_ring->napi))
		__netif_rx_schedule(adapter->netdev, &rx_ring->napi);

#ifdef CONFIG_DCA
	if (adapter->dca_enabled)
		igb_update_rx_dca(rx_ring);
#endif
	return IRQ_HANDLED;
}
#ifdef CONFIG_DCA
static void igb_update_rx_dca(struct igb_ring *rx_ring)
{
	u32 dca_rxctrl;
	struct igb_adapter *adapter = rx_ring->adapter;
	struct e1000_hw *hw = &adapter->hw;
	int cpu = get_cpu();
	int q = rx_ring - adapter->rx_ring;

	if (rx_ring->cpu != cpu) {
		dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
		dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
		dca_rxctrl |= dca_get_tag(cpu);
		dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
		dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
		dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
		wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
		rx_ring->cpu = cpu;
	}
	put_cpu();
}
static void igb_update_tx_dca(struct igb_ring *tx_ring)
{
	u32 dca_txctrl;
	struct igb_adapter *adapter = tx_ring->adapter;
	struct e1000_hw *hw = &adapter->hw;
	int cpu = get_cpu();
	int q = tx_ring - adapter->tx_ring;

	if (tx_ring->cpu != cpu) {
		dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
		dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
		dca_txctrl |= dca_get_tag(cpu);
		dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
		wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
		tx_ring->cpu = cpu;
	}
	put_cpu();
}
static void igb_setup_dca(struct igb_adapter *adapter)
{
	int i;

	if (!(adapter->dca_enabled))
		return;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		adapter->tx_ring[i].cpu = -1;
		igb_update_tx_dca(&adapter->tx_ring[i]);
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->rx_ring[i].cpu = -1;
		igb_update_rx_dca(&adapter->rx_ring[i]);
	}
}
static int __igb_notify_dca(struct device *dev, void *data)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	unsigned long event = *(unsigned long *)data;

	switch (event) {
	case DCA_PROVIDER_ADD:
		/* if already enabled, don't do it again */
		if (adapter->dca_enabled)
			break;
		adapter->dca_enabled = true;
		/* Always use CB2 mode, difference is masked
		 * in the CB driver. */
		wr32(E1000_DCA_CTRL, 2);
		if (dca_add_requester(dev) == 0) {
			dev_info(&adapter->pdev->dev, "DCA enabled\n");
			igb_setup_dca(adapter);
			break;
		}
		/* Fall Through since DCA is disabled. */
	case DCA_PROVIDER_REMOVE:
		if (adapter->dca_enabled) {
			/* without this a class_device is left
			 * hanging around in the sysfs model */
			dca_remove_requester(dev);
			dev_info(&adapter->pdev->dev, "DCA disabled\n");
			adapter->dca_enabled = false;
			wr32(E1000_DCA_CTRL, 1);
		}
		break;
	}

	return 0;
}
static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
			  void *p)
{
	int ret_val;

	ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
					 __igb_notify_dca);

	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}
#endif /* CONFIG_DCA */
/**
 * igb_intr_msi - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t igb_intr_msi(int irq, void *data)
{
	struct net_device *netdev = data;
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	/* read ICR disables interrupts using IAM */
	u32 icr = rd32(E1000_ICR);

	/* Write the ITR value calculated at the end of the
	 * previous interrupt.
	 */
	if (adapter->set_itr) {
		wr32(E1000_ITR, 1000000000 / (adapter->itr * 256));
		adapter->set_itr = 0;
	}

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	netif_rx_schedule(netdev, &adapter->rx_ring[0].napi);

	return IRQ_HANDLED;
}
/**
 * igb_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t igb_intr(int irq, void *data)
{
	struct net_device *netdev = data;
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked.  No
	 * need for the IMC write */
	u32 icr = rd32(E1000_ICR);
	u32 eicr = 0;
	if (!icr)
		return IRQ_NONE;  /* Not our interrupt */

	/* Write the ITR value calculated at the end of the
	 * previous interrupt.
	 */
	if (adapter->set_itr) {
		wr32(E1000_ITR, 1000000000 / (adapter->itr * 256));
		adapter->set_itr = 0;
	}

	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
	 * not set, then the adapter didn't send an interrupt */
	if (!(icr & E1000_ICR_INT_ASSERTED))
		return IRQ_NONE;

	eicr = rd32(E1000_EICR);

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	netif_rx_schedule(netdev, &adapter->rx_ring[0].napi);

	return IRQ_HANDLED;
}
/**
 * igb_poll - NAPI Rx polling callback
 * @napi: napi polling structure
 * @budget: count of how many packets we should handle
 **/
static int igb_poll(struct napi_struct *napi, int budget)
{
	struct igb_ring *rx_ring = container_of(napi, struct igb_ring, napi);
	struct igb_adapter *adapter = rx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	int tx_clean_complete, work_done = 0;

	/* this poll routine only supports one tx and one rx queue */
#ifdef CONFIG_DCA
	if (adapter->dca_enabled)
		igb_update_tx_dca(&adapter->tx_ring[0]);
#endif
	tx_clean_complete = igb_clean_tx_irq(&adapter->tx_ring[0]);

#ifdef CONFIG_DCA
	if (adapter->dca_enabled)
		igb_update_rx_dca(&adapter->rx_ring[0]);
#endif
	igb_clean_rx_irq_adv(&adapter->rx_ring[0], &work_done, budget);

	/* If no Tx and not enough Rx work done, exit the polling mode */
	if ((tx_clean_complete && (work_done < budget)) ||
	    !netif_running(netdev)) {
		if (adapter->itr_setting & 3)
			igb_set_itr(adapter, E1000_ITR, false);
		netif_rx_complete(netdev, napi);
		if (!test_bit(__IGB_DOWN, &adapter->state))
			igb_irq_enable(adapter);
		return 0;
	}

	return 1;
}
static int igb_clean_rx_ring_msix(struct napi_struct *napi, int budget)
{
	struct igb_ring *rx_ring = container_of(napi, struct igb_ring, napi);
	struct igb_adapter *adapter = rx_ring->adapter;
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	int work_done = 0;

	/* Keep link state information with original netdev */
	if (!netif_carrier_ok(netdev))
		goto quit_polling;

#ifdef CONFIG_DCA
	if (adapter->dca_enabled)
		igb_update_rx_dca(rx_ring);
#endif
	igb_clean_rx_irq_adv(rx_ring, &work_done, budget);

	/* If not enough Rx work done, exit the polling mode */
	if ((work_done == 0) || !netif_running(netdev)) {
quit_polling:
		netif_rx_complete(netdev, napi);

		wr32(E1000_EIMS, rx_ring->eims_value);
		if ((adapter->itr_setting & 3) && !rx_ring->no_itr_adjust &&
		    (rx_ring->total_packets > IGB_DYN_ITR_PACKET_THRESHOLD)) {
			int mean_size = rx_ring->total_bytes /
					rx_ring->total_packets;
			if (mean_size < IGB_DYN_ITR_LENGTH_LOW)
				igb_raise_rx_eitr(adapter, rx_ring);
			else if (mean_size > IGB_DYN_ITR_LENGTH_HIGH)
				igb_lower_rx_eitr(adapter, rx_ring);
		}

		if (!test_bit(__IGB_DOWN, &adapter->state))
			wr32(E1000_EIMS, rx_ring->eims_value);

		return 0;
	}

	return 1;
}
static inline u32 get_head(struct igb_ring *tx_ring)
{
	void *end = (struct e1000_tx_desc *)tx_ring->desc + tx_ring->count;
	return le32_to_cpu(*(volatile __le32 *)end);
}
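/* The Tx setup path evidently enables head write-back, under which the
 * hardware DMAs its consumed-head index into the memory just past the
 * last descriptor; that is why the pointer arithmetic above lands one
 * full ring beyond tx_ring->desc.  Reading the index from cacheable
 * memory is cheaper than a readl() of the TDH register on every clean
 * pass. */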
/**
 * igb_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 * returns true if ring is completely cleaned
 **/
static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
{
	struct igb_adapter *adapter = tx_ring->adapter;
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct e1000_tx_desc *tx_desc;
	struct igb_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	u32 head, oldhead;
	unsigned int count = 0;
	bool cleaned = false;
	bool retval = true;
	unsigned int total_bytes = 0, total_packets = 0;

	rmb();
	head = get_head(tx_ring);
	i = tx_ring->next_to_clean;
	while (1) {
		while (i != head) {
			cleaned = true;
			tx_desc = E1000_TX_DESC(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			skb = buffer_info->skb;

			if (skb) {
				unsigned int segs, bytecount;
				/* gso_segs is currently only valid for tcp */
				segs = skb_shinfo(skb)->gso_segs ?: 1;
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * skb_headlen(skb)) +
					    skb->len;
				total_packets += segs;
				total_bytes += bytecount;
			}

			igb_unmap_and_free_tx_resource(adapter, buffer_info);
			tx_desc->upper.data = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;

			count++;
			if (count == IGB_MAX_TX_CLEAN) {
				retval = false;
				goto done_cleaning;
			}
		}
		oldhead = head;
		rmb();
		head = get_head(tx_ring);
		if (head == oldhead)
			goto done_cleaning;
	}  /* while (1) */

done_cleaning:
	tx_ring->next_to_clean = i;

	if (unlikely(cleaned &&
		     netif_carrier_ok(netdev) &&
		     IGB_DESC_UNUSED(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
		if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
		    !(test_bit(__IGB_DOWN, &adapter->state))) {
			netif_wake_subqueue(netdev, tx_ring->queue_index);
			++adapter->restart_queue;
		}
#else
		if (netif_queue_stopped(netdev) &&
		    !(test_bit(__IGB_DOWN, &adapter->state))) {
			netif_wake_queue(netdev);
			++adapter->restart_queue;
		}
#endif
	}

	if (tx_ring->detect_tx_hung) {
		/* Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i */
		tx_ring->detect_tx_hung = false;
		if (tx_ring->buffer_info[i].time_stamp &&
		    time_after(jiffies, tx_ring->buffer_info[i].time_stamp +
			       (adapter->tx_timeout_factor * HZ))
		    && !(rd32(E1000_STATUS) &
			 E1000_STATUS_TXOFF)) {

			tx_desc = E1000_TX_DESC(*tx_ring, i);
			/* detected Tx unit hang */
			dev_err(&adapter->pdev->dev,
				"Detected Tx Unit Hang\n"
				"  Tx Queue             <%lu>\n"
				"  TDH                  <%x>\n"
				"  TDT                  <%x>\n"
				"  next_to_use          <%x>\n"
				"  next_to_clean        <%x>\n"
				"  head (WB)            <%x>\n"
				"buffer_info[next_to_clean]\n"
				"  time_stamp           <%lx>\n"
				"  jiffies              <%lx>\n"
				"  desc.status          <%x>\n",
				(unsigned long)((tx_ring - adapter->tx_ring) /
				sizeof(struct igb_ring)),
				readl(adapter->hw.hw_addr + tx_ring->head),
				readl(adapter->hw.hw_addr + tx_ring->tail),
				tx_ring->next_to_use,
				tx_ring->next_to_clean,
				head,
				tx_ring->buffer_info[i].time_stamp,
				jiffies,
				tx_desc->upper.fields.status);
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
			netif_stop_subqueue(netdev, tx_ring->queue_index);
#else
			netif_stop_queue(netdev);
#endif
		}
	}
	tx_ring->total_bytes += total_bytes;
	tx_ring->total_packets += total_packets;
	tx_ring->tx_stats.bytes += total_bytes;
	tx_ring->tx_stats.packets += total_packets;
	adapter->net_stats.tx_bytes += total_bytes;
	adapter->net_stats.tx_packets += total_packets;
	return retval;
}
/**
 * igb_receive_skb - helper function to handle rx indications
 * @adapter: board private structure
 * @status: descriptor status field as written by hardware
 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
 * @skb: pointer to sk_buff to be indicated to stack
 **/
static void igb_receive_skb(struct igb_adapter *adapter, u8 status, __le16 vlan,
			    struct sk_buff *skb)
{
	if (adapter->vlgrp && (status & E1000_RXD_STAT_VP))
		vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
					 le16_to_cpu(vlan));
	else
		netif_receive_skb(skb);
}
static inline void igb_rx_checksum_adv(struct igb_adapter *adapter,
				       u32 status_err, struct sk_buff *skb)
{
	skb->ip_summed = CHECKSUM_NONE;

	/* Ignore Checksum bit is set or checksum is disabled through ethtool */
	if ((status_err & E1000_RXD_STAT_IXSM) || !adapter->rx_csum)
		return;
	/* TCP/UDP checksum error bit is set */
	if (status_err &
	    (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
		/* let the stack verify checksum errors */
		adapter->hw_csum_err++;
		return;
	}
	/* It must be a TCP or UDP packet with a valid checksum */
	if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	adapter->hw_csum_good++;
}
static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
				 int *work_done, int budget)
{
	struct igb_adapter *adapter = rx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_adv_rx_desc *rx_desc, *next_rxd;
	struct igb_buffer *buffer_info, *next_buffer;
	struct sk_buff *skb;
	unsigned int i, j;
	u32 length, hlen, staterr;
	bool cleaned = false;
	int cleaned_count = 0;
	unsigned int total_bytes = 0, total_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

	while (staterr & E1000_RXD_STAT_DD) {
		if (*work_done >= budget)
			break;
		(*work_done)++;
		buffer_info = &rx_ring->buffer_info[i];

		/* HW will not DMA in data larger than the given buffer, even
		 * if it parses the (NFS, of course) header to be larger.  In
		 * that case, it fills the header buffer and spills the rest
		 * into the page.
		 */
		hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
		  E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
		if (hlen > adapter->rx_ps_hdr_size)
			hlen = adapter->rx_ps_hdr_size;

		length = le16_to_cpu(rx_desc->wb.upper.length);
		cleaned = true;
		cleaned_count++;

		if (rx_ring->pending_skb != NULL) {
			skb = rx_ring->pending_skb;
			rx_ring->pending_skb = NULL;
			j = rx_ring->pending_skb_page;
		} else {
			skb = buffer_info->skb;
			prefetch(skb->data - NET_IP_ALIGN);
			buffer_info->skb = NULL;
			if (hlen) {
				pci_unmap_single(pdev, buffer_info->dma,
						 adapter->rx_ps_hdr_size +
						   NET_IP_ALIGN,
						 PCI_DMA_FROMDEVICE);
				skb_put(skb, hlen);
			} else {
				pci_unmap_single(pdev, buffer_info->dma,
						 adapter->rx_buffer_len +
						   NET_IP_ALIGN,
						 PCI_DMA_FROMDEVICE);
				skb_put(skb, length);
				goto send_up;
			}
			j = 0;
		}

		while (length) {
			pci_unmap_page(pdev, buffer_info->page_dma,
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);
			buffer_info->page_dma = 0;
			skb_fill_page_desc(skb, j, buffer_info->page,
					   0, length);
			buffer_info->page = NULL;

			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
			rx_desc->wb.upper.status_error = 0;
			if (staterr & E1000_RXD_STAT_EOP)
				break;

			j++;
			cleaned_count++;
			i++;
			if (i == rx_ring->count)
				i = 0;

			buffer_info = &rx_ring->buffer_info[i];
			rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
			length = le16_to_cpu(rx_desc->wb.upper.length);
			if (!(staterr & E1000_RXD_STAT_DD)) {
				rx_ring->pending_skb = skb;
				rx_ring->pending_skb_page = j;
				goto out;
			}
		}
send_up:
		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC_ADV(*rx_ring, i);
		prefetch(next_rxd);
		next_buffer = &rx_ring->buffer_info[i];

		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}
		rx_ring->no_itr_adjust |= (staterr & E1000_RXD_STAT_DYNINT);

		total_bytes += skb->len;
		total_packets++;

		igb_rx_checksum_adv(adapter, staterr, skb);

		skb->protocol = eth_type_trans(skb, netdev);

		igb_receive_skb(adapter, staterr, rx_desc->wb.upper.vlan, skb);

		netdev->last_rx = jiffies;

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
			igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}
out:
	rx_ring->next_to_clean = i;
	cleaned_count = IGB_DESC_UNUSED(rx_ring);

	if (cleaned_count)
		igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);

	rx_ring->total_packets += total_packets;
	rx_ring->total_bytes += total_bytes;
	rx_ring->rx_stats.packets += total_packets;
	rx_ring->rx_stats.bytes += total_bytes;
	adapter->net_stats.rx_bytes += total_bytes;
	adapter->net_stats.rx_packets += total_packets;
	return cleaned;
}
/**
 * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 **/
static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
				     int cleaned_count)
{
	struct igb_adapter *adapter = rx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_adv_rx_desc *rx_desc;
	struct igb_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);

		if (adapter->rx_ps_hdr_size && !buffer_info->page) {
			buffer_info->page = alloc_page(GFP_ATOMIC);
			if (!buffer_info->page) {
				adapter->alloc_rx_buff_failed++;
				goto no_buffers;
			}
			buffer_info->page_dma =
				pci_map_page(pdev,
					     buffer_info->page,
					     0, PAGE_SIZE,
					     PCI_DMA_FROMDEVICE);
		}

		if (!buffer_info->skb) {
			int bufsz;

			if (adapter->rx_ps_hdr_size)
				bufsz = adapter->rx_ps_hdr_size;
			else
				bufsz = adapter->rx_buffer_len;
			bufsz += NET_IP_ALIGN;
			skb = netdev_alloc_skb(netdev, bufsz);

			if (!skb) {
				adapter->alloc_rx_buff_failed++;
				goto no_buffers;
			}

			/* Make buffer alignment 2 beyond a 16 byte boundary
			 * this will result in a 16 byte aligned IP header after
			 * the 14 byte MAC header is removed
			 */
			skb_reserve(skb, NET_IP_ALIGN);

			buffer_info->skb = skb;
			buffer_info->dma = pci_map_single(pdev, skb->data,
							  bufsz,
							  PCI_DMA_FROMDEVICE);
		}
		/* Refresh the desc even if buffer_addrs didn't change because
		 * each write-back erases this info. */
		if (adapter->rx_ps_hdr_size) {
			rx_desc->read.pkt_addr =
				cpu_to_le64(buffer_info->page_dma);
			rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
		} else {
			rx_desc->read.pkt_addr =
				cpu_to_le64(buffer_info->dma);
			rx_desc->read.hdr_addr = 0;
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		if (i == 0)
			i = (rx_ring->count - 1);
		else
			i--;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64). */
		wmb();
		writel(i, adapter->hw.hw_addr + rx_ring->tail);
	}
}
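/* Note that the tail is written as one slot behind next_to_use (the
 * decrement-with-wrap above), presumably to avoid the tail-equals-head
 * ambiguity in which a completely full ring would look identical to an
 * empty one; rx_ring->next_to_use still records the true next free slot
 * for the following allocation pass. */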
static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct mii_ioctl_data *data = if_mii(ifr);

	if (adapter->hw.phy.media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = adapter->hw.phy.addr;
		break;
	case SIOCGMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (adapter->hw.phy.ops.read_phy_reg(&adapter->hw,
						     data->reg_num
						     & 0x1F, &data->val_out))
			return -EIO;
		break;
	case SIOCSMIIREG:
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return igb_mii_ioctl(netdev, ifr, cmd);
	default:
		return -EOPNOTSUPP;
	}
}
static void igb_vlan_rx_register(struct net_device *netdev,
				 struct vlan_group *grp)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl;

	igb_irq_disable(adapter);
	adapter->vlgrp = grp;

	if (grp) {
		/* enable VLAN tag insert/strip */
		ctrl = rd32(E1000_CTRL);
		ctrl |= E1000_CTRL_VME;
		wr32(E1000_CTRL, ctrl);

		/* enable VLAN receive filtering */
		rctl = rd32(E1000_RCTL);
		rctl |= E1000_RCTL_VFE;
		rctl &= ~E1000_RCTL_CFIEN;
		wr32(E1000_RCTL, rctl);
		igb_update_mng_vlan(adapter);
		wr32(E1000_RLPML,
		     adapter->max_frame_size + VLAN_TAG_SIZE);
	} else {
		/* disable VLAN tag insert/strip */
		ctrl = rd32(E1000_CTRL);
		ctrl &= ~E1000_CTRL_VME;
		wr32(E1000_CTRL, ctrl);

		/* disable VLAN filtering */
		rctl = rd32(E1000_RCTL);
		rctl &= ~E1000_RCTL_VFE;
		wr32(E1000_RCTL, rctl);
		if (adapter->mng_vlan_id != (u16)IGB_MNG_VLAN_NONE) {
			igb_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
			adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
		}
		wr32(E1000_RLPML,
		     adapter->max_frame_size);
	}

	if (!test_bit(__IGB_DOWN, &adapter->state))
		igb_irq_enable(adapter);
}
static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 vfta, index;

	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
	    (vid == adapter->mng_vlan_id))
		return;
	/* add VID to filter table */
	index = (vid >> 5) & 0x7F;
	vfta = array_rd32(E1000_VFTA, index);
	vfta |= (1 << (vid & 0x1F));
	igb_write_vfta(&adapter->hw, index, vfta);
}
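/* VFTA indexing example: vid == 100 gives index == (100 >> 5) & 0x7F == 3
 * and the bit set is 1 << (100 & 0x1F) == 1 << 4, i.e. the filter table
 * is addressed as 128 32-bit words with one bit per possible VLAN ID
 * (128 * 32 == 4096). */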
static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 vfta, index;

	igb_irq_disable(adapter);
	vlan_group_set_device(adapter->vlgrp, vid, NULL);

	if (!test_bit(__IGB_DOWN, &adapter->state))
		igb_irq_enable(adapter);

	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
	    (vid == adapter->mng_vlan_id)) {
		/* release control to f/w */
		igb_release_hw_control(adapter);
		return;
	}

	/* remove VID from filter table */
	index = (vid >> 5) & 0x7F;
	vfta = array_rd32(E1000_VFTA, index);
	vfta &= ~(1 << (vid & 0x1F));
	igb_write_vfta(&adapter->hw, index, vfta);
}
static void igb_restore_vlan(struct igb_adapter *adapter)
{
	igb_vlan_rx_register(adapter->netdev, adapter->vlgrp);

	if (adapter->vlgrp) {
		u16 vid;
		for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
			if (!vlan_group_get_device(adapter->vlgrp, vid))
				continue;
			igb_vlan_rx_add_vid(adapter->netdev, vid);
		}
	}
}
int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
{
	struct e1000_mac_info *mac = &adapter->hw.mac;

	mac->autoneg = 0;

	/* Fiber NICs only allow 1000 Mbps Full duplex */
	if ((adapter->hw.phy.media_type == e1000_media_type_fiber) &&
	    spddplx != (SPEED_1000 + DUPLEX_FULL)) {
		dev_err(&adapter->pdev->dev,
			"Unsupported Speed/Duplex configuration\n");
		return -EINVAL;
	}

	switch (spddplx) {
	case SPEED_10 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_10_HALF;
		break;
	case SPEED_10 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_10_FULL;
		break;
	case SPEED_100 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_100_HALF;
		break;
	case SPEED_100 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_100_FULL;
		break;
	case SPEED_1000 + DUPLEX_FULL:
		mac->autoneg = 1;
		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case SPEED_1000 + DUPLEX_HALF: /* not supported */
	default:
		dev_err(&adapter->pdev->dev,
			"Unsupported Speed/Duplex configuration\n");
		return -EINVAL;
	}
	return 0;
}
static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        u32 ctrl, ctrl_ext, rctl, status;
        u32 wufc = adapter->wol;
        int retval;

        netif_device_detach(netdev);

        if (netif_running(netdev)) {
                WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
                igb_down(adapter);
                igb_free_irq(adapter);
        }

        retval = pci_save_state(pdev);
        if (retval)
                return retval;

        status = rd32(E1000_STATUS);
        if (status & E1000_STATUS_LU)
                wufc &= ~E1000_WUFC_LNKC;

        if (wufc) {
                igb_setup_rctl(adapter);
                igb_set_multi(netdev);

                /* turn on all-multi mode if wake on multicast is enabled */
                if (wufc & E1000_WUFC_MC) {
                        rctl = rd32(E1000_RCTL);
                        rctl |= E1000_RCTL_MPE;
                        wr32(E1000_RCTL, rctl);
                }

                ctrl = rd32(E1000_CTRL);
                /* advertise wake from D3Cold */
                #define E1000_CTRL_ADVD3WUC 0x00100000
                /* phy power management enable */
                #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
                ctrl |= E1000_CTRL_ADVD3WUC;
                wr32(E1000_CTRL, ctrl);

                if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
                    adapter->hw.phy.media_type ==
                                        e1000_media_type_internal_serdes) {
                        /* keep the laser running in D3 */
                        ctrl_ext = rd32(E1000_CTRL_EXT);
                        ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
                        wr32(E1000_CTRL_EXT, ctrl_ext);
                }

                /* Allow time for pending master requests to run */
                igb_disable_pcie_master(&adapter->hw);

                wr32(E1000_WUC, E1000_WUC_PME_EN);
                wr32(E1000_WUFC, wufc);
                pci_enable_wake(pdev, PCI_D3hot, 1);
                pci_enable_wake(pdev, PCI_D3cold, 1);
        } else {
                wr32(E1000_WUC, 0);
                wr32(E1000_WUFC, 0);
                pci_enable_wake(pdev, PCI_D3hot, 0);
                pci_enable_wake(pdev, PCI_D3cold, 0);
        }

        /* make sure adapter isn't asleep if manageability is enabled */
        if (adapter->en_mng_pt) {
                pci_enable_wake(pdev, PCI_D3hot, 1);
                pci_enable_wake(pdev, PCI_D3cold, 1);
        }

        /* Release control of h/w to f/w.  If f/w is AMT enabled, this
         * would have already happened in close and is redundant. */
        igb_release_hw_control(adapter);

        pci_disable_device(pdev);

        pci_set_power_state(pdev, pci_choose_state(pdev, state));

        return 0;
}

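/*
 * adapter->wol (copied into wufc above) is normally configured from
 * userspace through the driver's ethtool set_wol hook; a minimal usage
 * sketch, assuming the interface is named eth0:
 *
 *      ethtool -s eth0 wol g   # wake on magic packet
 *      ethtool -s eth0 wol d   # disable wake-up
 *
 * Each enabled ethtool WAKE_* flag maps to an E1000_WUFC_* filter bit,
 * which is why a zero wufc lets the function power the part all the
 * way down instead of arming the wake-up unit.
 */
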
static int igb_resume(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        int err;

        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);

        if (adapter->need_ioport)
                err = pci_enable_device(pdev);
        else
                err = pci_enable_device_mem(pdev);
        if (err) {
                dev_err(&pdev->dev,
                        "igb: Cannot enable PCI device from suspend\n");
                return err;
        }
        pci_set_master(pdev);

        pci_enable_wake(pdev, PCI_D3hot, 0);
        pci_enable_wake(pdev, PCI_D3cold, 0);

        if (netif_running(netdev)) {
                err = igb_request_irq(adapter);
                if (err)
                        return err;
        }

        /* e1000_power_up_phy(adapter); */

        igb_reset(adapter);
        wr32(E1000_WUS, ~0);

        igb_init_manageability(adapter);

        if (netif_running(netdev))
                igb_up(adapter);

        netif_device_attach(netdev);

        /* let the f/w know that the h/w is now under the control of the
         * driver. */
        igb_get_hw_control(adapter);

        return 0;
}

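/*
 * igb_suspend/igb_resume are not called directly; the PCI core invokes
 * them through the .suspend/.resume members of this driver's
 * struct pci_driver (declared elsewhere in this file), roughly:
 *
 *      .suspend  = igb_suspend,
 *      .resume   = igb_resume,
 *      .shutdown = igb_shutdown,
 */
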
static void igb_shutdown(struct pci_dev *pdev)
{
        igb_suspend(pdev, PMSG_SUSPEND);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts.  It's not called while
 * the interrupt routine is executing.
 */
static void igb_netpoll(struct net_device *netdev)
{
        struct igb_adapter *adapter = netdev_priv(netdev);
        int i;
        int work_done = 0;

        igb_irq_disable(adapter);
        for (i = 0; i < adapter->num_tx_queues; i++)
                igb_clean_tx_irq(&adapter->tx_ring[i]);

        for (i = 0; i < adapter->num_rx_queues; i++)
                igb_clean_rx_irq_adv(&adapter->rx_ring[i],
                                     &work_done,
                                     adapter->rx_ring[i].napi.weight);
        igb_irq_enable(adapter);
}
#endif /* CONFIG_NET_POLL_CONTROLLER */

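/*
 * Example trigger for the path above: netconsole sends kernel log
 * messages as UDP packets via netpoll, e.g. booting with something
 * like (addresses are placeholders):
 *
 *      netconsole=6665@10.0.0.1/eth0,6666@10.0.0.2/00:11:22:33:44:55
 *
 * When the console writes from contexts where interrupts cannot be
 * serviced, netpoll calls back into igb_netpoll() to reap tx/rx work.
 */
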
/**
 * igb_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
                                              pci_channel_state_t state)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct igb_adapter *adapter = netdev_priv(netdev);

        netif_device_detach(netdev);

        if (netif_running(netdev))
                igb_down(adapter);
        pci_disable_device(pdev);

        /* Request a slot reset. */
        return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * igb_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the igb_resume routine.
 */
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        int err;

        if (adapter->need_ioport)
                err = pci_enable_device(pdev);
        else
                err = pci_enable_device_mem(pdev);
        if (err) {
                dev_err(&pdev->dev,
                        "Cannot re-enable PCI device after reset.\n");
                return PCI_ERS_RESULT_DISCONNECT;
        }
        pci_set_master(pdev);
        pci_restore_state(pdev);

        pci_enable_wake(pdev, PCI_D3hot, 0);
        pci_enable_wake(pdev, PCI_D3cold, 0);

        igb_reset(adapter);
        wr32(E1000_WUS, ~0);

        return PCI_ERS_RESULT_RECOVERED;
}

/**
 * igb_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the igb_resume routine.
 */
static void igb_io_resume(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct igb_adapter *adapter = netdev_priv(netdev);

        igb_init_manageability(adapter);

        if (netif_running(netdev)) {
                if (igb_up(adapter)) {
                        dev_err(&pdev->dev, "igb_up failed after reset\n");
                        return;
                }
        }

        netif_device_attach(netdev);

        /* let the f/w know that the h/w is now under the control of the
         * driver. */
        igb_get_hw_control(adapter);
}

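/*
 * These three callbacks form the standard PCI error recovery sequence
 * (error_detected -> slot_reset -> resume) and are wired up through a
 * struct pci_error_handlers referenced from this driver's
 * struct pci_driver, along these lines:
 *
 *      static struct pci_error_handlers igb_err_handler = {
 *              .error_detected = igb_io_error_detected,
 *              .slot_reset = igb_io_slot_reset,
 *              .resume = igb_io_resume,
 *      };
 */
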