/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007-2009 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/net_tstamp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/aer.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
#include "igb.h"
#define DRV_VERSION "1.3.16-k2"
char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
                "Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] = "Copyright (c) 2007-2009 Intel Corporation.";

static const struct e1000_info *igb_info_tbl[] = {
        [board_82575] = &e1000_82575_info,
};
static struct pci_device_id igb_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
        /* required last entry */
        {0, }
};

MODULE_DEVICE_TABLE(pci, igb_pci_tbl);
void igb_reset(struct igb_adapter *);
static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
static void igb_setup_mrqc(struct igb_adapter *);
void igb_update_stats(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void __devexit igb_remove(struct pci_dev *pdev);
static int igb_sw_init(struct igb_adapter *);
static int igb_open(struct net_device *);
static int igb_close(struct net_device *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
static void igb_clean_tx_ring(struct igb_ring *);
static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_rx_mode(struct net_device *);
static void igb_update_phy_info(unsigned long);
static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *);
static struct net_device_stats *igb_get_stats(struct net_device *);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static void igb_set_uta(struct igb_adapter *adapter);
static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
static irqreturn_t igb_msix_ring(int irq, void *);
#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *);
static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_IGB_DCA */
static bool igb_clean_tx_irq(struct igb_q_vector *);
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_rx_irq_adv(struct igb_q_vector *, int *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_rx_register(struct net_device *, struct vlan_group *);
static void igb_vlan_rx_add_vid(struct net_device *, u16);
static void igb_vlan_rx_kill_vid(struct net_device *, u16);
static void igb_restore_vlan(struct igb_adapter *);
static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32, u8);
static void igb_ping_all_vfs(struct igb_adapter *);
static void igb_msg_task(struct igb_adapter *);
static void igb_vmm_control(struct igb_adapter *);
static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);

#ifdef CONFIG_PM
static int igb_suspend(struct pci_dev *, pm_message_t);
static int igb_resume(struct pci_dev *);
#endif
static void igb_shutdown(struct pci_dev *);
#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
        .notifier_call  = igb_notify_dca,
        .next           = NULL,
        .priority       = 0
};
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void igb_netpoll(struct net_device *);
#endif
#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs = 0;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
                 "per physical function");
#endif /* CONFIG_PCI_IOV */
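/* Usage note: loading the driver with e.g. "modprobe igb max_vfs=7"
 * requests seven SR-IOV virtual functions per physical function;
 * igb_probe_vfs() below silently caps the count at 7. */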
static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
                                              pci_channel_state_t);
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
static void igb_io_resume(struct pci_dev *);

static struct pci_error_handlers igb_err_handler = {
        .error_detected = igb_io_error_detected,
        .slot_reset = igb_io_slot_reset,
        .resume = igb_io_resume,
};

static struct pci_driver igb_driver = {
        .name     = igb_driver_name,
        .id_table = igb_pci_tbl,
        .probe    = igb_probe,
        .remove   = __devexit_p(igb_remove),
#ifdef CONFIG_PM
        /* Power Management Hooks */
        .suspend  = igb_suspend,
        .resume   = igb_resume,
#endif
        .shutdown = igb_shutdown,
        .err_handler = &igb_err_handler
};

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
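/* The cyclecounter read callback below assembles the raw 64-bit SYSTIM
 * value from the SYSTIML/SYSTIMH register pair; the timecounter set up
 * in igb_probe() then converts those raw cycles into nanoseconds. */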
/**
 * igb_read_clock - read raw cycle counter (to be used by time counter)
 **/
static cycle_t igb_read_clock(const struct cyclecounter *tc)
{
        struct igb_adapter *adapter =
                container_of(tc, struct igb_adapter, cycles);
        struct e1000_hw *hw = &adapter->hw;
        u64 stamp = 0;
        int shift = 0;

        stamp |= (u64)rd32(E1000_SYSTIML) << shift;
        stamp |= (u64)rd32(E1000_SYSTIMH) << (shift + 32);
        return stamp;
}
#ifdef DEBUG
/**
 * igb_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *igb_get_hw_dev_name(struct e1000_hw *hw)
{
        struct igb_adapter *adapter = hw->back;
        return adapter->netdev->name;
}
/**
 * igb_get_time_str - format current NIC and system time as string
 */
static char *igb_get_time_str(struct igb_adapter *adapter,
                              char buffer[160])
{
        cycle_t hw = adapter->cycles.read(&adapter->cycles);
        struct timespec nic = ns_to_timespec(timecounter_read(&adapter->clock));
        struct timespec sys;
        struct timespec delta;
        getnstimeofday(&sys);

        delta = timespec_sub(nic, sys);

        sprintf(buffer,
                "HW %llu, NIC %ld.%09lus, SYS %ld.%09lus, NIC-SYS %lds + %09luns",
                (unsigned long long)hw,
                (long)nic.tv_sec, nic.tv_nsec,
                (long)sys.tv_sec, sys.tv_nsec,
                (long)delta.tv_sec, delta.tv_nsec);

        return buffer;
}
#endif
/**
 * igb_init_module - Driver Registration Routine
 *
 * igb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init igb_init_module(void)
{
        int ret;
        printk(KERN_INFO "%s - version %s\n",
               igb_driver_string, igb_driver_version);

        printk(KERN_INFO "%s\n", igb_copyright);

#ifdef CONFIG_IGB_DCA
        dca_register_notify(&dca_notifier);
#endif
        ret = pci_register_driver(&igb_driver);
        return ret;
}

module_init(igb_init_module);
/**
 * igb_exit_module - Driver Exit Cleanup Routine
 *
 * igb_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit igb_exit_module(void)
{
#ifdef CONFIG_IGB_DCA
        dca_unregister_notify(&dca_notifier);
#endif
        pci_unregister_driver(&igb_driver);
}

module_exit(igb_exit_module);
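/* Q_IDX_82576() interleaves queue indices into the register layout used
 * for virtualization on the 82576: logical queues 0, 1, 2, 3, ... map to
 * register indices 0, 8, 1, 9, ... so that VF n ends up owning the
 * (n, n + 8) register pair described in the comment below. */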
#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
/**
 * igb_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 **/
static void igb_cache_ring_register(struct igb_adapter *adapter)
{
        int i;
        u32 rbase_offset = adapter->vfs_allocated_count;

        switch (adapter->hw.mac.type) {
        case e1000_82576:
                /* The queues are allocated for virtualization such that VF 0
                 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
                 * In order to avoid collision we start at the first free queue
                 * and continue consuming queues in the same sequence
                 */
                for (i = 0; i < adapter->num_rx_queues; i++)
                        adapter->rx_ring[i].reg_idx = rbase_offset +
                                                      Q_IDX_82576(i);
                for (i = 0; i < adapter->num_tx_queues; i++)
                        adapter->tx_ring[i].reg_idx = rbase_offset +
                                                      Q_IDX_82576(i);
                break;
        case e1000_82575:
        default:
                for (i = 0; i < adapter->num_rx_queues; i++)
                        adapter->rx_ring[i].reg_idx = i;
                for (i = 0; i < adapter->num_tx_queues; i++)
                        adapter->tx_ring[i].reg_idx = i;
                break;
        }
}
static void igb_free_queues(struct igb_adapter *adapter)
{
        kfree(adapter->tx_ring);
        kfree(adapter->rx_ring);

        adapter->tx_ring = NULL;
        adapter->rx_ring = NULL;

        adapter->num_rx_queues = 0;
        adapter->num_tx_queues = 0;
}
/**
 * igb_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int igb_alloc_queues(struct igb_adapter *adapter)
{
        int i;

        adapter->tx_ring = kcalloc(adapter->num_tx_queues,
                                   sizeof(struct igb_ring), GFP_KERNEL);
        if (!adapter->tx_ring)
                goto err;

        adapter->rx_ring = kcalloc(adapter->num_rx_queues,
                                   sizeof(struct igb_ring), GFP_KERNEL);
        if (!adapter->rx_ring)
                goto err;

        for (i = 0; i < adapter->num_tx_queues; i++) {
                struct igb_ring *ring = &(adapter->tx_ring[i]);
                ring->count = adapter->tx_ring_count;
                ring->queue_index = i;
                ring->pdev = adapter->pdev;
                ring->netdev = adapter->netdev;
                /* For 82575, context index must be unique per ring. */
                if (adapter->hw.mac.type == e1000_82575)
                        ring->flags = IGB_RING_FLAG_TX_CTX_IDX;
        }

        for (i = 0; i < adapter->num_rx_queues; i++) {
                struct igb_ring *ring = &(adapter->rx_ring[i]);
                ring->count = adapter->rx_ring_count;
                ring->queue_index = i;
                ring->pdev = adapter->pdev;
                ring->netdev = adapter->netdev;
                ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
                ring->flags = IGB_RING_FLAG_RX_CSUM; /* enable rx checksum */
                /* set flag indicating ring supports SCTP checksum offload */
                if (adapter->hw.mac.type >= e1000_82576)
                        ring->flags |= IGB_RING_FLAG_RX_SCTP_CSUM;
        }

        igb_cache_ring_register(adapter);

        return 0;

err:
        igb_free_queues(adapter);

        return -ENOMEM;
}
#define IGB_N0_QUEUE -1
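/* Note on the 82576 path in igb_assign_vector() below: each 32-bit IVAR0
 * table entry packs four 8-bit fields (byte 0: RX queue N, byte 1: TX
 * queue N, byte 2: RX queue N+8, byte 3: TX queue N+8, with N = queue & 0x7),
 * which is why the vector is masked and shifted into different byte lanes. */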
static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
{
        u32 msixbm = 0;
        struct igb_adapter *adapter = q_vector->adapter;
        struct e1000_hw *hw = &adapter->hw;
        u32 ivar, index;
        int rx_queue = IGB_N0_QUEUE;
        int tx_queue = IGB_N0_QUEUE;

        if (q_vector->rx_ring)
                rx_queue = q_vector->rx_ring->reg_idx;
        if (q_vector->tx_ring)
                tx_queue = q_vector->tx_ring->reg_idx;

        switch (hw->mac.type) {
        case e1000_82575:
                /* The 82575 assigns vectors using a bitmask, which matches the
                   bitmask for the EICR/EIMS/EIMC registers.  To assign one
                   or more queues to a vector, we write the appropriate bits
                   into the MSIXBM register for that vector. */
                if (rx_queue > IGB_N0_QUEUE)
                        msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
                if (tx_queue > IGB_N0_QUEUE)
                        msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
                array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
                q_vector->eims_value = msixbm;
                break;
        case e1000_82576:
                /* 82576 uses a table-based method for assigning vectors.
                   Each queue has a single entry in the table to which we write
                   a vector number along with a "valid" bit.  Sadly, the layout
                   of the table is somewhat counterintuitive. */
                if (rx_queue > IGB_N0_QUEUE) {
                        index = (rx_queue & 0x7);
                        ivar = array_rd32(E1000_IVAR0, index);
                        if (rx_queue < 8) {
                                /* vector goes into low byte of register */
                                ivar = ivar & 0xFFFFFF00;
                                ivar |= msix_vector | E1000_IVAR_VALID;
                        } else {
                                /* vector goes into third byte of register */
                                ivar = ivar & 0xFF00FFFF;
                                ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
                        }
                        array_wr32(E1000_IVAR0, index, ivar);
                }
                if (tx_queue > IGB_N0_QUEUE) {
                        index = (tx_queue & 0x7);
                        ivar = array_rd32(E1000_IVAR0, index);
                        if (tx_queue < 8) {
                                /* vector goes into second byte of register */
                                ivar = ivar & 0xFFFF00FF;
                                ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
                        } else {
                                /* vector goes into high byte of register */
                                ivar = ivar & 0x00FFFFFF;
                                ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
                        }
                        array_wr32(E1000_IVAR0, index, ivar);
                }
                q_vector->eims_value = 1 << msix_vector;
                break;
        default:
                BUG();
                break;
        }
}
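/* igb_configure_msix() below also ORs each vector's EIMS bit into
 * adapter->eims_enable_mask, so igb_irq_enable() can later unmask every
 * assigned vector with a single write to the EIMS register. */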
/**
 * igb_configure_msix - Configure MSI-X hardware
 *
 * igb_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void igb_configure_msix(struct igb_adapter *adapter)
{
        u32 tmp;
        int i, vector = 0;
        struct e1000_hw *hw = &adapter->hw;

        adapter->eims_enable_mask = 0;

        /* set vector for other causes, i.e. link changes */
        switch (hw->mac.type) {
        case e1000_82575:
                tmp = rd32(E1000_CTRL_EXT);
                /* enable MSI-X PBA support*/
                tmp |= E1000_CTRL_EXT_PBA_CLR;

                /* Auto-Mask interrupts upon ICR read. */
                tmp |= E1000_CTRL_EXT_EIAME;
                tmp |= E1000_CTRL_EXT_IRCA;

                wr32(E1000_CTRL_EXT, tmp);

                /* enable msix_other interrupt */
                array_wr32(E1000_MSIXBM(0), vector++,
                           E1000_EIMS_OTHER);
                adapter->eims_other = E1000_EIMS_OTHER;

                break;

        case e1000_82576:
                /* Turn on MSI-X capability first, or our settings
                 * won't stick.  And it will take days to debug. */
                wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
                     E1000_GPIE_PBA | E1000_GPIE_EIAME |
                     E1000_GPIE_NSICR);

                /* enable msix_other interrupt */
                adapter->eims_other = 1 << vector;
                tmp = (vector++ | E1000_IVAR_VALID) << 8;

                wr32(E1000_IVAR_MISC, tmp);
                break;
        default:
                /* do nothing, since nothing else supports MSI-X */
                break;
        } /* switch (hw->mac.type) */

        adapter->eims_enable_mask |= adapter->eims_other;

        for (i = 0; i < adapter->num_q_vectors; i++) {
                struct igb_q_vector *q_vector = adapter->q_vector[i];
                igb_assign_vector(q_vector, vector++);
                adapter->eims_enable_mask |= q_vector->eims_value;
        }

        wrfl();
}
/**
 * igb_request_msix - Initialize MSI-X interrupts
 *
 * igb_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int igb_request_msix(struct igb_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        struct e1000_hw *hw = &adapter->hw;
        int i, err = 0, vector = 0;

        err = request_irq(adapter->msix_entries[vector].vector,
                          &igb_msix_other, 0, netdev->name, adapter);
        if (err)
                goto out;
        vector++;

        for (i = 0; i < adapter->num_q_vectors; i++) {
                struct igb_q_vector *q_vector = adapter->q_vector[i];

                q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);

                if (q_vector->rx_ring && q_vector->tx_ring)
                        sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
                                q_vector->rx_ring->queue_index);
                else if (q_vector->tx_ring)
                        sprintf(q_vector->name, "%s-tx-%u", netdev->name,
                                q_vector->tx_ring->queue_index);
                else if (q_vector->rx_ring)
                        sprintf(q_vector->name, "%s-rx-%u", netdev->name,
                                q_vector->rx_ring->queue_index);
                else
                        sprintf(q_vector->name, "%s-unused", netdev->name);

                err = request_irq(adapter->msix_entries[vector].vector,
                                  &igb_msix_ring, 0, q_vector->name,
                                  q_vector);
                if (err)
                        goto out;
                vector++;
        }

        igb_configure_msix(adapter);
        return 0;
out:
        return err;
}
static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
{
        if (adapter->msix_entries) {
                pci_disable_msix(adapter->pdev);
                kfree(adapter->msix_entries);
                adapter->msix_entries = NULL;
        } else if (adapter->flags & IGB_FLAG_HAS_MSI)
                pci_disable_msi(adapter->pdev);
}
/**
 * igb_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void igb_free_q_vectors(struct igb_adapter *adapter)
{
        int v_idx;

        for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
                struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
                adapter->q_vector[v_idx] = NULL;
                netif_napi_del(&q_vector->napi);
                kfree(q_vector);
        }
        adapter->num_q_vectors = 0;
}
/**
 * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
 *
 * This function resets the device so that it has 0 rx queues, tx queues, and
 * MSI-X interrupts allocated.
 */
static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
{
        igb_free_queues(adapter);
        igb_free_q_vectors(adapter);
        igb_reset_interrupt_capability(adapter);
}
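/* A sizing example for igb_set_interrupt_capability() below: on a 4-CPU
 * system with 4 RX and 4 TX queues, 8 queue vectors plus 1 link-status
 * vector are requested (numvecs = 9); if pci_enable_msix() cannot grant
 * them, the driver falls back to a single MSI vector and one queue pair. */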
/**
 * igb_set_interrupt_capability - set MSI or MSI-X if supported
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static void igb_set_interrupt_capability(struct igb_adapter *adapter)
{
        int err;
        int numvecs, i;

        /* Number of supported queues. */
        adapter->num_rx_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
        adapter->num_tx_queues = min_t(u32, IGB_MAX_TX_QUEUES, num_online_cpus());

        /* start with one vector for every rx queue */
        numvecs = adapter->num_rx_queues;

        /* if tx handler is separate add 1 for every tx queue */
        numvecs += adapter->num_tx_queues;

        /* store the number of vectors reserved for queues */
        adapter->num_q_vectors = numvecs;

        /* add 1 vector for link status interrupts */
        numvecs++;
        adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
                                        GFP_KERNEL);
        if (!adapter->msix_entries)
                goto msi_only;

        for (i = 0; i < numvecs; i++)
                adapter->msix_entries[i].entry = i;

        err = pci_enable_msix(adapter->pdev,
                              adapter->msix_entries,
                              numvecs);
        if (err == 0)
                goto out;

        igb_reset_interrupt_capability(adapter);

        /* If we can't do MSI-X, try MSI */
msi_only:
#ifdef CONFIG_PCI_IOV
        /* disable SR-IOV for non MSI-X configurations */
        if (adapter->vf_data) {
                struct e1000_hw *hw = &adapter->hw;
                /* disable iov and allow time for transactions to clear */
                pci_disable_sriov(adapter->pdev);
                msleep(500);

                kfree(adapter->vf_data);
                adapter->vf_data = NULL;
                wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
                msleep(100);
                dev_info(&adapter->pdev->dev, "IOV Disabled\n");
        }
#endif
        adapter->vfs_allocated_count = 0;
        adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
        adapter->num_rx_queues = 1;
        adapter->num_tx_queues = 1;
        adapter->num_q_vectors = 1;
        if (!pci_enable_msi(adapter->pdev))
                adapter->flags |= IGB_FLAG_HAS_MSI;
out:
        /* Notify the stack of the (possibly) reduced Tx Queue count. */
        adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
        return;
}
/**
 * igb_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int igb_alloc_q_vectors(struct igb_adapter *adapter)
{
        struct igb_q_vector *q_vector;
        struct e1000_hw *hw = &adapter->hw;
        int v_idx;

        for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
                q_vector = kzalloc(sizeof(struct igb_q_vector), GFP_KERNEL);
                if (!q_vector)
                        goto err_out;
                q_vector->adapter = adapter;
                q_vector->itr_shift = (hw->mac.type == e1000_82575) ? 16 : 0;
                q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
                q_vector->itr_val = IGB_START_ITR;
                q_vector->set_itr = 1;
                netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
                adapter->q_vector[v_idx] = q_vector;
        }
        return 0;

err_out:
        while (v_idx) {
                v_idx--;
                q_vector = adapter->q_vector[v_idx];
                netif_napi_del(&q_vector->napi);
                kfree(q_vector);
                adapter->q_vector[v_idx] = NULL;
        }
        return -ENOMEM;
}
static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
                                      int ring_idx, int v_idx)
{
        struct igb_q_vector *q_vector;

        q_vector = adapter->q_vector[v_idx];
        q_vector->rx_ring = &adapter->rx_ring[ring_idx];
        q_vector->rx_ring->q_vector = q_vector;
        q_vector->itr_val = adapter->rx_itr_setting;
        if (q_vector->itr_val && q_vector->itr_val <= 3)
                q_vector->itr_val = IGB_START_ITR;
}

static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
                                      int ring_idx, int v_idx)
{
        struct igb_q_vector *q_vector;

        q_vector = adapter->q_vector[v_idx];
        q_vector->tx_ring = &adapter->tx_ring[ring_idx];
        q_vector->tx_ring->q_vector = q_vector;
        q_vector->itr_val = adapter->tx_itr_setting;
        if (q_vector->itr_val && q_vector->itr_val <= 3)
                q_vector->itr_val = IGB_START_ITR;
}
/**
 * igb_map_ring_to_vector - maps allocated queues to vectors
 *
 * This function maps the recently allocated queues to vectors.
 **/
static int igb_map_ring_to_vector(struct igb_adapter *adapter)
{
        int i;
        int v_idx = 0;

        if ((adapter->num_q_vectors < adapter->num_rx_queues) ||
            (adapter->num_q_vectors < adapter->num_tx_queues))
                return -ENOMEM;

        if (adapter->num_q_vectors >=
            (adapter->num_rx_queues + adapter->num_tx_queues)) {
                for (i = 0; i < adapter->num_rx_queues; i++)
                        igb_map_rx_ring_to_vector(adapter, i, v_idx++);
                for (i = 0; i < adapter->num_tx_queues; i++)
                        igb_map_tx_ring_to_vector(adapter, i, v_idx++);
        } else {
                for (i = 0; i < adapter->num_rx_queues; i++) {
                        if (i < adapter->num_tx_queues)
                                igb_map_tx_ring_to_vector(adapter, i, v_idx);
                        igb_map_rx_ring_to_vector(adapter, i, v_idx++);
                }
                for (; i < adapter->num_tx_queues; i++)
                        igb_map_tx_ring_to_vector(adapter, i, v_idx++);
        }
        return 0;
}
/**
 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
 *
 * This function initializes the interrupts and allocates all of the queues.
 **/
static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
{
        struct pci_dev *pdev = adapter->pdev;
        int err;

        igb_set_interrupt_capability(adapter);

        err = igb_alloc_q_vectors(adapter);
        if (err) {
                dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
                goto err_alloc_q_vectors;
        }

        err = igb_alloc_queues(adapter);
        if (err) {
                dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
                goto err_alloc_queues;
        }

        err = igb_map_ring_to_vector(adapter);
        if (err) {
                dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n");
                goto err_map_queues;
        }

        return 0;
err_map_queues:
        igb_free_queues(adapter);
err_alloc_queues:
        igb_free_q_vectors(adapter);
err_alloc_q_vectors:
        igb_reset_interrupt_capability(adapter);
        return err;
}
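/* Interrupt setup from here on degrades gracefully: MSI-X with one vector
 * per queue is tried first, then a single MSI vector, and finally a shared
 * legacy INTx line; each fallback also shrinks the queue count to one. */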
/**
 * igb_request_irq - initialize interrupts
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_request_irq(struct igb_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
        struct e1000_hw *hw = &adapter->hw;
        int err = 0;

        if (adapter->msix_entries) {
                err = igb_request_msix(adapter);
                if (!err)
                        goto request_done;
                /* fall back to MSI */
                igb_clear_interrupt_scheme(adapter);
                if (!pci_enable_msi(adapter->pdev))
                        adapter->flags |= IGB_FLAG_HAS_MSI;
                igb_free_all_tx_resources(adapter);
                igb_free_all_rx_resources(adapter);
                adapter->num_tx_queues = 1;
                adapter->num_rx_queues = 1;
                adapter->num_q_vectors = 1;
                err = igb_alloc_q_vectors(adapter);
                if (err) {
                        dev_err(&pdev->dev,
                                "Unable to allocate memory for vectors\n");
                        goto request_done;
                }
                err = igb_alloc_queues(adapter);
                if (err) {
                        dev_err(&pdev->dev,
                                "Unable to allocate memory for queues\n");
                        igb_free_q_vectors(adapter);
                        goto request_done;
                }
                igb_setup_all_tx_resources(adapter);
                igb_setup_all_rx_resources(adapter);
        } else {
                switch (hw->mac.type) {
                case e1000_82575:
                        wr32(E1000_MSIXBM(0),
                             (E1000_EICR_RX_QUEUE0 |
                              E1000_EICR_TX_QUEUE0 |
                              E1000_EIMS_OTHER));
                        break;
                case e1000_82576:
                        wr32(E1000_IVAR0, E1000_IVAR_VALID);
                        break;
                default:
                        break;
                }
        }

        if (adapter->flags & IGB_FLAG_HAS_MSI) {
                err = request_irq(adapter->pdev->irq, &igb_intr_msi, 0,
                                  netdev->name, adapter);
                if (!err)
                        goto request_done;

                /* fall back to legacy interrupts */
                igb_reset_interrupt_capability(adapter);
                adapter->flags &= ~IGB_FLAG_HAS_MSI;
        }

        err = request_irq(adapter->pdev->irq, &igb_intr, IRQF_SHARED,
                          netdev->name, adapter);

        if (err)
                dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n",
                        err);

request_done:
        return err;
}
static void igb_free_irq(struct igb_adapter *adapter)
{
        if (adapter->msix_entries) {
                int vector = 0, i;

                free_irq(adapter->msix_entries[vector++].vector, adapter);

                for (i = 0; i < adapter->num_q_vectors; i++) {
                        struct igb_q_vector *q_vector = adapter->q_vector[i];
                        free_irq(adapter->msix_entries[vector++].vector,
                                 q_vector);
                }
        } else {
                free_irq(adapter->pdev->irq, adapter);
        }
}
/**
 * igb_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void igb_irq_disable(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;

        if (adapter->msix_entries) {
                u32 regval = rd32(E1000_EIAM);
                wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
                wr32(E1000_EIMC, adapter->eims_enable_mask);
                regval = rd32(E1000_EIAC);
                wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
        }

        wr32(E1000_IAM, 0);
        wr32(E1000_IMC, ~0);
        wrfl();
        synchronize_irq(adapter->pdev->irq);
}
/**
 * igb_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void igb_irq_enable(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;

        if (adapter->msix_entries) {
                u32 regval = rd32(E1000_EIAC);
                wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
                regval = rd32(E1000_EIAM);
                wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
                wr32(E1000_EIMS, adapter->eims_enable_mask);
                if (adapter->vfs_allocated_count)
                        wr32(E1000_MBVFIMR, 0xFF);
                wr32(E1000_IMS, (E1000_IMS_LSC | E1000_IMS_VMMB |
                                 E1000_IMS_DOUTSYNC));
        } else {
                wr32(E1000_IMS, IMS_ENABLE_MASK);
                wr32(E1000_IAM, IMS_ENABLE_MASK);
        }
}
static void igb_update_mng_vlan(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        u16 vid = adapter->hw.mng_cookie.vlan_id;
        u16 old_vid = adapter->mng_vlan_id;

        if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
                /* add VID to filter table */
                igb_vfta_set(hw, vid, true);
                adapter->mng_vlan_id = vid;
        } else {
                adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
        }

        if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
            (vid != old_vid) &&
            !vlan_group_get_device(adapter->vlgrp, old_vid)) {
                /* remove VID from filter table */
                igb_vfta_set(hw, old_vid, false);
        }
}
/**
 * igb_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 *
 **/
static void igb_release_hw_control(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        u32 ctrl_ext;

        /* Let firmware take over control of h/w */
        ctrl_ext = rd32(E1000_CTRL_EXT);
        wr32(E1000_CTRL_EXT,
             ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 *
 **/
static void igb_get_hw_control(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        u32 ctrl_ext;

        /* Let firmware know the driver has taken over */
        ctrl_ext = rd32(E1000_CTRL_EXT);
        wr32(E1000_CTRL_EXT,
             ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
}
/**
 * igb_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 **/
static void igb_configure(struct igb_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int i;

        igb_get_hw_control(adapter);
        igb_set_rx_mode(netdev);

        igb_restore_vlan(adapter);

        igb_setup_tctl(adapter);
        igb_setup_mrqc(adapter);
        igb_setup_rctl(adapter);

        igb_configure_tx(adapter);
        igb_configure_rx(adapter);

        igb_rx_fifo_flush_82575(&adapter->hw);

        /* call igb_desc_unused which always leaves
         * at least 1 descriptor unused to make sure
         * next_to_use != next_to_clean */
        for (i = 0; i < adapter->num_rx_queues; i++) {
                struct igb_ring *ring = &adapter->rx_ring[i];
                igb_alloc_rx_buffers_adv(ring, igb_desc_unused(ring));
        }

        adapter->tx_queue_len = netdev->tx_queue_len;
}
/**
 * igb_up - Open the interface and prepare it to handle traffic
 * @adapter: board private structure
 **/
int igb_up(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        int i;

        /* hardware has been reset, we need to reload some things */
        igb_configure(adapter);

        clear_bit(__IGB_DOWN, &adapter->state);

        for (i = 0; i < adapter->num_q_vectors; i++) {
                struct igb_q_vector *q_vector = adapter->q_vector[i];
                napi_enable(&q_vector->napi);
        }
        if (adapter->msix_entries)
                igb_configure_msix(adapter);

        /* Clear any pending interrupts. */
        rd32(E1000_ICR);
        igb_irq_enable(adapter);

        /* notify VFs that reset has been completed */
        if (adapter->vfs_allocated_count) {
                u32 reg_data = rd32(E1000_CTRL_EXT);
                reg_data |= E1000_CTRL_EXT_PFRSTD;
                wr32(E1000_CTRL_EXT, reg_data);
        }

        netif_tx_start_all_queues(adapter->netdev);

        /* Fire a link change interrupt to start the watchdog. */
        wr32(E1000_ICS, E1000_ICS_LSC);
        return 0;
}
void igb_down(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;
        u32 tctl, rctl;
        int i;

        /* signal that we're down so the interrupt handler does not
         * reschedule our watchdog timer */
        set_bit(__IGB_DOWN, &adapter->state);

        /* disable receives in the hardware */
        rctl = rd32(E1000_RCTL);
        wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
        /* flush and sleep below */

        netif_tx_stop_all_queues(netdev);

        /* disable transmits in the hardware */
        tctl = rd32(E1000_TCTL);
        tctl &= ~E1000_TCTL_EN;
        wr32(E1000_TCTL, tctl);
        /* flush both disables and wait for them to finish */
        wrfl();
        msleep(10);

        for (i = 0; i < adapter->num_q_vectors; i++) {
                struct igb_q_vector *q_vector = adapter->q_vector[i];
                napi_disable(&q_vector->napi);
        }

        igb_irq_disable(adapter);

        del_timer_sync(&adapter->watchdog_timer);
        del_timer_sync(&adapter->phy_info_timer);

        netdev->tx_queue_len = adapter->tx_queue_len;
        netif_carrier_off(netdev);

        /* record the stats before reset*/
        igb_update_stats(adapter);

        adapter->link_speed = 0;
        adapter->link_duplex = 0;

        if (!pci_channel_offline(adapter->pdev))
                igb_reset(adapter);
        igb_clean_all_tx_rings(adapter);
        igb_clean_all_rx_rings(adapter);
#ifdef CONFIG_IGB_DCA

        /* since we reset the hardware DCA settings were cleared */
        igb_setup_dca(adapter);
#endif
}
void igb_reinit_locked(struct igb_adapter *adapter)
{
        WARN_ON(in_interrupt());
        while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
                msleep(1);
        igb_down(adapter);
        igb_up(adapter);
        clear_bit(__IGB_RESETTING, &adapter->state);
}
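/* A note on the packet-buffer math in igb_reset() below: the PBA register
 * splits the on-chip packet buffer between Rx (low 16 bits) and Tx (upper
 * 16 bits) in KB units, so "pba << 10" converts the Rx share to bytes for
 * the flow-control watermark calculation. */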
void igb_reset(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        struct e1000_mac_info *mac = &hw->mac;
        struct e1000_fc_info *fc = &hw->fc;
        u32 pba = 0, tx_space, min_tx_space, min_rx_space;
        u16 hwm;

        /* Repartition PBA for greater than 9k MTU.
         * To take effect CTRL.RST is required.
         */
        switch (mac->type) {
        case e1000_82576:
                pba = rd32(E1000_RXPBS);
                pba &= E1000_RXPBS_SIZE_MASK_82576;
                break;
        case e1000_82575:
        default:
                pba = E1000_PBA_34K;
                break;
        }

        if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
            (mac->type < e1000_82576)) {
                /* adjust PBA for jumbo frames */
                wr32(E1000_PBA, pba);

                /* To maintain wire speed transmits, the Tx FIFO should be
                 * large enough to accommodate two full transmit packets,
                 * rounded up to the next 1KB and expressed in KB.  Likewise,
                 * the Rx FIFO should be large enough to accommodate at least
                 * one full receive packet and is similarly rounded up and
                 * expressed in KB. */
                pba = rd32(E1000_PBA);
                /* upper 16 bits has Tx packet buffer allocation size in KB */
                tx_space = pba >> 16;
                /* lower 16 bits has Rx packet buffer allocation size in KB */
                pba &= 0xffff;
                /* the tx fifo also stores 16 bytes of information about the tx
                 * but don't include ethernet FCS because hardware appends it */
                min_tx_space = (adapter->max_frame_size +
                                sizeof(union e1000_adv_tx_desc) -
                                ETH_FCS_LEN) * 2;
                min_tx_space = ALIGN(min_tx_space, 1024);
                min_tx_space >>= 10;
                /* software strips receive CRC, so leave room for it */
                min_rx_space = adapter->max_frame_size;
                min_rx_space = ALIGN(min_rx_space, 1024);
                min_rx_space >>= 10;

                /* If current Tx allocation is less than the min Tx FIFO size,
                 * and the min Tx FIFO size is less than the current Rx FIFO
                 * allocation, take space away from current Rx allocation */
                if (tx_space < min_tx_space &&
                    ((min_tx_space - tx_space) < pba)) {
                        pba = pba - (min_tx_space - tx_space);

                        /* if short on rx space, rx wins and must trump tx
                         * adjustment */
                        if (pba < min_rx_space)
                                pba = min_rx_space;
                }
                wr32(E1000_PBA, pba);
        }

        /* flow control settings */
        /* The high water mark must be low enough to fit one full frame
         * (or the size used for early receive) above it in the Rx FIFO.
         * Set it to the lower of:
         * - 90% of the Rx FIFO size, or
         * - the full Rx FIFO size minus one full frame */
        hwm = min(((pba << 10) * 9 / 10),
                  ((pba << 10) - 2 * adapter->max_frame_size));

        if (mac->type < e1000_82576) {
                fc->high_water = hwm & 0xFFF8; /* 8-byte granularity */
                fc->low_water = fc->high_water - 8;
        } else {
                fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */
                fc->low_water = fc->high_water - 16;
        }
        fc->pause_time = 0xFFFF;
        fc->send_xon = 1;
        fc->current_mode = fc->requested_mode;

        /* disable receive for all VFs and wait one second */
        if (adapter->vfs_allocated_count) {
                int i;
                for (i = 0 ; i < adapter->vfs_allocated_count; i++)
                        adapter->vf_data[i].flags = 0;

                /* ping all the active vfs to let them know we are going down */
                igb_ping_all_vfs(adapter);

                /* disable transmits and receives */
                wr32(E1000_VFRE, 0);
                wr32(E1000_VFTE, 0);
        }

        /* Allow time for pending master requests to run */
        adapter->hw.mac.ops.reset_hw(&adapter->hw);
        wr32(E1000_WUC, 0);

        if (adapter->hw.mac.ops.init_hw(&adapter->hw))
                dev_err(&adapter->pdev->dev, "Hardware Error\n");

        igb_update_mng_vlan(adapter);

        /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
        wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);

        igb_reset_adaptive(&adapter->hw);
        igb_get_phy_info(&adapter->hw);
}
static const struct net_device_ops igb_netdev_ops = {
        .ndo_open               = igb_open,
        .ndo_stop               = igb_close,
        .ndo_start_xmit         = igb_xmit_frame_adv,
        .ndo_get_stats          = igb_get_stats,
        .ndo_set_rx_mode        = igb_set_rx_mode,
        .ndo_set_multicast_list = igb_set_rx_mode,
        .ndo_set_mac_address    = igb_set_mac,
        .ndo_change_mtu         = igb_change_mtu,
        .ndo_do_ioctl           = igb_ioctl,
        .ndo_tx_timeout         = igb_tx_timeout,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_vlan_rx_register   = igb_vlan_rx_register,
        .ndo_vlan_rx_add_vid    = igb_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = igb_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = igb_netpoll,
#endif
};
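/* igb_probe() below first tries a 64-bit DMA mask and records the result
 * in pci_using_dac (which later gates NETIF_F_HIGHDMA); if either the
 * streaming or the consistent mask fails, it retries with 32 bits before
 * giving up. */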
/**
 * igb_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in igb_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * igb_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit igb_probe(struct pci_dev *pdev,
                               const struct pci_device_id *ent)
{
        struct net_device *netdev;
        struct igb_adapter *adapter;
        struct e1000_hw *hw;
        u16 eeprom_data = 0;
        static int global_quad_port_a; /* global quad port a indication */
        const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
        unsigned long mmio_start, mmio_len;
        int err, pci_using_dac;
        u16 eeprom_apme_mask = IGB_EEPROM_APME;
        u32 part_num;

        err = pci_enable_device_mem(pdev);
        if (err)
                return err;

        pci_using_dac = 0;
        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
        if (!err) {
                err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
                if (!err)
                        pci_using_dac = 1;
        } else {
                err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (err) {
                        err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
                        if (err) {
                                dev_err(&pdev->dev, "No usable DMA "
                                        "configuration, aborting\n");
                                goto err_dma;
                        }
                }
        }

        err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
                                           IORESOURCE_MEM),
                                           igb_driver_name);
        if (err)
                goto err_pci_reg;

        pci_enable_pcie_error_reporting(pdev);

        pci_set_master(pdev);
        pci_save_state(pdev);

        err = -ENOMEM;
        netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
                                   IGB_ABS_MAX_TX_QUEUES);
        if (!netdev)
                goto err_alloc_etherdev;

        SET_NETDEV_DEV(netdev, &pdev->dev);

        pci_set_drvdata(pdev, netdev);
        adapter = netdev_priv(netdev);
        adapter->netdev = netdev;
        adapter->pdev = pdev;
        hw = &adapter->hw;
        hw->back = adapter;
        adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE;

        mmio_start = pci_resource_start(pdev, 0);
        mmio_len = pci_resource_len(pdev, 0);
        err = -EIO;
        hw->hw_addr = ioremap(mmio_start, mmio_len);
        if (!hw->hw_addr)
                goto err_ioremap;

        netdev->netdev_ops = &igb_netdev_ops;
        igb_set_ethtool_ops(netdev);
        netdev->watchdog_timeo = 5 * HZ;

        strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

        netdev->mem_start = mmio_start;
        netdev->mem_end = mmio_start + mmio_len;

        /* PCI config space info */
        hw->vendor_id = pdev->vendor;
        hw->device_id = pdev->device;
        hw->revision_id = pdev->revision;
        hw->subsystem_vendor_id = pdev->subsystem_vendor;
        hw->subsystem_device_id = pdev->subsystem_device;

        /* setup the private structure */
        /* Copy the default MAC, PHY and NVM function pointers */
        memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
        memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
        memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
        /* Initialize skew-specific constants */
        err = ei->get_invariants(hw);
        if (err)
                goto err_sw_init;

        /* setup the private structure */
        err = igb_sw_init(adapter);
        if (err)
                goto err_sw_init;

        igb_get_bus_info_pcie(hw);

        hw->phy.autoneg_wait_to_complete = false;
        hw->mac.adaptive_ifs = true;

        /* Copper options */
        if (hw->phy.media_type == e1000_media_type_copper) {
                hw->phy.mdix = AUTO_ALL_MODES;
                hw->phy.disable_polarity_correction = false;
                hw->phy.ms_type = e1000_ms_hw_default;
        }

        if (igb_check_reset_block(hw))
                dev_info(&pdev->dev,
                         "PHY reset is blocked due to SOL/IDER session.\n");

        netdev->features = NETIF_F_SG |
                           NETIF_F_IP_CSUM |
                           NETIF_F_HW_VLAN_TX |
                           NETIF_F_HW_VLAN_RX |
                           NETIF_F_HW_VLAN_FILTER;

        netdev->features |= NETIF_F_IPV6_CSUM;
        netdev->features |= NETIF_F_TSO;
        netdev->features |= NETIF_F_TSO6;

        netdev->features |= NETIF_F_GRO;

        netdev->vlan_features |= NETIF_F_TSO;
        netdev->vlan_features |= NETIF_F_TSO6;
        netdev->vlan_features |= NETIF_F_IP_CSUM;
        netdev->vlan_features |= NETIF_F_IPV6_CSUM;
        netdev->vlan_features |= NETIF_F_SG;

        if (pci_using_dac)
                netdev->features |= NETIF_F_HIGHDMA;

        if (adapter->hw.mac.type == e1000_82576)
                netdev->features |= NETIF_F_SCTP_CSUM;

        adapter->en_mng_pt = igb_enable_mng_pass_thru(&adapter->hw);

        /* before reading the NVM, reset the controller to put the device in a
         * known good starting state */
        hw->mac.ops.reset_hw(hw);

        /* make sure the NVM is good */
        if (igb_validate_nvm_checksum(hw) < 0) {
                dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
                err = -EIO;
                goto err_eeprom;
        }

        /* copy the MAC address out of the NVM */
        if (hw->mac.ops.read_mac_addr(hw))
                dev_err(&pdev->dev, "NVM Read Error\n");

        memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
        memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);

        if (!is_valid_ether_addr(netdev->perm_addr)) {
                dev_err(&pdev->dev, "Invalid MAC Address\n");
                err = -EIO;
                goto err_eeprom;
        }

        setup_timer(&adapter->watchdog_timer, &igb_watchdog,
                    (unsigned long) adapter);
        setup_timer(&adapter->phy_info_timer, &igb_update_phy_info,
                    (unsigned long) adapter);

        INIT_WORK(&adapter->reset_task, igb_reset_task);
        INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);

        /* Initialize link properties that are user-changeable */
        adapter->fc_autoneg = true;
        hw->mac.autoneg = true;
        hw->phy.autoneg_advertised = 0x2f;

        hw->fc.requested_mode = e1000_fc_default;
        hw->fc.current_mode = e1000_fc_default;

        igb_validate_mdi_setting(hw);

        /* Initial Wake on LAN setting If APM wake is enabled in the EEPROM,
         * enable the ACPI Magic Packet filter
         */

        if (hw->bus.func == 0)
                hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
        else if (hw->bus.func == 1)
                hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);

        if (eeprom_data & eeprom_apme_mask)
                adapter->eeprom_wol |= E1000_WUFC_MAG;

        /* now that we have the eeprom settings, apply the special cases where
         * the eeprom may be wrong or the board simply won't support wake on
         * lan on a particular port */
        switch (pdev->device) {
        case E1000_DEV_ID_82575GB_QUAD_COPPER:
                adapter->eeprom_wol = 0;
                break;
        case E1000_DEV_ID_82575EB_FIBER_SERDES:
        case E1000_DEV_ID_82576_FIBER:
        case E1000_DEV_ID_82576_SERDES:
                /* Wake events only supported on port A for dual fiber
                 * regardless of eeprom setting */
                if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
                        adapter->eeprom_wol = 0;
                break;
        case E1000_DEV_ID_82576_QUAD_COPPER:
                /* if quad port adapter, disable WoL on all but port A */
                if (global_quad_port_a != 0)
                        adapter->eeprom_wol = 0;
                else
                        adapter->flags |= IGB_FLAG_QUAD_PORT_A;
                /* Reset for multiple quad port adapters */
                if (++global_quad_port_a == 4)
                        global_quad_port_a = 0;
                break;
        }

        /* initialize the wol settings based on the eeprom settings */
        adapter->wol = adapter->eeprom_wol;
        device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
        /* reset the hardware with the new settings */
        igb_reset(adapter);

        /* let the f/w know that the h/w is now under the control of the
         * driver. */
        igb_get_hw_control(adapter);

        strcpy(netdev->name, "eth%d");
        err = register_netdev(netdev);
        if (err)
                goto err_register;

        /* carrier off reporting is important to ethtool even BEFORE open */
        netif_carrier_off(netdev);

#ifdef CONFIG_IGB_DCA
        if (dca_add_requester(&pdev->dev) == 0) {
                adapter->flags |= IGB_FLAG_DCA_ENABLED;
                dev_info(&pdev->dev, "DCA enabled\n");
                igb_setup_dca(adapter);
        }

#endif

        switch (hw->mac.type) {
        case e1000_82576:
                /*
                 * Initialize hardware timer: we keep it running just in case
                 * that some program needs it later on.
                 */
                memset(&adapter->cycles, 0, sizeof(adapter->cycles));
                adapter->cycles.read = igb_read_clock;
                adapter->cycles.mask = CLOCKSOURCE_MASK(64);
                adapter->cycles.mult = 1;
                /*
                 * Scale the NIC clock cycle by a large factor so that
                 * relatively small clock corrections can be added or
                 * subtracted at each clock tick. The drawbacks of a large
                 * factor are a) that the clock register overflows more quickly
                 * (not such a big deal) and b) that the increment per tick has
                 * to fit into 24 bits.  As a result we need to use a shift of
                 * 19 so we can fit a value of 16 into the TIMINCA register.
                 */
                adapter->cycles.shift = IGB_82576_TSYNC_SHIFT;
                wr32(E1000_TIMINCA,
                     (1 << E1000_TIMINCA_16NS_SHIFT) |
                     (16 << IGB_82576_TSYNC_SHIFT));

                /* Set registers so that rollover occurs soon to test this. */
                wr32(E1000_SYSTIML, 0x00000000);
                wr32(E1000_SYSTIMH, 0xFF800000);
                wrfl();

                timecounter_init(&adapter->clock,
                                 &adapter->cycles,
                                 ktime_to_ns(ktime_get_real()));
                /*
                 * Synchronize our NIC clock against system wall clock. NIC
                 * time stamp reading requires ~3us per sample, each sample
                 * was pretty stable even under load => only require 10
                 * samples for each offset comparison.
                 */
                memset(&adapter->compare, 0, sizeof(adapter->compare));
                adapter->compare.source = &adapter->clock;
                adapter->compare.target = ktime_get_real;
                adapter->compare.num_samples = 10;
                timecompare_update(&adapter->compare, 0);
                break;
        case e1000_82575:
                /* 82575 does not support timesync */
        default:
                break;
        }

        dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
        /* print bus type/speed/width info */
        dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
                 netdev->name,
                 ((hw->bus.speed == e1000_bus_speed_2500)
                  ? "2.5Gb/s" : "unknown"),
                 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
                  (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
                  (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
                   "unknown"),
                 netdev->dev_addr);

        igb_read_part_num(hw, &part_num);
        dev_info(&pdev->dev, "%s: PBA No: %06x-%03x\n", netdev->name,
                 (part_num >> 8), (part_num & 0xff));

        dev_info(&pdev->dev,
                 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
                 adapter->msix_entries ? "MSI-X" :
                 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
                 adapter->num_rx_queues, adapter->num_tx_queues);

        return 0;

err_register:
        igb_release_hw_control(adapter);
err_eeprom:
        if (!igb_check_reset_block(hw))
                igb_reset_phy(hw);

        if (hw->flash_address)
                iounmap(hw->flash_address);
err_sw_init:
        igb_clear_interrupt_scheme(adapter);
        iounmap(hw->hw_addr);
err_ioremap:
        free_netdev(netdev);
err_alloc_etherdev:
        pci_release_selected_regions(pdev, pci_select_bars(pdev,
                                     IORESOURCE_MEM));
err_pci_reg:
err_dma:
        pci_disable_device(pdev);
        return err;
}
/**
 * igb_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * igb_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit igb_remove(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;

        /* flush_scheduled work may reschedule our watchdog task, so
         * explicitly disable watchdog tasks from being rescheduled */
        set_bit(__IGB_DOWN, &adapter->state);
        del_timer_sync(&adapter->watchdog_timer);
        del_timer_sync(&adapter->phy_info_timer);

        flush_scheduled_work();

#ifdef CONFIG_IGB_DCA
        if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
                dev_info(&pdev->dev, "DCA disabled\n");
                dca_remove_requester(&pdev->dev);
                adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
                wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
        }
#endif

        /* Release control of h/w to f/w.  If f/w is AMT enabled, this
         * would have already happened in close and is redundant. */
        igb_release_hw_control(adapter);

        unregister_netdev(netdev);

        if (!igb_check_reset_block(&adapter->hw))
                igb_reset_phy(&adapter->hw);

        igb_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PCI_IOV
        /* reclaim resources allocated to VFs */
        if (adapter->vf_data) {
                /* disable iov and allow time for transactions to clear */
                pci_disable_sriov(pdev);
                msleep(500);

                kfree(adapter->vf_data);
                adapter->vf_data = NULL;
                wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
                msleep(100);
                dev_info(&pdev->dev, "IOV Disabled\n");
        }
#endif

        iounmap(hw->hw_addr);
        if (hw->flash_address)
                iounmap(hw->flash_address);
        pci_release_selected_regions(pdev, pci_select_bars(pdev,
                                     IORESOURCE_MEM));

        free_netdev(netdev);

        pci_disable_pcie_error_reporting(pdev);

        pci_disable_device(pdev);
}
/**
 * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
 * @adapter: board private structure to initialize
 *
 * This function initializes the vf specific data storage and then attempts to
 * allocate the VFs.  The reason for ordering it this way is because it is much
 * more expensive time wise to disable SR-IOV than it is to allocate and free
 * the memory for the VFs.
 **/
static void __devinit igb_probe_vfs(struct igb_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
        struct pci_dev *pdev = adapter->pdev;

        if (adapter->vfs_allocated_count > 7)
                adapter->vfs_allocated_count = 7;

        if (adapter->vfs_allocated_count) {
                adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
                                           sizeof(struct vf_data_storage),
                                           GFP_KERNEL);
                /* if allocation failed then we do not support SR-IOV */
                if (!adapter->vf_data) {
                        adapter->vfs_allocated_count = 0;
                        dev_err(&pdev->dev, "Unable to allocate memory for VF "
                                "Data Storage\n");
                }
        }

        if (pci_enable_sriov(pdev, adapter->vfs_allocated_count)) {
                kfree(adapter->vf_data);
                adapter->vf_data = NULL;
#endif /* CONFIG_PCI_IOV */
                adapter->vfs_allocated_count = 0;
#ifdef CONFIG_PCI_IOV
        } else {
                unsigned char mac_addr[ETH_ALEN];
                int i;

                dev_info(&pdev->dev, "%d vfs allocated\n",
                         adapter->vfs_allocated_count);
                for (i = 0; i < adapter->vfs_allocated_count; i++) {
                        random_ether_addr(mac_addr);
                        igb_set_vf_mac(adapter, i, mac_addr);
                }
        }
#endif /* CONFIG_PCI_IOV */
}
/**
 * igb_sw_init - Initialize general software structures (struct igb_adapter)
 * @adapter: board private structure to initialize
 *
 * igb_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int __devinit igb_sw_init(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;

        pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);

        adapter->tx_ring_count = IGB_DEFAULT_TXD;
        adapter->rx_ring_count = IGB_DEFAULT_RXD;
        adapter->rx_itr_setting = IGB_DEFAULT_ITR;
        adapter->tx_itr_setting = IGB_DEFAULT_ITR;

        adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
        adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;

#ifdef CONFIG_PCI_IOV
        if (hw->mac.type == e1000_82576)
                adapter->vfs_allocated_count = max_vfs;

#endif /* CONFIG_PCI_IOV */
        /* This call may decrease the number of queues */
        if (igb_init_interrupt_scheme(adapter)) {
                dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
                return -ENOMEM;
        }

        igb_probe_vfs(adapter);

        /* Explicitly disable IRQ since the NIC can be in any state. */
        igb_irq_disable(adapter);

        set_bit(__IGB_DOWN, &adapter->state);
        return 0;
}
/**
 * igb_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int igb_open(struct net_device *netdev)
{
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        int err;
        int i;

        /* disallow open during test */
        if (test_bit(__IGB_TESTING, &adapter->state))
                return -EBUSY;

        netif_carrier_off(netdev);

        /* allocate transmit descriptors */
        err = igb_setup_all_tx_resources(adapter);
        if (err)
                goto err_setup_tx;

        /* allocate receive descriptors */
        err = igb_setup_all_rx_resources(adapter);
        if (err)
                goto err_setup_rx;

        /* e1000_power_up_phy(adapter); */

        /* before we allocate an interrupt, we must be ready to handle it.
         * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
         * as soon as we call pci_request_irq, so we have to setup our
         * clean_rx handler before we do so. */
        igb_configure(adapter);

        err = igb_request_irq(adapter);
        if (err)
                goto err_req_irq;

        /* From here on the code is the same as igb_up() */
        clear_bit(__IGB_DOWN, &adapter->state);

        for (i = 0; i < adapter->num_q_vectors; i++) {
                struct igb_q_vector *q_vector = adapter->q_vector[i];
                napi_enable(&q_vector->napi);
        }

        /* Clear any pending interrupts. */
        rd32(E1000_ICR);

        igb_irq_enable(adapter);

        /* notify VFs that reset has been completed */
        if (adapter->vfs_allocated_count) {
                u32 reg_data = rd32(E1000_CTRL_EXT);
                reg_data |= E1000_CTRL_EXT_PFRSTD;
                wr32(E1000_CTRL_EXT, reg_data);
        }

        netif_tx_start_all_queues(netdev);

        /* Fire a link status change interrupt to start the watchdog. */
        wr32(E1000_ICS, E1000_ICS_LSC);

        return 0;

err_req_irq:
        igb_release_hw_control(adapter);
        /* e1000_power_down_phy(adapter); */
        igb_free_all_rx_resources(adapter);
err_setup_rx:
        igb_free_all_tx_resources(adapter);
err_setup_tx:
        igb_reset(adapter);

        return err;
}
/**
 * igb_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int igb_close(struct net_device *netdev)
{
        struct igb_adapter *adapter = netdev_priv(netdev);

        WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
        igb_down(adapter);

        igb_free_irq(adapter);

        igb_free_all_tx_resources(adapter);
        igb_free_all_rx_resources(adapter);

        return 0;
}
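/* Descriptor-ring sizing in the setup routines below: each advanced Tx/Rx
 * descriptor is 16 bytes, so e.g. a 256-descriptor ring needs exactly
 * 4096 bytes, and the ALIGN() to a 4K boundary leaves it unchanged. */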
/**
 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int igb_setup_tx_resources(struct igb_ring *tx_ring)
{
        struct pci_dev *pdev = tx_ring->pdev;
        int size;

        size = sizeof(struct igb_buffer) * tx_ring->count;
        tx_ring->buffer_info = vmalloc(size);
        if (!tx_ring->buffer_info)
                goto err;
        memset(tx_ring->buffer_info, 0, size);

        /* round up to nearest 4K */
        tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
        tx_ring->size = ALIGN(tx_ring->size, 4096);

        tx_ring->desc = pci_alloc_consistent(pdev,
                                             tx_ring->size,
                                             &tx_ring->dma);

        if (!tx_ring->desc)
                goto err;

        tx_ring->next_to_use = 0;
        tx_ring->next_to_clean = 0;
        return 0;

err:
        vfree(tx_ring->buffer_info);
        dev_err(&pdev->dev,
                "Unable to allocate memory for the transmit descriptor ring\n");
        return -ENOMEM;
}
/**
 * igb_setup_all_tx_resources - wrapper to allocate Tx resources
 *                              (Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
{
        struct pci_dev *pdev = adapter->pdev;
        int i, err = 0;

        for (i = 0; i < adapter->num_tx_queues; i++) {
                err = igb_setup_tx_resources(&adapter->tx_ring[i]);
                if (err) {
                        dev_err(&pdev->dev,
                                "Allocation for Tx Queue %u failed\n", i);
                        for (i--; i >= 0; i--)
                                igb_free_tx_resources(&adapter->tx_ring[i]);
                        break;
                }
        }

        for (i = 0; i < IGB_MAX_TX_QUEUES; i++) {
                int r_idx = i % adapter->num_tx_queues;
                adapter->multi_tx_table[i] = &adapter->tx_ring[r_idx];
        }
        return err;
}
/**
 * igb_setup_tctl - configure the transmit control registers
 * @adapter: Board private structure
 **/
void igb_setup_tctl(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        u32 tctl;

        /* disable queue 0 which is enabled by default on 82575 and 82576 */
        wr32(E1000_TXDCTL(0), 0);

        /* Program the Transmit Control Register */
        tctl = rd32(E1000_TCTL);
        tctl &= ~E1000_TCTL_CT;
        tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
                (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);

        igb_config_collision_dist(hw);

        /* Enable transmits */
        tctl |= E1000_TCTL_EN;

        wr32(E1000_TCTL, tctl);
}
/**
 * igb_configure_tx_ring - Configure transmit ring after Reset
 * @adapter: board private structure
 * @ring: tx ring to configure
 *
 * Configure a transmit ring after a reset.
 **/
void igb_configure_tx_ring(struct igb_adapter *adapter,
			   struct igb_ring *ring)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 txdctl;
	u64 tdba = ring->dma;
	int reg_idx = ring->reg_idx;

	/* disable the queue */
	txdctl = rd32(E1000_TXDCTL(reg_idx));
	wr32(E1000_TXDCTL(reg_idx),
	     txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
	wrfl();
	mdelay(10);

	wr32(E1000_TDLEN(reg_idx),
	     ring->count * sizeof(union e1000_adv_tx_desc));
	wr32(E1000_TDBAL(reg_idx),
	     tdba & 0x00000000ffffffffULL);
	wr32(E1000_TDBAH(reg_idx), tdba >> 32);

	ring->head = hw->hw_addr + E1000_TDH(reg_idx);
	ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
	writel(0, ring->head);
	writel(0, ring->tail);

	txdctl |= IGB_TX_PTHRESH;
	txdctl |= IGB_TX_HTHRESH << 8;
	txdctl |= IGB_TX_WTHRESH << 16;

	txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
	wr32(E1000_TXDCTL(reg_idx), txdctl);
}
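
/*
 * The three writes into txdctl above pack the descriptor prefetch, host,
 * and write-back thresholds into one register: PTHRESH occupies the low
 * byte as written, HTHRESH sits at bits 15:8, and WTHRESH at bits 23:16,
 * which is why the values are shifted by 8 and 16 before the queue-enable
 * bit is set.
 */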
/**
 * igb_configure_tx - Configure transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void igb_configure_tx(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igb_configure_tx_ring(adapter, &adapter->tx_ring[i]);
}
/**
 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int igb_setup_rx_resources(struct igb_ring *rx_ring)
{
	struct pci_dev *pdev = rx_ring->pdev;
	int size, desc_len;

	size = sizeof(struct igb_buffer) * rx_ring->count;
	rx_ring->buffer_info = vmalloc(size);
	if (!rx_ring->buffer_info)
		goto err;
	memset(rx_ring->buffer_info, 0, size);

	desc_len = sizeof(union e1000_adv_rx_desc);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * desc_len;
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
					     &rx_ring->dma);
	if (!rx_ring->desc)
		goto err;

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;

err:
	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;
	dev_err(&pdev->dev, "Unable to allocate memory for "
		"the receive descriptor ring\n");
	return -ENOMEM;
}
/**
 * igb_setup_all_rx_resources - wrapper to allocate Rx resources
 *				  (Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = igb_setup_rx_resources(&adapter->rx_ring[i]);
		if (err) {
			dev_err(&pdev->dev,
				"Allocation for Rx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
				igb_free_rx_resources(&adapter->rx_ring[i]);
			break;
		}
	}

	return err;
}
/**
 * igb_setup_mrqc - configure the multiple receive queue control registers
 * @adapter: Board private structure
 **/
static void igb_setup_mrqc(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 mrqc, rxcsum;
	u32 j, num_rx_queues, shift = 0, shift2 = 0;
	union e1000_reta {
		u32 dword;
		u8  bytes[4];
	} reta;
	static const u8 rsshash[40] = {
		0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
		0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
		0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
		0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };

	/* Fill out hash function seeds */
	for (j = 0; j < 10; j++) {
		u32 rsskey = rsshash[(j * 4)];
		rsskey |= rsshash[(j * 4) + 1] << 8;
		rsskey |= rsshash[(j * 4) + 2] << 16;
		rsskey |= rsshash[(j * 4) + 3] << 24;
		array_wr32(E1000_RSSRK(0), j, rsskey);
	}
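
	/*
	 * The 128-byte redirection table is assembled below four bytes at
	 * a time: each byte selects the RSS queue for one hash bucket, and
	 * every fourth iteration flushes the packed dword to the
	 * E1000_RETA register array.
	 */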
	num_rx_queues = adapter->num_rx_queues;

	if (adapter->vfs_allocated_count) {
		/* 82575 and 82576 support 2 RSS queues for VMDq */
		switch (hw->mac.type) {
		case e1000_82576:
			shift = 3;
			num_rx_queues = 2;
			break;
		case e1000_82575:
			shift = 2;
			shift2 = 6;
		default:
			break;
		}
	} else {
		if (hw->mac.type == e1000_82575)
			shift = 6;
	}

	for (j = 0; j < (32 * 4); j++) {
		reta.bytes[j & 3] = (j % num_rx_queues) << shift;
		if (shift2)
			reta.bytes[j & 3] |= num_rx_queues << shift2;
		if ((j & 3) == 3)
			wr32(E1000_RETA(j >> 2), reta.dword);
	}

	/*
	 * Disable raw packet checksumming so that RSS hash is placed in
	 * descriptor on writeback.  No need to enable TCP/UDP/IP checksum
	 * offloads as they are enabled by default
	 */
	rxcsum = rd32(E1000_RXCSUM);
	rxcsum |= E1000_RXCSUM_PCSD;

	if (adapter->hw.mac.type >= e1000_82576)
		/* Enable Receive Checksum Offload for SCTP */
		rxcsum |= E1000_RXCSUM_CRCOFL;

	/* Don't need to set TUOFL or IPOFL, they default to 1 */
	wr32(E1000_RXCSUM, rxcsum);

	/* If VMDq is enabled then we set the appropriate mode for that, else
	 * we default to RSS so that an RSS hash is calculated per packet even
	 * if we are only using one queue */
	if (adapter->vfs_allocated_count) {
		if (hw->mac.type > e1000_82575) {
			/* Set the default pool for the PF's first queue */
			u32 vtctl = rd32(E1000_VT_CTL);
			vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
				   E1000_VT_CTL_DISABLE_DEF_POOL);
			vtctl |= adapter->vfs_allocated_count <<
				E1000_VT_CTL_DEFAULT_POOL_SHIFT;
			wr32(E1000_VT_CTL, vtctl);
		}
		if (adapter->num_rx_queues > 1)
			mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
		else
			mrqc = E1000_MRQC_ENABLE_VMDQ;
	} else {
		mrqc = E1000_MRQC_ENABLE_RSS_4Q;
	}
	igb_vmm_control(adapter);

	mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
		 E1000_MRQC_RSS_FIELD_IPV4_TCP);
	mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 |
		 E1000_MRQC_RSS_FIELD_IPV6_TCP);
	mrqc |= (E1000_MRQC_RSS_FIELD_IPV4_UDP |
		 E1000_MRQC_RSS_FIELD_IPV6_UDP);
	mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
		 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);

	wr32(E1000_MRQC, mrqc);
}
/**
 * igb_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 **/
void igb_setup_rctl(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;

	rctl = rd32(E1000_RCTL);

	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
	rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);

	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
		(hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/*
	 * enable stripping of CRC. It's unlikely this will break BMC
	 * redirection as it did with e1000. Newer features require
	 * that the HW strips the CRC.
	 */
	rctl |= E1000_RCTL_SECRC;

	/* disable store bad packets and clear size bits. */
	rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);

	/* enable LPE to prevent packets larger than max_frame_size */
	rctl |= E1000_RCTL_LPE;

	/* disable queue 0 to prevent tail write w/o re-config */
	wr32(E1000_RXDCTL(0), 0);

	/* Attention!!!  For SR-IOV PF driver operations you must enable
	 * queue drop for all VF and PF queues to prevent head of line blocking
	 * if an un-trusted VF does not provide descriptors to hardware.
	 */
	if (adapter->vfs_allocated_count) {
		/* set all queue drop enable bits */
		wr32(E1000_QDE, ALL_QUEUES);
	}

	wr32(E1000_RCTL, rctl);
}
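
/*
 * Note: E1000_RCTL_LPE only lets long packets through the MAC; the actual
 * clamp to max_frame_size is applied by the RLPML register, which
 * igb_rlpml_set() below programs (per-pool via VMOLR when VFs are active).
 */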
static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
				   int vfn)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vmolr;

	/* if it isn't the PF check to see if VFs are enabled and
	 * increase the size to support vlan tags */
	if (vfn < adapter->vfs_allocated_count &&
	    adapter->vf_data[vfn].vlans_enabled)
		size += VLAN_TAG_SIZE;

	vmolr = rd32(E1000_VMOLR(vfn));
	vmolr &= ~E1000_VMOLR_RLPML_MASK;
	vmolr |= size | E1000_VMOLR_LPE;
	wr32(E1000_VMOLR(vfn), vmolr);

	return 0;
}
/**
 * igb_rlpml_set - set maximum receive packet size
 * @adapter: board private structure
 *
 * Configure maximum receivable packet size.
 **/
static void igb_rlpml_set(struct igb_adapter *adapter)
{
	u32 max_frame_size = adapter->max_frame_size;
	struct e1000_hw *hw = &adapter->hw;
	u16 pf_id = adapter->vfs_allocated_count;

	if (adapter->vlgrp)
		max_frame_size += VLAN_TAG_SIZE;

	/* if vfs are enabled we set RLPML to the largest possible request
	 * size and set the VMOLR RLPML to the size we need */
	if (pf_id) {
		igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
		max_frame_size = MAX_JUMBO_FRAME_SIZE;
	}

	wr32(E1000_RLPML, max_frame_size);
}
static inline void igb_set_vmolr(struct igb_adapter *adapter, int vfn)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vmolr;

	/*
	 * This register exists only on 82576 and newer so if we are older then
	 * we should exit and do nothing
	 */
	if (hw->mac.type < e1000_82576)
		return;

	vmolr = rd32(E1000_VMOLR(vfn));
	vmolr |= E1000_VMOLR_AUPE |	/* Accept untagged packets */
		 E1000_VMOLR_STRVLAN;	/* Strip vlan tags */

	/* clear all bits that might not be set */
	vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);

	if (adapter->num_rx_queues > 1 && vfn == adapter->vfs_allocated_count)
		vmolr |= E1000_VMOLR_RSSE;	/* enable RSS */
	/*
	 * for VMDq only allow the VFs and pool 0 to accept broadcast and
	 * multicast packets
	 */
	if (vfn <= adapter->vfs_allocated_count)
		vmolr |= E1000_VMOLR_BAM;	/* Accept broadcast */

	wr32(E1000_VMOLR(vfn), vmolr);
}
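
/*
 * Note: in igb_set_vmolr() above, pool index vfs_allocated_count is the
 * PF's own default pool (the VF pools occupy indices below it), which is
 * why RSS is enabled only on exactly that pool and why broadcast
 * acceptance stops after it.
 */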
/**
 * igb_configure_rx_ring - Configure a receive ring after Reset
 * @adapter: board private structure
 * @ring: receive ring to be configured
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
void igb_configure_rx_ring(struct igb_adapter *adapter,
			   struct igb_ring *ring)
{
	struct e1000_hw *hw = &adapter->hw;
	u64 rdba = ring->dma;
	int reg_idx = ring->reg_idx;
	u32 srrctl, rxdctl;

	/* disable the queue */
	rxdctl = rd32(E1000_RXDCTL(reg_idx));
	wr32(E1000_RXDCTL(reg_idx),
	     rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);

	/* Set DMA base address registers */
	wr32(E1000_RDBAL(reg_idx),
	     rdba & 0x00000000ffffffffULL);
	wr32(E1000_RDBAH(reg_idx), rdba >> 32);
	wr32(E1000_RDLEN(reg_idx),
	     ring->count * sizeof(union e1000_adv_rx_desc));

	/* initialize head and tail */
	ring->head = hw->hw_addr + E1000_RDH(reg_idx);
	ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
	writel(0, ring->head);
	writel(0, ring->tail);

	/* set descriptor configuration */
	if (ring->rx_buffer_len < IGB_RXBUFFER_1024) {
		srrctl = ALIGN(ring->rx_buffer_len, 64) <<
			 E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
		srrctl |= IGB_RXBUFFER_16384 >>
			  E1000_SRRCTL_BSIZEPKT_SHIFT;
#else
		srrctl |= (PAGE_SIZE / 2) >>
			  E1000_SRRCTL_BSIZEPKT_SHIFT;
#endif
		srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
	} else {
		srrctl = ALIGN(ring->rx_buffer_len, 1024) >>
			 E1000_SRRCTL_BSIZEPKT_SHIFT;
		srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
	}

	wr32(E1000_SRRCTL(reg_idx), srrctl);

	/* set filtering for VMDQ pools */
	igb_set_vmolr(adapter, reg_idx & 0x7);

	/* enable receive descriptor fetching */
	rxdctl = rd32(E1000_RXDCTL(reg_idx));
	rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
	rxdctl &= 0xFFF00000;
	rxdctl |= IGB_RX_PTHRESH;
	rxdctl |= IGB_RX_HTHRESH << 8;
	rxdctl |= IGB_RX_WTHRESH << 16;
	wr32(E1000_RXDCTL(reg_idx), rxdctl);
}
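
/*
 * The SRRCTL encoding above uses different granularities for its two
 * size fields: the packet buffer size is expressed in 1 KB units (hence
 * the ALIGN to 1024 before the BSIZEPKT shift), while the header buffer
 * size used by the header-split descriptor type is in 64-byte units
 * (the ALIGN to 64 before the BSIZEHDRSIZE shift).
 */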
/**
 * igb_configure_rx - Configure receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void igb_configure_rx(struct igb_adapter *adapter)
{
	int i;

	/* set UTA to appropriate mode */
	igb_set_uta(adapter);

	/* set the correct pool for the PF default MAC address in entry 0 */
	igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
			 adapter->vfs_allocated_count);

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	for (i = 0; i < adapter->num_rx_queues; i++)
		igb_configure_rx_ring(adapter, &adapter->rx_ring[i]);
}
/**
 * igb_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void igb_free_tx_resources(struct igb_ring *tx_ring)
{
	igb_clean_tx_ring(tx_ring);

	vfree(tx_ring->buffer_info);
	tx_ring->buffer_info = NULL;

	/* if not set, then don't free */
	if (!tx_ring->desc)
		return;

	pci_free_consistent(tx_ring->pdev, tx_ring->size,
			    tx_ring->desc, tx_ring->dma);

	tx_ring->desc = NULL;
}
/**
 * igb_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
static void igb_free_all_tx_resources(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igb_free_tx_resources(&adapter->tx_ring[i]);
}
void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring,
				    struct igb_buffer *buffer_info)
{
	buffer_info->dma = 0;
	if (buffer_info->skb) {
		skb_dma_unmap(&tx_ring->pdev->dev,
			      buffer_info->skb,
			      DMA_TO_DEVICE);
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
	buffer_info->time_stamp = 0;
	/* buffer_info must be completely set up in the transmit path */
}
/**
 * igb_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/
static void igb_clean_tx_ring(struct igb_ring *tx_ring)
{
	struct igb_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	if (!tx_ring->buffer_info)
		return;
	/* Free all the Tx ring sk_buffs */

	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
	}

	size = sizeof(struct igb_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
}
/**
 * igb_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igb_clean_tx_ring(&adapter->tx_ring[i]);
}
/**
 * igb_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void igb_free_rx_resources(struct igb_ring *rx_ring)
{
	igb_clean_rx_ring(rx_ring);

	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;

	/* if not set, then don't free */
	if (!rx_ring->desc)
		return;

	pci_free_consistent(rx_ring->pdev, rx_ring->size,
			    rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}
/**
 * igb_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
static void igb_free_all_rx_resources(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		igb_free_rx_resources(&adapter->rx_ring[i]);
}
/**
 * igb_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 **/
static void igb_clean_rx_ring(struct igb_ring *rx_ring)
{
	struct igb_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	if (!rx_ring->buffer_info)
		return;
	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if (buffer_info->dma) {
			pci_unmap_single(rx_ring->pdev,
					 buffer_info->dma,
					 rx_ring->rx_buffer_len,
					 PCI_DMA_FROMDEVICE);
			buffer_info->dma = 0;
		}

		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}
		if (buffer_info->page_dma) {
			pci_unmap_page(rx_ring->pdev,
				       buffer_info->page_dma,
				       PAGE_SIZE / 2,
				       PCI_DMA_FROMDEVICE);
			buffer_info->page_dma = 0;
		}
		if (buffer_info->page) {
			put_page(buffer_info->page);
			buffer_info->page = NULL;
			buffer_info->page_offset = 0;
		}
	}

	size = sizeof(struct igb_buffer) * rx_ring->count;
	memset(rx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}
/**
 * igb_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		igb_clean_rx_ring(&adapter->rx_ring[i]);
}
/**
 * igb_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int igb_set_mac(struct net_device *netdev, void *p)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	/* set the correct pool for the new PF MAC address in entry 0 */
	igb_rar_set_qsel(adapter, hw->mac.addr, 0,
			 adapter->vfs_allocated_count);

	return 0;
}
/**
 * igb_write_mc_addr_list - write multicast addresses to MTA
 * @netdev: network interface device structure
 *
 * Writes multicast address list to the MTA hash table.
 * Returns: -ENOMEM on failure
 *          0 on no addresses written
 *          X on writing X addresses to MTA
 **/
static int igb_write_mc_addr_list(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct dev_mc_list *mc_ptr = netdev->mc_list;
	u8  *mta_list;
	u32 vmolr = 0;
	int i;

	if (!netdev->mc_count) {
		/* nothing to program, so clear mc list */
		igb_update_mc_addr_list(hw, NULL, 0);
		igb_restore_vf_multicasts(adapter);
		return 0;
	}

	mta_list = kzalloc(netdev->mc_count * 6, GFP_ATOMIC);
	if (!mta_list)
		return -ENOMEM;

	/* set vmolr receive overflow multicast bit */
	vmolr |= E1000_VMOLR_ROMPE;

	/* The shared function expects a packed array of only addresses. */
	mc_ptr = netdev->mc_list;

	for (i = 0; i < netdev->mc_count; i++) {
		if (!mc_ptr)
			break;
		memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
		mc_ptr = mc_ptr->next;
	}
	igb_update_mc_addr_list(hw, mta_list, i);
	kfree(mta_list);

	return netdev->mc_count;
}
/**
 * igb_write_uc_addr_list - write unicast addresses to RAR table
 * @netdev: network interface device structure
 *
 * Writes unicast address list to the RAR table.
 * Returns: -ENOMEM on failure/insufficient address space
 *          0 on no addresses written
 *          X on writing X addresses to the RAR table
 **/
static int igb_write_uc_addr_list(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	unsigned int vfn = adapter->vfs_allocated_count;
	unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
	int count = 0;

	/* return ENOMEM indicating insufficient memory for addresses */
	if (netdev->uc.count > rar_entries)
		return -ENOMEM;

	if (netdev->uc.count && rar_entries) {
		struct netdev_hw_addr *ha;
		list_for_each_entry(ha, &netdev->uc.list, list) {
			if (!rar_entries)
				break;
			igb_rar_set_qsel(adapter, ha->addr,
					 rar_entries--,
					 vfn);
			count++;
		}
	}
	/* write the addresses in reverse order to avoid write combining */
	for (; rar_entries > 0 ; rar_entries--) {
		wr32(E1000_RAH(rar_entries), 0);
		wr32(E1000_RAL(rar_entries), 0);
	}
	wrfl();

	return count;
}
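
/*
 * Each RAR entry is a RAL/RAH register pair; zeroing RAH clears the
 * address-valid bit, so the loop above is enough to disable the unused
 * filters without touching entry 0, which always holds the PF MAC.
 */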
/**
 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_rx_mode entry point is called whenever the unicast or multicast
 * address lists or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper unicast, multicast,
 * promiscuous mode, and all-multi behavior.
 **/
static void igb_set_rx_mode(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	unsigned int vfn = adapter->vfs_allocated_count;
	u32 rctl, vmolr = 0;
	int count;

	/* Check for Promiscuous and All Multicast modes */
	rctl = rd32(E1000_RCTL);

	/* clear the affected bits */
	rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);

	if (netdev->flags & IFF_PROMISC) {
		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
		vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			rctl |= E1000_RCTL_MPE;
			vmolr |= E1000_VMOLR_MPME;
		} else {
			/*
			 * Write addresses to the MTA, if the attempt fails
			 * then we should just turn on promiscuous mode so
			 * that we can at least receive multicast traffic
			 */
			count = igb_write_mc_addr_list(netdev);
			if (count < 0) {
				rctl |= E1000_RCTL_MPE;
				vmolr |= E1000_VMOLR_MPME;
			} else if (count) {
				vmolr |= E1000_VMOLR_ROMPE;
			}
		}
		/*
		 * Write addresses to available RAR registers, if there is not
		 * sufficient space to store all the addresses then enable
		 * unicast promiscuous mode
		 */
		count = igb_write_uc_addr_list(netdev);
		if (count < 0) {
			rctl |= E1000_RCTL_UPE;
			vmolr |= E1000_VMOLR_ROPE;
		}
		rctl |= E1000_RCTL_VFE;
	}
	wr32(E1000_RCTL, rctl);

	/*
	 * In order to support SR-IOV and eventually VMDq it is necessary to set
	 * the VMOLR to enable the appropriate modes.  Without this workaround
	 * we will have issues with VLAN tag stripping not being done for frames
	 * that are only arriving because we are the default pool
	 */
	if (hw->mac.type < e1000_82576)
		return;

	vmolr |= rd32(E1000_VMOLR(vfn)) &
		 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
	wr32(E1000_VMOLR(vfn), vmolr);
	igb_restore_vf_multicasts(adapter);
}
/* Need to wait a few seconds after link up to get diagnostic information from
 * the phy */
static void igb_update_phy_info(unsigned long data)
{
	struct igb_adapter *adapter = (struct igb_adapter *) data;
	igb_get_phy_info(&adapter->hw);
}
/**
 * igb_has_link - check shared code for link and determine up/down
 * @adapter: pointer to driver private info
 **/
static bool igb_has_link(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	bool link_active = false;
	s32 ret_val = 0;

	/* get_link_status is set on LSC (link status) interrupt or
	 * rx sequence error interrupt.  get_link_status will stay
	 * false until the e1000_check_for_link establishes link
	 * for copper adapters ONLY
	 */
	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		if (hw->mac.get_link_status) {
			ret_val = hw->mac.ops.check_for_link(hw);
			link_active = !hw->mac.get_link_status;
		} else {
			link_active = true;
		}
		break;
	case e1000_media_type_internal_serdes:
		ret_val = hw->mac.ops.check_for_link(hw);
		link_active = hw->mac.serdes_has_link;
		break;
	default:
	case e1000_media_type_unknown:
		break;
	}

	return link_active;
}
/**
 * igb_watchdog - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void igb_watchdog(unsigned long data)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	/* Do the rest outside of interrupt context */
	schedule_work(&adapter->watchdog_task);
}

static void igb_watchdog_task(struct work_struct *work)
{
	struct igb_adapter *adapter = container_of(work,
			struct igb_adapter, watchdog_task);
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct igb_ring *tx_ring = adapter->tx_ring;
	u32 link;
	int i;

	link = igb_has_link(adapter);
	if ((netif_carrier_ok(netdev)) && link)
		goto link_up;

	if (link) {
		if (!netif_carrier_ok(netdev)) {
			u32 ctrl;
			hw->mac.ops.get_speed_and_duplex(&adapter->hw,
							 &adapter->link_speed,
							 &adapter->link_duplex);

			ctrl = rd32(E1000_CTRL);
			/* Links status message must follow this format */
			printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, "
			       "Flow Control: %s\n",
			       netdev->name,
			       adapter->link_speed,
			       adapter->link_duplex == FULL_DUPLEX ?
			       "Full Duplex" : "Half Duplex",
			       ((ctrl & E1000_CTRL_TFCE) && (ctrl &
			       E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
			       E1000_CTRL_RFCE) ? "RX" : ((ctrl &
			       E1000_CTRL_TFCE) ? "TX" : "None")));

			/* tweak tx_queue_len according to speed/duplex and
			 * adjust the timeout factor */
			netdev->tx_queue_len = adapter->tx_queue_len;
			adapter->tx_timeout_factor = 1;
			switch (adapter->link_speed) {
			case SPEED_10:
				netdev->tx_queue_len = 10;
				adapter->tx_timeout_factor = 14;
				break;
			case SPEED_100:
				netdev->tx_queue_len = 100;
				/* maybe add some timeout factor ? */
				break;
			}

			netif_carrier_on(netdev);

			igb_ping_all_vfs(adapter);

			/* link state has changed, schedule phy info update */
			if (!test_bit(__IGB_DOWN, &adapter->state))
				mod_timer(&adapter->phy_info_timer,
					  round_jiffies(jiffies + 2 * HZ));
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			adapter->link_speed = 0;
			adapter->link_duplex = 0;
			/* Links status message must follow this format */
			printk(KERN_INFO "igb: %s NIC Link is Down\n",
			       netdev->name);
			netif_carrier_off(netdev);

			igb_ping_all_vfs(adapter);

			/* link state has changed, schedule phy info update */
			if (!test_bit(__IGB_DOWN, &adapter->state))
				mod_timer(&adapter->phy_info_timer,
					  round_jiffies(jiffies + 2 * HZ));
		}
	}

link_up:
	igb_update_stats(adapter);

	hw->mac.tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
	adapter->tpt_old = adapter->stats.tpt;
	hw->mac.collision_delta = adapter->stats.colc - adapter->colc_old;
	adapter->colc_old = adapter->stats.colc;

	adapter->gorc = adapter->stats.gorc - adapter->gorc_old;
	adapter->gorc_old = adapter->stats.gorc;
	adapter->gotc = adapter->stats.gotc - adapter->gotc_old;
	adapter->gotc_old = adapter->stats.gotc;

	igb_update_adaptive(&adapter->hw);

	if (!netif_carrier_ok(netdev)) {
		if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context). */
			adapter->tx_timeout_count++;
			schedule_work(&adapter->reset_task);
			/* return immediately since reset is imminent */
			return;
		}
	}

	/* Force detection of hung controller every watchdog period */
	for (i = 0; i < adapter->num_tx_queues; i++)
		adapter->tx_ring[i].detect_tx_hung = true;

	/* Cause software interrupt to ensure rx ring is cleaned */
	if (adapter->msix_entries) {
		u32 eics = 0;
		for (i = 0; i < adapter->num_q_vectors; i++) {
			struct igb_q_vector *q_vector = adapter->q_vector[i];
			eics |= q_vector->eims_value;
		}
		wr32(E1000_EICS, eics);
	} else {
		wr32(E1000_ICS, E1000_ICS_RXDMT0);
	}

	/* Reset the timer */
	if (!test_bit(__IGB_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer,
			  round_jiffies(jiffies + 2 * HZ));
}
enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};
/**
 * igb_update_ring_itr - update the dynamic ITR value based on packet size
 *
 *      Stores a new ITR value based strictly on packet size.  This
 *      algorithm is less sophisticated than that used in igb_update_itr,
 *      due to the difficulty of synchronizing statistics across multiple
 *      receive rings.  The divisors and thresholds used by this function
 *      were determined based on theoretical maximum wire speed and testing
 *      data, in order to minimize response time while increasing bulk
 *      throughput.
 *      This functionality is controlled by the InterruptThrottleRate module
 *      parameter (see igb_param.c)
 *      NOTE:  This function is called only when operating in a multiqueue
 *             receive environment.
 * @q_vector: pointer to q_vector
 **/
static void igb_update_ring_itr(struct igb_q_vector *q_vector)
{
	int new_val = q_vector->itr_val;
	int avg_wire_size = 0;
	struct igb_adapter *adapter = q_vector->adapter;

	/* For non-gigabit speeds, just fix the interrupt rate at 4000
	 * ints/sec - ITR timer value of 120 ticks.
	 */
	if (adapter->link_speed != SPEED_1000) {
		new_val = 976;
		goto set_itr_val;
	}

	if (q_vector->rx_ring && q_vector->rx_ring->total_packets) {
		struct igb_ring *ring = q_vector->rx_ring;
		avg_wire_size = ring->total_bytes / ring->total_packets;
	}

	if (q_vector->tx_ring && q_vector->tx_ring->total_packets) {
		struct igb_ring *ring = q_vector->tx_ring;
		avg_wire_size = max_t(u32, avg_wire_size,
				      (ring->total_bytes /
				       ring->total_packets));
	}

	/* if avg_wire_size isn't set no work was done */
	if (!avg_wire_size)
		goto clear_counts;

	/* Add 24 bytes to size to account for CRC, preamble, and gap */
	avg_wire_size += 24;

	/* Don't starve jumbo frames */
	avg_wire_size = min(avg_wire_size, 3000);

	/* Give a little boost to mid-size frames */
	if ((avg_wire_size > 300) && (avg_wire_size < 1200))
		new_val = avg_wire_size / 3;
	else
		new_val = avg_wire_size / 2;

set_itr_val:
	if (new_val != q_vector->itr_val) {
		q_vector->itr_val = new_val;
		q_vector->set_itr = 1;
	}
clear_counts:
	if (q_vector->rx_ring) {
		q_vector->rx_ring->total_bytes = 0;
		q_vector->rx_ring->total_packets = 0;
	}
	if (q_vector->tx_ring) {
		q_vector->tx_ring->total_bytes = 0;
		q_vector->tx_ring->total_packets = 0;
	}
}
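
/*
 * Worked example for igb_update_ring_itr(): a stream of 1514-byte frames
 * averages 1514 + 24 = 1538 bytes on the wire, above the 300-1200
 * mid-size window, so new_val = 1538 / 2 = 769; minimum-size 60-byte
 * frames give (60 + 24) / 2 = 42, i.e. a far more aggressive interrupt
 * rate for small-packet workloads.
 */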
/**
 * igb_update_itr - update the dynamic ITR value based on statistics
 *      Stores a new ITR value based on packets and byte
 *      counts during the last interrupt.  The advantage of per interrupt
 *      computation is faster updates and more accurate ITR for the current
 *      traffic pattern.  Constants in this function were computed
 *      based on theoretical maximum wire speed and thresholds were set based
 *      on testing data as well as attempting to minimize response time
 *      while increasing bulk throughput.
 *      this functionality is controlled by the InterruptThrottleRate module
 *      parameter (see igb_param.c)
 *      NOTE:  These calculations are only valid when operating in a single-
 *             queue environment.
 * @adapter: pointer to adapter
 * @itr_setting: current q_vector->itr_val
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 **/
static unsigned int igb_update_itr(struct igb_adapter *adapter, u16 itr_setting,
				   int packets, int bytes)
{
	unsigned int retval = itr_setting;

	if (packets == 0)
		goto update_itr_done;

	switch (itr_setting) {
	case lowest_latency:
		/* handle TSO and jumbo frames */
		if (bytes/packets > 8000)
			retval = bulk_latency;
		else if ((packets < 5) && (bytes > 512))
			retval = low_latency;
		break;
	case low_latency:  /* 50 usec aka 20000 ints/s */
		if (bytes > 10000) {
			/* this if handles the TSO accounting */
			if (bytes/packets > 8000) {
				retval = bulk_latency;
			} else if ((packets < 10) || ((bytes/packets) > 1200)) {
				retval = bulk_latency;
			} else if ((packets > 35)) {
				retval = lowest_latency;
			}
		} else if (bytes/packets > 2000) {
			retval = bulk_latency;
		} else if (packets <= 2 && bytes < 512) {
			retval = lowest_latency;
		}
		break;
	case bulk_latency: /* 250 usec aka 4000 ints/s */
		if (bytes > 25000) {
			if (packets > 35)
				retval = low_latency;
		} else if (bytes < 1500) {
			retval = low_latency;
		}
		break;
	}

update_itr_done:
	return retval;
}
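
/*
 * Worked example for igb_update_itr(): at low_latency, an interval of 20
 * packets totalling 1200 bytes stays at low_latency (1200 <= 10000,
 * 1200/20 = 60 <= 2000, and packets > 2); 2 packets totalling 400 bytes
 * drop to lowest_latency; and a 9000-byte burst in a single packet
 * (9000/1 > 2000) escalates to bulk_latency.
 */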
static void igb_set_itr(struct igb_adapter *adapter)
{
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	u16 current_itr;
	u32 new_itr = q_vector->itr_val;

	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
	if (adapter->link_speed != SPEED_1000) {
		current_itr = 0;
		new_itr = 4000;
		goto set_itr_now;
	}

	adapter->rx_itr = igb_update_itr(adapter,
				    adapter->rx_itr,
				    adapter->rx_ring->total_packets,
				    adapter->rx_ring->total_bytes);

	adapter->tx_itr = igb_update_itr(adapter,
				    adapter->tx_itr,
				    adapter->tx_ring->total_packets,
				    adapter->tx_ring->total_bytes);
	current_itr = max(adapter->rx_itr, adapter->tx_itr);

	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (adapter->rx_itr_setting == 3 && current_itr == lowest_latency)
		current_itr = low_latency;

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = 56;  /* aka 70,000 ints/sec */
		break;
	case low_latency:
		new_itr = 196; /* aka 20,000 ints/sec */
		break;
	case bulk_latency:
		new_itr = 980; /* aka 4,000 ints/sec */
		break;
	default:
		break;
	}

set_itr_now:
	adapter->rx_ring->total_bytes = 0;
	adapter->rx_ring->total_packets = 0;
	adapter->tx_ring->total_bytes = 0;
	adapter->tx_ring->total_packets = 0;

	if (new_itr != q_vector->itr_val) {
		/* this attempts to bias the interrupt rate towards Bulk
		 * by adding intermediate steps when interrupt rate is
		 * increasing */
		new_itr = new_itr > q_vector->itr_val ?
			  max((new_itr * q_vector->itr_val) /
			      (new_itr + (q_vector->itr_val >> 2)),
			      new_itr) :
			  new_itr;
		/* Don't write the value here; it resets the adapter's
		 * internal timer, and causes us to delay far longer than
		 * we should between interrupts.  Instead, we write the ITR
		 * value at the beginning of the next interrupt so the timing
		 * ends up being correct.
		 */
		q_vector->itr_val = new_itr;
		q_vector->set_itr = 1;
	}
}
#define IGB_TX_FLAGS_CSUM		0x00000001
#define IGB_TX_FLAGS_VLAN		0x00000002
#define IGB_TX_FLAGS_TSO		0x00000004
#define IGB_TX_FLAGS_IPV4		0x00000008
#define IGB_TX_FLAGS_TSTAMP		0x00000010
#define IGB_TX_FLAGS_VLAN_MASK		0xffff0000
#define IGB_TX_FLAGS_VLAN_SHIFT		16
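
/*
 * The transmit flag word is split: the low 16 bits carry the feature
 * flags defined above, and the high 16 bits (IGB_TX_FLAGS_VLAN_MASK)
 * carry the 802.1Q tag, inserted with IGB_TX_FLAGS_VLAN_SHIFT in the
 * transmit path below.
 */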
static inline int igb_tso_adv(struct igb_ring *tx_ring,
			      struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
{
	struct e1000_adv_tx_context_desc *context_desc;
	unsigned int i;
	int err;
	struct igb_buffer *buffer_info;
	u32 info = 0, tu_cmd = 0;
	u32 mss_l4len_idx, l4len;

	if (skb_header_cloned(skb)) {
		err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (err)
			return err;
	}

	l4len = tcp_hdrlen(skb);
	*hdr_len += l4len;

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);
		iph->tot_len = 0;
		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
							 iph->daddr, 0,
							 IPPROTO_TCP,
							 0);
	} else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						       &ipv6_hdr(skb)->daddr,
						       0, IPPROTO_TCP, 0);
	}

	i = tx_ring->next_to_use;

	buffer_info = &tx_ring->buffer_info[i];
	context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
	/* VLAN MACLEN IPLEN */
	if (tx_flags & IGB_TX_FLAGS_VLAN)
		info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
	info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
	*hdr_len += skb_network_offset(skb);
	info |= skb_network_header_len(skb);
	*hdr_len += skb_network_header_len(skb);
	context_desc->vlan_macip_lens = cpu_to_le32(info);

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);

	if (skb->protocol == htons(ETH_P_IP))
		tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
	tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;

	context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);

	/* MSS L4LEN IDX */
	mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
	mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);

	/* For 82575, context index must be unique per ring. */
	if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
		mss_l4len_idx |= tx_ring->reg_idx << 4;

	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
	context_desc->seqnum_seed = 0;

	buffer_info->time_stamp = jiffies;
	buffer_info->next_to_watch = i;
	buffer_info->dma = 0;
	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	return true;
}
static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring,
				   struct sk_buff *skb, u32 tx_flags)
{
	struct e1000_adv_tx_context_desc *context_desc;
	struct pci_dev *pdev = tx_ring->pdev;
	struct igb_buffer *buffer_info;
	u32 info = 0, tu_cmd = 0;
	unsigned int i;

	if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
	    (tx_flags & IGB_TX_FLAGS_VLAN)) {
		i = tx_ring->next_to_use;
		buffer_info = &tx_ring->buffer_info[i];
		context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);

		if (tx_flags & IGB_TX_FLAGS_VLAN)
			info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
		info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			info |= skb_network_header_len(skb);

		context_desc->vlan_macip_lens = cpu_to_le32(info);

		tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			__be16 protocol;

			if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
				const struct vlan_ethhdr *vhdr =
					(const struct vlan_ethhdr *)skb->data;

				protocol = vhdr->h_vlan_encapsulated_proto;
			} else {
				protocol = skb->protocol;
			}

			switch (protocol) {
			case cpu_to_be16(ETH_P_IP):
				tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
				else if (ip_hdr(skb)->protocol == IPPROTO_SCTP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
				break;
			case cpu_to_be16(ETH_P_IPV6):
				/* XXX what about other V6 headers?? */
				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
				else if (ipv6_hdr(skb)->nexthdr == IPPROTO_SCTP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
				break;
			default:
				if (unlikely(net_ratelimit()))
					dev_warn(&pdev->dev,
					    "partial checksum but proto=%x!\n",
					    skb->protocol);
				break;
			}
		}

		context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
		context_desc->seqnum_seed = 0;
		if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
			context_desc->mss_l4len_idx =
				cpu_to_le32(tx_ring->reg_idx << 4);

		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = i;
		buffer_info->dma = 0;

		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}
	return false;
}
#define IGB_MAX_TXD_PWR	16
#define IGB_MAX_DATA_PER_TXD	(1<<IGB_MAX_TXD_PWR)

static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
				 unsigned int first)
{
	struct igb_buffer *buffer_info;
	struct pci_dev *pdev = tx_ring->pdev;
	unsigned int len = skb_headlen(skb);
	unsigned int count = 0, i;
	unsigned int f;
	dma_addr_t *map;

	i = tx_ring->next_to_use;

	if (skb_dma_map(&pdev->dev, skb, DMA_TO_DEVICE)) {
		dev_err(&pdev->dev, "TX DMA map failed\n");
		return 0;
	}

	map = skb_shinfo(skb)->dma_maps;

	buffer_info = &tx_ring->buffer_info[i];
	BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
	buffer_info->length = len;
	/* set time_stamp *before* dma to help avoid a possible race */
	buffer_info->time_stamp = jiffies;
	buffer_info->next_to_watch = i;
	buffer_info->dma = skb_shinfo(skb)->dma_head;

	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
		struct skb_frag_struct *frag;

		i++;
		if (i == tx_ring->count)
			i = 0;

		frag = &skb_shinfo(skb)->frags[f];
		len = frag->size;

		buffer_info = &tx_ring->buffer_info[i];
		BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
		buffer_info->length = len;
		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = i;
		buffer_info->dma = map[count];
		count++;
	}

	tx_ring->buffer_info[i].skb = skb;
	tx_ring->buffer_info[first].next_to_watch = i;

	return ++count;
}
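
/*
 * igb_tx_map_adv() returns the number of descriptors consumed: one for
 * the linear head plus one per page fragment.  A return of 0 signals a
 * DMA mapping failure, which the caller uses to rewind next_to_use and
 * drop the skb.
 */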
static inline void igb_tx_queue_adv(struct igb_ring *tx_ring,
				    int tx_flags, int count, u32 paylen,
				    u8 hdr_len)
{
	union e1000_adv_tx_desc *tx_desc = NULL;
	struct igb_buffer *buffer_info;
	u32 olinfo_status = 0, cmd_type_len;
	unsigned int i;

	cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
			E1000_ADVTXD_DCMD_DEXT);

	if (tx_flags & IGB_TX_FLAGS_VLAN)
		cmd_type_len |= E1000_ADVTXD_DCMD_VLE;

	if (tx_flags & IGB_TX_FLAGS_TSTAMP)
		cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;

	if (tx_flags & IGB_TX_FLAGS_TSO) {
		cmd_type_len |= E1000_ADVTXD_DCMD_TSE;

		/* insert tcp checksum */
		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;

		/* insert ip checksum */
		if (tx_flags & IGB_TX_FLAGS_IPV4)
			olinfo_status |= E1000_TXD_POPTS_IXSM << 8;

	} else if (tx_flags & IGB_TX_FLAGS_CSUM) {
		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
	}

	if ((tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX) &&
	    (tx_flags & (IGB_TX_FLAGS_CSUM |
			 IGB_TX_FLAGS_TSO |
			 IGB_TX_FLAGS_VLAN)))
		olinfo_status |= tx_ring->reg_idx << 4;

	olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);

	i = tx_ring->next_to_use;
	while (count--) {
		buffer_info = &tx_ring->buffer_info[i];
		tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
		tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
		tx_desc->read.cmd_type_len =
			cpu_to_le32(cmd_type_len | buffer_info->length);
		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	tx_desc->read.cmd_type_len |= cpu_to_le32(IGB_ADVTXD_DCMD);
	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64). */
	wmb();

	tx_ring->next_to_use = i;
	writel(i, tx_ring->tail);
	/* we need this if more than one processor can write to our tail
	 * at a time, it synchronizes IO on IA64/Altix systems */
	mmiowb();
}
static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
{
	struct net_device *netdev = tx_ring->netdev;

	netif_stop_subqueue(netdev, tx_ring->queue_index);

	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available. */
	if (igb_desc_unused(tx_ring) < size)
		return -EBUSY;

	/* A reprieve! */
	netif_wake_subqueue(netdev, tx_ring->queue_index);
	tx_ring->tx_stats.restart_queue++;
	return 0;
}

static int igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
{
	if (igb_desc_unused(tx_ring) >= size)
		return 0;
	return __igb_maybe_stop_tx(tx_ring, size);
}
netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
				    struct igb_ring *tx_ring)
{
	struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
	unsigned int first;
	unsigned int tx_flags = 0;
	u8 hdr_len = 0;
	int tso = 0;
	int count;
	union skb_shared_tx *shtx = skb_tx(skb);

	/* need: 1 descriptor per page,
	 *       + 2 desc gap to keep tail from touching head,
	 *       + 1 desc for skb->data,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time */
	if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
		/* this is a hard error */
		return NETDEV_TX_BUSY;
	}

	if (unlikely(shtx->hardware)) {
		shtx->in_progress = 1;
		tx_flags |= IGB_TX_FLAGS_TSTAMP;
	}

	if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
		tx_flags |= IGB_TX_FLAGS_VLAN;
		tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
	}

	if (skb->protocol == htons(ETH_P_IP))
		tx_flags |= IGB_TX_FLAGS_IPV4;

	first = tx_ring->next_to_use;
	if (skb_is_gso(skb)) {
		tso = igb_tso_adv(tx_ring, skb, tx_flags, &hdr_len);
		if (tso < 0) {
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}

	if (tso)
		tx_flags |= IGB_TX_FLAGS_TSO;
	else if (igb_tx_csum_adv(tx_ring, skb, tx_flags) &&
		 (skb->ip_summed == CHECKSUM_PARTIAL))
		tx_flags |= IGB_TX_FLAGS_CSUM;

	/*
	 * count reflects descriptors mapped, if 0 then mapping error
	 * has occurred and we need to rewind the descriptor queue
	 */
	count = igb_tx_map_adv(tx_ring, skb, first);
	if (!count) {
		dev_kfree_skb_any(skb);
		tx_ring->buffer_info[first].time_stamp = 0;
		tx_ring->next_to_use = first;
		return NETDEV_TX_OK;
	}

	igb_tx_queue_adv(tx_ring, tx_flags, count, skb->len, hdr_len);

	/* Make sure there is space in the ring for the next send. */
	igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);

	return NETDEV_TX_OK;
}
static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
				      struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct igb_ring *tx_ring;
	int r_idx = 0;

	if (test_bit(__IGB_DOWN, &adapter->state)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	r_idx = skb->queue_mapping & (IGB_ABS_MAX_TX_QUEUES - 1);
	tx_ring = adapter->multi_tx_table[r_idx];

	/* This goes back to the question of how to logically map a tx queue
	 * to a flow.  Right now, performance is impacted slightly negatively
	 * if using multiple tx queues.  If the stack breaks away from a
	 * single qdisc implementation, we can look at this again. */
	return igb_xmit_frame_ring_adv(skb, tx_ring);
}
/**
 * igb_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void igb_tx_timeout(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/* Do the reset outside of interrupt context */
	adapter->tx_timeout_count++;

	schedule_work(&adapter->reset_task);
	wr32(E1000_EICS,
	     (adapter->eims_enable_mask & ~adapter->eims_other));
}
static void igb_reset_task(struct work_struct *work)
{
	struct igb_adapter *adapter;
	adapter = container_of(work, struct igb_adapter, reset_task);

	igb_reinit_locked(adapter);
}
/**
 * igb_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the timer callback.
 **/
static struct net_device_stats *igb_get_stats(struct net_device *netdev)
{
	/* only return the current stats */
	return &netdev->stats;
}
/**
 * igb_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int igb_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
	u32 rx_buffer_len, i;

	if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
	    (max_frame > MAX_JUMBO_FRAME_SIZE)) {
		dev_err(&adapter->pdev->dev, "Invalid MTU setting\n");
		return -EINVAL;
	}

	if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
		dev_err(&adapter->pdev->dev, "MTU > 9216 not supported.\n");
		return -EINVAL;
	}

	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		msleep(1);

	/* igb_down has a dependency on max_frame_size */
	adapter->max_frame_size = max_frame;
	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
	 * means we reserve 2 more, this pushes us to allocate from the next
	 * larger slab size.
	 * i.e. RXBUFFER_2048 --> size-4096 slab
	 */

	if (max_frame <= IGB_RXBUFFER_1024)
		rx_buffer_len = IGB_RXBUFFER_1024;
	else if (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)
		rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
	else
		rx_buffer_len = IGB_RXBUFFER_128;

	if (netif_running(netdev))
		igb_down(adapter);

	dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

	for (i = 0; i < adapter->num_rx_queues; i++)
		adapter->rx_ring[i].rx_buffer_len = rx_buffer_len;

	if (netif_running(netdev))
		igb_up(adapter);
	else
		igb_reset(adapter);

	clear_bit(__IGB_RESETTING, &adapter->state);

	return 0;
}
/**
 * igb_update_stats - Update the board statistics counters
 * @adapter: board private structure
 **/
void igb_update_stats(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	u32 rnbc;
	u16 phy_tmp;
	int i;
	u64 bytes, packets;

#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF

	/*
	 * Prevent stats update while adapter is being reset, or if the pci
	 * connection is down.
	 */
	if (adapter->link_speed == 0)
		return;
	if (pci_channel_offline(pdev))
		return;

	bytes = 0;
	packets = 0;
	for (i = 0; i < adapter->num_rx_queues; i++) {
		u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF;
		adapter->rx_ring[i].rx_stats.drops += rqdpc_tmp;
		netdev->stats.rx_fifo_errors += rqdpc_tmp;
		bytes += adapter->rx_ring[i].rx_stats.bytes;
		packets += adapter->rx_ring[i].rx_stats.packets;
	}

	netdev->stats.rx_bytes = bytes;
	netdev->stats.rx_packets = packets;

	bytes = 0;
	packets = 0;
	for (i = 0; i < adapter->num_tx_queues; i++) {
		bytes += adapter->tx_ring[i].tx_stats.bytes;
		packets += adapter->tx_ring[i].tx_stats.packets;
	}
	netdev->stats.tx_bytes = bytes;
	netdev->stats.tx_packets = packets;

	/* read stats registers */
	adapter->stats.crcerrs += rd32(E1000_CRCERRS);
	adapter->stats.gprc += rd32(E1000_GPRC);
	adapter->stats.gorc += rd32(E1000_GORCL);
	rd32(E1000_GORCH); /* clear GORCL */
	adapter->stats.bprc += rd32(E1000_BPRC);
	adapter->stats.mprc += rd32(E1000_MPRC);
	adapter->stats.roc += rd32(E1000_ROC);

	adapter->stats.prc64 += rd32(E1000_PRC64);
	adapter->stats.prc127 += rd32(E1000_PRC127);
	adapter->stats.prc255 += rd32(E1000_PRC255);
	adapter->stats.prc511 += rd32(E1000_PRC511);
	adapter->stats.prc1023 += rd32(E1000_PRC1023);
	adapter->stats.prc1522 += rd32(E1000_PRC1522);
	adapter->stats.symerrs += rd32(E1000_SYMERRS);
	adapter->stats.sec += rd32(E1000_SEC);

	adapter->stats.mpc += rd32(E1000_MPC);
	adapter->stats.scc += rd32(E1000_SCC);
	adapter->stats.ecol += rd32(E1000_ECOL);
	adapter->stats.mcc += rd32(E1000_MCC);
	adapter->stats.latecol += rd32(E1000_LATECOL);
	adapter->stats.dc += rd32(E1000_DC);
	adapter->stats.rlec += rd32(E1000_RLEC);
	adapter->stats.xonrxc += rd32(E1000_XONRXC);
	adapter->stats.xontxc += rd32(E1000_XONTXC);
	adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
	adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
	adapter->stats.fcruc += rd32(E1000_FCRUC);
	adapter->stats.gptc += rd32(E1000_GPTC);
	adapter->stats.gotc += rd32(E1000_GOTCL);
	rd32(E1000_GOTCH); /* clear GOTCL */
	rnbc = rd32(E1000_RNBC);
	adapter->stats.rnbc += rnbc;
	netdev->stats.rx_fifo_errors += rnbc;
	adapter->stats.ruc += rd32(E1000_RUC);
	adapter->stats.rfc += rd32(E1000_RFC);
	adapter->stats.rjc += rd32(E1000_RJC);
	adapter->stats.tor += rd32(E1000_TORH);
	adapter->stats.tot += rd32(E1000_TOTH);
	adapter->stats.tpr += rd32(E1000_TPR);

	adapter->stats.ptc64 += rd32(E1000_PTC64);
	adapter->stats.ptc127 += rd32(E1000_PTC127);
	adapter->stats.ptc255 += rd32(E1000_PTC255);
	adapter->stats.ptc511 += rd32(E1000_PTC511);
	adapter->stats.ptc1023 += rd32(E1000_PTC1023);
	adapter->stats.ptc1522 += rd32(E1000_PTC1522);

	adapter->stats.mptc += rd32(E1000_MPTC);
	adapter->stats.bptc += rd32(E1000_BPTC);

	/* used for adaptive IFS */
	hw->mac.tx_packet_delta = rd32(E1000_TPT);
	adapter->stats.tpt += hw->mac.tx_packet_delta;
	hw->mac.collision_delta = rd32(E1000_COLC);
	adapter->stats.colc += hw->mac.collision_delta;

	adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
	adapter->stats.rxerrc += rd32(E1000_RXERRC);
	adapter->stats.tncrs += rd32(E1000_TNCRS);
	adapter->stats.tsctc += rd32(E1000_TSCTC);
	adapter->stats.tsctfc += rd32(E1000_TSCTFC);

	adapter->stats.iac += rd32(E1000_IAC);
	adapter->stats.icrxoc += rd32(E1000_ICRXOC);
	adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
	adapter->stats.icrxatc += rd32(E1000_ICRXATC);
	adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
	adapter->stats.ictxatc += rd32(E1000_ICTXATC);
	adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
	adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
	adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);

	/* Fill out the OS statistics structure */
	netdev->stats.multicast = adapter->stats.mprc;
	netdev->stats.collisions = adapter->stats.colc;

	/* Rx Errors */

	/* RLEC on some newer hardware can be incorrect so build
	 * our own version based on RUC and ROC */
	netdev->stats.rx_errors = adapter->stats.rxerrc +
		adapter->stats.crcerrs + adapter->stats.algnerrc +
		adapter->stats.ruc + adapter->stats.roc +
		adapter->stats.cexterr;
	netdev->stats.rx_length_errors = adapter->stats.ruc +
					 adapter->stats.roc;
	netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
	netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
	netdev->stats.rx_missed_errors = adapter->stats.mpc;

	/* Tx Errors */
	netdev->stats.tx_errors = adapter->stats.ecol +
				  adapter->stats.latecol;
	netdev->stats.tx_aborted_errors = adapter->stats.ecol;
	netdev->stats.tx_window_errors = adapter->stats.latecol;
	netdev->stats.tx_carrier_errors = adapter->stats.tncrs;

	/* Tx Dropped needs to be maintained elsewhere */

	/* Phy Stats */
	if (hw->phy.media_type == e1000_media_type_copper) {
		if ((adapter->link_speed == SPEED_1000) &&
		    (!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
			phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
			adapter->phy_stats.idle_errors += phy_tmp;
		}
	}

	/* Management Stats */
	adapter->stats.mgptc += rd32(E1000_MGTPTC);
	adapter->stats.mgprc += rd32(E1000_MGTPRC);
	adapter->stats.mgpdc += rd32(E1000_MGTPDC);
}
static irqreturn_t igb_msix_other(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = rd32(E1000_ICR);
	/* reading ICR causes bit 31 of EICR to be cleared */

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	/* Check for a mailbox event */
	if (icr & E1000_ICR_VMMB)
		igb_msg_task(adapter);

	if (icr & E1000_ICR_LSC) {
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_VMMB);
	wr32(E1000_EIMS, adapter->eims_other);

	return IRQ_HANDLED;
}
static void igb_write_itr(struct igb_q_vector *q_vector)
{
	u32 itr_val = q_vector->itr_val & 0x7FFC;

	if (!q_vector->set_itr)
		return;

	if (!itr_val)
		itr_val = 0x4;

	if (q_vector->itr_shift)
		itr_val |= itr_val << q_vector->itr_shift;
	else
		itr_val |= 0x8000000;

	writel(itr_val, q_vector->itr_register);
	q_vector->set_itr = 0;
}
static irqreturn_t igb_msix_ring(int irq, void *data)
{
	struct igb_q_vector *q_vector = data;

	/* Write the ITR value calculated from the previous interrupt. */
	igb_write_itr(q_vector);

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}
#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	int cpu = get_cpu();

	if (q_vector->cpu == cpu)
		goto out_no_update;

	if (q_vector->tx_ring) {
		int q = q_vector->tx_ring->reg_idx;
		u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
		if (hw->mac.type == e1000_82575) {
			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
			dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		} else {
			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
			dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
				      E1000_DCA_TXCTRL_CPUID_SHIFT;
		}
		dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
		wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
	}
	if (q_vector->rx_ring) {
		int q = q_vector->rx_ring->reg_idx;
		u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
		if (hw->mac.type == e1000_82575) {
			dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
			dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		} else {
			dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
			dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
				      E1000_DCA_RXCTRL_CPUID_SHIFT;
		}
		dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
		dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
		dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
		wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
	}
	q_vector->cpu = cpu;
out_no_update:
	put_cpu();
}
static void igb_setup_dca(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
		return;

	/* Always use CB2 mode, difference is masked in the CB driver. */
	wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		q_vector->cpu = -1;
		igb_update_dca(q_vector);
	}
}
static int __igb_notify_dca(struct device *dev, void *data)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	unsigned long event = *(unsigned long *)data;

	switch (event) {
	case DCA_PROVIDER_ADD:
		/* if already enabled, don't do it again */
		if (adapter->flags & IGB_FLAG_DCA_ENABLED)
			break;
		/* Always use CB2 mode, difference is masked
		 * in the CB driver. */
		wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
		if (dca_add_requester(dev) == 0) {
			adapter->flags |= IGB_FLAG_DCA_ENABLED;
			dev_info(&adapter->pdev->dev, "DCA enabled\n");
			igb_setup_dca(adapter);
			break;
		}
		/* Fall Through since DCA is disabled. */
	case DCA_PROVIDER_REMOVE:
		if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
			/* without this a class_device is left
			 * hanging around in the sysfs model */
			dca_remove_requester(dev);
			dev_info(&adapter->pdev->dev, "DCA disabled\n");
			adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
			wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
		}
		break;
	}

	return 0;
}
static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
			  void *p)
{
	int ret_val;

	ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
					 __igb_notify_dca);

	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}
#endif /* CONFIG_IGB_DCA */
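/*
 * Illustrative sketch, not verbatim driver code: igb_notify_dca() is
 * normally hooked into the DCA core from the module init path, roughly:
 *
 *	static struct notifier_block dca_notifier = {
 *		.notifier_call	= igb_notify_dca,
 *		.next		= NULL,
 *		.priority	= 0,
 *	};
 *	...
 *	dca_register_notify(&dca_notifier);
 *
 * so each DCA provider add/remove event is fanned out to every bound igb
 * device through driver_for_each_device() above.
 */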
static void igb_ping_all_vfs(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ping;
	int i;

	for (i = 0 ; i < adapter->vfs_allocated_count; i++) {
		ping = E1000_PF_CONTROL_MSG;
		if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
			ping |= E1000_VT_MSGTYPE_CTS;
		igb_write_mbx(hw, &ping, 1, i);
	}
}
static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vmolr = rd32(E1000_VMOLR(vf));
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];

	vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
			    IGB_VF_FLAG_MULTI_PROMISC);
	vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);

	if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
		vmolr |= E1000_VMOLR_MPME;
		vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
		*msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
	} else {
		/*
		 * if we have hashes and we are clearing a multicast promisc
		 * flag we need to write the hashes to the MTA as this step
		 * was previously skipped
		 */
		if (vf_data->num_vf_mc_hashes > 30) {
			vmolr |= E1000_VMOLR_MPME;
		} else if (vf_data->num_vf_mc_hashes) {
			int j;
			vmolr |= E1000_VMOLR_ROMPE;
			for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
				igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
		}
	}

	wr32(E1000_VMOLR(vf), vmolr);

	/* there are flags left unprocessed, likely not supported */
	if (*msgbuf & E1000_VT_MSGINFO_MASK)
		return -EINVAL;

	return 0;
}
static int igb_set_vf_multicasts(struct igb_adapter *adapter,
				 u32 *msgbuf, u32 vf)
{
	int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
	u16 *hash_list = (u16 *)&msgbuf[1];
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	int i;

	/* salt away the number of multicast addresses assigned
	 * to this VF for later use to restore when the PF multi cast
	 * list changes
	 */
	vf_data->num_vf_mc_hashes = n;

	/* only up to 30 hash values supported */
	if (n > 30)
		n = 30;

	/* store the hashes for later use */
	for (i = 0; i < n; i++)
		vf_data->vf_mc_hashes[i] = hash_list[i];

	/* Flush and reset the mta with the new values */
	igb_set_rx_mode(adapter->netdev);

	return 0;
}
static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data;
	int i, j;

	for (i = 0; i < adapter->vfs_allocated_count; i++) {
		u32 vmolr = rd32(E1000_VMOLR(i));
		vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);

		vf_data = &adapter->vf_data[i];

		if ((vf_data->num_vf_mc_hashes > 30) ||
		    (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
			vmolr |= E1000_VMOLR_MPME;
		} else if (vf_data->num_vf_mc_hashes) {
			vmolr |= E1000_VMOLR_ROMPE;
			for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
				igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
		}

		wr32(E1000_VMOLR(i), vmolr);
	}
}
static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 pool_mask, reg, vid;
	int i;

	pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);

	/* Find the vlan filter for this id */
	for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
		reg = rd32(E1000_VLVF(i));

		/* remove the vf from the pool */
		reg &= ~pool_mask;

		/* if pool is empty then remove entry from vfta */
		if (!(reg & E1000_VLVF_POOLSEL_MASK) &&
		    (reg & E1000_VLVF_VLANID_ENABLE)) {
			vid = reg & E1000_VLVF_VLANID_MASK;
			igb_vfta_set(hw, vid, false);
			reg = 0;
		}

		wr32(E1000_VLVF(i), reg);
	}

	adapter->vf_data[vf].vlans_enabled = 0;
}
static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 reg, i;

	/* The vlvf table only exists on 82576 hardware and newer */
	if (hw->mac.type < e1000_82576)
		return -1;

	/* we only need to do this if VMDq is enabled */
	if (!adapter->vfs_allocated_count)
		return -1;

	/* Find the vlan filter for this id */
	for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
		reg = rd32(E1000_VLVF(i));
		if ((reg & E1000_VLVF_VLANID_ENABLE) &&
		    vid == (reg & E1000_VLVF_VLANID_MASK))
			break;
	}

	if (add) {
		if (i == E1000_VLVF_ARRAY_SIZE) {
			/* Did not find a matching VLAN ID entry that was
			 * enabled.  Search for a free filter entry, i.e.
			 * one without the enable bit set
			 */
			for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
				reg = rd32(E1000_VLVF(i));
				if (!(reg & E1000_VLVF_VLANID_ENABLE))
					break;
			}
		}
		if (i < E1000_VLVF_ARRAY_SIZE) {
			/* Found an enabled/available entry */
			reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);

			/* if !enabled we need to set this up in vfta */
			if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
				/* add VID to filter table */
				igb_vfta_set(hw, vid, true);
				reg |= E1000_VLVF_VLANID_ENABLE;
			}
			reg &= ~E1000_VLVF_VLANID_MASK;
			reg |= vid;
			wr32(E1000_VLVF(i), reg);

			/* do not modify RLPML for PF devices */
			if (vf >= adapter->vfs_allocated_count)
				return 0;

			if (!adapter->vf_data[vf].vlans_enabled) {
				u32 size;
				reg = rd32(E1000_VMOLR(vf));
				size = reg & E1000_VMOLR_RLPML_MASK;
				size += 4;
				reg &= ~E1000_VMOLR_RLPML_MASK;
				reg |= size;
				wr32(E1000_VMOLR(vf), reg);
			}

			adapter->vf_data[vf].vlans_enabled++;
			return 0;
		}
	} else {
		if (i < E1000_VLVF_ARRAY_SIZE) {
			/* remove vf from the pool */
			reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf));
			/* if pool is empty then remove entry from vfta */
			if (!(reg & E1000_VLVF_POOLSEL_MASK)) {
				reg = 0;
				igb_vfta_set(hw, vid, false);
			}
			wr32(E1000_VLVF(i), reg);

			/* do not modify RLPML for PF devices */
			if (vf >= adapter->vfs_allocated_count)
				return 0;

			adapter->vf_data[vf].vlans_enabled--;
			if (!adapter->vf_data[vf].vlans_enabled) {
				u32 size;
				reg = rd32(E1000_VMOLR(vf));
				size = reg & E1000_VMOLR_RLPML_MASK;
				size -= 4;
				reg &= ~E1000_VMOLR_RLPML_MASK;
				reg |= size;
				wr32(E1000_VMOLR(vf), reg);
			}
			return 0;
		}
	}
	return -1;
}
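/*
 * Note: each of the E1000_VLVF_ARRAY_SIZE shared filter entries holds one
 * VLAN ID plus a pool-select bitmask; bit (E1000_VLVF_POOLSEL_SHIFT + vf)
 * enrolls that VF (or the PF, which passes pool index
 * adapter->vfs_allocated_count) in the VLAN, and the VFTA entry is only
 * dropped once the pool mask is empty.
 */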
static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
{
	int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
	int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);

	return igb_vlvf_set(adapter, vid, add, vf);
}
static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
{
	/* clear all flags */
	adapter->vf_data[vf].flags = 0;
	adapter->vf_data[vf].last_nack = jiffies;

	/* reset offloads to defaults */
	igb_set_vmolr(adapter, vf);

	/* reset vlans for device */
	igb_clear_vf_vfta(adapter, vf);

	/* reset multicast table array for vf */
	adapter->vf_data[vf].num_vf_mc_hashes = 0;

	/* Flush and reset the mta with the new values */
	igb_set_rx_mode(adapter->netdev);
}
static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
{
	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;

	/* generate a new mac address as we were hotplug removed/added */
	random_ether_addr(vf_mac);

	/* process remaining reset events */
	igb_vf_reset(adapter, vf);
}
static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
	int rar_entry = hw->mac.rar_entry_count - (vf + 1);
	u32 reg, msgbuf[3];
	u8 *addr = (u8 *)(&msgbuf[1]);

	/* process all the same items cleared in a function level reset */
	igb_vf_reset(adapter, vf);

	/* set vf mac address */
	igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);

	/* enable transmit and receive for vf */
	reg = rd32(E1000_VFTE);
	wr32(E1000_VFTE, reg | (1 << vf));
	reg = rd32(E1000_VFRE);
	wr32(E1000_VFRE, reg | (1 << vf));

	adapter->vf_data[vf].flags = IGB_VF_FLAG_CTS;

	/* reply to reset with ack and vf mac address */
	msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
	memcpy(addr, vf_mac, 6);
	igb_write_mbx(hw, msgbuf, 3, vf);
}
static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
{
	unsigned char *addr = (char *)&msg[1];
	int err = -1;

	if (is_valid_ether_addr(addr))
		err = igb_set_vf_mac(adapter, vf, addr);

	return err;
}
static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	u32 msg = E1000_VT_MSGTYPE_NACK;

	/* if device isn't clear to send it shouldn't be reading either */
	if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
	    time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
		igb_write_mbx(hw, &msg, 1, vf);
		vf_data->last_nack = jiffies;
	}
}
static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 msgbuf[E1000_VFMAILBOX_SIZE];
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	s32 retval;

	retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf);

	if (retval)
		dev_err(&pdev->dev, "Error receiving message from VF\n");

	/* this is a message we already processed, do nothing */
	if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
		return;

	/*
	 * until the vf completes a reset it should not be
	 * allowed to start any configuration.
	 */
	if (msgbuf[0] == E1000_VF_RESET) {
		igb_vf_reset_msg(adapter, vf);
		return;
	}

	if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
		msgbuf[0] = E1000_VT_MSGTYPE_NACK;
		if (time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
			igb_write_mbx(hw, msgbuf, 1, vf);
			vf_data->last_nack = jiffies;
		}
		return;
	}

	switch ((msgbuf[0] & 0xFFFF)) {
	case E1000_VF_SET_MAC_ADDR:
		retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_PROMISC:
		retval = igb_set_vf_promisc(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_MULTICAST:
		retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_LPE:
		retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
		break;
	case E1000_VF_SET_VLAN:
		retval = igb_set_vf_vlan(adapter, msgbuf, vf);
		break;
	default:
		dev_err(&adapter->pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
		retval = -E1000_ERR_MBX;
		break;
	}

	/* notify the VF of the results of what it sent us */
	if (retval)
		msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
	else
		msgbuf[0] |= E1000_VT_MSGTYPE_ACK;

	msgbuf[0] |= E1000_VT_MSGTYPE_CTS;

	igb_write_mbx(hw, msgbuf, 1, vf);
}
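/*
 * Note on the mailbox protocol, as visible in the switch above: word 0 of
 * a VF message carries the command in its low 16 bits and auxiliary data
 * in the E1000_VT_MSGINFO field; the PF answers by OR-ing
 * E1000_VT_MSGTYPE_ACK or E1000_VT_MSGTYPE_NACK (plus CTS once the VF has
 * completed a reset) into the same word and writing it back.
 */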
static void igb_msg_task(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vf;

	for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
		/* process any reset requests */
		if (!igb_check_for_rst(hw, vf))
			igb_vf_reset_event(adapter, vf);

		/* process any messages pending */
		if (!igb_check_for_msg(hw, vf))
			igb_rcv_msg_from_vf(adapter, vf);

		/* process any acks */
		if (!igb_check_for_ack(hw, vf))
			igb_rcv_ack_from_vf(adapter, vf);
	}
}
/**
 * igb_set_uta - Set unicast filter table address
 * @adapter: board private structure
 *
 * The unicast table address is a register array of 32-bit registers.
 * The table is meant to be used in a way similar to how the MTA is used
 * however due to certain limitations in the hardware it is necessary to
 * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
 * enable bit to allow vlan tag stripping when promiscuous mode is enabled
 **/
static void igb_set_uta(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	/* The UTA table only exists on 82576 hardware and newer */
	if (hw->mac.type < e1000_82576)
		return;

	/* we only need to do this if VMDq is enabled */
	if (!adapter->vfs_allocated_count)
		return;

	for (i = 0; i < hw->mac.uta_reg_count; i++)
		array_wr32(E1000_UTA, i, ~0);
}
/**
 * igb_intr_msi - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t igb_intr_msi(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	struct e1000_hw *hw = &adapter->hw;
	/* read ICR disables interrupts using IAM */
	u32 icr = rd32(E1000_ICR);

	igb_write_itr(q_vector);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}
/**
 * igb_intr - Legacy Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t igb_intr(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	struct e1000_hw *hw = &adapter->hw;
	/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked.  No
	 * need for the IMC write */
	u32 icr = rd32(E1000_ICR);
	if (!icr)
		return IRQ_NONE;  /* Not our interrupt */

	igb_write_itr(q_vector);

	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
	 * not set, then the adapter didn't send an interrupt */
	if (!(icr & E1000_ICR_INT_ASSERTED))
		return IRQ_NONE;

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}
static inline void igb_ring_irq_enable(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;

	if ((q_vector->rx_ring && (adapter->rx_itr_setting & 3)) ||
	    (!q_vector->rx_ring && (adapter->tx_itr_setting & 3))) {
		if (!adapter->msix_entries)
			igb_set_itr(adapter);
		else
			igb_update_ring_itr(q_vector);
	}

	if (!test_bit(__IGB_DOWN, &adapter->state)) {
		if (adapter->msix_entries)
			wr32(E1000_EIMS, q_vector->eims_value);
		else
			igb_irq_enable(adapter);
	}
}
/**
 * igb_poll - NAPI Rx polling callback
 * @napi: napi polling structure
 * @budget: count of how many packets we should handle
 **/
static int igb_poll(struct napi_struct *napi, int budget)
{
	struct igb_q_vector *q_vector = container_of(napi,
						     struct igb_q_vector,
						     napi);
	int tx_clean_complete = 1, work_done = 0;

#ifdef CONFIG_IGB_DCA
	if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
		igb_update_dca(q_vector);
#endif
	if (q_vector->tx_ring)
		tx_clean_complete = igb_clean_tx_irq(q_vector);

	if (q_vector->rx_ring)
		igb_clean_rx_irq_adv(q_vector, &work_done, budget);

	if (!tx_clean_complete)
		work_done = budget;

	/* If not enough Rx work done, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		igb_ring_irq_enable(q_vector);
	}

	return work_done;
}
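/*
 * Illustrative sketch, not verbatim driver code: igb_poll() is attached
 * to each q_vector's napi context when the vectors are allocated,
 * roughly:
 *
 *	netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
 *
 * where 64 is the usual NAPI weight handed in above as @budget.
 */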
/**
 * igb_systim_to_hwtstamp - convert system time value to hw timestamp
 * @adapter: board private structure
 * @shhwtstamps: timestamp structure to update
 * @regval: unsigned 64bit system time value.
 *
 * We need to convert the system time value stored in the RX/TXSTMP registers
 * into a hwtstamp which can be used by the upper level timestamping functions
 */
static void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
				   struct skb_shared_hwtstamps *shhwtstamps,
				   u64 regval)
{
	u64 ns;

	ns = timecounter_cyc2time(&adapter->clock, regval);
	timecompare_update(&adapter->compare, ns);
	memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
	shhwtstamps->hwtstamp = ns_to_ktime(ns);
	shhwtstamps->syststamp = timecompare_transform(&adapter->compare, ns);
}
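/*
 * Note: adapter->clock is a struct timecounter layered on the hardware
 * SYSTIM cycle counter, and adapter->compare correlates that device clock
 * with the kernel's system time, which is why both the raw hwtstamp and
 * the transformed syststamp are filled in above.
 */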
/**
 * igb_tx_hwtstamp - utility function which checks for TX time stamp
 * @q_vector: pointer to q_vector containing needed info
 * @skb: packet that was just sent
 *
 * If we were asked to do hardware stamping and such a time stamp is
 * available, then it must have been for this skb here because we
 * allow only one such packet into the queue.
 */
static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb)
{
	struct igb_adapter *adapter = q_vector->adapter;
	union skb_shared_tx *shtx = skb_tx(skb);
	struct e1000_hw *hw = &adapter->hw;
	struct skb_shared_hwtstamps shhwtstamps;
	u64 regval;

	/* if skb does not support hw timestamp or TX stamp not valid exit */
	if (likely(!shtx->hardware) ||
	    !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
		return;

	regval = rd32(E1000_TXSTMPL);
	regval |= (u64)rd32(E1000_TXSTMPH) << 32;

	igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
	skb_tstamp_tx(skb, &shhwtstamps);
}
/**
 * igb_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: pointer to q_vector containing needed info
 * returns true if ring is completely cleaned
 **/
static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct igb_ring *tx_ring = q_vector->tx_ring;
	struct net_device *netdev = tx_ring->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct igb_buffer *buffer_info;
	struct sk_buff *skb;
	union e1000_adv_tx_desc *tx_desc, *eop_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int i, eop, count = 0;
	bool cleaned = false;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);

	while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
	       (count < tx_ring->count)) {
		for (cleaned = false; !cleaned; count++) {
			tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			cleaned = (i == eop);
			skb = buffer_info->skb;

			if (skb) {
				unsigned int segs, bytecount;
				/* gso_segs is currently only valid for tcp */
				segs = skb_shinfo(skb)->gso_segs ?: 1;
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * skb_headlen(skb)) +
					    skb->len;
				total_packets += segs;
				total_bytes += bytecount;

				igb_tx_hwtstamp(q_vector, skb);
			}

			igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
			tx_desc->wb.status = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}
		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

	if (unlikely(count &&
		     netif_carrier_ok(netdev) &&
		     igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
		    !(test_bit(__IGB_DOWN, &adapter->state))) {
			netif_wake_subqueue(netdev, tx_ring->queue_index);
			tx_ring->tx_stats.restart_queue++;
		}
	}

	if (tx_ring->detect_tx_hung) {
		/* Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i */
		tx_ring->detect_tx_hung = false;
		if (tx_ring->buffer_info[i].time_stamp &&
		    time_after(jiffies, tx_ring->buffer_info[i].time_stamp +
			       (adapter->tx_timeout_factor * HZ))
		    && !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {

			/* detected Tx unit hang */
			dev_err(&tx_ring->pdev->dev,
				"Detected Tx Unit Hang\n"
				"  Tx Queue             <%d>\n"
				"  TDH                  <%x>\n"
				"  TDT                  <%x>\n"
				"  next_to_use          <%x>\n"
				"  next_to_clean        <%x>\n"
				"buffer_info[next_to_clean]\n"
				"  time_stamp           <%lx>\n"
				"  next_to_watch        <%x>\n"
				"  jiffies              <%lx>\n"
				"  desc.status          <%x>\n",
				tx_ring->queue_index,
				readl(tx_ring->head),
				readl(tx_ring->tail),
				tx_ring->next_to_use,
				tx_ring->next_to_clean,
				tx_ring->buffer_info[eop].time_stamp,
				eop,
				jiffies,
				eop_desc->wb.status);
			netif_stop_subqueue(netdev, tx_ring->queue_index);
		}
	}
	tx_ring->total_bytes += total_bytes;
	tx_ring->total_packets += total_packets;
	tx_ring->tx_stats.bytes += total_bytes;
	tx_ring->tx_stats.packets += total_packets;
	return (count < tx_ring->count);
}
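/*
 * Note: the IGB_TX_QUEUE_WAKE test above adds hysteresis -- a stopped
 * subqueue is only restarted once at least that many descriptors are free
 * again, and smp_mb() orders the next_to_clean update against the
 * queue-stopped check so a wakeup cannot be lost.
 */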
/**
 * igb_receive_skb - helper function to handle rx indications
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 * @vlan_tag: vlan tag for packet
 **/
static void igb_receive_skb(struct igb_q_vector *q_vector,
			    struct sk_buff *skb,
			    u16 vlan_tag)
{
	struct igb_adapter *adapter = q_vector->adapter;

	if (vlan_tag)
		vlan_gro_receive(&q_vector->napi, adapter->vlgrp,
				 vlan_tag, skb);
	else
		napi_gro_receive(&q_vector->napi, skb);
}
static inline void igb_rx_checksum_adv(struct igb_ring *ring,
				       u32 status_err, struct sk_buff *skb)
{
	skb->ip_summed = CHECKSUM_NONE;

	/* Ignore Checksum bit is set or checksum is disabled through ethtool */
	if (!(ring->flags & IGB_RING_FLAG_RX_CSUM) ||
	    (status_err & E1000_RXD_STAT_IXSM))
		return;

	/* TCP/UDP checksum error bit is set */
	if (status_err &
	    (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
		/*
		 * work around errata with sctp packets where the TCPE aka
		 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
		 * packets, (aka let the stack check the crc32c)
		 */
		if ((skb->len == 60) &&
		    (ring->flags & IGB_RING_FLAG_RX_SCTP_CSUM))
			ring->rx_stats.csum_err++;

		/* let the stack verify checksum errors */
		return;
	}
	/* It must be a TCP or UDP packet with a valid checksum */
	if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	dev_dbg(&ring->pdev->dev, "cksum success: bits %08X\n", status_err);
}
static inline void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
				   struct sk_buff *skb)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	u64 regval;

	/*
	 * If this bit is set, then the RX registers contain the time stamp. No
	 * other packet will be time stamped until we read these registers, so
	 * read the registers to make them available again. Because only one
	 * packet can be time stamped at a time, we know that the register
	 * values must belong to this one here and therefore we don't need to
	 * compare any of the additional attributes stored for it.
	 *
	 * If nothing went wrong, then it should have a skb_shared_tx that we
	 * can turn into a skb_shared_hwtstamps.
	 */
	if (likely(!(staterr & E1000_RXDADV_STAT_TS)))
		return;
	if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
		return;

	regval = rd32(E1000_RXSTMPL);
	regval |= (u64)rd32(E1000_RXSTMPH) << 32;

	igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
}
static inline u16 igb_get_hlen(struct igb_ring *rx_ring,
			       union e1000_adv_rx_desc *rx_desc)
{
	/* HW will not DMA in data larger than the given buffer, even if it
	 * parses the (NFS, of course) header to be larger.  In that case, it
	 * fills the header buffer and spills the rest into the page.
	 */
	u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
		   E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
	if (hlen > rx_ring->rx_buffer_len)
		hlen = rx_ring->rx_buffer_len;
	return hlen;
}
static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
				 int *work_done, int budget)
{
	struct igb_ring *rx_ring = q_vector->rx_ring;
	struct net_device *netdev = rx_ring->netdev;
	struct pci_dev *pdev = rx_ring->pdev;
	union e1000_adv_rx_desc *rx_desc, *next_rxd;
	struct igb_buffer *buffer_info, *next_buffer;
	struct sk_buff *skb;
	bool cleaned = false;
	int cleaned_count = 0;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int i;
	u32 staterr;
	u16 length;
	u16 vlan_tag;

	i = rx_ring->next_to_clean;
	buffer_info = &rx_ring->buffer_info[i];
	rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

	while (staterr & E1000_RXD_STAT_DD) {
		if (*work_done >= budget)
			break;
		(*work_done)++;

		skb = buffer_info->skb;
		prefetch(skb->data - NET_IP_ALIGN);
		buffer_info->skb = NULL;

		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC_ADV(*rx_ring, i);
		prefetch(next_rxd);
		next_buffer = &rx_ring->buffer_info[i];

		length = le16_to_cpu(rx_desc->wb.upper.length);
		cleaned = true;
		cleaned_count++;

		if (buffer_info->dma) {
			pci_unmap_single(pdev, buffer_info->dma,
					 rx_ring->rx_buffer_len,
					 PCI_DMA_FROMDEVICE);
			buffer_info->dma = 0;
			if (rx_ring->rx_buffer_len >= IGB_RXBUFFER_1024) {
				skb_put(skb, length);
				goto send_up;
			}
			skb_put(skb, igb_get_hlen(rx_ring, rx_desc));
		}

		if (length) {
			pci_unmap_page(pdev, buffer_info->page_dma,
				       PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
			buffer_info->page_dma = 0;

			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
					   buffer_info->page,
					   buffer_info->page_offset,
					   length);

			if (page_count(buffer_info->page) != 1)
				buffer_info->page = NULL;
			else
				get_page(buffer_info->page);

			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
		}

		if (!(staterr & E1000_RXD_STAT_EOP)) {
			buffer_info->skb = next_buffer->skb;
			buffer_info->dma = next_buffer->dma;
			next_buffer->skb = skb;
			next_buffer->dma = 0;
			goto next_desc;
		}
send_up:
		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		igb_rx_hwtstamp(q_vector, staterr, skb);
		total_bytes += skb->len;
		total_packets++;

		igb_rx_checksum_adv(rx_ring, staterr, skb);

		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, rx_ring->queue_index);

		vlan_tag = ((staterr & E1000_RXD_STAT_VP) ?
			    le16_to_cpu(rx_desc->wb.upper.vlan) : 0);

		igb_receive_skb(q_vector, skb, vlan_tag);

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
			igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = igb_desc_unused(rx_ring);

	if (cleaned_count)
		igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);

	rx_ring->total_packets += total_packets;
	rx_ring->total_bytes += total_bytes;
	rx_ring->rx_stats.packets += total_packets;
	rx_ring->rx_stats.bytes += total_bytes;
	return cleaned;
}
/**
 * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split
 * @rx_ring: ring to refill
 * @cleaned_count: number of buffers to replace
 **/
void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
{
	struct net_device *netdev = rx_ring->netdev;
	union e1000_adv_rx_desc *rx_desc;
	struct igb_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	int bufsz;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	bufsz = rx_ring->rx_buffer_len;

	while (cleaned_count--) {
		rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);

		if ((bufsz < IGB_RXBUFFER_1024) && !buffer_info->page_dma) {
			if (!buffer_info->page) {
				buffer_info->page = alloc_page(GFP_ATOMIC);
				if (!buffer_info->page) {
					rx_ring->rx_stats.alloc_failed++;
					goto no_buffers;
				}
				buffer_info->page_offset = 0;
			} else {
				buffer_info->page_offset ^= PAGE_SIZE / 2;
			}
			buffer_info->page_dma =
				pci_map_page(rx_ring->pdev, buffer_info->page,
					     buffer_info->page_offset,
					     PAGE_SIZE / 2,
					     PCI_DMA_FROMDEVICE);
		}

		if (!buffer_info->skb) {
			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
			if (!skb) {
				rx_ring->rx_stats.alloc_failed++;
				goto no_buffers;
			}

			buffer_info->skb = skb;
			buffer_info->dma = pci_map_single(rx_ring->pdev,
							  skb->data,
							  bufsz,
							  PCI_DMA_FROMDEVICE);
		}
		/* Refresh the desc even if buffer_addrs didn't change because
		 * each write-back erases this info. */
		if (bufsz < IGB_RXBUFFER_1024) {
			rx_desc->read.pkt_addr =
			     cpu_to_le64(buffer_info->page_dma);
			rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
		} else {
			rx_desc->read.pkt_addr =
			     cpu_to_le64(buffer_info->dma);
			rx_desc->read.hdr_addr = 0;
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		if (i == 0)
			i = (rx_ring->count - 1);
		else
			i--;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64). */
		wmb();
		writel(i, rx_ring->tail);
	}
}
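/*
 * Note: the wmb()/writel() pair above is the canonical ring handoff --
 * the descriptor writes must be globally visible before the tail bump
 * tells the NIC to fetch them, which matters on weakly-ordered
 * architectures as the comment notes.
 */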
static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct mii_ioctl_data *data = if_mii(ifr);

	if (adapter->hw.phy.media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = adapter->hw.phy.addr;
		break;
	case SIOCGMIIREG:
		if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
				     &data->val_out))
			return -EIO;
		break;
	case SIOCSMIIREG:
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
/**
 * igb_hwtstamp_ioctl - control hardware time stamping
 * @netdev: network interface device structure
 * @ifr: interface request data
 * @cmd: ioctl command
 *
 * Outgoing time stamping can be enabled and disabled. Play nice and
 * disable it when requested, although it shouldn't cause any overhead
 * when no packet needs it. At most one packet in the queue may be
 * marked for time stamping, otherwise it would be impossible to tell
 * for sure to which packet the hardware time stamp belongs.
 *
 * Incoming time stamping has to be configured via the hardware
 * filters. Not all combinations are supported, in particular event
 * type has to be specified. Matching the kind of event packet is
 * not supported, with the exception of "all V2 events regardless of
 * level 2 or 4".
 **/
static int igb_hwtstamp_ioctl(struct net_device *netdev,
			      struct ifreq *ifr, int cmd)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct hwtstamp_config config;
	u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
	u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
	u32 tsync_rx_cfg = 0;
	bool is_l4 = false;
	bool is_l2 = false;
	u32 regval;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		tsync_tx_ctl = 0;
	case HWTSTAMP_TX_ON:
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		tsync_rx_ctl = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_ALL:
		/*
		 * register TSYNCRXCFG must be set, therefore it is not
		 * possible to time stamp both Sync and Delay_Req messages
		 * => fall back to time stamping all packets
		 */
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
		is_l4 = true;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
		is_l4 = true;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
		is_l2 = true;
		is_l4 = true;
		config.rx_filter = HWTSTAMP_FILTER_SOME;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
		is_l2 = true;
		is_l4 = true;
		config.rx_filter = HWTSTAMP_FILTER_SOME;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		is_l2 = true;
		break;
	default:
		return -ERANGE;
	}

	if (hw->mac.type == e1000_82575) {
		if (tsync_rx_ctl | tsync_tx_ctl)
			return -EINVAL;
		return 0;
	}

	/* enable/disable TX */
	regval = rd32(E1000_TSYNCTXCTL);
	regval &= ~E1000_TSYNCTXCTL_ENABLED;
	regval |= tsync_tx_ctl;
	wr32(E1000_TSYNCTXCTL, regval);

	/* enable/disable RX */
	regval = rd32(E1000_TSYNCRXCTL);
	regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
	regval |= tsync_rx_ctl;
	wr32(E1000_TSYNCRXCTL, regval);

	/* define which PTP packets are time stamped */
	wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);

	/* define ethertype filter for timestamped packets */
	if (is_l2)
		wr32(E1000_ETQF(3),
		     (E1000_ETQF_FILTER_ENABLE | /* enable filter */
		      E1000_ETQF_1588 | /* enable timestamping */
		      ETH_P_1588));     /* 1588 eth protocol type */
	else
		wr32(E1000_ETQF(3), 0);

#define PTP_PORT 319
	/* L4 Queue Filter[3]: filter by destination port and protocol */
	if (is_l4) {
		u32 ftqf = (IPPROTO_UDP /* UDP */
			| E1000_FTQF_VF_BP /* VF not compared */
			| E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */
			| E1000_FTQF_MASK); /* mask all inputs */
		ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */

		wr32(E1000_IMIR(3), htons(PTP_PORT));
		wr32(E1000_IMIREXT(3),
		     (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
		if (hw->mac.type == e1000_82576) {
			/* enable source port check */
			wr32(E1000_SPQF(3), htons(PTP_PORT));
			ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
		}
		wr32(E1000_FTQF(3), ftqf);
	} else {
		wr32(E1000_FTQF(3), E1000_FTQF_MASK);
	}

	adapter->hwtstamp_config = config;

	/* clear TX/RX time stamp registers, just to be sure */
	regval = rd32(E1000_TXSTMPH);
	regval = rd32(E1000_RXSTMPH);

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}
/**
 * igb_ioctl - entry point for device ioctl calls
 * @netdev: network interface device structure
 * @ifr: interface request data
 * @cmd: ioctl command
 **/
static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return igb_mii_ioctl(netdev, ifr, cmd);
	case SIOCSHWTSTAMP:
		return igb_hwtstamp_ioctl(netdev, ifr, cmd);
	default:
		return -EOPNOTSUPP;
	}
}
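/*
 * Illustrative sketch, user-space side (not part of the driver): the
 * SIOCSHWTSTAMP path above would typically be exercised roughly like:
 *
 *	struct hwtstamp_config cfg = { 0 };
 *	struct ifreq ifr;
 *	cfg.tx_type   = HWTSTAMP_TX_ON;
 *	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);	/* fd: any open socket */
 *
 * cfg is written back, so the caller can observe any fallback the driver
 * chose (e.g. HWTSTAMP_FILTER_ALL).
 */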
s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
	struct igb_adapter *adapter = hw->back;
	u16 cap_offset;

	cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
	if (!cap_offset)
		return -E1000_ERR_CONFIG;

	pci_read_config_word(adapter->pdev, cap_offset + reg, value);

	return 0;
}
s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
	struct igb_adapter *adapter = hw->back;
	u16 cap_offset;

	cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
	if (!cap_offset)
		return -E1000_ERR_CONFIG;

	pci_write_config_word(adapter->pdev, cap_offset + reg, *value);

	return 0;
}
static void igb_vlan_rx_register(struct net_device *netdev,
				 struct vlan_group *grp)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl;

	igb_irq_disable(adapter);
	adapter->vlgrp = grp;

	if (grp) {
		/* enable VLAN tag insert/strip */
		ctrl = rd32(E1000_CTRL);
		ctrl |= E1000_CTRL_VME;
		wr32(E1000_CTRL, ctrl);

		/* Disable CFI check */
		rctl = rd32(E1000_RCTL);
		rctl &= ~E1000_RCTL_CFIEN;
		wr32(E1000_RCTL, rctl);
	} else {
		/* disable VLAN tag insert/strip */
		ctrl = rd32(E1000_CTRL);
		ctrl &= ~E1000_CTRL_VME;
		wr32(E1000_CTRL, ctrl);
	}

	igb_rlpml_set(adapter);

	if (!test_bit(__IGB_DOWN, &adapter->state))
		igb_irq_enable(adapter);
}
static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int pf_id = adapter->vfs_allocated_count;

	/* attempt to add filter to vlvf array */
	igb_vlvf_set(adapter, vid, true, pf_id);

	/* add the filter since PF can receive vlans w/o entry in vlvf */
	igb_vfta_set(hw, vid, true);
}
static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int pf_id = adapter->vfs_allocated_count;
	s32 err;

	igb_irq_disable(adapter);
	vlan_group_set_device(adapter->vlgrp, vid, NULL);

	if (!test_bit(__IGB_DOWN, &adapter->state))
		igb_irq_enable(adapter);

	/* remove vlan from VLVF table array */
	err = igb_vlvf_set(adapter, vid, false, pf_id);

	/* if vid was not present in VLVF just remove it from table */
	if (err)
		igb_vfta_set(hw, vid, false);
}
static void igb_restore_vlan(struct igb_adapter *adapter)
{
	igb_vlan_rx_register(adapter->netdev, adapter->vlgrp);

	if (adapter->vlgrp) {
		u16 vid;
		for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
			if (!vlan_group_get_device(adapter->vlgrp, vid))
				continue;
			igb_vlan_rx_add_vid(adapter->netdev, vid);
		}
	}
}
int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
{
	struct e1000_mac_info *mac = &adapter->hw.mac;

	mac->autoneg = 0;

	switch (spddplx) {
	case SPEED_10 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_10_HALF;
		break;
	case SPEED_10 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_10_FULL;
		break;
	case SPEED_100 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_100_HALF;
		break;
	case SPEED_100 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_100_FULL;
		break;
	case SPEED_1000 + DUPLEX_FULL:
		mac->autoneg = 1;
		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case SPEED_1000 + DUPLEX_HALF: /* not supported */
	default:
		dev_err(&adapter->pdev->dev,
			"Unsupported Speed/Duplex configuration\n");
		return -EINVAL;
	}
	return 0;
}
static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl, status;
	u32 wufc = adapter->wol;
	int retval = 0;

	netif_device_detach(netdev);

	if (netif_running(netdev))
		igb_close(netdev);

	igb_clear_interrupt_scheme(adapter);

	retval = pci_save_state(pdev);
	if (retval)
		return retval;

	status = rd32(E1000_STATUS);
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;

	if (wufc) {
		igb_setup_rctl(adapter);
		igb_set_rx_mode(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & E1000_WUFC_MC) {
			rctl = rd32(E1000_RCTL);
			rctl |= E1000_RCTL_MPE;
			wr32(E1000_RCTL, rctl);
		}

		ctrl = rd32(E1000_CTRL);
		/* advertise wake from D3Cold */
#define E1000_CTRL_ADVD3WUC 0x00100000
		/* phy power management enable */
#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
		ctrl |= E1000_CTRL_ADVD3WUC;
		wr32(E1000_CTRL, ctrl);

		/* Allow time for pending master requests to run */
		igb_disable_pcie_master(&adapter->hw);

		wr32(E1000_WUC, E1000_WUC_PME_EN);
		wr32(E1000_WUFC, wufc);
	} else {
		wr32(E1000_WUC, 0);
		wr32(E1000_WUFC, 0);
	}

	*enable_wake = wufc || adapter->en_mng_pt;
	if (!*enable_wake)
		igb_shutdown_serdes_link_82575(hw);

	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
	 * would have already happened in close and is redundant. */
	igb_release_hw_control(adapter);

	pci_disable_device(pdev);

	return 0;
}
static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int retval;
	bool wake;

	retval = __igb_shutdown(pdev, &wake);
	if (retval)
		return retval;

	if (wake) {
		pci_prepare_to_sleep(pdev);
	} else {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}
static int igb_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"igb: Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (igb_init_interrupt_scheme(adapter)) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	/* e1000_power_up_phy(adapter); */

	igb_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);

	wr32(E1000_WUS, ~0);

	if (netif_running(netdev)) {
		err = igb_open(netdev);
		if (err)
			return err;
	}

	netif_device_attach(netdev);

	return 0;
}
static void igb_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__igb_shutdown(pdev, &wake);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void igb_netpoll(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int i;

	if (!adapter->msix_entries) {
		struct igb_q_vector *q_vector = adapter->q_vector[0];
		igb_irq_disable(adapter);
		napi_schedule(&q_vector->napi);
		return;
	}

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		wr32(E1000_EIMC, q_vector->eims_value);
		napi_schedule(&q_vector->napi);
	}
}
#endif /* CONFIG_NET_POLL_CONTROLLER */
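/*
 * Illustrative note (hedged): a typical consumer of igb_netpoll() is
 * netconsole, e.g. loaded as
 *
 *	modprobe netconsole netconsole=6665@10.0.0.1/eth0,6666@10.0.0.2/00:11:22:33:44:55
 *
 * which must drive the NIC without relying on interrupt delivery, hence
 * the manual EIMC masking and napi_schedule() above.
 */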
/**
 * igb_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		igb_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * igb_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the igb_resume routine.
 */
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	pci_ers_result_t result;
	int err;

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);

		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);

		igb_reset(adapter);
		wr32(E1000_WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status "
			"failed 0x%0x\n", err);
		/* non-fatal, continue */
	}

	return result;
}
/**
 * igb_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the igb_resume routine.
 */
static void igb_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (igb_up(adapter)) {
			dev_err(&pdev->dev, "igb_up failed after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);
}
static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
			     u8 qsel)
{
	u32 rar_low, rar_high;
	struct e1000_hw *hw = &adapter->hw;

	/* HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
		  ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));

	/* Indicate to hardware the Address is Valid. */
	rar_high |= E1000_RAH_AV;

	if (hw->mac.type == e1000_82575)
		rar_high |= E1000_RAH_POOL_1 * qsel;
	else
		rar_high |= E1000_RAH_POOL_1 << qsel;

	wr32(E1000_RAL(index), rar_low);
	wrfl();
	wr32(E1000_RAH(index), rar_high);
	wrfl();
}
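/*
 * Worked example: for the address 00:1b:21:aa:bb:cc the code above packs
 * rar_low = 0xaa211b00 and rar_high = 0x0000ccbb before E1000_RAH_AV and
 * the pool-select bits are OR-ed in, i.e. the six network-order bytes are
 * laid out little-endian across RAL/RAH.
 */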
static int igb_set_vf_mac(struct igb_adapter *adapter,
			  int vf, unsigned char *mac_addr)
{
	struct e1000_hw *hw = &adapter->hw;
	/* VF MAC addresses start at end of receive addresses and moves
	 * towards the first, as a result a collision should not be possible */
	int rar_entry = hw->mac.rar_entry_count - (vf + 1);

	memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);

	igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf);

	return 0;
}
static void igb_vmm_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 reg;

	/* replication is not supported for 82575 */
	if (hw->mac.type == e1000_82575)
		return;

	/* enable replication vlan tag stripping */
	reg = rd32(E1000_RPLOLR);
	reg |= E1000_RPLOLR_STRVLAN;
	wr32(E1000_RPLOLR, reg);

	/* notify HW that the MAC is adding vlan tags */
	reg = rd32(E1000_DTXCTL);
	reg |= E1000_DTXCTL_VLAN_ADDED;
	wr32(E1000_DTXCTL, reg);

	if (adapter->vfs_allocated_count) {
		igb_vmdq_set_loopback_pf(hw, true);
		igb_vmdq_set_replication_pf(hw, true);
	} else {
		igb_vmdq_set_loopback_pf(hw, false);
		igb_vmdq_set_replication_pf(hw, false);