/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007-2009 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/net_tstamp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/aer.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
#include "igb.h"
#define DRV_VERSION "1.3.16-k2"
char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
				"Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] = "Copyright (c) 2007-2009 Intel Corporation.";

static const struct e1000_info *igb_info_tbl[] = {
	[board_82575] = &e1000_82575_info,
};
static struct pci_device_id igb_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, igb_pci_tbl);
void igb_reset(struct igb_adapter *);
static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
static void igb_setup_mrqc(struct igb_adapter *);
void igb_update_stats(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void __devexit igb_remove(struct pci_dev *pdev);
static int igb_sw_init(struct igb_adapter *);
static int igb_open(struct net_device *);
static int igb_close(struct net_device *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
static void igb_clean_tx_ring(struct igb_ring *);
static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_rx_mode(struct net_device *);
static void igb_update_phy_info(unsigned long);
static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
				      struct net_device *);
static struct net_device_stats *igb_get_stats(struct net_device *);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static void igb_set_uta(struct igb_adapter *adapter);
static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
static irqreturn_t igb_msix_ring(int irq, void *);
#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *);
static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_IGB_DCA */
static bool igb_clean_tx_irq(struct igb_q_vector *);
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_rx_irq_adv(struct igb_q_vector *, int *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_rx_register(struct net_device *, struct vlan_group *);
static void igb_vlan_rx_add_vid(struct net_device *, u16);
static void igb_vlan_rx_kill_vid(struct net_device *, u16);
static void igb_restore_vlan(struct igb_adapter *);
static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32, u8);
static void igb_ping_all_vfs(struct igb_adapter *);
static void igb_msg_task(struct igb_adapter *);
static void igb_vmm_control(struct igb_adapter *);
static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);

#ifdef CONFIG_PM
static int igb_suspend(struct pci_dev *, pm_message_t);
static int igb_resume(struct pci_dev *);
#endif
static void igb_shutdown(struct pci_dev *);
#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
	.notifier_call	= igb_notify_dca,
	.next		= NULL,
	.priority	= 0
};
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void igb_netpoll(struct net_device *);
#endif
#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs = 0;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
                 "per physical function");
#endif /* CONFIG_PCI_IOV */
static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
		     pci_channel_state_t);
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
static void igb_io_resume(struct pci_dev *);

static struct pci_error_handlers igb_err_handler = {
	.error_detected = igb_io_error_detected,
	.slot_reset = igb_io_slot_reset,
	.resume = igb_io_resume,
};
static struct pci_driver igb_driver = {
	.name     = igb_driver_name,
	.id_table = igb_pci_tbl,
	.probe    = igb_probe,
	.remove   = __devexit_p(igb_remove),
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend  = igb_suspend,
	.resume   = igb_resume,
#endif
	.shutdown = igb_shutdown,
	.err_handler = &igb_err_handler
};
static int global_quad_port_a; /* global quad port a indication */

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
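
/*
 * Note on igb_read_clock() below: SYSTIML is deliberately read before
 * SYSTIMH -- per the 82576 datasheet the low-dword read latches the high
 * dword, so the two reads form one consistent 64-bit SYSTIM sample.
 */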
/**
 * igb_read_clock - read raw cycle counter (to be used by time counter)
 **/
static cycle_t igb_read_clock(const struct cyclecounter *tc)
{
	struct igb_adapter *adapter =
		container_of(tc, struct igb_adapter, cycles);
	struct e1000_hw *hw = &adapter->hw;
	u64 stamp = 0;
	int shift = 0;

	stamp |= (u64)rd32(E1000_SYSTIML) << shift;
	stamp |= (u64)rd32(E1000_SYSTIMH) << (shift + 32);
	return stamp;
}
#ifdef DEBUG
/**
 * igb_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *igb_get_hw_dev_name(struct e1000_hw *hw)
{
	struct igb_adapter *adapter = hw->back;
	return adapter->netdev->name;
}
/**
 * igb_get_time_str - format current NIC and system time as string
 **/
static char *igb_get_time_str(struct igb_adapter *adapter,
                              char buffer[160])
{
	cycle_t hw = adapter->cycles.read(&adapter->cycles);
	struct timespec nic = ns_to_timespec(timecounter_read(&adapter->clock));
	struct timespec sys;
	struct timespec delta;
	getnstimeofday(&sys);

	delta = timespec_sub(nic, sys);

	sprintf(buffer,
	        "HW %llu, NIC %ld.%09lus, SYS %ld.%09lus, NIC-SYS %lds + %09luns",
	        hw,
	        (long)nic.tv_sec, nic.tv_nsec,
	        (long)sys.tv_sec, sys.tv_nsec,
	        (long)delta.tv_sec, delta.tv_nsec);

	return buffer;
}
#endif
/**
 * igb_init_module - Driver Registration Routine
 *
 * igb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init igb_init_module(void)
{
	int ret;
	printk(KERN_INFO "%s - version %s\n",
	       igb_driver_string, igb_driver_version);

	printk(KERN_INFO "%s\n", igb_copyright);

	global_quad_port_a = 0;

#ifdef CONFIG_IGB_DCA
	dca_register_notify(&dca_notifier);
#endif
	ret = pci_register_driver(&igb_driver);
	return ret;
}

module_init(igb_init_module);
/**
 * igb_exit_module - Driver Exit Cleanup Routine
 *
 * igb_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit igb_exit_module(void)
{
#ifdef CONFIG_IGB_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&igb_driver);
}

module_exit(igb_exit_module);
#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
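/*
 * Worked example: Q_IDX_82576(i) interleaves ring indices across the two
 * queue groups, so i = 0, 1, 2, 3 maps to register offsets 0, 8, 1, 9.
 */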
/**
 * igb_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 **/
static void igb_cache_ring_register(struct igb_adapter *adapter)
{
	int i;
	u32 rbase_offset = adapter->vfs_allocated_count;

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		/* The queues are allocated for virtualization such that VF 0
		 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
		 * In order to avoid collision we start at the first free queue
		 * and continue consuming queues in the same sequence
		 */
		for (i = 0; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i].reg_idx = rbase_offset +
			                              Q_IDX_82576(i);
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i].reg_idx = rbase_offset +
			                              Q_IDX_82576(i);
		break;
	case e1000_82575:
	default:
		for (i = 0; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i].reg_idx = i;
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i].reg_idx = i;
		break;
	}
}
static void igb_free_queues(struct igb_adapter *adapter)
{
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	adapter->tx_ring = NULL;
	adapter->rx_ring = NULL;

	adapter->num_rx_queues = 0;
	adapter->num_tx_queues = 0;
}
/**
 * igb_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int igb_alloc_queues(struct igb_adapter *adapter)
{
	int i;

	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
	                           sizeof(struct igb_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		goto err;

	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
	                           sizeof(struct igb_ring), GFP_KERNEL);
	if (!adapter->rx_ring)
		goto err;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *ring = &(adapter->tx_ring[i]);
		ring->count = adapter->tx_ring_count;
		ring->queue_index = i;
		ring->pdev = adapter->pdev;
		ring->netdev = adapter->netdev;
		/* For 82575, context index must be unique per ring. */
		if (adapter->hw.mac.type == e1000_82575)
			ring->flags = IGB_RING_FLAG_TX_CTX_IDX;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = &(adapter->rx_ring[i]);
		ring->count = adapter->rx_ring_count;
		ring->queue_index = i;
		ring->pdev = adapter->pdev;
		ring->netdev = adapter->netdev;
		ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
		ring->flags = IGB_RING_FLAG_RX_CSUM; /* enable rx checksum */
		/* set flag indicating ring supports SCTP checksum offload */
		if (adapter->hw.mac.type >= e1000_82576)
			ring->flags |= IGB_RING_FLAG_RX_SCTP_CSUM;
	}

	igb_cache_ring_register(adapter);

	return 0;

err:
	igb_free_queues(adapter);

	return -ENOMEM;
}
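
/*
 * IVAR0 layout on the 82576, as programmed by igb_assign_vector() below:
 * each 32-bit entry serves queue pair N and N+8 -- Rx queue N in the low
 * byte, Tx queue N in the second byte, Rx queue N+8 in the third byte and
 * Tx queue N+8 in the high byte, each field tagged with E1000_IVAR_VALID.
 */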
#define IGB_N0_QUEUE -1
static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
{
	u32 msixbm = 0;
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	u32 ivar, index;
	int rx_queue = IGB_N0_QUEUE;
	int tx_queue = IGB_N0_QUEUE;

	if (q_vector->rx_ring)
		rx_queue = q_vector->rx_ring->reg_idx;
	if (q_vector->tx_ring)
		tx_queue = q_vector->tx_ring->reg_idx;

	switch (hw->mac.type) {
	case e1000_82575:
		/* The 82575 assigns vectors using a bitmask, which matches the
		   bitmask for the EICR/EIMS/EIMC registers.  To assign one
		   or more queues to a vector, we write the appropriate bits
		   into the MSIXBM register for that vector. */
		if (rx_queue > IGB_N0_QUEUE)
			msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
		if (tx_queue > IGB_N0_QUEUE)
			msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
		array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
		q_vector->eims_value = msixbm;
		break;
	case e1000_82576:
		/* 82576 uses a table-based method for assigning vectors.
		   Each queue has a single entry in the table to which we write
		   a vector number along with a "valid" bit.  Sadly, the layout
		   of the table is somewhat counterintuitive. */
		if (rx_queue > IGB_N0_QUEUE) {
			index = (rx_queue & 0x7);
			ivar = array_rd32(E1000_IVAR0, index);
			if (rx_queue < 8) {
				/* vector goes into low byte of register */
				ivar = ivar & 0xFFFFFF00;
				ivar |= msix_vector | E1000_IVAR_VALID;
			} else {
				/* vector goes into third byte of register */
				ivar = ivar & 0xFF00FFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
			}
			array_wr32(E1000_IVAR0, index, ivar);
		}
		if (tx_queue > IGB_N0_QUEUE) {
			index = (tx_queue & 0x7);
			ivar = array_rd32(E1000_IVAR0, index);
			if (tx_queue < 8) {
				/* vector goes into second byte of register */
				ivar = ivar & 0xFFFF00FF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
			} else {
				/* vector goes into high byte of register */
				ivar = ivar & 0x00FFFFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
			}
			array_wr32(E1000_IVAR0, index, ivar);
		}
		q_vector->eims_value = 1 << msix_vector;
		break;
	default:
		BUG();
		break;
	}
}
/**
 * igb_configure_msix - Configure MSI-X hardware
 *
 * igb_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void igb_configure_msix(struct igb_adapter *adapter)
{
	u32 tmp;
	int i, vector = 0;
	struct e1000_hw *hw = &adapter->hw;

	adapter->eims_enable_mask = 0;

	/* set vector for other causes, i.e. link changes */
	switch (hw->mac.type) {
	case e1000_82575:
		tmp = rd32(E1000_CTRL_EXT);
		/* enable MSI-X PBA support*/
		tmp |= E1000_CTRL_EXT_PBA_CLR;

		/* Auto-Mask interrupts upon ICR read. */
		tmp |= E1000_CTRL_EXT_EIAME;
		tmp |= E1000_CTRL_EXT_IRCA;

		wr32(E1000_CTRL_EXT, tmp);

		/* enable msix_other interrupt */
		array_wr32(E1000_MSIXBM(0), vector++,
		           E1000_EIMS_OTHER);
		adapter->eims_other = E1000_EIMS_OTHER;

		break;

	case e1000_82576:
		/* Turn on MSI-X capability first, or our settings
		 * won't stick.  And it will take days to debug. */
		wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
		     E1000_GPIE_PBA | E1000_GPIE_EIAME |
		     E1000_GPIE_NSICR);

		/* enable msix_other interrupt */
		adapter->eims_other = 1 << vector;
		tmp = (vector++ | E1000_IVAR_VALID) << 8;

		wr32(E1000_IVAR_MISC, tmp);
		break;
	default:
		/* do nothing, since nothing else supports MSI-X */
		break;
	} /* switch (hw->mac.type) */

	adapter->eims_enable_mask |= adapter->eims_other;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		igb_assign_vector(q_vector, vector++);
		adapter->eims_enable_mask |= q_vector->eims_value;
	}

	wrfl();
}
/**
 * igb_request_msix - Initialize MSI-X interrupts
 *
 * igb_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int igb_request_msix(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	int i, err = 0, vector = 0;

	err = request_irq(adapter->msix_entries[vector].vector,
	                  &igb_msix_other, 0, netdev->name, adapter);
	if (err)
		goto out;
	vector++;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];

		q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);

		if (q_vector->rx_ring && q_vector->tx_ring)
			sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
			        q_vector->rx_ring->queue_index);
		else if (q_vector->tx_ring)
			sprintf(q_vector->name, "%s-tx-%u", netdev->name,
			        q_vector->tx_ring->queue_index);
		else if (q_vector->rx_ring)
			sprintf(q_vector->name, "%s-rx-%u", netdev->name,
			        q_vector->rx_ring->queue_index);
		else
			sprintf(q_vector->name, "%s-unused", netdev->name);

		err = request_irq(adapter->msix_entries[vector].vector,
		                  &igb_msix_ring, 0, q_vector->name,
		                  q_vector);
		if (err)
			goto out;
		vector++;
	}

	igb_configure_msix(adapter);
	return 0;
out:
	return err;
}
static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IGB_FLAG_HAS_MSI) {
		pci_disable_msi(adapter->pdev);
	}
}
/**
 * igb_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void igb_free_q_vectors(struct igb_adapter *adapter)
{
	int v_idx;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
		adapter->q_vector[v_idx] = NULL;
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
	}
	adapter->num_q_vectors = 0;
}
/**
 * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
 *
 * This function resets the device so that it has 0 rx queues, tx queues, and
 * MSI-X interrupts allocated.
 */
static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
{
	igb_free_queues(adapter);
	igb_free_q_vectors(adapter);
	igb_reset_interrupt_capability(adapter);
}
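
/*
 * Example vector budget for igb_set_interrupt_capability() below: with
 * 4 Rx and 4 Tx queues the driver requests 8 queue vectors plus 1 for
 * link/other causes, i.e. 9 MSI-X entries in total.
 */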
/**
 * igb_set_interrupt_capability - set MSI or MSI-X if supported
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static void igb_set_interrupt_capability(struct igb_adapter *adapter)
{
	int err;
	int numvecs, i;

	/* Number of supported queues. */
	adapter->num_rx_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
	adapter->num_tx_queues = min_t(u32, IGB_MAX_TX_QUEUES, num_online_cpus());

	/* start with one vector for every rx queue */
	numvecs = adapter->num_rx_queues;

	/* if tx handler is separate add 1 for every tx queue */
	numvecs += adapter->num_tx_queues;

	/* store the number of vectors reserved for queues */
	adapter->num_q_vectors = numvecs;

	/* add 1 vector for link status interrupts */
	numvecs++;
	adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
	                                GFP_KERNEL);
	if (!adapter->msix_entries)
		goto msi_only;

	for (i = 0; i < numvecs; i++)
		adapter->msix_entries[i].entry = i;

	err = pci_enable_msix(adapter->pdev,
	                      adapter->msix_entries,
	                      numvecs);
	if (err == 0)
		goto out;

	igb_reset_interrupt_capability(adapter);

	/* If we can't do MSI-X, try MSI */
msi_only:
#ifdef CONFIG_PCI_IOV
	/* disable SR-IOV for non MSI-X configurations */
	if (adapter->vf_data) {
		struct e1000_hw *hw = &adapter->hw;
		/* disable iov and allow time for transactions to clear */
		pci_disable_sriov(adapter->pdev);
		msleep(500);

		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		msleep(100);
		dev_info(&adapter->pdev->dev, "IOV Disabled\n");
	}
#endif
	adapter->vfs_allocated_count = 0;
	adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_q_vectors = 1;
	if (!pci_enable_msi(adapter->pdev))
		adapter->flags |= IGB_FLAG_HAS_MSI;
out:
	/* Notify the stack of the (possibly) reduced Tx Queue count. */
	adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
	return;
}
/**
 * igb_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int igb_alloc_q_vectors(struct igb_adapter *adapter)
{
	struct igb_q_vector *q_vector;
	struct e1000_hw *hw = &adapter->hw;
	int v_idx;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		q_vector = kzalloc(sizeof(struct igb_q_vector), GFP_KERNEL);
		if (!q_vector)
			goto err_out;
		q_vector->adapter = adapter;
		q_vector->itr_shift = (hw->mac.type == e1000_82575) ? 16 : 0;
		q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
		q_vector->itr_val = IGB_START_ITR;
		q_vector->set_itr = 1;
		netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
		adapter->q_vector[v_idx] = q_vector;
	}
	return 0;

err_out:
	while (v_idx) {
		v_idx--;
		q_vector = adapter->q_vector[v_idx];
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
		adapter->q_vector[v_idx] = NULL;
	}
	return -ENOMEM;
}
static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
                                      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector;

	q_vector = adapter->q_vector[v_idx];
	q_vector->rx_ring = &adapter->rx_ring[ring_idx];
	q_vector->rx_ring->q_vector = q_vector;
	q_vector->itr_val = adapter->rx_itr_setting;
	if (q_vector->itr_val && q_vector->itr_val <= 3)
		q_vector->itr_val = IGB_START_ITR;
}

static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
                                      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector;

	q_vector = adapter->q_vector[v_idx];
	q_vector->tx_ring = &adapter->tx_ring[ring_idx];
	q_vector->tx_ring->q_vector = q_vector;
	q_vector->itr_val = adapter->tx_itr_setting;
	if (q_vector->itr_val && q_vector->itr_val <= 3)
		q_vector->itr_val = IGB_START_ITR;
}
/**
 * igb_map_ring_to_vector - maps allocated queues to vectors
 *
 * This function maps the recently allocated queues to vectors.
 **/
static int igb_map_ring_to_vector(struct igb_adapter *adapter)
{
	int i;
	int v_idx = 0;

	if ((adapter->num_q_vectors < adapter->num_rx_queues) ||
	    (adapter->num_q_vectors < adapter->num_tx_queues))
		return -ENOMEM;

	if (adapter->num_q_vectors >=
	    (adapter->num_rx_queues + adapter->num_tx_queues)) {
		for (i = 0; i < adapter->num_rx_queues; i++)
			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
		for (i = 0; i < adapter->num_tx_queues; i++)
			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
	} else {
		for (i = 0; i < adapter->num_rx_queues; i++) {
			if (i < adapter->num_tx_queues)
				igb_map_tx_ring_to_vector(adapter, i, v_idx);
			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
		}
		for (; i < adapter->num_tx_queues; i++)
			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
	}
	return 0;
}
/**
 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
 *
 * This function initializes the interrupts and allocates all of the queues.
 **/
static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int err;

	igb_set_interrupt_capability(adapter);

	err = igb_alloc_q_vectors(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
		goto err_alloc_q_vectors;
	}

	err = igb_alloc_queues(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	err = igb_map_ring_to_vector(adapter);
	if (err) {
		dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n");
		goto err_map_queues;
	}

	return 0;
err_map_queues:
	igb_free_queues(adapter);
err_alloc_queues:
	igb_free_q_vectors(adapter);
err_alloc_q_vectors:
	igb_reset_interrupt_capability(adapter);
	return err;
}
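
/*
 * Interrupt setup from here on falls back in order: MSI-X (one vector per
 * queue plus one for link) first, then single-vector MSI, and finally
 * legacy INTx as the last resort.
 */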
/**
 * igb_request_irq - initialize interrupts
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_request_irq(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	int err = 0;

	if (adapter->msix_entries) {
		err = igb_request_msix(adapter);
		if (!err)
			goto request_done;
		/* fall back to MSI */
		igb_clear_interrupt_scheme(adapter);
		if (!pci_enable_msi(adapter->pdev))
			adapter->flags |= IGB_FLAG_HAS_MSI;
		igb_free_all_tx_resources(adapter);
		igb_free_all_rx_resources(adapter);
		adapter->num_tx_queues = 1;
		adapter->num_rx_queues = 1;
		adapter->num_q_vectors = 1;
		err = igb_alloc_q_vectors(adapter);
		if (err) {
			dev_err(&pdev->dev,
			        "Unable to allocate memory for vectors\n");
			goto request_done;
		}
		err = igb_alloc_queues(adapter);
		if (err) {
			dev_err(&pdev->dev,
			        "Unable to allocate memory for queues\n");
			igb_free_q_vectors(adapter);
			goto request_done;
		}
		igb_setup_all_tx_resources(adapter);
		igb_setup_all_rx_resources(adapter);
	} else {
		switch (hw->mac.type) {
		case e1000_82575:
			wr32(E1000_MSIXBM(0),
			     (E1000_EICR_RX_QUEUE0 |
			      E1000_EICR_TX_QUEUE0 |
			      E1000_EIMS_OTHER));
			break;
		case e1000_82576:
			wr32(E1000_IVAR0, E1000_IVAR_VALID);
			break;
		default:
			break;
		}
	}

	if (adapter->flags & IGB_FLAG_HAS_MSI) {
		err = request_irq(adapter->pdev->irq, &igb_intr_msi, 0,
		                  netdev->name, adapter);
		if (!err)
			goto request_done;

		/* fall back to legacy interrupts */
		igb_reset_interrupt_capability(adapter);
		adapter->flags &= ~IGB_FLAG_HAS_MSI;
	}

	err = request_irq(adapter->pdev->irq, &igb_intr, IRQF_SHARED,
	                  netdev->name, adapter);

	if (err)
		dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n",
		        err);

request_done:
	return err;
}
static void igb_free_irq(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		int vector = 0, i;

		free_irq(adapter->msix_entries[vector++].vector, adapter);

		for (i = 0; i < adapter->num_q_vectors; i++) {
			struct igb_q_vector *q_vector = adapter->q_vector[i];
			free_irq(adapter->msix_entries[vector++].vector,
			         q_vector);
		}
	} else {
		free_irq(adapter->pdev->irq, adapter);
	}
}
/**
 * igb_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void igb_irq_disable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->msix_entries) {
		u32 regval = rd32(E1000_EIAM);
		wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
		wr32(E1000_EIMC, adapter->eims_enable_mask);
		regval = rd32(E1000_EIAC);
		wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
	}

	wr32(E1000_IAM, 0);
	wr32(E1000_IMC, ~0);
	wrfl();
	synchronize_irq(adapter->pdev->irq);
}
/**
 * igb_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void igb_irq_enable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->msix_entries) {
		u32 regval = rd32(E1000_EIAC);
		wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
		regval = rd32(E1000_EIAM);
		wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
		wr32(E1000_EIMS, adapter->eims_enable_mask);
		if (adapter->vfs_allocated_count)
			wr32(E1000_MBVFIMR, 0xFF);
		wr32(E1000_IMS, (E1000_IMS_LSC | E1000_IMS_VMMB |
		                 E1000_IMS_DOUTSYNC));
	} else {
		wr32(E1000_IMS, IMS_ENABLE_MASK);
		wr32(E1000_IAM, IMS_ENABLE_MASK);
	}
}
static void igb_update_mng_vlan(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 vid = adapter->hw.mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;

	if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
		/* add VID to filter table */
		igb_vfta_set(hw, vid, true);
		adapter->mng_vlan_id = vid;
	} else {
		adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
	}

	if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
	    (vid != old_vid) &&
	    !vlan_group_get_device(adapter->vlgrp, old_vid)) {
		/* remove VID from filter table */
		igb_vfta_set(hw, old_vid, false);
	}
}
/**
 * igb_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 *
 **/
static void igb_release_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
	     ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
}
/**
 * igb_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 *
 **/
static void igb_get_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
	     ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
}
/**
 * igb_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 **/
static void igb_configure(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	igb_get_hw_control(adapter);
	igb_set_rx_mode(netdev);

	igb_restore_vlan(adapter);

	igb_setup_tctl(adapter);
	igb_setup_mrqc(adapter);
	igb_setup_rctl(adapter);

	igb_configure_tx(adapter);
	igb_configure_rx(adapter);

	igb_rx_fifo_flush_82575(&adapter->hw);

	/* call igb_desc_unused which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = &adapter->rx_ring[i];
		igb_alloc_rx_buffers_adv(ring, igb_desc_unused(ring));
	}

	adapter->tx_queue_len = netdev->tx_queue_len;
}
/**
 * igb_up - Open the interface and prepare it to handle traffic
 * @adapter: board private structure
 **/
int igb_up(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	/* hardware has been reset, we need to reload some things */
	igb_configure(adapter);

	clear_bit(__IGB_DOWN, &adapter->state);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		napi_enable(&q_vector->napi);
	}
	if (adapter->msix_entries)
		igb_configure_msix(adapter);

	/* Clear any pending interrupts. */
	rd32(E1000_ICR);
	igb_irq_enable(adapter);

	/* notify VFs that reset has been completed */
	if (adapter->vfs_allocated_count) {
		u32 reg_data = rd32(E1000_CTRL_EXT);
		reg_data |= E1000_CTRL_EXT_PFRSTD;
		wr32(E1000_CTRL_EXT, reg_data);
	}

	netif_tx_start_all_queues(adapter->netdev);

	/* Fire a link change interrupt to start the watchdog. */
	wr32(E1000_ICS, E1000_ICS_LSC);
	return 0;
}
void igb_down(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 tctl, rctl;
	int i;

	/* signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer */
	set_bit(__IGB_DOWN, &adapter->state);

	/* disable receives in the hardware */
	rctl = rd32(E1000_RCTL);
	wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_tx_stop_all_queues(netdev);

	/* disable transmits in the hardware */
	tctl = rd32(E1000_TCTL);
	tctl &= ~E1000_TCTL_EN;
	wr32(E1000_TCTL, tctl);
	/* flush both disables and wait for them to finish */
	wrfl();
	msleep(10);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		napi_disable(&q_vector->napi);
	}

	igb_irq_disable(adapter);

	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	netdev->tx_queue_len = adapter->tx_queue_len;
	netif_carrier_off(netdev);

	/* record the stats before reset*/
	igb_update_stats(adapter);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	if (!pci_channel_offline(adapter->pdev))
		igb_reset(adapter);
	igb_clean_all_tx_rings(adapter);
	igb_clean_all_rx_rings(adapter);
#ifdef CONFIG_IGB_DCA

	/* since we reset the hardware DCA settings were cleared */
	igb_setup_dca(adapter);
#endif
}
void igb_reinit_locked(struct igb_adapter *adapter)
{
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		msleep(1);
	igb_down(adapter);
	igb_up(adapter);
	clear_bit(__IGB_RESETTING, &adapter->state);
}
void igb_reset(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_mac_info *mac = &hw->mac;
	struct e1000_fc_info *fc = &hw->fc;
	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
	u16 hwm;

	/* Repartition Pba for greater than 9k mtu
	 * To take effect CTRL.RST is required.
	 */
	switch (mac->type) {
	case e1000_82576:
		pba = rd32(E1000_RXPBS);
		pba &= E1000_RXPBS_SIZE_MASK_82576;
		break;
	case e1000_82575:
	default:
		pba = E1000_PBA_34K;
		break;
	}

	if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
	    (mac->type < e1000_82576)) {
		/* adjust PBA for jumbo frames */
		wr32(E1000_PBA, pba);

		/* To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB.  Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB. */
		pba = rd32(E1000_PBA);
		/* upper 16 bits has Tx packet buffer allocation size in KB */
		tx_space = pba >> 16;
		/* lower 16 bits has Rx packet buffer allocation size in KB */
		pba &= 0xffff;
		/* the tx fifo also stores 16 bytes of information about the tx
		 * but don't include ethernet FCS because hardware appends it */
		min_tx_space = (adapter->max_frame_size +
		                sizeof(union e1000_adv_tx_desc) -
		                ETH_FCS_LEN) * 2;
		min_tx_space = ALIGN(min_tx_space, 1024);
		min_tx_space >>= 10;
		/* software strips receive CRC, so leave room for it */
		min_rx_space = adapter->max_frame_size;
		min_rx_space = ALIGN(min_rx_space, 1024);
		min_rx_space >>= 10;

		/* If current Tx allocation is less than the min Tx FIFO size,
		 * and the min Tx FIFO size is less than the current Rx FIFO
		 * allocation, take space away from current Rx allocation */
		if (tx_space < min_tx_space &&
		    ((min_tx_space - tx_space) < pba)) {
			pba = pba - (min_tx_space - tx_space);

			/* if short on rx space, rx wins and must trump tx
			 * adjustment */
			if (pba < min_rx_space)
				pba = min_rx_space;
		}
		wr32(E1000_PBA, pba);
	}

	/* flow control settings */
	/* The high water mark must be low enough to fit one full frame
	 * (or the size used for early receive) above it in the Rx FIFO.
	 * Set it to the lower of:
	 * - 90% of the Rx FIFO size, or
	 * - the full Rx FIFO size minus one full frame */
	hwm = min(((pba << 10) * 9 / 10),
	          ((pba << 10) - 2 * adapter->max_frame_size));
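
	/* Worked example: with pba = 34 (KB) and a 1522-byte max frame,
	 * hwm = min(34816 * 9 / 10, 34816 - 2 * 1522)
	 *     = min(31334, 31772) = 31334 bytes. */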
	if (mac->type < e1000_82576) {
		fc->high_water = hwm & 0xFFF8;	/* 8-byte granularity */
		fc->low_water = fc->high_water - 8;
	} else {
		fc->high_water = hwm & 0xFFF0;	/* 16-byte granularity */
		fc->low_water = fc->high_water - 16;
	}
	fc->pause_time = 0xFFFF;
	fc->send_xon = 1;
	fc->current_mode = fc->requested_mode;

	/* disable receive for all VFs and wait one second */
	if (adapter->vfs_allocated_count) {
		int i;
		for (i = 0 ; i < adapter->vfs_allocated_count; i++)
			adapter->vf_data[i].flags = 0;

		/* ping all the active vfs to let them know we are going down */
		igb_ping_all_vfs(adapter);

		/* disable transmits and receives */
		wr32(E1000_VFRE, 0);
		wr32(E1000_VFTE, 0);
	}

	/* Allow time for pending master requests to run */
	adapter->hw.mac.ops.reset_hw(&adapter->hw);
	wr32(E1000_WUC, 0);

	if (adapter->hw.mac.ops.init_hw(&adapter->hw))
		dev_err(&adapter->pdev->dev, "Hardware Error\n");

	igb_update_mng_vlan(adapter);

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);

	igb_reset_adaptive(&adapter->hw);
	igb_get_phy_info(&adapter->hw);
}
static const struct net_device_ops igb_netdev_ops = {
	.ndo_open		= igb_open,
	.ndo_stop		= igb_close,
	.ndo_start_xmit		= igb_xmit_frame_adv,
	.ndo_get_stats		= igb_get_stats,
	.ndo_set_rx_mode	= igb_set_rx_mode,
	.ndo_set_multicast_list	= igb_set_rx_mode,
	.ndo_set_mac_address	= igb_set_mac,
	.ndo_change_mtu		= igb_change_mtu,
	.ndo_do_ioctl		= igb_ioctl,
	.ndo_tx_timeout		= igb_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_register	= igb_vlan_rx_register,
	.ndo_vlan_rx_add_vid	= igb_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= igb_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= igb_netpoll,
#endif
};
/**
 * igb_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in igb_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * igb_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit igb_probe(struct pci_dev *pdev,
                               const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct igb_adapter *adapter;
	struct e1000_hw *hw;
	const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
	unsigned long mmio_start, mmio_len;
	int err, pci_using_dac;
	u16 eeprom_data = 0;
	u16 eeprom_apme_mask = IGB_EEPROM_APME;
	u32 part_num;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	pci_using_dac = 0;
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!err) {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (!err)
			pci_using_dac = 1;
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
			if (err) {
				dev_err(&pdev->dev, "No usable DMA "
				        "configuration, aborting\n");
				goto err_dma;
			}
		}
	}

	err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
	                                   IORESOURCE_MEM),
	                                   igb_driver_name);
	if (err)
		goto err_pci_reg;

	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);
	pci_save_state(pdev);

	err = -ENOMEM;
	netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
	                           IGB_ABS_MAX_TX_QUEUES);
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE;

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);

	err = -EIO;
	hw->hw_addr = ioremap(mmio_start, mmio_len);
	if (!hw->hw_addr)
		goto err_ioremap;

	netdev->netdev_ops = &igb_netdev_ops;
	igb_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;

	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	netdev->mem_start = mmio_start;
	netdev->mem_end = mmio_start + mmio_len;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	/* setup the private structure */
	/* Copy the default MAC, PHY and NVM function pointers */
	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
	memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
	/* Initialize skew-specific constants */
	err = ei->get_invariants(hw);
	if (err)
		goto err_sw_init;

	/* setup the private structure */
	err = igb_sw_init(adapter);
	if (err)
		goto err_sw_init;

	igb_get_bus_info_pcie(hw);

	hw->phy.autoneg_wait_to_complete = false;
	hw->mac.adaptive_ifs = true;

	/* Copper options */
	if (hw->phy.media_type == e1000_media_type_copper) {
		hw->phy.mdix = AUTO_ALL_MODES;
		hw->phy.disable_polarity_correction = false;
		hw->phy.ms_type = e1000_ms_hw_default;
	}

	if (igb_check_reset_block(hw))
		dev_info(&pdev->dev,
		         "PHY reset is blocked due to SOL/IDER session.\n");

	netdev->features = NETIF_F_SG |
	                   NETIF_F_IP_CSUM |
	                   NETIF_F_HW_VLAN_TX |
	                   NETIF_F_HW_VLAN_RX |
	                   NETIF_F_HW_VLAN_FILTER;

	netdev->features |= NETIF_F_IPV6_CSUM;
	netdev->features |= NETIF_F_TSO;
	netdev->features |= NETIF_F_TSO6;

	netdev->features |= NETIF_F_GRO;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	if (adapter->hw.mac.type == e1000_82576)
		netdev->features |= NETIF_F_SCTP_CSUM;

	adapter->en_mng_pt = igb_enable_mng_pass_thru(&adapter->hw);

	/* before reading the NVM, reset the controller to put the device in a
	 * known good starting state */
	hw->mac.ops.reset_hw(hw);

	/* make sure the NVM is good */
	if (igb_validate_nvm_checksum(hw) < 0) {
		dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
		err = -EIO;
		goto err_eeprom;
	}

	/* copy the MAC address out of the NVM */
	if (hw->mac.ops.read_mac_addr(hw))
		dev_err(&pdev->dev, "NVM Read Error\n");

	memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
	memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr)) {
		dev_err(&pdev->dev, "Invalid MAC Address\n");
		err = -EIO;
		goto err_eeprom;
	}

	setup_timer(&adapter->watchdog_timer, &igb_watchdog,
	            (unsigned long) adapter);
	setup_timer(&adapter->phy_info_timer, &igb_update_phy_info,
	            (unsigned long) adapter);

	INIT_WORK(&adapter->reset_task, igb_reset_task);
	INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);

	/* Initialize link properties that are user-changeable */
	adapter->fc_autoneg = true;
	hw->mac.autoneg = true;
	hw->phy.autoneg_advertised = 0x2f;

	hw->fc.requested_mode = e1000_fc_default;
	hw->fc.current_mode = e1000_fc_default;

	igb_validate_mdi_setting(hw);

	/* Initial Wake on LAN setting If APM wake is enabled in the EEPROM,
	 * enable the ACPI Magic Packet filter
	 */

	if (hw->bus.func == 0)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
	else if (hw->bus.func == 1)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);

	if (eeprom_data & eeprom_apme_mask)
		adapter->eeprom_wol |= E1000_WUFC_MAG;

	/* now that we have the eeprom settings, apply the special cases where
	 * the eeprom may be wrong or the board simply won't support wake on
	 * lan on a particular port */
	switch (pdev->device) {
	case E1000_DEV_ID_82575GB_QUAD_COPPER:
		adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82575EB_FIBER_SERDES:
	case E1000_DEV_ID_82576_FIBER:
	case E1000_DEV_ID_82576_SERDES:
		/* Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting */
		if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
			adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82576_QUAD_COPPER:
		/* if quad port adapter, disable WoL on all but port A */
		if (global_quad_port_a != 0)
			adapter->eeprom_wol = 0;
		else
			adapter->flags |= IGB_FLAG_QUAD_PORT_A;
		/* Reset for multiple quad port adapters */
		if (++global_quad_port_a == 4)
			global_quad_port_a = 0;
		break;
	}

	/* initialize the wol settings based on the eeprom settings */
	adapter->wol = adapter->eeprom_wol;
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* reset the hardware with the new settings */
	igb_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

#ifdef CONFIG_IGB_DCA
	if (dca_add_requester(&pdev->dev) == 0) {
		adapter->flags |= IGB_FLAG_DCA_ENABLED;
		dev_info(&pdev->dev, "DCA enabled\n");
		igb_setup_dca(adapter);
	}

#endif
	switch (hw->mac.type) {
	case e1000_82576:
		/*
		 * Initialize hardware timer: we keep it running just in case
		 * that some program needs it later on.
		 */
		memset(&adapter->cycles, 0, sizeof(adapter->cycles));
		adapter->cycles.read = igb_read_clock;
		adapter->cycles.mask = CLOCKSOURCE_MASK(64);
		adapter->cycles.mult = 1;
		/*
		 * Scale the NIC clock cycle by a large factor so that
		 * relatively small clock corrections can be added or
		 * subtracted at each clock tick. The drawbacks of a large
		 * factor are a) that the clock register overflows more quickly
		 * (not such a big deal) and b) that the increment per tick has
		 * to fit into 24 bits.  As a result we need to use a shift of
		 * 19 so we can fit a value of 16 into the TIMINCA register.
		 */
		adapter->cycles.shift = IGB_82576_TSYNC_SHIFT;
		wr32(E1000_TIMINCA,
		     (1 << E1000_TIMINCA_16NS_SHIFT) |
		     (16 << IGB_82576_TSYNC_SHIFT));
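
		/* With a shift of 19 the increment programmed above is
		 * 16 << 19 = 0x800000, which just fits the 24-bit increment
		 * field of TIMINCA. */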
		/* Set registers so that rollover occurs soon to test this. */
		wr32(E1000_SYSTIML, 0x00000000);
		wr32(E1000_SYSTIMH, 0xFF800000);
		wrfl();

		timecounter_init(&adapter->clock,
		                 &adapter->cycles,
		                 ktime_to_ns(ktime_get_real()));
		/*
		 * Synchronize our NIC clock against system wall clock. NIC
		 * time stamp reading requires ~3us per sample, each sample
		 * was pretty stable even under load => only require 10
		 * samples for each offset comparison.
		 */
		memset(&adapter->compare, 0, sizeof(adapter->compare));
		adapter->compare.source = &adapter->clock;
		adapter->compare.target = ktime_get_real;
		adapter->compare.num_samples = 10;
		timecompare_update(&adapter->compare, 0);
		break;
	case e1000_82575:
		/* 82575 does not support timesync */
	default:
		break;
	}

	dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
	/* print bus type/speed/width info */
	dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
	         netdev->name,
	         ((hw->bus.speed == e1000_bus_speed_2500)
	          ? "2.5Gb/s" : "unknown"),
	         ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
	          (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
	          (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
	           "unknown"),
	         netdev->dev_addr);

	igb_read_part_num(hw, &part_num);
	dev_info(&pdev->dev, "%s: PBA No: %06x-%03x\n", netdev->name,
	         (part_num >> 8), (part_num & 0xff));

	dev_info(&pdev->dev,
	         "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
	         adapter->msix_entries ? "MSI-X" :
	         (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
	         adapter->num_rx_queues, adapter->num_tx_queues);

	return 0;

err_register:
	igb_release_hw_control(adapter);
err_eeprom:
	if (!igb_check_reset_block(hw))
		igb_reset_phy(hw);

	if (hw->flash_address)
		iounmap(hw->flash_address);
err_sw_init:
	igb_clear_interrupt_scheme(adapter);
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev, pci_select_bars(pdev,
	                             IORESOURCE_MEM));
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
/**
 * igb_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * igb_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit igb_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/* flush_scheduled work may reschedule our watchdog task, so
	 * explicitly disable watchdog tasks from being rescheduled */
	set_bit(__IGB_DOWN, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	flush_scheduled_work();

#ifdef CONFIG_IGB_DCA
	if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
		dev_info(&pdev->dev, "DCA disabled\n");
		dca_remove_requester(&pdev->dev);
		adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
		wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
	}
#endif

	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
	 * would have already happened in close and is redundant. */
	igb_release_hw_control(adapter);

	unregister_netdev(netdev);

	if (!igb_check_reset_block(&adapter->hw))
		igb_reset_phy(&adapter->hw);

	igb_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PCI_IOV
	/* reclaim resources allocated to VFs */
	if (adapter->vf_data) {
		/* disable iov and allow time for transactions to clear */
		pci_disable_sriov(pdev);
		msleep(500);

		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		msleep(100);
		dev_info(&pdev->dev, "IOV Disabled\n");
	}
#endif

	iounmap(hw->hw_addr);
	if (hw->flash_address)
		iounmap(hw->flash_address);
	pci_release_selected_regions(pdev, pci_select_bars(pdev,
	                             IORESOURCE_MEM));

	free_netdev(netdev);

	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}
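
/*
 * igb_probe_vfs() below caps the VF count at 7: the 82576 exposes its 16
 * queues in eight pairs (VF n gets queues n and n + 8, see
 * igb_cache_ring_register() above), and the PF keeps one pair for itself,
 * leaving at most seven pairs for virtual functions.
 */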
/**
 * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
 * @adapter: board private structure to initialize
 *
 * This function initializes the vf specific data storage and then attempts to
 * allocate the VFs.  The reason for ordering it this way is because it is much
 * more expensive time wise to disable SR-IOV than it is to allocate and free
 * the memory for the VFs.
 **/
static void __devinit igb_probe_vfs(struct igb_adapter * adapter)
{
#ifdef CONFIG_PCI_IOV
	struct pci_dev *pdev = adapter->pdev;

	if (adapter->vfs_allocated_count > 7)
		adapter->vfs_allocated_count = 7;

	if (adapter->vfs_allocated_count) {
		adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
		                           sizeof(struct vf_data_storage),
		                           GFP_KERNEL);
		/* if allocation failed then we do not support SR-IOV */
		if (!adapter->vf_data) {
			adapter->vfs_allocated_count = 0;
			dev_err(&pdev->dev, "Unable to allocate memory for VF "
			        "Data Storage\n");
		}
	}

	if (pci_enable_sriov(pdev, adapter->vfs_allocated_count)) {
		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
#endif /* CONFIG_PCI_IOV */
		adapter->vfs_allocated_count = 0;
#ifdef CONFIG_PCI_IOV
	} else {
		unsigned char mac_addr[ETH_ALEN];
		int i;

		dev_info(&pdev->dev, "%d vfs allocated\n",
		         adapter->vfs_allocated_count);
		for (i = 0; i < adapter->vfs_allocated_count; i++) {
			random_ether_addr(mac_addr);
			igb_set_vf_mac(adapter, i, mac_addr);
		}
	}
#endif /* CONFIG_PCI_IOV */
}
/**
 * igb_sw_init - Initialize general software structures (struct igb_adapter)
 * @adapter: board private structure to initialize
 *
 * igb_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int __devinit igb_sw_init(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);

	adapter->tx_ring_count = IGB_DEFAULT_TXD;
	adapter->rx_ring_count = IGB_DEFAULT_RXD;
	adapter->rx_itr_setting = IGB_DEFAULT_ITR;
	adapter->tx_itr_setting = IGB_DEFAULT_ITR;

	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;

#ifdef CONFIG_PCI_IOV
	if (hw->mac.type == e1000_82576)
		adapter->vfs_allocated_count = max_vfs;

#endif /* CONFIG_PCI_IOV */
	/* This call may decrease the number of queues */
	if (igb_init_interrupt_scheme(adapter)) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	igb_probe_vfs(adapter);

	/* Explicitly disable IRQ since the NIC can be in any state. */
	igb_irq_disable(adapter);

	set_bit(__IGB_DOWN, &adapter->state);
	return 0;
}
/**
 * igb_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int igb_open(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;
	int i;

	/* disallow open during test */
	if (test_bit(__IGB_TESTING, &adapter->state))
		return -EBUSY;

	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = igb_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = igb_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	/* e1000_power_up_phy(adapter); */

	/* before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call pci_request_irq, so we have to setup our
	 * clean_rx handler before we do so.  */
	igb_configure(adapter);

	err = igb_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/* From here on the code is the same as igb_up() */
	clear_bit(__IGB_DOWN, &adapter->state);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		napi_enable(&q_vector->napi);
	}

	/* Clear any pending interrupts. */
	rd32(E1000_ICR);

	igb_irq_enable(adapter);

	/* notify VFs that reset has been completed */
	if (adapter->vfs_allocated_count) {
		u32 reg_data = rd32(E1000_CTRL_EXT);
		reg_data |= E1000_CTRL_EXT_PFRSTD;
		wr32(E1000_CTRL_EXT, reg_data);
	}

	netif_tx_start_all_queues(netdev);

	/* Fire a link status change interrupt to start the watchdog. */
	wr32(E1000_ICS, E1000_ICS_LSC);

	return 0;

err_req_irq:
	igb_release_hw_control(adapter);
	/* e1000_power_down_phy(adapter); */
	igb_free_all_rx_resources(adapter);
err_setup_rx:
	igb_free_all_tx_resources(adapter);
err_setup_tx:
	igb_reset(adapter);

	return err;
}
/**
 * igb_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int igb_close(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
	igb_down(adapter);

	igb_free_irq(adapter);

	igb_free_all_tx_resources(adapter);
	igb_free_all_rx_resources(adapter);

	return 0;
}
/**
 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int igb_setup_tx_resources(struct igb_ring *tx_ring)
{
	struct pci_dev *pdev = tx_ring->pdev;
	int size;

	size = sizeof(struct igb_buffer) * tx_ring->count;
	tx_ring->buffer_info = vmalloc(size);
	if (!tx_ring->buffer_info)
		goto err;
	memset(tx_ring->buffer_info, 0, size);

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
	                                     &tx_ring->dma);

	if (!tx_ring->desc)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	return 0;

err:
	vfree(tx_ring->buffer_info);
	dev_err(&pdev->dev,
	        "Unable to allocate memory for the transmit descriptor ring\n");
	return -ENOMEM;
}
/**
 * igb_setup_all_tx_resources - wrapper to allocate Tx resources
 * (Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
{
	int i, err = 0;
	int r_idx;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = igb_setup_tx_resources(&adapter->tx_ring[i]);
		if (err) {
			dev_err(&adapter->pdev->dev,
			        "Allocation for Tx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
				igb_free_tx_resources(&adapter->tx_ring[i]);
			break;
		}
	}

	for (i = 0; i < IGB_MAX_TX_QUEUES; i++) {
		r_idx = i % adapter->num_tx_queues;
		adapter->multi_tx_table[i] = &adapter->tx_ring[r_idx];
	}
	return err;
}
/**
 * igb_setup_tctl - configure the transmit control registers
 * @adapter: Board private structure
 **/
void igb_setup_tctl(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 tctl;

	/* disable queue 0 which is enabled by default on 82575 and 82576 */
	wr32(E1000_TXDCTL(0), 0);

	/* Program the Transmit Control Register */
	tctl = rd32(E1000_TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);

	igb_config_collision_dist(hw);

	/* Enable transmits */
	tctl |= E1000_TCTL_EN;

	wr32(E1000_TCTL, tctl);
}
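/*
 * Editorial illustration (values assumed, check e1000_defines.h): with
 * E1000_COLLISION_THRESHOLD = 15 and E1000_CT_SHIFT = 4, the expression
 * above places 0xf0 into the TCTL collision-threshold field, i.e. retry
 * a half-duplex frame up to 15 times before giving up.
 */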
/**
 * igb_configure_tx_ring - Configure transmit ring after Reset
 * @adapter: board private structure
 * @ring: tx ring to configure
 *
 * Configure a transmit ring after a reset.
 **/
void igb_configure_tx_ring(struct igb_adapter *adapter,
                           struct igb_ring *ring)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 txdctl;
	u64 tdba = ring->dma;
	int reg_idx = ring->reg_idx;

	/* disable the queue */
	txdctl = rd32(E1000_TXDCTL(reg_idx));
	wr32(E1000_TXDCTL(reg_idx),
	     txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
	wrfl();
	mdelay(10);

	wr32(E1000_TDLEN(reg_idx),
	     ring->count * sizeof(union e1000_adv_tx_desc));
	wr32(E1000_TDBAL(reg_idx),
	     tdba & 0x00000000ffffffffULL);
	wr32(E1000_TDBAH(reg_idx), tdba >> 32);

	ring->head = hw->hw_addr + E1000_TDH(reg_idx);
	ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
	writel(0, ring->head);
	writel(0, ring->tail);

	txdctl |= IGB_TX_PTHRESH;
	txdctl |= IGB_TX_HTHRESH << 8;
	txdctl |= IGB_TX_WTHRESH << 16;

	txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
	wr32(E1000_TXDCTL(reg_idx), txdctl);
}
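/*
 * Editorial illustration: TXDCTL packs the three descriptor thresholds
 * into one register -- PTHRESH in the low byte, HTHRESH shifted by 8 and
 * WTHRESH shifted by 16, as above.  Assuming, say, IGB_TX_PTHRESH = 8,
 * IGB_TX_HTHRESH = 1 and IGB_TX_WTHRESH = 1 (hypothetical values; see
 * igb.h for the real ones), the write would be 0x00010108 plus the
 * queue-enable bit.
 */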
/**
 * igb_configure_tx - Configure transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void igb_configure_tx(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igb_configure_tx_ring(adapter, &adapter->tx_ring[i]);
}
/**
 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int igb_setup_rx_resources(struct igb_ring *rx_ring)
{
	struct pci_dev *pdev = rx_ring->pdev;
	int size, desc_len;

	size = sizeof(struct igb_buffer) * rx_ring->count;
	rx_ring->buffer_info = vmalloc(size);
	if (!rx_ring->buffer_info)
		goto err;
	memset(rx_ring->buffer_info, 0, size);

	desc_len = sizeof(union e1000_adv_rx_desc);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * desc_len;
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
	                                     &rx_ring->dma);

	if (!rx_ring->desc)
		goto err;

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;

err:
	vfree(rx_ring->buffer_info);
	dev_err(&pdev->dev, "Unable to allocate memory for "
		"the receive descriptor ring\n");
	return -ENOMEM;
}
/**
 * igb_setup_all_rx_resources - wrapper to allocate Rx resources
 *				  (Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = igb_setup_rx_resources(&adapter->rx_ring[i]);
		if (err) {
			dev_err(&adapter->pdev->dev,
				"Allocation for Rx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
				igb_free_rx_resources(&adapter->rx_ring[i]);
			break;
		}
	}

	return err;
}
/**
 * igb_setup_mrqc - configure the multiple receive queue control registers
 * @adapter: Board private structure
 **/
static void igb_setup_mrqc(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 mrqc, rxcsum;
	u32 j, num_rx_queues, shift = 0, shift2 = 0;
	union e1000_reta {
		u32 dword;
		u8  bytes[4];
	} reta;
	static const u8 rsshash[40] = {
		0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
		0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
		0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
		0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };

	/* Fill out hash function seeds */
	for (j = 0; j < 10; j++) {
		u32 rsskey = rsshash[(j * 4)];
		rsskey |= rsshash[(j * 4) + 1] << 8;
		rsskey |= rsshash[(j * 4) + 2] << 16;
		rsskey |= rsshash[(j * 4) + 3] << 24;
		array_wr32(E1000_RSSRK(0), j, rsskey);
	}
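	/*
	 * Editorial illustration: each RSSRK register takes four key bytes
	 * in little-endian order, so for j = 0 the bytes 0x6d 0x5a 0x56 0xda
	 * above become 0x6d | 0x5a << 8 | 0x56 << 16 | 0xda << 24,
	 * i.e. rsskey = 0xda565a6d.
	 */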
	num_rx_queues = adapter->num_rx_queues;

	if (adapter->vfs_allocated_count) {
		/* 82575 and 82576 supports 2 RSS queues for VMDq */
		switch (hw->mac.type) {
		case e1000_82576:
			shift = 3;
			num_rx_queues = 2;
			break;
		case e1000_82575:
			shift = 2;
			shift2 = 6;
		default:
			break;
		}
	} else {
		if (hw->mac.type == e1000_82575)
			shift = 6;
	}

	for (j = 0; j < (32 * 4); j++) {
		reta.bytes[j & 3] = (j % num_rx_queues) << shift;
		if (shift2)
			reta.bytes[j & 3] |= num_rx_queues << shift2;
		if ((j & 3) == 3)
			wr32(E1000_RETA(j >> 2), reta.dword);
	}

	/*
	 * Disable raw packet checksumming so that RSS hash is placed in
	 * descriptor on writeback.  No need to enable TCP/UDP/IP checksum
	 * offloads as they are enabled by default
	 */
	rxcsum = rd32(E1000_RXCSUM);
	rxcsum |= E1000_RXCSUM_PCSD;

	if (adapter->hw.mac.type >= e1000_82576)
		/* Enable Receive Checksum Offload for SCTP */
		rxcsum |= E1000_RXCSUM_CRCOFL;

	/* Don't need to set TUOFL or IPOFL, they default to 1 */
	wr32(E1000_RXCSUM, rxcsum);

	/* If VMDq is enabled then we set the appropriate mode for that, else
	 * we default to RSS so that an RSS hash is calculated per packet even
	 * if we are only using one queue */
	if (adapter->vfs_allocated_count) {
		if (hw->mac.type > e1000_82575) {
			/* Set the default pool for the PF's first queue */
			u32 vtctl = rd32(E1000_VT_CTL);
			vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
				   E1000_VT_CTL_DISABLE_DEF_POOL);
			vtctl |= adapter->vfs_allocated_count <<
				E1000_VT_CTL_DEFAULT_POOL_SHIFT;
			wr32(E1000_VT_CTL, vtctl);
		}
		if (adapter->num_rx_queues > 1)
			mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
		else
			mrqc = E1000_MRQC_ENABLE_VMDQ;
	} else {
		mrqc = E1000_MRQC_ENABLE_RSS_4Q;
	}
	igb_vmm_control(adapter);

	mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
		 E1000_MRQC_RSS_FIELD_IPV4_TCP);
	mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 |
		 E1000_MRQC_RSS_FIELD_IPV6_TCP);
	mrqc |= (E1000_MRQC_RSS_FIELD_IPV4_UDP |
		 E1000_MRQC_RSS_FIELD_IPV6_UDP);
	mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
		 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);

	wr32(E1000_MRQC, mrqc);
}
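/*
 * Editorial illustration: the RETA loop above fills all 128 redirection
 * table entries, four packed per 32-bit register write, assigning entry j
 * to queue (j % num_rx_queues).  With four RSS queues the table simply
 * repeats 0, 1, 2, 3, ...; the per-MAC 'shift' moves the queue index into
 * the bit position the hardware expects for that silicon.
 */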
/**
 * igb_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 **/
void igb_setup_rctl(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;

	rctl = rd32(E1000_RCTL);

	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
	rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);

	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
		(hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/*
	 * enable stripping of CRC. It's unlikely this will break BMC
	 * redirection as it did with e1000. Newer features require
	 * that the HW strips the CRC.
	 */
	rctl |= E1000_RCTL_SECRC;

	/*
	 * disable store bad packets and clear size bits.
	 */
	rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);

	/* enable LPE to prevent packets larger than max_frame_size */
	rctl |= E1000_RCTL_LPE;

	/* disable queue 0 to prevent tail write w/o re-config */
	wr32(E1000_RXDCTL(0), 0);

	/* Attention!!!  For SR-IOV PF driver operations you must enable
	 * queue drop for all VF and PF queues to prevent head of line blocking
	 * if an un-trusted VF does not provide descriptors to hardware.
	 */
	if (adapter->vfs_allocated_count) {
		/* set all queue drop enable bits */
		wr32(E1000_QDE, ALL_QUEUES);
	}

	wr32(E1000_RCTL, rctl);
}
static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
                                   int vfn)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vmolr;

	/* if it isn't the PF check to see if VFs are enabled and
	 * increase the size to support vlan tags */
	if (vfn < adapter->vfs_allocated_count &&
	    adapter->vf_data[vfn].vlans_enabled)
		size += VLAN_TAG_SIZE;

	vmolr = rd32(E1000_VMOLR(vfn));
	vmolr &= ~E1000_VMOLR_RLPML_MASK;
	vmolr |= size | E1000_VMOLR_LPE;
	wr32(E1000_VMOLR(vfn), vmolr);

	return 0;
}
/**
 * igb_rlpml_set - set maximum receive packet size
 * @adapter: board private structure
 *
 * Configure maximum receivable packet size.
 **/
static void igb_rlpml_set(struct igb_adapter *adapter)
{
	u32 max_frame_size = adapter->max_frame_size;
	struct e1000_hw *hw = &adapter->hw;
	u16 pf_id = adapter->vfs_allocated_count;

	if (adapter->vlgrp)
		max_frame_size += VLAN_TAG_SIZE;

	/* if vfs are enabled we set RLPML to the largest possible request
	 * size and set the VMOLR RLPML to the size we need */
	if (pf_id) {
		igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
		max_frame_size = MAX_JUMBO_FRAME_SIZE;
	}

	wr32(E1000_RLPML, max_frame_size);
}
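/*
 * Editorial illustration: for a standard 1500-byte MTU, max_frame_size is
 * 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) = 1518, or 1522 once VLAN tagging
 * adds VLAN_TAG_SIZE.  With VFs enabled the PF instead programs RLPML to
 * MAX_JUMBO_FRAME_SIZE and relies on each pool's VMOLR.RLPML, as set by
 * igb_set_vf_rlpml() above, for the per-VF limit.
 */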
static inline void igb_set_vmolr(struct igb_adapter *adapter, int vfn)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vmolr;

	/*
	 * This register exists only on 82576 and newer so if we are older then
	 * we should exit and do nothing
	 */
	if (hw->mac.type < e1000_82576)
		return;

	vmolr = rd32(E1000_VMOLR(vfn));
	vmolr |= E1000_VMOLR_AUPE |        /* Accept untagged packets */
	         E1000_VMOLR_STRVLAN;      /* Strip vlan tags */

	/* clear all bits that might not be set */
	vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);

	if (adapter->num_rx_queues > 1 && vfn == adapter->vfs_allocated_count)
		vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
	/*
	 * for VMDq only allow the VFs and pool 0 to accept broadcast and
	 * multicast packets
	 */
	if (vfn <= adapter->vfs_allocated_count)
		vmolr |= E1000_VMOLR_BAM;  /* Accept broadcast */

	wr32(E1000_VMOLR(vfn), vmolr);
}
/**
 * igb_configure_rx_ring - Configure a receive ring after Reset
 * @adapter: board private structure
 * @ring: receive ring to be configured
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
void igb_configure_rx_ring(struct igb_adapter *adapter,
                           struct igb_ring *ring)
{
	struct e1000_hw *hw = &adapter->hw;
	u64 rdba = ring->dma;
	int reg_idx = ring->reg_idx;
	u32 srrctl, rxdctl;

	/* disable the queue */
	rxdctl = rd32(E1000_RXDCTL(reg_idx));
	wr32(E1000_RXDCTL(reg_idx),
	     rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);

	/* Set DMA base address registers */
	wr32(E1000_RDBAL(reg_idx),
	     rdba & 0x00000000ffffffffULL);
	wr32(E1000_RDBAH(reg_idx), rdba >> 32);
	wr32(E1000_RDLEN(reg_idx),
	     ring->count * sizeof(union e1000_adv_rx_desc));

	/* initialize head and tail */
	ring->head = hw->hw_addr + E1000_RDH(reg_idx);
	ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
	writel(0, ring->head);
	writel(0, ring->tail);

	/* set descriptor configuration */
	if (ring->rx_buffer_len < IGB_RXBUFFER_1024) {
		srrctl = ALIGN(ring->rx_buffer_len, 64) <<
		         E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
		srrctl |= IGB_RXBUFFER_16384 >>
		          E1000_SRRCTL_BSIZEPKT_SHIFT;
#else
		srrctl |= (PAGE_SIZE / 2) >>
		          E1000_SRRCTL_BSIZEPKT_SHIFT;
#endif
		srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
	} else {
		srrctl = ALIGN(ring->rx_buffer_len, 1024) >>
		         E1000_SRRCTL_BSIZEPKT_SHIFT;
		srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
	}

	wr32(E1000_SRRCTL(reg_idx), srrctl);

	/* set filtering for VMDQ pools */
	igb_set_vmolr(adapter, reg_idx & 0x7);

	/* enable receive descriptor fetching */
	rxdctl = rd32(E1000_RXDCTL(reg_idx));
	rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
	rxdctl &= 0xFFF00000;
	rxdctl |= IGB_RX_PTHRESH;
	rxdctl |= IGB_RX_HTHRESH << 8;
	rxdctl |= IGB_RX_WTHRESH << 16;
	wr32(E1000_RXDCTL(reg_idx), rxdctl);
}
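/*
 * Editorial illustration (shift values assumed, check e1000_82575.h): in
 * the header-split branch above, with rx_buffer_len = 128 and a 4K page,
 * SRRCTL advertises a 128-byte header buffer (ALIGN(128, 64) = 128 placed
 * in the header-size field) plus a half-page packet buffer counted in
 * 1 KB units (2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT = 2), with descriptor
 * type HDR_SPLIT_ALWAYS; larger single buffers instead take the one-buffer
 * branch and report their whole size in 1 KB units.
 */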
/**
 * igb_configure_rx - Configure receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void igb_configure_rx(struct igb_adapter *adapter)
{
	int i;

	/* set UTA to appropriate mode */
	igb_set_uta(adapter);

	/* set the correct pool for the PF default MAC address in entry 0 */
	igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
	                 adapter->vfs_allocated_count);

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	for (i = 0; i < adapter->num_rx_queues; i++)
		igb_configure_rx_ring(adapter, &adapter->rx_ring[i]);
}
/**
 * igb_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void igb_free_tx_resources(struct igb_ring *tx_ring)
{
	igb_clean_tx_ring(tx_ring);

	vfree(tx_ring->buffer_info);
	tx_ring->buffer_info = NULL;

	pci_free_consistent(tx_ring->pdev, tx_ring->size,
	                    tx_ring->desc, tx_ring->dma);

	tx_ring->desc = NULL;
}
/**
 * igb_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
static void igb_free_all_tx_resources(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igb_free_tx_resources(&adapter->tx_ring[i]);
}
void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring,
                                    struct igb_buffer *buffer_info)
{
	buffer_info->dma = 0;
	if (buffer_info->skb) {
		skb_dma_unmap(&tx_ring->pdev->dev,
		              buffer_info->skb,
		              DMA_TO_DEVICE);
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
	buffer_info->time_stamp = 0;
	/* buffer_info must be completely set up in the transmit path */
}
/**
 * igb_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/
static void igb_clean_tx_ring(struct igb_ring *tx_ring)
{
	struct igb_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	if (!tx_ring->buffer_info)
		return;
	/* Free all the Tx ring sk_buffs */

	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
	}

	size = sizeof(struct igb_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	writel(0, tx_ring->head);
	writel(0, tx_ring->tail);
}
/**
 * igb_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igb_clean_tx_ring(&adapter->tx_ring[i]);
}
/**
 * igb_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void igb_free_rx_resources(struct igb_ring *rx_ring)
{
	igb_clean_rx_ring(rx_ring);

	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;

	pci_free_consistent(rx_ring->pdev, rx_ring->size,
	                    rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}
/**
 * igb_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
static void igb_free_all_rx_resources(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		igb_free_rx_resources(&adapter->rx_ring[i]);
}
/**
 * igb_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 **/
static void igb_clean_rx_ring(struct igb_ring *rx_ring)
{
	struct igb_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	if (!rx_ring->buffer_info)
		return;
	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if (buffer_info->dma) {
			pci_unmap_single(rx_ring->pdev,
			                 buffer_info->dma,
			                 rx_ring->rx_buffer_len,
			                 PCI_DMA_FROMDEVICE);
			buffer_info->dma = 0;
		}

		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}
		if (buffer_info->page_dma) {
			pci_unmap_page(rx_ring->pdev,
			               buffer_info->page_dma,
			               PAGE_SIZE / 2,
			               PCI_DMA_FROMDEVICE);
			buffer_info->page_dma = 0;
		}
		if (buffer_info->page) {
			put_page(buffer_info->page);
			buffer_info->page = NULL;
			buffer_info->page_offset = 0;
		}
	}

	size = sizeof(struct igb_buffer) * rx_ring->count;
	memset(rx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	writel(0, rx_ring->head);
	writel(0, rx_ring->tail);
}
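/*
 * Editorial note: the three unmap cases above mirror the receive buffer
 * layout used by this driver -- 'dma' backs the skb header buffer mapped
 * with pci_unmap_single()'s counterpart, while 'page_dma'/'page' back the
 * half-page data buffer used when header split is enabled in
 * igb_configure_rx_ring().
 */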
/**
 * igb_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		igb_clean_rx_ring(&adapter->rx_ring[i]);
}
/**
 * igb_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int igb_set_mac(struct net_device *netdev, void *p)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	/* set the correct pool for the new PF MAC address in entry 0 */
	igb_rar_set_qsel(adapter, hw->mac.addr, 0,
	                 adapter->vfs_allocated_count);

	return 0;
}
/**
 * igb_write_mc_addr_list - write multicast addresses to MTA
 * @netdev: network interface device structure
 *
 * Writes multicast address list to the MTA hash table.
 * Returns: -ENOMEM on failure
 *          0 on no addresses written
 *          X on writing X addresses to MTA
 **/
static int igb_write_mc_addr_list(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct dev_mc_list *mc_ptr = netdev->mc_list;
	u8  *mta_list;
	u32 vmolr = 0;
	int i;

	if (!netdev->mc_count) {
		/* nothing to program, so clear mc list */
		igb_update_mc_addr_list(hw, NULL, 0);
		igb_restore_vf_multicasts(adapter);
		return 0;
	}

	mta_list = kzalloc(netdev->mc_count * 6, GFP_ATOMIC);
	if (!mta_list)
		return -ENOMEM;

	/* set vmolr receive overflow multicast bit */
	vmolr |= E1000_VMOLR_ROMPE;

	/* The shared function expects a packed array of only addresses. */
	mc_ptr = netdev->mc_list;

	for (i = 0; i < netdev->mc_count; i++) {
		if (!mc_ptr)
			break;
		memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
		mc_ptr = mc_ptr->next;
	}
	igb_update_mc_addr_list(hw, mta_list, i);
	kfree(mta_list);

	return netdev->mc_count;
}
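/*
 * Editorial illustration: mta_list is a packed array of 6-byte MAC
 * addresses (hence the kzalloc(mc_count * 6) above), so three multicast
 * entries occupy exactly 18 bytes, copied ETH_ALEN bytes at a time.
 */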
/**
 * igb_write_uc_addr_list - write unicast addresses to RAR table
 * @netdev: network interface device structure
 *
 * Writes unicast address list to the RAR table.
 * Returns: -ENOMEM on failure/insufficient address space
 *          0 on no addresses written
 *          X on writing X addresses to the RAR table
 **/
static int igb_write_uc_addr_list(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	unsigned int vfn = adapter->vfs_allocated_count;
	unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
	int count = 0;

	/* return ENOMEM indicating insufficient memory for addresses */
	if (netdev->uc.count > rar_entries)
		return -ENOMEM;

	if (netdev->uc.count && rar_entries) {
		struct netdev_hw_addr *ha;
		list_for_each_entry(ha, &netdev->uc.list, list) {
			if (!rar_entries)
				break;
			igb_rar_set_qsel(adapter, ha->addr,
			                 rar_entries--,
			                 vfn);
			count++;
		}
	}
	/* write the addresses in reverse order to avoid write combining */
	for (; rar_entries > 0 ; rar_entries--) {
		wr32(E1000_RAH(rar_entries), 0);
		wr32(E1000_RAL(rar_entries), 0);
	}
	wrfl();

	return count;
}
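/*
 * Editorial illustration: rar_entries is computed above as
 * hw->mac.rar_entry_count - (vfn + 1) because entry 0 carries the PF MAC
 * and one entry is held back per VF.  On an adapter with 24 RAR slots and
 * 7 VFs (hypothetical numbers), 16 slots remain for extra unicast filters
 * before igb_set_rx_mode() must fall back to unicast promiscuous mode.
 */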
/**
 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_rx_mode entry point is called whenever the unicast or multicast
 * address lists or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper unicast, multicast,
 * promiscuous mode, and all-multi behavior.
 **/
static void igb_set_rx_mode(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	unsigned int vfn = adapter->vfs_allocated_count;
	u32 rctl, vmolr = 0;
	int count;

	/* Check for Promiscuous and All Multicast modes */
	rctl = rd32(E1000_RCTL);

	/* clear the affected bits */
	rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);

	if (netdev->flags & IFF_PROMISC) {
		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
		vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			rctl |= E1000_RCTL_MPE;
			vmolr |= E1000_VMOLR_MPME;
		} else {
			/*
			 * Write addresses to the MTA, if the attempt fails
			 * then we should just turn on promiscuous mode so
			 * that we can at least receive multicast traffic
			 */
			count = igb_write_mc_addr_list(netdev);
			if (count < 0) {
				rctl |= E1000_RCTL_MPE;
				vmolr |= E1000_VMOLR_MPME;
			} else if (count) {
				vmolr |= E1000_VMOLR_ROMPE;
			}
		}
		/*
		 * Write addresses to available RAR registers, if there is not
		 * sufficient space to store all the addresses then enable
		 * unicast promiscuous mode
		 */
		count = igb_write_uc_addr_list(netdev);
		if (count < 0) {
			rctl |= E1000_RCTL_UPE;
			vmolr |= E1000_VMOLR_ROPE;
		}
		rctl |= E1000_RCTL_VFE;
	}
	wr32(E1000_RCTL, rctl);

	/*
	 * In order to support SR-IOV and eventually VMDq it is necessary to set
	 * the VMOLR to enable the appropriate modes.  Without this workaround
	 * we will have issues with VLAN tag stripping not being done for frames
	 * that are only arriving because we are the default pool
	 */
	if (hw->mac.type < e1000_82576)
		return;

	vmolr |= rd32(E1000_VMOLR(vfn)) &
	         ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
	wr32(E1000_VMOLR(vfn), vmolr);
	igb_restore_vf_multicasts(adapter);
}
/* Need to wait a few seconds after link up to get diagnostic information from
 * the phy */
static void igb_update_phy_info(unsigned long data)
{
	struct igb_adapter *adapter = (struct igb_adapter *) data;
	igb_get_phy_info(&adapter->hw);
}
/**
 * igb_has_link - check shared code for link and determine up/down
 * @adapter: pointer to driver private info
 **/
static bool igb_has_link(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	bool link_active = false;
	s32 ret_val = 0;

	/* get_link_status is set on LSC (link status) interrupt or
	 * rx sequence error interrupt.  get_link_status will stay
	 * false until the e1000_check_for_link establishes link
	 * for copper adapters ONLY
	 */
	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		if (hw->mac.get_link_status) {
			ret_val = hw->mac.ops.check_for_link(hw);
			link_active = !hw->mac.get_link_status;
		} else {
			link_active = true;
		}
		break;
	case e1000_media_type_internal_serdes:
		ret_val = hw->mac.ops.check_for_link(hw);
		link_active = hw->mac.serdes_has_link;
		break;
	case e1000_media_type_unknown:
	default:
		break;
	}

	return link_active;
}
/**
 * igb_watchdog - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void igb_watchdog(unsigned long data)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	/* Do the rest outside of interrupt context */
	schedule_work(&adapter->watchdog_task);
}

static void igb_watchdog_task(struct work_struct *work)
{
	struct igb_adapter *adapter = container_of(work,
	                                           struct igb_adapter,
	                                           watchdog_task);
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct igb_ring *tx_ring = adapter->tx_ring;
	u32 link;
	int i;

	link = igb_has_link(adapter);
	if ((netif_carrier_ok(netdev)) && link)
		goto link_up;

	if (link) {
		if (!netif_carrier_ok(netdev)) {
			u32 ctrl;
			hw->mac.ops.get_speed_and_duplex(&adapter->hw,
			                                 &adapter->link_speed,
			                                 &adapter->link_duplex);

			ctrl = rd32(E1000_CTRL);
			/* Links status message must follow this format */
			printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, "
			       "Flow Control: %s\n",
			       netdev->name,
			       adapter->link_speed,
			       adapter->link_duplex == FULL_DUPLEX ?
			       "Full Duplex" : "Half Duplex",
			       ((ctrl & E1000_CTRL_TFCE) && (ctrl &
			       E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
			       E1000_CTRL_RFCE) ? "RX" : ((ctrl &
			       E1000_CTRL_TFCE) ? "TX" : "None")));

			/* tweak tx_queue_len according to speed/duplex and
			 * adjust the timeout factor */
			netdev->tx_queue_len = adapter->tx_queue_len;
			adapter->tx_timeout_factor = 1;
			switch (adapter->link_speed) {
			case SPEED_10:
				netdev->tx_queue_len = 10;
				adapter->tx_timeout_factor = 14;
				break;
			case SPEED_100:
				netdev->tx_queue_len = 100;
				/* maybe add some timeout factor ? */
				break;
			}

			netif_carrier_on(netdev);

			igb_ping_all_vfs(adapter);

			/* link state has changed, schedule phy info update */
			if (!test_bit(__IGB_DOWN, &adapter->state))
				mod_timer(&adapter->phy_info_timer,
				          round_jiffies(jiffies + 2 * HZ));
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			adapter->link_speed = 0;
			adapter->link_duplex = 0;
			/* Links status message must follow this format */
			printk(KERN_INFO "igb: %s NIC Link is Down\n",
			       netdev->name);
			netif_carrier_off(netdev);

			igb_ping_all_vfs(adapter);

			/* link state has changed, schedule phy info update */
			if (!test_bit(__IGB_DOWN, &adapter->state))
				mod_timer(&adapter->phy_info_timer,
				          round_jiffies(jiffies + 2 * HZ));
		}
	}

link_up:
	igb_update_stats(adapter);

	hw->mac.tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
	adapter->tpt_old = adapter->stats.tpt;
	hw->mac.collision_delta = adapter->stats.colc - adapter->colc_old;
	adapter->colc_old = adapter->stats.colc;

	adapter->gorc = adapter->stats.gorc - adapter->gorc_old;
	adapter->gorc_old = adapter->stats.gorc;
	adapter->gotc = adapter->stats.gotc - adapter->gotc_old;
	adapter->gotc_old = adapter->stats.gotc;

	igb_update_adaptive(&adapter->hw);

	if (!netif_carrier_ok(netdev)) {
		if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context). */
			adapter->tx_timeout_count++;
			schedule_work(&adapter->reset_task);
			/* return immediately since reset is imminent */
			return;
		}
	}

	/* Cause software interrupt to ensure rx ring is cleaned */
	if (adapter->msix_entries) {
		u32 eics = 0;
		for (i = 0; i < adapter->num_q_vectors; i++) {
			struct igb_q_vector *q_vector = adapter->q_vector[i];
			eics |= q_vector->eims_value;
		}
		wr32(E1000_EICS, eics);
	} else {
		wr32(E1000_ICS, E1000_ICS_RXDMT0);
	}

	/* Force detection of hung controller every watchdog period */
	tx_ring->detect_tx_hung = true;

	/* Reset the timer */
	if (!test_bit(__IGB_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer,
		          round_jiffies(jiffies + 2 * HZ));
}
enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};
/**
 * igb_update_ring_itr - update the dynamic ITR value based on packet size
 *
 * Stores a new ITR value based strictly on packet size.  This
 * algorithm is less sophisticated than that used in igb_update_itr,
 * due to the difficulty of synchronizing statistics across multiple
 * receive rings.  The divisors and thresholds used by this function
 * were determined based on theoretical maximum wire speed and testing
 * data, in order to minimize response time while increasing bulk
 * throughput.
 * This functionality is controlled by the InterruptThrottleRate module
 * parameter (see igb_param.c)
 * NOTE:  This function is called only when operating in a multiqueue
 *        receive environment.
 * @q_vector: pointer to q_vector
 **/
static void igb_update_ring_itr(struct igb_q_vector *q_vector)
{
	int new_val = q_vector->itr_val;
	int avg_wire_size = 0;
	struct igb_adapter *adapter = q_vector->adapter;

	/* For non-gigabit speeds, just fix the interrupt rate at 4000
	 * ints/sec - ITR timer value of 976 ticks.
	 */
	if (adapter->link_speed != SPEED_1000) {
		new_val = 976;
		goto set_itr_val;
	}

	if (q_vector->rx_ring && q_vector->rx_ring->total_packets) {
		struct igb_ring *ring = q_vector->rx_ring;
		avg_wire_size = ring->total_bytes / ring->total_packets;
	}

	if (q_vector->tx_ring && q_vector->tx_ring->total_packets) {
		struct igb_ring *ring = q_vector->tx_ring;
		avg_wire_size = max_t(u32, avg_wire_size,
		                      (ring->total_bytes /
		                       ring->total_packets));
	}

	/* if avg_wire_size isn't set no work was done */
	if (!avg_wire_size)
		goto clear_counts;

	/* Add 24 bytes to size to account for CRC, preamble, and gap */
	avg_wire_size += 24;

	/* Don't starve jumbo frames */
	avg_wire_size = min(avg_wire_size, 3000);

	/* Give a little boost to mid-size frames */
	if ((avg_wire_size > 300) && (avg_wire_size < 1200))
		new_val = avg_wire_size / 3;
	else
		new_val = avg_wire_size / 2;

set_itr_val:
	if (new_val != q_vector->itr_val) {
		q_vector->itr_val = new_val;
		q_vector->set_itr = 1;
	}
clear_counts:
	if (q_vector->rx_ring) {
		q_vector->rx_ring->total_bytes = 0;
		q_vector->rx_ring->total_packets = 0;
	}
	if (q_vector->tx_ring) {
		q_vector->tx_ring->total_bytes = 0;
		q_vector->tx_ring->total_packets = 0;
	}
}
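/*
 * Editorial illustration: for an average wire size of 576 bytes, the code
 * above computes 576 + 24 = 600, which lands in the mid-size boost range,
 * so new_val = 600 / 3 = 200.  Assuming the usual 82575/82576 ITR
 * granularity of 256 ns, that is roughly a 51 us interval, i.e. about
 * 19,500 interrupts per second.
 */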
/**
 * igb_update_itr - update the dynamic ITR value based on statistics
 *      Stores a new ITR value based on packets and byte
 *      counts during the last interrupt.  The advantage of per interrupt
 *      computation is faster updates and more accurate ITR for the current
 *      traffic pattern.  Constants in this function were computed
 *      based on theoretical maximum wire speed and thresholds were set based
 *      on testing data as well as attempting to minimize response time
 *      while increasing bulk throughput.
 *      this functionality is controlled by the InterruptThrottleRate module
 *      parameter (see igb_param.c)
 *      NOTE:  These calculations are only valid when operating in a single-
 *             queue environment.
 * @adapter: pointer to adapter
 * @itr_setting: current q_vector->itr_val
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 **/
static unsigned int igb_update_itr(struct igb_adapter *adapter, u16 itr_setting,
                                   int packets, int bytes)
{
	unsigned int retval = itr_setting;

	if (packets == 0)
		goto update_itr_done;

	switch (itr_setting) {
	case lowest_latency:
		/* handle TSO and jumbo frames */
		if (bytes/packets > 8000)
			retval = bulk_latency;
		else if ((packets < 5) && (bytes > 512))
			retval = low_latency;
		break;
	case low_latency:  /* 50 usec aka 20000 ints/s */
		if (bytes > 10000) {
			/* this if handles the TSO accounting */
			if (bytes/packets > 8000) {
				retval = bulk_latency;
			} else if ((packets < 10) || ((bytes/packets) > 1200)) {
				retval = bulk_latency;
			} else if ((packets > 35)) {
				retval = lowest_latency;
			}
		} else if (bytes/packets > 2000) {
			retval = bulk_latency;
		} else if (packets <= 2 && bytes < 512) {
			retval = lowest_latency;
		}
		break;
	case bulk_latency: /* 250 usec aka 4000 ints/s */
		if (bytes > 25000) {
			if (packets > 35)
				retval = low_latency;
		} else if (bytes < 1500) {
			retval = low_latency;
		}
		break;
	}

update_itr_done:
	return retval;
}
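/*
 * Editorial summary of the transitions coded above:
 *
 *	lowest_latency -> bulk_latency    bytes/packet > 8000
 *	lowest_latency -> low_latency     packets < 5 && bytes > 512
 *	low_latency    -> bulk_latency    heavy byte counts or large frames
 *	low_latency    -> lowest_latency  many small packets, or <= 2 tiny ones
 *	bulk_latency   -> low_latency     traffic tapering off
 */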
static void igb_set_itr(struct igb_adapter *adapter)
{
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	u16 current_itr;
	u32 new_itr = q_vector->itr_val;

	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
	if (adapter->link_speed != SPEED_1000) {
		current_itr = 0;
		new_itr = 4000;
		goto set_itr_now;
	}

	adapter->rx_itr = igb_update_itr(adapter,
	                                 adapter->rx_itr,
	                                 adapter->rx_ring->total_packets,
	                                 adapter->rx_ring->total_bytes);

	adapter->tx_itr = igb_update_itr(adapter,
	                                 adapter->tx_itr,
	                                 adapter->tx_ring->total_packets,
	                                 adapter->tx_ring->total_bytes);
	current_itr = max(adapter->rx_itr, adapter->tx_itr);

	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (adapter->rx_itr_setting == 3 && current_itr == lowest_latency)
		current_itr = low_latency;

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = 56;  /* aka 70,000 ints/sec */
		break;
	case low_latency:
		new_itr = 196; /* aka 20,000 ints/sec */
		break;
	case bulk_latency:
		new_itr = 980; /* aka 4,000 ints/sec */
		break;
	default:
		break;
	}

set_itr_now:
	adapter->rx_ring->total_bytes = 0;
	adapter->rx_ring->total_packets = 0;
	adapter->tx_ring->total_bytes = 0;
	adapter->tx_ring->total_packets = 0;

	if (new_itr != q_vector->itr_val) {
		/* this attempts to bias the interrupt rate towards Bulk
		 * by adding intermediate steps when interrupt rate is
		 * increasing */
		new_itr = new_itr > q_vector->itr_val ?
		          max((new_itr * q_vector->itr_val) /
		              (new_itr + (q_vector->itr_val >> 2)),
		              new_itr) :
		          new_itr;
		/* Don't write the value here; it resets the adapter's
		 * internal timer, and causes us to delay far longer than
		 * we should between interrupts.  Instead, we write the ITR
		 * value at the beginning of the next interrupt so the timing
		 * ends up being correct.
		 */
		q_vector->itr_val = new_itr;
		q_vector->set_itr = 1;
	}

	return;
}
#define IGB_TX_FLAGS_CSUM		0x00000001
#define IGB_TX_FLAGS_VLAN		0x00000002
#define IGB_TX_FLAGS_TSO		0x00000004
#define IGB_TX_FLAGS_IPV4		0x00000008
#define IGB_TX_FLAGS_TSTAMP		0x00000010
#define IGB_TX_FLAGS_VLAN_MASK		0xffff0000
#define IGB_TX_FLAGS_VLAN_SHIFT		16
static inline int igb_tso_adv(struct igb_ring *tx_ring,
                              struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
{
	struct e1000_adv_tx_context_desc *context_desc;
	unsigned int i;
	int err;
	struct igb_buffer *buffer_info;
	u32 info = 0, tu_cmd = 0;
	u32 mss_l4len_idx, l4len;
	*hdr_len = 0;

	if (skb_header_cloned(skb)) {
		err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (err)
			return err;
	}

	l4len = tcp_hdrlen(skb);
	*hdr_len += l4len;

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);
		iph->tot_len = 0;
		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
		                                         iph->daddr, 0,
		                                         IPPROTO_TCP,
		                                         0);
	} else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
		                                       &ipv6_hdr(skb)->daddr,
		                                       0, IPPROTO_TCP, 0);
	}

	i = tx_ring->next_to_use;

	buffer_info = &tx_ring->buffer_info[i];
	context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
	/* VLAN MACLEN IPLEN */
	if (tx_flags & IGB_TX_FLAGS_VLAN)
		info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
	info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
	*hdr_len += skb_network_offset(skb);
	info |= skb_network_header_len(skb);
	*hdr_len += skb_network_header_len(skb);
	context_desc->vlan_macip_lens = cpu_to_le32(info);

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);

	if (skb->protocol == htons(ETH_P_IP))
		tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
	tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;

	context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);

	/* MSS L4LEN IDX */
	mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
	mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);

	/* For 82575, context index must be unique per ring. */
	if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
		mss_l4len_idx |= tx_ring->reg_idx << 4;

	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
	context_desc->seqnum_seed = 0;

	buffer_info->time_stamp = jiffies;
	buffer_info->next_to_watch = i;
	buffer_info->dma = 0;
	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	return true;
}
static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring,
                                   struct sk_buff *skb, u32 tx_flags)
{
	struct e1000_adv_tx_context_desc *context_desc;
	struct pci_dev *pdev = tx_ring->pdev;
	struct igb_buffer *buffer_info;
	u32 info = 0, tu_cmd = 0;
	unsigned int i;

	if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
	    (tx_flags & IGB_TX_FLAGS_VLAN)) {
		i = tx_ring->next_to_use;
		buffer_info = &tx_ring->buffer_info[i];
		context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);

		if (tx_flags & IGB_TX_FLAGS_VLAN)
			info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
		info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			info |= skb_network_header_len(skb);

		context_desc->vlan_macip_lens = cpu_to_le32(info);

		tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			__be16 protocol;

			if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
				const struct vlan_ethhdr *vhdr =
				          (const struct vlan_ethhdr *)skb->data;

				protocol = vhdr->h_vlan_encapsulated_proto;
			} else {
				protocol = skb->protocol;
			}

			switch (protocol) {
			case cpu_to_be16(ETH_P_IP):
				tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
				else if (ip_hdr(skb)->protocol == IPPROTO_SCTP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
				break;
			case cpu_to_be16(ETH_P_IPV6):
				/* XXX what about other V6 headers?? */
				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
				else if (ipv6_hdr(skb)->nexthdr == IPPROTO_SCTP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
				break;
			default:
				if (unlikely(net_ratelimit()))
					dev_warn(&pdev->dev,
					    "partial checksum but proto=%x!\n",
					    skb->protocol);
				break;
			}
		}

		context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
		context_desc->seqnum_seed = 0;
		if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
			context_desc->mss_l4len_idx =
				cpu_to_le32(tx_ring->reg_idx << 4);

		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = i;
		buffer_info->dma = 0;

		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}
	return false;
}
#define IGB_MAX_TXD_PWR	16
#define IGB_MAX_DATA_PER_TXD	(1<<IGB_MAX_TXD_PWR)
static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
                                 unsigned int first)
{
	struct igb_buffer *buffer_info;
	struct pci_dev *pdev = tx_ring->pdev;
	unsigned int len = skb_headlen(skb);
	unsigned int count = 0, i;
	unsigned int f;
	dma_addr_t *map;

	i = tx_ring->next_to_use;

	if (skb_dma_map(&pdev->dev, skb, DMA_TO_DEVICE)) {
		dev_err(&pdev->dev, "TX DMA map failed\n");
		return 0;
	}

	map = skb_shinfo(skb)->dma_maps;

	buffer_info = &tx_ring->buffer_info[i];
	BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
	buffer_info->length = len;
	/* set time_stamp *before* dma to help avoid a possible race */
	buffer_info->time_stamp = jiffies;
	buffer_info->next_to_watch = i;
	buffer_info->dma = skb_shinfo(skb)->dma_head;

	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
		struct skb_frag_struct *frag;

		i++;
		if (i == tx_ring->count)
			i = 0;

		frag = &skb_shinfo(skb)->frags[f];
		len = frag->size;

		buffer_info = &tx_ring->buffer_info[i];
		BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
		buffer_info->length = len;
		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = i;
		buffer_info->dma = map[count];
		count++;
	}

	tx_ring->buffer_info[i].skb = skb;
	tx_ring->buffer_info[first].next_to_watch = i;

	return ++count;
}
static inline void igb_tx_queue_adv(struct igb_ring *tx_ring,
                                    int tx_flags, int count, u32 paylen,
                                    u8 hdr_len)
{
	union e1000_adv_tx_desc *tx_desc = NULL;
	struct igb_buffer *buffer_info;
	u32 olinfo_status = 0, cmd_type_len;
	unsigned int i;

	cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
	                E1000_ADVTXD_DCMD_DEXT);

	if (tx_flags & IGB_TX_FLAGS_VLAN)
		cmd_type_len |= E1000_ADVTXD_DCMD_VLE;

	if (tx_flags & IGB_TX_FLAGS_TSTAMP)
		cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;

	if (tx_flags & IGB_TX_FLAGS_TSO) {
		cmd_type_len |= E1000_ADVTXD_DCMD_TSE;

		/* insert tcp checksum */
		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;

		/* insert ip checksum */
		if (tx_flags & IGB_TX_FLAGS_IPV4)
			olinfo_status |= E1000_TXD_POPTS_IXSM << 8;

	} else if (tx_flags & IGB_TX_FLAGS_CSUM) {
		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
	}

	if ((tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX) &&
	    (tx_flags & (IGB_TX_FLAGS_CSUM |
	                 IGB_TX_FLAGS_TSO |
	                 IGB_TX_FLAGS_VLAN)))
		olinfo_status |= tx_ring->reg_idx << 4;

	olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);

	i = tx_ring->next_to_use;
	do {
		buffer_info = &tx_ring->buffer_info[i];
		tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
		tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
		tx_desc->read.cmd_type_len =
			cpu_to_le32(cmd_type_len | buffer_info->length);
		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
		count--;
		i++;
		if (i == tx_ring->count)
			i = 0;
	} while (count > 0);

	tx_desc->read.cmd_type_len |= cpu_to_le32(IGB_ADVTXD_DCMD);
	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64). */
	wmb();

	tx_ring->next_to_use = i;
	writel(i, tx_ring->tail);
	/* we need this if more than one processor can write to our tail
	 * at a time, it synchronizes IO on IA64/Altix systems */
	mmiowb();
}
static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
{
	struct net_device *netdev = tx_ring->netdev;

	netif_stop_subqueue(netdev, tx_ring->queue_index);

	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available. */
	if (igb_desc_unused(tx_ring) < size)
		return -EBUSY;

	/* A reprieve! */
	netif_wake_subqueue(netdev, tx_ring->queue_index);
	tx_ring->tx_stats.restart_queue++;
	return 0;
}

static int igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
{
	if (igb_desc_unused(tx_ring) >= size)
		return 0;
	return __igb_maybe_stop_tx(tx_ring, size);
}
netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
                                    struct igb_ring *tx_ring)
{
	struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
	unsigned int first;
	unsigned int tx_flags = 0;
	u8 hdr_len = 0;
	int tso = 0;
	int count = 0;
	union skb_shared_tx *shtx = skb_tx(skb);

	/* need: 1 descriptor per page,
	 *       + 2 desc gap to keep tail from touching head,
	 *       + 1 desc for skb->data,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time */
	if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
		/* this is a hard error */
		return NETDEV_TX_BUSY;
	}

	if (unlikely(shtx->hardware)) {
		shtx->in_progress = 1;
		tx_flags |= IGB_TX_FLAGS_TSTAMP;
	}

	if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
		tx_flags |= IGB_TX_FLAGS_VLAN;
		tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
	}

	if (skb->protocol == htons(ETH_P_IP))
		tx_flags |= IGB_TX_FLAGS_IPV4;

	first = tx_ring->next_to_use;
	if (skb_is_gso(skb)) {
		tso = igb_tso_adv(tx_ring, skb, tx_flags, &hdr_len);
		if (tso < 0) {
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}

	if (tso)
		tx_flags |= IGB_TX_FLAGS_TSO;
	else if (igb_tx_csum_adv(tx_ring, skb, tx_flags) &&
	         (skb->ip_summed == CHECKSUM_PARTIAL))
		tx_flags |= IGB_TX_FLAGS_CSUM;

	/*
	 * count reflects descriptors mapped, if 0 then mapping error
	 * has occurred and we need to rewind the descriptor queue
	 */
	count = igb_tx_map_adv(tx_ring, skb, first);
	if (!count) {
		dev_kfree_skb_any(skb);
		tx_ring->buffer_info[first].time_stamp = 0;
		tx_ring->next_to_use = first;
		return NETDEV_TX_OK;
	}

	igb_tx_queue_adv(tx_ring, tx_flags, count, skb->len, hdr_len);

	/* Make sure there is space in the ring for the next send. */
	igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);

	return NETDEV_TX_OK;
}
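/*
 * Editorial illustration: the "nr_frags + 4" budget above is 1 descriptor
 * per page fragment, +1 for skb->data, +1 for the context descriptor and
 * +2 of gap so the tail never touches the head; an skb with 3 fragments
 * therefore needs at least 7 free descriptors before transmission starts.
 */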
static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
                                      struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct igb_ring *tx_ring;
	int r_idx = 0;

	if (test_bit(__IGB_DOWN, &adapter->state)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	r_idx = skb->queue_mapping & (IGB_ABS_MAX_TX_QUEUES - 1);
	tx_ring = adapter->multi_tx_table[r_idx];

	/* This goes back to the question of how to logically map a tx queue
	 * to a flow.  Right now, performance is impacted slightly negatively
	 * if using multiple tx queues.  If the stack breaks away from a
	 * single qdisc implementation, we can look at this again. */
	return igb_xmit_frame_ring_adv(skb, tx_ring);
}
/**
 * igb_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void igb_tx_timeout(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/* Do the reset outside of interrupt context */
	adapter->tx_timeout_count++;

	schedule_work(&adapter->reset_task);
	wr32(E1000_EICS,
	     (adapter->eims_enable_mask & ~adapter->eims_other));
}
static void igb_reset_task(struct work_struct *work)
{
	struct igb_adapter *adapter;
	adapter = container_of(work, struct igb_adapter, reset_task);

	igb_reinit_locked(adapter);
}
/**
 * igb_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the timer callback.
 **/
static struct net_device_stats *igb_get_stats(struct net_device *netdev)
{
	/* only return the current stats */
	return &netdev->stats;
}
/**
 * igb_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int igb_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
	u32 rx_buffer_len, i;

	if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
	    (max_frame > MAX_JUMBO_FRAME_SIZE)) {
		dev_err(&adapter->pdev->dev, "Invalid MTU setting\n");
		return -EINVAL;
	}

	if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
		dev_err(&adapter->pdev->dev, "MTU > 9216 not supported.\n");
		return -EINVAL;
	}

	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		msleep(1);

	/* igb_down has a dependency on max_frame_size */
	adapter->max_frame_size = max_frame;
	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
	 * means we reserve 2 more, this pushes us to allocate from the next
	 * larger slab size.
	 * i.e. RXBUFFER_2048 --> size-4096 slab
	 */

	if (max_frame <= IGB_RXBUFFER_1024)
		rx_buffer_len = IGB_RXBUFFER_1024;
	else if (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)
		rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
	else
		rx_buffer_len = IGB_RXBUFFER_128;

	if (netif_running(netdev))
		igb_down(adapter);

	dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

	for (i = 0; i < adapter->num_rx_queues; i++)
		adapter->rx_ring[i].rx_buffer_len = rx_buffer_len;

	if (netif_running(netdev))
		igb_up(adapter);
	else
		igb_reset(adapter);

	clear_bit(__IGB_RESETTING, &adapter->state);

	return 0;
}
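/*
 * Editorial illustration: a 1500-byte MTU gives max_frame = 1518, so
 * rx_buffer_len becomes MAXIMUM_ETHERNET_VLAN_SIZE (1522); a jumbo
 * 9000-byte MTU instead falls through to IGB_RXBUFFER_128, deliberately
 * steering the ring into the header-split path of igb_configure_rx_ring()
 * where payload lands in half-page buffers rather than one huge slab
 * allocation.
 */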
/**
 * igb_update_stats - Update the board statistics counters
 * @adapter: board private structure
 **/
void igb_update_stats(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	u16 phy_tmp;
	int i;

#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF

	/*
	 * Prevent stats update while adapter is being reset, or if the pci
	 * connection is down.
	 */
	if (adapter->link_speed == 0)
		return;
	if (pci_channel_offline(pdev))
		return;

	adapter->stats.crcerrs += rd32(E1000_CRCERRS);
	adapter->stats.gprc += rd32(E1000_GPRC);
	adapter->stats.gorc += rd32(E1000_GORCL);
	rd32(E1000_GORCH); /* clear GORCL */
	adapter->stats.bprc += rd32(E1000_BPRC);
	adapter->stats.mprc += rd32(E1000_MPRC);
	adapter->stats.roc += rd32(E1000_ROC);

	adapter->stats.prc64 += rd32(E1000_PRC64);
	adapter->stats.prc127 += rd32(E1000_PRC127);
	adapter->stats.prc255 += rd32(E1000_PRC255);
	adapter->stats.prc511 += rd32(E1000_PRC511);
	adapter->stats.prc1023 += rd32(E1000_PRC1023);
	adapter->stats.prc1522 += rd32(E1000_PRC1522);
	adapter->stats.symerrs += rd32(E1000_SYMERRS);
	adapter->stats.sec += rd32(E1000_SEC);

	adapter->stats.mpc += rd32(E1000_MPC);
	adapter->stats.scc += rd32(E1000_SCC);
	adapter->stats.ecol += rd32(E1000_ECOL);
	adapter->stats.mcc += rd32(E1000_MCC);
	adapter->stats.latecol += rd32(E1000_LATECOL);
	adapter->stats.dc += rd32(E1000_DC);
	adapter->stats.rlec += rd32(E1000_RLEC);
	adapter->stats.xonrxc += rd32(E1000_XONRXC);
	adapter->stats.xontxc += rd32(E1000_XONTXC);
	adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
	adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
	adapter->stats.fcruc += rd32(E1000_FCRUC);
	adapter->stats.gptc += rd32(E1000_GPTC);
	adapter->stats.gotc += rd32(E1000_GOTCL);
	rd32(E1000_GOTCH); /* clear GOTCL */
	adapter->stats.rnbc += rd32(E1000_RNBC);
	adapter->stats.ruc += rd32(E1000_RUC);
	adapter->stats.rfc += rd32(E1000_RFC);
	adapter->stats.rjc += rd32(E1000_RJC);
	adapter->stats.tor += rd32(E1000_TORH);
	adapter->stats.tot += rd32(E1000_TOTH);
	adapter->stats.tpr += rd32(E1000_TPR);

	adapter->stats.ptc64 += rd32(E1000_PTC64);
	adapter->stats.ptc127 += rd32(E1000_PTC127);
	adapter->stats.ptc255 += rd32(E1000_PTC255);
	adapter->stats.ptc511 += rd32(E1000_PTC511);
	adapter->stats.ptc1023 += rd32(E1000_PTC1023);
	adapter->stats.ptc1522 += rd32(E1000_PTC1522);

	adapter->stats.mptc += rd32(E1000_MPTC);
	adapter->stats.bptc += rd32(E1000_BPTC);

	/* used for adaptive IFS */

	hw->mac.tx_packet_delta = rd32(E1000_TPT);
	adapter->stats.tpt += hw->mac.tx_packet_delta;
	hw->mac.collision_delta = rd32(E1000_COLC);
	adapter->stats.colc += hw->mac.collision_delta;

	adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
	adapter->stats.rxerrc += rd32(E1000_RXERRC);
	adapter->stats.tncrs += rd32(E1000_TNCRS);
	adapter->stats.tsctc += rd32(E1000_TSCTC);
	adapter->stats.tsctfc += rd32(E1000_TSCTFC);

	adapter->stats.iac += rd32(E1000_IAC);
	adapter->stats.icrxoc += rd32(E1000_ICRXOC);
	adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
	adapter->stats.icrxatc += rd32(E1000_ICRXATC);
	adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
	adapter->stats.ictxatc += rd32(E1000_ICTXATC);
	adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
	adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
	adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);

	/* Fill out the OS statistics structure */
	netdev->stats.multicast = adapter->stats.mprc;
	netdev->stats.collisions = adapter->stats.colc;

	/* Rx Errors */

	if (hw->mac.type != e1000_82575) {
		u32 rqdpc_tmp;
		u64 rqdpc_total = 0;
		/* Read out drops stats per RX queue.  Notice RQDPC (Receive
		 * Queue Drop Packet Count) stats only gets incremented, if
		 * the DROP_EN bit is set (in the SRRCTL register for that
		 * queue).  If DROP_EN bit is NOT set, then a somewhat
		 * equivalent count is stored in RNBC (not per queue basis).
		 * Also note the drop count is due to lack of available
		 * descriptors.
		 */
		for (i = 0; i < adapter->num_rx_queues; i++) {
			rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0xFFF;
			adapter->rx_ring[i].rx_stats.drops += rqdpc_tmp;
			rqdpc_total += adapter->rx_ring[i].rx_stats.drops;
		}
		netdev->stats.rx_fifo_errors = rqdpc_total;
	}

	/* Note RNBC (Receive No Buffers Count) is not an exact drop count
	 * as the hardware FIFO might save the day.  That's one of the
	 * reasons for saving it in rx_fifo_errors, as it's potentially
	 * not a true drop.
	 */
	netdev->stats.rx_fifo_errors += adapter->stats.rnbc;

	/* RLEC on some newer hardware can be incorrect so build
	 * our own version based on RUC and ROC */
	netdev->stats.rx_errors = adapter->stats.rxerrc +
		adapter->stats.crcerrs + adapter->stats.algnerrc +
		adapter->stats.ruc + adapter->stats.roc +
		adapter->stats.cexterr;
	netdev->stats.rx_length_errors = adapter->stats.ruc +
	                                 adapter->stats.roc;
	netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
	netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
	netdev->stats.rx_missed_errors = adapter->stats.mpc;

	/* Tx Errors */
	netdev->stats.tx_errors = adapter->stats.ecol +
	                          adapter->stats.latecol;
	netdev->stats.tx_aborted_errors = adapter->stats.ecol;
	netdev->stats.tx_window_errors = adapter->stats.latecol;
	netdev->stats.tx_carrier_errors = adapter->stats.tncrs;

	/* Tx Dropped needs to be maintained elsewhere */

	/* Phy Stats */
	if (hw->phy.media_type == e1000_media_type_copper) {
		if ((adapter->link_speed == SPEED_1000) &&
		   (!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
			phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
			adapter->phy_stats.idle_errors += phy_tmp;
		}
	}

	/* Management Stats */
	adapter->stats.mgptc += rd32(E1000_MGTPTC);
	adapter->stats.mgprc += rd32(E1000_MGTPRC);
	adapter->stats.mgpdc += rd32(E1000_MGTPDC);
}
static irqreturn_t igb_msix_other(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = rd32(E1000_ICR);
	/* reading ICR causes bit 31 of EICR to be cleared */

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	/* Check for a mailbox event */
	if (icr & E1000_ICR_VMMB)
		igb_msg_task(adapter);

	if (icr & E1000_ICR_LSC) {
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_VMMB);
	wr32(E1000_EIMS, adapter->eims_other);

	return IRQ_HANDLED;
}
static void igb_write_itr(struct igb_q_vector *q_vector)
{
	u32 itr_val = q_vector->itr_val & 0x7FFC;

	if (!q_vector->set_itr)
		return;

	if (!itr_val)
		itr_val = 0x4;

	if (q_vector->itr_shift)
		itr_val |= itr_val << q_vector->itr_shift;
	else
		itr_val |= 0x8000000;

	writel(itr_val, q_vector->itr_register);
	q_vector->set_itr = 0;
}
static irqreturn_t igb_msix_ring(int irq, void *data)
{
	struct igb_q_vector *q_vector = data;

	/* Write the ITR value calculated from the previous interrupt. */
	igb_write_itr(q_vector);

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}
#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	int cpu = get_cpu();

	if (q_vector->cpu == cpu)
		goto out_no_update;

	if (q_vector->tx_ring) {
		int q = q_vector->tx_ring->reg_idx;
		u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
		if (hw->mac.type == e1000_82575) {
			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
			dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		} else {
			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
			dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
			              E1000_DCA_TXCTRL_CPUID_SHIFT;
		}
		dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
		wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
	}
	if (q_vector->rx_ring) {
		int q = q_vector->rx_ring->reg_idx;
		u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
		if (hw->mac.type == e1000_82575) {
			dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
			dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		} else {
			dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
			dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
			              E1000_DCA_RXCTRL_CPUID_SHIFT;
		}
		dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
		dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
		dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
		wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
	}
	q_vector->cpu = cpu;
out_no_update:
	put_cpu();
}
static void igb_setup_dca(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
		return;

	/* Always use CB2 mode, difference is masked in the CB driver. */
	wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		q_vector->cpu = -1;
		igb_update_dca(q_vector);
	}
}
static int __igb_notify_dca(struct device *dev, void *data)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	unsigned long event = *(unsigned long *)data;

	switch (event) {
	case DCA_PROVIDER_ADD:
		/* if already enabled, don't do it again */
		if (adapter->flags & IGB_FLAG_DCA_ENABLED)
			break;
		/* Always use CB2 mode, difference is masked
		 * in the CB driver. */
		wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
		if (dca_add_requester(dev) == 0) {
			adapter->flags |= IGB_FLAG_DCA_ENABLED;
			dev_info(&adapter->pdev->dev, "DCA enabled\n");
			igb_setup_dca(adapter);
			break;
		}
		/* Fall Through since DCA is disabled. */
	case DCA_PROVIDER_REMOVE:
		if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
			/* without this a class_device is left
			 * hanging around in the sysfs model */
			dca_remove_requester(dev);
			dev_info(&adapter->pdev->dev, "DCA disabled\n");
			adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
			wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
		}
		break;
	}

	return 0;
}
static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
                          void *p)
{
	int ret_val;

	ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
	                                 __igb_notify_dca);

	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}
#endif /* CONFIG_IGB_DCA */
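
/*
 * The functions below implement the PF side of the PF<->VF mailbox used
 * for SR-IOV.  Each VF owns one mailbox; msgbuf[0] carries the message
 * type plus the E1000_VT_MSGTYPE_ACK/NACK/CTS status bits, and
 * msgbuf[1..] carries the payload (e.g. a MAC address or a VLAN id).
 * A rough exchange from the VF's point of view (sketch only; the
 * VF-side mailbox-op names are illustrative, not taken from this file):
 *
 *	msgbuf[0] = E1000_VF_RESET;
 *	mbx->ops.write_posted(hw, msgbuf, 1);	// request a reset
 *	mbx->ops.read_posted(hw, msgbuf, 3);	// ACK plus assigned MAC
 */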
static void igb_ping_all_vfs(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ping;
	int i;

	for (i = 0 ; i < adapter->vfs_allocated_count; i++) {
		ping = E1000_PF_CONTROL_MSG;
		if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
			ping |= E1000_VT_MSGTYPE_CTS;
		igb_write_mbx(hw, &ping, 1, i);
	}
}
static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vmolr = rd32(E1000_VMOLR(vf));
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];

	vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
	                    IGB_VF_FLAG_MULTI_PROMISC);
	vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);

	if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
		vmolr |= E1000_VMOLR_MPME;
		vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
		*msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
	} else {
		/*
		 * if we have hashes and we are clearing a multicast promisc
		 * flag we need to write the hashes to the MTA as this step
		 * was previously skipped
		 */
		if (vf_data->num_vf_mc_hashes > 30) {
			vmolr |= E1000_VMOLR_MPME;
		} else if (vf_data->num_vf_mc_hashes) {
			int j;
			vmolr |= E1000_VMOLR_ROMPE;
			for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
				igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
		}
	}

	wr32(E1000_VMOLR(vf), vmolr);

	/* there are flags left unprocessed, likely not supported */
	if (*msgbuf & E1000_VT_MSGINFO_MASK)
		return -EINVAL;

	return 0;
}
static int igb_set_vf_multicasts(struct igb_adapter *adapter,
                                 u32 *msgbuf, u32 vf)
{
	int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
	u16 *hash_list = (u16 *)&msgbuf[1];
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	int i;

	/* salt away the number of multicast addresses assigned
	 * to this VF for later use to restore when the PF multi cast
	 * list changes
	 */
	vf_data->num_vf_mc_hashes = n;

	/* only up to 30 hash values supported */
	if (n > 30)
		n = 30;

	/* store the hashes for later use */
	for (i = 0; i < n; i++)
		vf_data->vf_mc_hashes[i] = hash_list[i];

	/* Flush and reset the mta with the new values */
	igb_set_rx_mode(adapter->netdev);

	return 0;
}
static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data;
	int i, j;

	for (i = 0; i < adapter->vfs_allocated_count; i++) {
		u32 vmolr = rd32(E1000_VMOLR(i));
		vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);

		vf_data = &adapter->vf_data[i];

		if ((vf_data->num_vf_mc_hashes > 30) ||
		    (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
			vmolr |= E1000_VMOLR_MPME;
		} else if (vf_data->num_vf_mc_hashes) {
			vmolr |= E1000_VMOLR_ROMPE;
			for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
				igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
		}

		wr32(E1000_VMOLR(i), vmolr);
	}
}
static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 pool_mask, reg, vid;
	int i;

	pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);

	/* Find the vlan filter for this id */
	for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
		reg = rd32(E1000_VLVF(i));

		/* remove the vf from the pool */
		reg &= ~pool_mask;

		/* if pool is empty then remove entry from vfta */
		if (!(reg & E1000_VLVF_POOLSEL_MASK) &&
		    (reg & E1000_VLVF_VLANID_ENABLE)) {
			vid = reg & E1000_VLVF_VLANID_MASK;
			igb_vfta_set(hw, vid, false);
			reg = 0;
		}

		wr32(E1000_VLVF(i), reg);
	}

	adapter->vf_data[vf].vlans_enabled = 0;
}
static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 reg, i;

	/* The vlvf table only exists on 82576 hardware and newer */
	if (hw->mac.type < e1000_82576)
		return -1;

	/* we only need to do this if VMDq is enabled */
	if (!adapter->vfs_allocated_count)
		return -1;

	/* Find the vlan filter for this id */
	for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
		reg = rd32(E1000_VLVF(i));
		if ((reg & E1000_VLVF_VLANID_ENABLE) &&
		    vid == (reg & E1000_VLVF_VLANID_MASK))
			break;
	}

	if (add) {
		if (i == E1000_VLVF_ARRAY_SIZE) {
			/* Did not find a matching VLAN ID entry that was
			 * enabled.  Search for a free filter entry, i.e.
			 * one without the enable bit set
			 */
			for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
				reg = rd32(E1000_VLVF(i));
				if (!(reg & E1000_VLVF_VLANID_ENABLE))
					break;
			}
		}
		if (i < E1000_VLVF_ARRAY_SIZE) {
			/* Found an enabled/available entry */
			reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);

			/* if !enabled we need to set this up in vfta */
			if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
				/* add VID to filter table */
				igb_vfta_set(hw, vid, true);
				reg |= E1000_VLVF_VLANID_ENABLE;
			}
			reg &= ~E1000_VLVF_VLANID_MASK;
			reg |= vid;
			wr32(E1000_VLVF(i), reg);

			/* do not modify RLPML for PF devices */
			if (vf >= adapter->vfs_allocated_count)
				return 0;

			if (!adapter->vf_data[vf].vlans_enabled) {
				u32 size;
				reg = rd32(E1000_VMOLR(vf));
				size = reg & E1000_VMOLR_RLPML_MASK;
				size += 4;
				reg &= ~E1000_VMOLR_RLPML_MASK;
				reg |= size;
				wr32(E1000_VMOLR(vf), reg);
			}

			adapter->vf_data[vf].vlans_enabled++;
			return 0;
		}
	} else {
		if (i < E1000_VLVF_ARRAY_SIZE) {
			/* remove vf from the pool */
			reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf));
			/* if pool is empty then remove entry from vfta */
			if (!(reg & E1000_VLVF_POOLSEL_MASK)) {
				reg = 0;
				igb_vfta_set(hw, vid, false);
			}
			wr32(E1000_VLVF(i), reg);

			/* do not modify RLPML for PF devices */
			if (vf >= adapter->vfs_allocated_count)
				return 0;

			adapter->vf_data[vf].vlans_enabled--;
			if (!adapter->vf_data[vf].vlans_enabled) {
				u32 size;
				reg = rd32(E1000_VMOLR(vf));
				size = reg & E1000_VMOLR_RLPML_MASK;
				size -= 4;
				reg &= ~E1000_VMOLR_RLPML_MASK;
				reg |= size;
				wr32(E1000_VMOLR(vf), reg);
			}
			return 0;
		}
	}
	return -1;
}
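
/*
 * NOTE: each VLVF entry packs the VLAN id in the low bits
 * (E1000_VLVF_VLANID_MASK), a valid bit (E1000_VLVF_VLANID_ENABLE) and a
 * per-pool membership bitmap starting at E1000_VLVF_POOLSEL_SHIFT, so
 * marking pool membership for a given vf is just:
 *
 *	reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
 *
 * The +/- 4 RLPML adjustment above accounts for the VLAN tag bytes in
 * the VF's maximum accepted frame size.
 */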
static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
{
	int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
	int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);

	return igb_vlvf_set(adapter, vid, add, vf);
}
static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
{
	/* clear all flags */
	adapter->vf_data[vf].flags = 0;
	adapter->vf_data[vf].last_nack = jiffies;

	/* reset offloads to defaults */
	igb_set_vmolr(adapter, vf);

	/* reset vlans for device */
	igb_clear_vf_vfta(adapter, vf);

	/* reset multicast table array for vf */
	adapter->vf_data[vf].num_vf_mc_hashes = 0;

	/* Flush and reset the mta with the new values */
	igb_set_rx_mode(adapter->netdev);
}
static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
{
	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;

	/* generate a new mac address as we were hotplug removed/added */
	random_ether_addr(vf_mac);

	/* process remaining reset events */
	igb_vf_reset(adapter, vf);
}
static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
	int rar_entry = hw->mac.rar_entry_count - (vf + 1);
	u32 reg, msgbuf[3];
	u8 *addr = (u8 *)(&msgbuf[1]);

	/* process all the same items cleared in a function level reset */
	igb_vf_reset(adapter, vf);

	/* set vf mac address */
	igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);

	/* enable transmit and receive for vf */
	reg = rd32(E1000_VFTE);
	wr32(E1000_VFTE, reg | (1 << vf));
	reg = rd32(E1000_VFRE);
	wr32(E1000_VFRE, reg | (1 << vf));

	adapter->vf_data[vf].flags = IGB_VF_FLAG_CTS;

	/* reply to reset with ack and vf mac address */
	msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
	memcpy(addr, vf_mac, 6);
	igb_write_mbx(hw, msgbuf, 3, vf);
}
static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
{
	unsigned char *addr = (char *)&msg[1];
	int err = -1;

	if (is_valid_ether_addr(addr))
		err = igb_set_vf_mac(adapter, vf, addr);

	return err;
}
static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	u32 msg = E1000_VT_MSGTYPE_NACK;

	/* if device isn't clear to send it shouldn't be reading either */
	if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
	    time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
		igb_write_mbx(hw, &msg, 1, vf);
		vf_data->last_nack = jiffies;
	}
}
static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 msgbuf[E1000_VFMAILBOX_SIZE];
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	s32 retval;

	retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf);

	if (retval)
		dev_err(&pdev->dev, "Error receiving message from VF\n");

	/* this is a message we already processed, do nothing */
	if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
		return;

	/*
	 * until the vf completes a reset it should not be
	 * allowed to start any configuration.
	 */
	if (msgbuf[0] == E1000_VF_RESET) {
		igb_vf_reset_msg(adapter, vf);
		return;
	}

	if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
		msgbuf[0] = E1000_VT_MSGTYPE_NACK;
		if (time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
			igb_write_mbx(hw, msgbuf, 1, vf);
			vf_data->last_nack = jiffies;
		}
		return;
	}

	switch ((msgbuf[0] & 0xFFFF)) {
	case E1000_VF_SET_MAC_ADDR:
		retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_PROMISC:
		retval = igb_set_vf_promisc(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_MULTICAST:
		retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_LPE:
		retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
		break;
	case E1000_VF_SET_VLAN:
		retval = igb_set_vf_vlan(adapter, msgbuf, vf);
		break;
	default:
		dev_err(&adapter->pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
		retval = -E1000_ERR_MBX;
		break;
	}

	/* notify the VF of the results of what it sent us */
	if (retval)
		msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
	else
		msgbuf[0] |= E1000_VT_MSGTYPE_ACK;

	msgbuf[0] |= E1000_VT_MSGTYPE_CTS;

	igb_write_mbx(hw, msgbuf, 1, vf);
}
static void igb_msg_task(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vf;

	for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
		/* process any reset requests */
		if (!igb_check_for_rst(hw, vf))
			igb_vf_reset_event(adapter, vf);

		/* process any messages pending */
		if (!igb_check_for_msg(hw, vf))
			igb_rcv_msg_from_vf(adapter, vf);

		/* process any acks */
		if (!igb_check_for_ack(hw, vf))
			igb_rcv_ack_from_vf(adapter, vf);
	}
}
/**
 *  igb_set_uta - Set unicast filter table address
 *  @adapter: board private structure
 *
 *  The unicast table address is a register array of 32-bit registers.
 *  The table is meant to be used in a way similar to how the MTA is used
 *  however due to certain limitations in the hardware it is necessary to
 *  set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
 *  enable bit to allow vlan tag stripping when promiscuous mode is enabled
 **/
static void igb_set_uta(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	/* The UTA table only exists on 82576 hardware and newer */
	if (hw->mac.type < e1000_82576)
		return;

	/* we only need to do this if VMDq is enabled */
	if (!adapter->vfs_allocated_count)
		return;

	for (i = 0; i < hw->mac.uta_reg_count; i++)
		array_wr32(E1000_UTA, i, ~0);
}
/**
 * igb_intr_msi - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t igb_intr_msi(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	struct e1000_hw *hw = &adapter->hw;
	/* read ICR disables interrupts using IAM */
	u32 icr = rd32(E1000_ICR);

	igb_write_itr(q_vector);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}
/**
 * igb_intr - Legacy Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t igb_intr(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	struct e1000_hw *hw = &adapter->hw;
	/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked.  No
	 * need for the IMC write */
	u32 icr = rd32(E1000_ICR);
	if (!icr)
		return IRQ_NONE;  /* Not our interrupt */

	igb_write_itr(q_vector);

	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
	 * not set, then the adapter didn't send an interrupt */
	if (!(icr & E1000_ICR_INT_ASSERTED))
		return IRQ_NONE;

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}
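
/*
 * NOTE: the driver has three interrupt flavours: per-vector MSI-X
 * (igb_msix_ring/igb_msix_other), single-vector MSI (igb_intr_msi) and
 * legacy INTx (igb_intr).  Only the legacy path must check for a zero
 * ICR and for E1000_ICR_INT_ASSERTED, since a shared INTx line may fire
 * on behalf of another device; MSI and MSI-X interrupts are by
 * construction ours, so those handlers skip the ownership checks.
 */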
static inline void igb_ring_irq_enable(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;

	if ((q_vector->rx_ring && (adapter->rx_itr_setting & 3)) ||
	    (!q_vector->rx_ring && (adapter->tx_itr_setting & 3))) {
		if (!adapter->msix_entries)
			igb_set_itr(adapter);
		else
			igb_update_ring_itr(q_vector);
	}

	if (!test_bit(__IGB_DOWN, &adapter->state)) {
		if (adapter->msix_entries)
			wr32(E1000_EIMS, q_vector->eims_value);
		else
			igb_irq_enable(adapter);
	}
}
/**
 * igb_poll - NAPI Rx polling callback
 * @napi: napi polling structure
 * @budget: count of how many packets we should handle
 **/
static int igb_poll(struct napi_struct *napi, int budget)
{
	struct igb_q_vector *q_vector = container_of(napi,
	                                             struct igb_q_vector,
	                                             napi);
	int tx_clean_complete = 1, work_done = 0;

#ifdef CONFIG_IGB_DCA
	if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
		igb_update_dca(q_vector);
#endif
	if (q_vector->tx_ring)
		tx_clean_complete = igb_clean_tx_irq(q_vector);

	if (q_vector->rx_ring)
		igb_clean_rx_irq_adv(q_vector, &work_done, budget);

	if (!tx_clean_complete)
		work_done = budget;

	/* If not enough Rx work done, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		igb_ring_irq_enable(q_vector);
	}

	return work_done;
}
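
/*
 * NOTE: per the NAPI contract, igb_poll() returns the number of Rx
 * packets processed and may only call napi_complete() and re-enable
 * the interrupt when it consumed less than the budget; reporting the
 * full budget when Tx cleanup is incomplete keeps the vector in
 * polling mode until the Tx ring is drained too.
 */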
/**
 * igb_systim_to_hwtstamp - convert system time value to hw timestamp
 * @adapter: board private structure
 * @shhwtstamps: timestamp structure to update
 * @regval: unsigned 64bit system time value.
 *
 * We need to convert the system time value stored in the RX/TXSTMP registers
 * into a hwtstamp which can be used by the upper level timestamping functions
 */
static void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
                                   struct skb_shared_hwtstamps *shhwtstamps,
                                   u64 regval)
{
	u64 ns;

	ns = timecounter_cyc2time(&adapter->clock, regval);
	timecompare_update(&adapter->compare, ns);
	memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
	shhwtstamps->hwtstamp = ns_to_ktime(ns);
	shhwtstamps->syststamp = timecompare_transform(&adapter->compare, ns);
}
/**
 * igb_tx_hwtstamp - utility function which checks for TX time stamp
 * @q_vector: pointer to q_vector containing needed info
 * @skb: packet that was just sent
 *
 * If we were asked to do hardware stamping and such a time stamp is
 * available, then it must have been for this skb here because we only
 * allow one such packet into the queue.
 */
static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb)
{
	struct igb_adapter *adapter = q_vector->adapter;
	union skb_shared_tx *shtx = skb_tx(skb);
	struct e1000_hw *hw = &adapter->hw;
	struct skb_shared_hwtstamps shhwtstamps;
	u64 regval;

	/* if skb does not support hw timestamp or TX stamp not valid exit */
	if (likely(!shtx->hardware) ||
	    !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
		return;

	regval = rd32(E1000_TXSTMPL);
	regval |= (u64)rd32(E1000_TXSTMPH) << 32;

	igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
	skb_tstamp_tx(skb, &shhwtstamps);
}
/**
 * igb_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: pointer to q_vector containing needed info
 * returns true if ring is completely cleaned
 **/
static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct igb_ring *tx_ring = q_vector->tx_ring;
	struct net_device *netdev = tx_ring->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct igb_buffer *buffer_info;
	struct sk_buff *skb;
	union e1000_adv_tx_desc *tx_desc, *eop_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int i, eop, count = 0;
	bool cleaned = false;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);

	while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
	       (count < tx_ring->count)) {
		for (cleaned = false; !cleaned; count++) {
			tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			cleaned = (i == eop);
			skb = buffer_info->skb;

			if (skb) {
				unsigned int segs, bytecount;
				/* gso_segs is currently only valid for tcp */
				segs = skb_shinfo(skb)->gso_segs ?: 1;
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * skb_headlen(skb)) +
				            skb->len;
				total_packets += segs;
				total_bytes += bytecount;

				igb_tx_hwtstamp(q_vector, skb);
			}

			igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
			tx_desc->wb.status = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}
		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

	if (unlikely(count &&
	             netif_carrier_ok(netdev) &&
	             igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
		    !(test_bit(__IGB_DOWN, &adapter->state))) {
			netif_wake_subqueue(netdev, tx_ring->queue_index);
			tx_ring->tx_stats.restart_queue++;
		}
	}

	if (tx_ring->detect_tx_hung) {
		/* Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i */
		tx_ring->detect_tx_hung = false;
		if (tx_ring->buffer_info[i].time_stamp &&
		    time_after(jiffies, tx_ring->buffer_info[i].time_stamp +
		               (adapter->tx_timeout_factor * HZ))
		    && !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {

			/* detected Tx unit hang */
			dev_err(&tx_ring->pdev->dev,
				"Detected Tx Unit Hang\n"
				"  Tx Queue             <%d>\n"
				"  TDH                  <%x>\n"
				"  TDT                  <%x>\n"
				"  next_to_use          <%x>\n"
				"  next_to_clean        <%x>\n"
				"buffer_info[next_to_clean]\n"
				"  time_stamp           <%lx>\n"
				"  next_to_watch        <%x>\n"
				"  jiffies              <%lx>\n"
				"  desc.status          <%x>\n",
				tx_ring->queue_index,
				readl(tx_ring->head),
				readl(tx_ring->tail),
				tx_ring->next_to_use,
				tx_ring->next_to_clean,
				tx_ring->buffer_info[i].time_stamp,
				eop,
				jiffies,
				eop_desc->wb.status);
			netif_stop_subqueue(netdev, tx_ring->queue_index);
		}
	}
	tx_ring->total_bytes += total_bytes;
	tx_ring->total_packets += total_packets;
	tx_ring->tx_stats.bytes += total_bytes;
	tx_ring->tx_stats.packets += total_packets;
	netdev->stats.tx_bytes += total_bytes;
	netdev->stats.tx_packets += total_packets;
	return (count < tx_ring->count);
}
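
/*
 * NOTE: the bytecount math above credits TSO properly: for a TSO skb
 * split into N segments the header (skb_headlen) goes on the wire N
 * times, so bytes sent = skb->len + (N - 1) * headlen.  For example
 * (values illustrative), a 60054-byte skb with a 54-byte header split
 * into 41 segments accounts for 60054 + 40 * 54 = 62214 bytes.
 */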
/**
 * igb_receive_skb - helper function to handle rx indications
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 * @vlan_tag: vlan tag for packet
 **/
static void igb_receive_skb(struct igb_q_vector *q_vector,
                            struct sk_buff *skb,
                            u16 vlan_tag)
{
	struct igb_adapter *adapter = q_vector->adapter;

	if (vlan_tag)
		vlan_gro_receive(&q_vector->napi, adapter->vlgrp,
		                 vlan_tag, skb);
	else
		napi_gro_receive(&q_vector->napi, skb);
}
static inline void igb_rx_checksum_adv(struct igb_ring *ring,
                                       u32 status_err, struct sk_buff *skb)
{
	skb->ip_summed = CHECKSUM_NONE;

	/* Ignore Checksum bit is set or checksum is disabled through ethtool */
	if (!(ring->flags & IGB_RING_FLAG_RX_CSUM) ||
	    (status_err & E1000_RXD_STAT_IXSM))
		return;

	/* TCP/UDP checksum error bit is set */
	if (status_err &
	    (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
		/*
		 * work around errata with sctp packets where the TCPE aka
		 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
		 * packets, (aka let the stack check the crc32c)
		 */
		if ((skb->len == 60) &&
		    (ring->flags & IGB_RING_FLAG_RX_SCTP_CSUM))
			ring->rx_stats.csum_err++;

		/* let the stack verify checksum errors */
		return;
	}
	/* It must be a TCP or UDP packet with a valid checksum */
	if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	dev_dbg(&ring->pdev->dev, "cksum success: bits %08X\n", status_err);
}
static inline void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
                                   struct sk_buff *skb)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	u64 regval;

	/*
	 * If this bit is set, then the RX registers contain the time stamp. No
	 * other packet will be time stamped until we read these registers, so
	 * read the registers to make them available again. Because only one
	 * packet can be time stamped at a time, we know that the register
	 * values must belong to this one here and therefore we don't need to
	 * compare any of the additional attributes stored for it.
	 *
	 * If nothing went wrong, then it should have a skb_shared_tx that we
	 * can turn into a skb_shared_hwtstamps.
	 */
	if (likely(!(staterr & E1000_RXDADV_STAT_TS)))
		return;
	if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
		return;

	regval = rd32(E1000_RXSTMPL);
	regval |= (u64)rd32(E1000_RXSTMPH) << 32;

	igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
}
static inline u16 igb_get_hlen(struct igb_ring *rx_ring,
                               union e1000_adv_rx_desc *rx_desc)
{
	/* HW will not DMA in data larger than the given buffer, even if it
	 * parses the (NFS, of course) header to be larger.  In that case, it
	 * fills the header buffer and spills the rest into the page.
	 */
	u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
	            E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
	if (hlen > rx_ring->rx_buffer_len)
		hlen = rx_ring->rx_buffer_len;
	return hlen;
}
static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
                                 int *work_done, int budget)
{
	struct igb_ring *rx_ring = q_vector->rx_ring;
	struct net_device *netdev = rx_ring->netdev;
	struct pci_dev *pdev = rx_ring->pdev;
	union e1000_adv_rx_desc *rx_desc, *next_rxd;
	struct igb_buffer *buffer_info, *next_buffer;
	struct sk_buff *skb;
	bool cleaned = false;
	int cleaned_count = 0;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int i;
	u32 staterr;
	u16 length;
	u16 vlan_tag;

	i = rx_ring->next_to_clean;
	buffer_info = &rx_ring->buffer_info[i];
	rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

	while (staterr & E1000_RXD_STAT_DD) {
		if (*work_done >= budget)
			break;
		(*work_done)++;

		skb = buffer_info->skb;
		prefetch(skb->data - NET_IP_ALIGN);
		buffer_info->skb = NULL;

		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC_ADV(*rx_ring, i);
		prefetch(next_rxd);
		next_buffer = &rx_ring->buffer_info[i];

		length = le16_to_cpu(rx_desc->wb.upper.length);
		cleaned = true;
		cleaned_count++;

		if (buffer_info->dma) {
			pci_unmap_single(pdev, buffer_info->dma,
			                 rx_ring->rx_buffer_len,
			                 PCI_DMA_FROMDEVICE);
			buffer_info->dma = 0;
			if (rx_ring->rx_buffer_len >= IGB_RXBUFFER_1024) {
				skb_put(skb, length);
				goto send_up;
			}
			skb_put(skb, igb_get_hlen(rx_ring, rx_desc));
		}

		if (length) {
			pci_unmap_page(pdev, buffer_info->page_dma,
			               PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
			buffer_info->page_dma = 0;

			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
			                   buffer_info->page,
			                   buffer_info->page_offset,
			                   length);

			if (page_count(buffer_info->page) != 1)
				buffer_info->page = NULL;
			else
				get_page(buffer_info->page);

			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
		}

		if (!(staterr & E1000_RXD_STAT_EOP)) {
			buffer_info->skb = next_buffer->skb;
			buffer_info->dma = next_buffer->dma;
			next_buffer->skb = skb;
			next_buffer->dma = 0;
			goto next_desc;
		}
send_up:
		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		igb_rx_hwtstamp(q_vector, staterr, skb);
		total_bytes += skb->len;
		total_packets++;

		igb_rx_checksum_adv(rx_ring, staterr, skb);

		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, rx_ring->queue_index);

		vlan_tag = ((staterr & E1000_RXD_STAT_VP) ?
		            le16_to_cpu(rx_desc->wb.upper.vlan) : 0);

		igb_receive_skb(q_vector, skb, vlan_tag);

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
			igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = igb_desc_unused(rx_ring);

	if (cleaned_count)
		igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);

	rx_ring->total_packets += total_packets;
	rx_ring->total_bytes += total_bytes;
	rx_ring->rx_stats.packets += total_packets;
	rx_ring->rx_stats.bytes += total_bytes;
	netdev->stats.rx_bytes += total_bytes;
	netdev->stats.rx_packets += total_packets;
	return cleaned;
}
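
/*
 * NOTE: in packet-split mode (rx_buffer_len < IGB_RXBUFFER_1024) the
 * hardware DMAs protocol headers into the skb's linear buffer and the
 * payload into half a page, which the loop above attaches as a page
 * fragment; for multi-descriptor frames (no EOP yet) the skb and dma
 * pointers are handed forward to the next buffer_info so the chain
 * keeps growing until the final descriptor completes the frame.
 */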
/**
 * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 **/
void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
{
	struct net_device *netdev = rx_ring->netdev;
	union e1000_adv_rx_desc *rx_desc;
	struct igb_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	int bufsz;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	bufsz = rx_ring->rx_buffer_len;

	while (cleaned_count--) {
		rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);

		if ((bufsz < IGB_RXBUFFER_1024) && !buffer_info->page_dma) {
			if (!buffer_info->page) {
				buffer_info->page = alloc_page(GFP_ATOMIC);
				if (!buffer_info->page) {
					rx_ring->rx_stats.alloc_failed++;
					goto no_buffers;
				}
				buffer_info->page_offset = 0;
			} else {
				buffer_info->page_offset ^= PAGE_SIZE / 2;
			}
			buffer_info->page_dma =
				pci_map_page(rx_ring->pdev, buffer_info->page,
				             buffer_info->page_offset,
				             PAGE_SIZE / 2,
				             PCI_DMA_FROMDEVICE);
		}

		if (!buffer_info->skb) {
			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
			if (!skb) {
				rx_ring->rx_stats.alloc_failed++;
				goto no_buffers;
			}

			buffer_info->skb = skb;
			buffer_info->dma = pci_map_single(rx_ring->pdev,
			                                  skb->data,
			                                  bufsz,
			                                  PCI_DMA_FROMDEVICE);
		}
		/* Refresh the desc even if buffer_addrs didn't change because
		 * each write-back erases this info. */
		if (bufsz < IGB_RXBUFFER_1024) {
			rx_desc->read.pkt_addr =
			     cpu_to_le64(buffer_info->page_dma);
			rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
		} else {
			rx_desc->read.pkt_addr =
			     cpu_to_le64(buffer_info->dma);
			rx_desc->read.hdr_addr = 0;
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		if (i == 0)
			i = (rx_ring->count - 1);
		else
			i--;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64). */
		wmb();
		writel(i, rx_ring->tail);
	}
}
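
/*
 * NOTE: an advanced Rx descriptor in "read" format carries two DMA
 * pointers, pkt_addr and hdr_addr.  In packet-split mode the page half
 * backs pkt_addr and the skb's data buffer backs hdr_addr; otherwise
 * the skb buffer alone backs pkt_addr.  Both fields must be rewritten
 * on every refill because, as the comment above notes, the hardware's
 * write-back overlays them with status information.
 */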
static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct mii_ioctl_data *data = if_mii(ifr);

	if (adapter->hw.phy.media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = adapter->hw.phy.addr;
		break;
	case SIOCGMIIREG:
		if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
		                     &data->val_out))
			return -EIO;
		break;
	case SIOCSMIIREG:
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
/**
 * igb_hwtstamp_ioctl - control hardware time stamping
 * @netdev:
 * @ifreq:
 * @cmd:
 *
 * Outgoing time stamping can be enabled and disabled. Play nice and
 * disable it when requested, although it shouldn't cause any overhead
 * when no packet needs it. At most one packet in the queue may be
 * marked for time stamping, otherwise it would be impossible to tell
 * for sure to which packet the hardware time stamp belongs.
 *
 * Incoming time stamping has to be configured via the hardware
 * filters. Not all combinations are supported, in particular event
 * type has to be specified. Matching the kind of event packet is
 * not supported, with the exception of "all V2 events regardless of
 * level 2 or 4".
 *
 **/
static int igb_hwtstamp_ioctl(struct net_device *netdev,
                              struct ifreq *ifr, int cmd)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct hwtstamp_config config;
	u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
	u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
	u32 tsync_rx_cfg = 0;
	bool is_l4 = false;
	bool is_l2 = false;
	u32 regval;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		tsync_tx_ctl = 0;
	case HWTSTAMP_TX_ON:
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		tsync_rx_ctl = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_ALL:
		/*
		 * register TSYNCRXCFG must be set, therefore it is not
		 * possible to time stamp both Sync and Delay_Req messages
		 * => fall back to time stamping all packets
		 */
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
		is_l4 = true;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
		is_l4 = true;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
		is_l2 = true;
		is_l4 = true;
		config.rx_filter = HWTSTAMP_FILTER_SOME;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
		is_l2 = true;
		is_l4 = true;
		config.rx_filter = HWTSTAMP_FILTER_SOME;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		is_l2 = true;
		break;
	default:
		return -ERANGE;
	}

	if (hw->mac.type == e1000_82575) {
		if (tsync_rx_ctl | tsync_tx_ctl)
			return -EINVAL;
		return 0;
	}

	/* enable/disable TX */
	regval = rd32(E1000_TSYNCTXCTL);
	regval &= ~E1000_TSYNCTXCTL_ENABLED;
	regval |= tsync_tx_ctl;
	wr32(E1000_TSYNCTXCTL, regval);

	/* enable/disable RX */
	regval = rd32(E1000_TSYNCRXCTL);
	regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
	regval |= tsync_rx_ctl;
	wr32(E1000_TSYNCRXCTL, regval);

	/* define which PTP packets are time stamped */
	wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);

	/* define ethertype filter for timestamped packets */
	if (is_l2)
		wr32(E1000_ETQF(3),
		     (E1000_ETQF_FILTER_ENABLE | /* enable filter */
		      E1000_ETQF_1588 | /* enable timestamping */
		      ETH_P_1588));     /* 1588 eth protocol type */
	else
		wr32(E1000_ETQF(3), 0);

#define PTP_PORT 319
	/* L4 Queue Filter[3]: filter by destination port and protocol */
	if (is_l4) {
		u32 ftqf = (IPPROTO_UDP /* UDP */
			| E1000_FTQF_VF_BP /* VF not compared */
			| E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */
			| E1000_FTQF_MASK); /* mask all inputs */
		ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */

		wr32(E1000_IMIR(3), htons(PTP_PORT));
		wr32(E1000_IMIREXT(3),
		     (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
		if (hw->mac.type == e1000_82576) {
			/* enable source port check */
			wr32(E1000_SPQF(3), htons(PTP_PORT));
			ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
		}
		wr32(E1000_FTQF(3), ftqf);
	} else {
		wr32(E1000_FTQF(3), E1000_FTQF_MASK);
	}

	adapter->hwtstamp_config = config;

	/* clear TX/RX time stamp registers, just to be sure */
	regval = rd32(E1000_TXSTMPH);
	regval = rd32(E1000_RXSTMPH);

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}
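
/*
 * For illustration, a user program enables hardware time stamping
 * through the standard SIOCSHWTSTAMP request (sketch only; "eth0" and
 * the chosen filter are placeholders):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr = { .ifr_data = (void *)&cfg };
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 *
 * On return cfg.rx_filter is updated to what the hardware actually
 * timestamps, e.g. HWTSTAMP_FILTER_SOME for the per-message filters
 * adjusted above.
 */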
static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return igb_mii_ioctl(netdev, ifr, cmd);
	case SIOCSHWTSTAMP:
		return igb_hwtstamp_ioctl(netdev, ifr, cmd);
	default:
		return -EOPNOTSUPP;
	}
}
s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
	struct igb_adapter *adapter = hw->back;
	u16 cap_offset;

	cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
	if (!cap_offset)
		return -E1000_ERR_CONFIG;

	pci_read_config_word(adapter->pdev, cap_offset + reg, value);

	return 0;
}

s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
	struct igb_adapter *adapter = hw->back;
	u16 cap_offset;

	cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
	if (!cap_offset)
		return -E1000_ERR_CONFIG;

	pci_write_config_word(adapter->pdev, cap_offset + reg, *value);

	return 0;
}
static void igb_vlan_rx_register(struct net_device *netdev,
                                 struct vlan_group *grp)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl;

	igb_irq_disable(adapter);
	adapter->vlgrp = grp;

	if (grp) {
		/* enable VLAN tag insert/strip */
		ctrl = rd32(E1000_CTRL);
		ctrl |= E1000_CTRL_VME;
		wr32(E1000_CTRL, ctrl);

		/* Disable CFI check */
		rctl = rd32(E1000_RCTL);
		rctl &= ~E1000_RCTL_CFIEN;
		wr32(E1000_RCTL, rctl);
	} else {
		/* disable VLAN tag insert/strip */
		ctrl = rd32(E1000_CTRL);
		ctrl &= ~E1000_CTRL_VME;
		wr32(E1000_CTRL, ctrl);
	}

	igb_rlpml_set(adapter);

	if (!test_bit(__IGB_DOWN, &adapter->state))
		igb_irq_enable(adapter);
}
static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int pf_id = adapter->vfs_allocated_count;

	/* attempt to add filter to vlvf array */
	igb_vlvf_set(adapter, vid, true, pf_id);

	/* add the filter since PF can receive vlans w/o entry in vlvf */
	igb_vfta_set(hw, vid, true);
}
static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int pf_id = adapter->vfs_allocated_count;
	s32 err;

	igb_irq_disable(adapter);
	vlan_group_set_device(adapter->vlgrp, vid, NULL);

	if (!test_bit(__IGB_DOWN, &adapter->state))
		igb_irq_enable(adapter);

	/* remove vlan from VLVF table array */
	err = igb_vlvf_set(adapter, vid, false, pf_id);

	/* if vid was not present in VLVF just remove it from table */
	if (err)
		igb_vfta_set(hw, vid, false);
}
static void igb_restore_vlan(struct igb_adapter *adapter)
{
	igb_vlan_rx_register(adapter->netdev, adapter->vlgrp);

	if (adapter->vlgrp) {
		u16 vid;
		for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
			if (!vlan_group_get_device(adapter->vlgrp, vid))
				continue;
			igb_vlan_rx_add_vid(adapter->netdev, vid);
		}
	}
}
int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
{
	struct e1000_mac_info *mac = &adapter->hw.mac;

	mac->autoneg = 0;

	switch (spddplx) {
	case SPEED_10 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_10_HALF;
		break;
	case SPEED_10 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_10_FULL;
		break;
	case SPEED_100 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_100_HALF;
		break;
	case SPEED_100 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_100_FULL;
		break;
	case SPEED_1000 + DUPLEX_FULL:
		mac->autoneg = 1;
		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case SPEED_1000 + DUPLEX_HALF: /* not supported */
	default:
		dev_err(&adapter->pdev->dev,
			"Unsupported Speed/Duplex configuration\n");
		return -EINVAL;
	}
	return 0;
}
static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl, status;
	u32 wufc = adapter->wol;
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev))
		igb_close(netdev);

	igb_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;
#endif

	status = rd32(E1000_STATUS);
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;

	if (wufc) {
		igb_setup_rctl(adapter);
		igb_set_rx_mode(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & E1000_WUFC_MC) {
			rctl = rd32(E1000_RCTL);
			rctl |= E1000_RCTL_MPE;
			wr32(E1000_RCTL, rctl);
		}

		ctrl = rd32(E1000_CTRL);
		/* advertise wake from D3Cold */
		#define E1000_CTRL_ADVD3WUC 0x00100000
		/* phy power management enable */
		#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
		ctrl |= E1000_CTRL_ADVD3WUC;
		wr32(E1000_CTRL, ctrl);

		/* Allow time for pending master requests to run */
		igb_disable_pcie_master(&adapter->hw);

		wr32(E1000_WUC, E1000_WUC_PME_EN);
		wr32(E1000_WUFC, wufc);
	} else {
		wr32(E1000_WUC, 0);
		wr32(E1000_WUFC, 0);
	}

	*enable_wake = wufc || adapter->en_mng_pt;
	if (!*enable_wake)
		igb_shutdown_serdes_link_82575(hw);

	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
	 * would have already happened in close and is redundant. */
	igb_release_hw_control(adapter);

	pci_disable_device(pdev);

	return 0;
}
#ifdef CONFIG_PM
static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int retval;
	bool wake;

	retval = __igb_shutdown(pdev, &wake);
	if (retval)
		return retval;

	if (wake) {
		pci_prepare_to_sleep(pdev);
	} else {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}
static int igb_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"igb: Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (igb_init_interrupt_scheme(adapter)) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	/* e1000_power_up_phy(adapter); */

	igb_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);

	wr32(E1000_WUS, ~0);

	if (netif_running(netdev)) {
		err = igb_open(netdev);
		if (err)
			return err;
	}

	netif_device_attach(netdev);

	return 0;
}
#endif /* CONFIG_PM */
static void igb_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__igb_shutdown(pdev, &wake);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void igb_netpoll(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int i;

	if (!adapter->msix_entries) {
		struct igb_q_vector *q_vector = adapter->q_vector[0];
		igb_irq_disable(adapter);
		napi_schedule(&q_vector->napi);
		return;
	}

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		wr32(E1000_EIMC, q_vector->eims_value);
		napi_schedule(&q_vector->napi);
	}
}
#endif /* CONFIG_NET_POLL_CONTROLLER */
/**
 * igb_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
                                              pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		igb_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * igb_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the igb_resume routine.
 */
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	pci_ers_result_t result;
	int err;

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);

		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);

		igb_reset(adapter);
		wr32(E1000_WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status "
		        "failed 0x%0x\n", err);
		/* non-fatal, continue */
	}

	return result;
}
/**
 * igb_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the igb_resume routine.
 */
static void igb_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (igb_up(adapter)) {
			dev_err(&pdev->dev, "igb_up failed after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);
}
static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
                             u8 qsel)
{
	u32 rar_low, rar_high;
	struct e1000_hw *hw = &adapter->hw;

	/* HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
	           ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));

	/* Indicate to hardware the Address is Valid. */
	rar_high |= E1000_RAH_AV;

	if (hw->mac.type == e1000_82575)
		rar_high |= E1000_RAH_POOL_1 * qsel;
	else
		rar_high |= E1000_RAH_POOL_1 << qsel;

	wr32(E1000_RAL(index), rar_low);
	wrfl();
	wr32(E1000_RAH(index), rar_high);
	wrfl();
}
static int igb_set_vf_mac(struct igb_adapter *adapter,
                          int vf, unsigned char *mac_addr)
{
	struct e1000_hw *hw = &adapter->hw;
	/* VF MAC addresses start at end of receive addresses and moves
	 * towards the first, as a result a collision should not be possible */
	int rar_entry = hw->mac.rar_entry_count - (vf + 1);

	memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);

	igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf);

	return 0;
}
static void igb_vmm_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 reg;

	/* replication is not supported for 82575 */
	if (hw->mac.type == e1000_82575)
		return;

	/* enable replication vlan tag stripping */
	reg = rd32(E1000_RPLOLR);
	reg |= E1000_RPLOLR_STRVLAN;
	wr32(E1000_RPLOLR, reg);

	/* notify HW that the MAC is adding vlan tags */
	reg = rd32(E1000_DTXCTL);
	reg |= E1000_DTXCTL_VLAN_ADDED;
	wr32(E1000_DTXCTL, reg);

	if (adapter->vfs_allocated_count) {
		igb_vmdq_set_loopback_pf(hw, true);
		igb_vmdq_set_replication_pf(hw, true);
	} else {
		igb_vmdq_set_loopback_pf(hw, false);
		igb_vmdq_set_replication_pf(hw, false);
	}
}