/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007-2009 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/net_tstamp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/aer.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
#include "igb.h"
#define DRV_VERSION "1.3.16-k2"
char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
                "Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] = "Copyright (c) 2007-2009 Intel Corporation.";

static const struct e1000_info *igb_info_tbl[] = {
        [board_82575] = &e1000_82575_info,
};
static struct pci_device_id igb_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
        /* required last entry */
        {0, }
};

MODULE_DEVICE_TABLE(pci, igb_pci_tbl);
void igb_reset(struct igb_adapter *);
static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
static void igb_setup_mrqc(struct igb_adapter *);
void igb_update_stats(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void __devexit igb_remove(struct pci_dev *pdev);
static int igb_sw_init(struct igb_adapter *);
static int igb_open(struct net_device *);
static int igb_close(struct net_device *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_setup_tctl(struct igb_adapter *);
static void igb_setup_rctl(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
static void igb_clean_tx_ring(struct igb_ring *);
static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_rx_mode(struct net_device *);
static void igb_update_phy_info(unsigned long);
static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
static netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *,
                                           struct igb_ring *);
static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
                                      struct net_device *);
static struct net_device_stats *igb_get_stats(struct net_device *);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static void igb_set_uta(struct igb_adapter *adapter);
static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
static irqreturn_t igb_msix_ring(int irq, void *);
#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *);
static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_IGB_DCA */
static bool igb_clean_tx_irq(struct igb_q_vector *);
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_rx_irq_adv(struct igb_q_vector *, int *, int);
static void igb_alloc_rx_buffers_adv(struct igb_ring *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_rx_register(struct net_device *, struct vlan_group *);
static void igb_vlan_rx_add_vid(struct net_device *, u16);
static void igb_vlan_rx_kill_vid(struct net_device *, u16);
static void igb_restore_vlan(struct igb_adapter *);
static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32, u8);
static void igb_ping_all_vfs(struct igb_adapter *);
static void igb_msg_task(struct igb_adapter *);
static int igb_rcv_msg_from_vf(struct igb_adapter *, u32);
static void igb_vmm_control(struct igb_adapter *);
static int igb_set_vf_mac(struct igb_adapter *adapter, int, unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
static inline void igb_set_vmolr(struct e1000_hw *hw, int vfn)
{
        u32 reg_data;

        reg_data = rd32(E1000_VMOLR(vfn));
        reg_data |= E1000_VMOLR_BAM |    /* Accept broadcast */
                    E1000_VMOLR_ROMPE |  /* Accept packets matched in MTA */
                    E1000_VMOLR_AUPE |   /* Accept untagged packets */
                    E1000_VMOLR_STRVLAN; /* Strip vlan tags */
        wr32(E1000_VMOLR(vfn), reg_data);
}
static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
                                   int vfn)
{
        struct e1000_hw *hw = &adapter->hw;
        u32 vmolr;

        /* if it isn't the PF check to see if VFs are enabled and
         * increase the size to support vlan tags */
        if (vfn < adapter->vfs_allocated_count &&
            adapter->vf_data[vfn].vlans_enabled)
                size += VLAN_TAG_SIZE;

        vmolr = rd32(E1000_VMOLR(vfn));
        vmolr &= ~E1000_VMOLR_RLPML_MASK;
        vmolr |= size | E1000_VMOLR_LPE;
        wr32(E1000_VMOLR(vfn), vmolr);

        return 0;
}
#ifdef CONFIG_PM
static int igb_suspend(struct pci_dev *, pm_message_t);
static int igb_resume(struct pci_dev *);
#endif
static void igb_shutdown(struct pci_dev *);
#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
        .notifier_call  = igb_notify_dca,
        .next           = NULL,
        .priority       = 0
};
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void igb_netpoll(struct net_device *);
#endif
#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs = 0;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
                 "per physical function");
#endif /* CONFIG_PCI_IOV */
static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
                     pci_channel_state_t);
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
static void igb_io_resume(struct pci_dev *);

static struct pci_error_handlers igb_err_handler = {
        .error_detected = igb_io_error_detected,
        .slot_reset = igb_io_slot_reset,
        .resume = igb_io_resume,
};
static struct pci_driver igb_driver = {
        .name     = igb_driver_name,
        .id_table = igb_pci_tbl,
        .probe    = igb_probe,
        .remove   = __devexit_p(igb_remove),
#ifdef CONFIG_PM
        /* Power Management Hooks */
        .suspend  = igb_suspend,
        .resume   = igb_resume,
#endif
        .shutdown = igb_shutdown,
        .err_handler = &igb_err_handler
};

static int global_quad_port_a; /* global quad port a indication */
MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
/*
 * Scale the NIC clock cycle by a large factor so that
 * relatively small clock corrections can be added or
 * subtracted at each clock tick. The drawbacks of a
 * large factor are a) that the clock register overflows
 * more quickly (not such a big deal) and b) that the
 * increment per tick has to fit into 24 bits.
 *
 * Note that
 *   TIMINCA = IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS *
 *             IGB_TSYNC_SCALE
 *   TIMINCA += TIMINCA * adjustment [ppm] / 1e9
 *
 * The base scale factor is intentionally a power of two
 * so that the division in %struct timecounter can be done with
 * a shift.
 */
#define IGB_TSYNC_SHIFT (19)
#define IGB_TSYNC_SCALE (1<<IGB_TSYNC_SHIFT)

/*
 * The duration of one clock cycle of the NIC.
 *
 * @todo This hard-coded value is part of the specification and might change
 * in future hardware revisions. Add revision check.
 */
#define IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS 16

#if (IGB_TSYNC_SCALE * IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS) >= (1<<24)
# error IGB_TSYNC_SCALE and/or IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS are too large to fit into TIMINCA
#endif
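
/*
 * Worked example: with IGB_TSYNC_SHIFT = 19, IGB_TSYNC_SCALE is
 * 1 << 19 = 524288, so the increment per 16 ns tick is
 * 16 * 524288 = 8388608 = 0x800000, comfortably below the 24-bit
 * TIMINCA limit of 1 << 24 = 16777216 enforced by the check above.
 */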
/**
 * igb_read_clock - read raw cycle counter (to be used by time counter)
 */
static cycle_t igb_read_clock(const struct cyclecounter *tc)
{
        struct igb_adapter *adapter =
                container_of(tc, struct igb_adapter, cycles);
        struct e1000_hw *hw = &adapter->hw;
        u64 stamp;

        stamp = rd32(E1000_SYSTIML);
        stamp |= (u64)rd32(E1000_SYSTIMH) << 32ULL;

        return stamp;
}
/**
 * igb_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *igb_get_hw_dev_name(struct e1000_hw *hw)
{
        struct igb_adapter *adapter = hw->back;
        return adapter->netdev->name;
}
/**
 * igb_get_time_str - format current NIC and system time as string
 */
static char *igb_get_time_str(struct igb_adapter *adapter,
                              char buffer[160])
{
        cycle_t hw = adapter->cycles.read(&adapter->cycles);
        struct timespec nic = ns_to_timespec(timecounter_read(&adapter->clock));
        struct timespec sys;
        struct timespec delta;
        getnstimeofday(&sys);

        delta = timespec_sub(nic, sys);

        sprintf(buffer,
                "HW %llu, NIC %ld.%09lus, SYS %ld.%09lus, NIC-SYS %lds + %09luns",
                (unsigned long long)hw,
                (long)nic.tv_sec, nic.tv_nsec,
                (long)sys.tv_sec, sys.tv_nsec,
                (long)delta.tv_sec, delta.tv_nsec);

        return buffer;
}
/**
 * igb_desc_unused - calculate if we have unused descriptors
 **/
static int igb_desc_unused(struct igb_ring *ring)
{
        if (ring->next_to_clean > ring->next_to_use)
                return ring->next_to_clean - ring->next_to_use - 1;

        return ring->count + ring->next_to_clean - ring->next_to_use - 1;
}
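
/*
 * Worked example: for a ring with count = 8, next_to_clean = 2 and
 * next_to_use = 5, descriptors 2..4 are outstanding, so
 * 8 + 2 - 5 - 1 = 4 descriptors are free.  One slot is always kept
 * unused so that next_to_use == next_to_clean unambiguously means
 * "empty".
 */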
/**
 * igb_init_module - Driver Registration Routine
 *
 * igb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init igb_init_module(void)
{
        int ret;

        printk(KERN_INFO "%s - version %s\n",
               igb_driver_string, igb_driver_version);

        printk(KERN_INFO "%s\n", igb_copyright);

        global_quad_port_a = 0;

#ifdef CONFIG_IGB_DCA
        dca_register_notify(&dca_notifier);
#endif
        ret = pci_register_driver(&igb_driver);
        return ret;
}

module_init(igb_init_module);
/**
 * igb_exit_module - Driver Exit Cleanup Routine
 *
 * igb_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit igb_exit_module(void)
{
#ifdef CONFIG_IGB_DCA
        dca_unregister_notify(&dca_notifier);
#endif
        pci_unregister_driver(&igb_driver);
}

module_exit(igb_exit_module);
#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
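/*
 * Worked example: Q_IDX_82576 interleaves ring indices into the
 * hardware's queue-pair layout:
 *   i = 0 -> 0,  i = 1 -> 8,  i = 2 -> 1,  i = 3 -> 9,  i = 4 -> 2
 * which matches the VF assignment (VF 0 gets queues 0 and 8, VF 1
 * gets queues 1 and 9, ...) described in igb_cache_ring_register()
 * below.
 */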
/**
 * igb_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 **/
static void igb_cache_ring_register(struct igb_adapter *adapter)
{
        int i;
        u32 rbase_offset = adapter->vfs_allocated_count;

        switch (adapter->hw.mac.type) {
        case e1000_82576:
                /* The queues are allocated for virtualization such that VF 0
                 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
                 * In order to avoid collision we start at the first free queue
                 * and continue consuming queues in the same sequence
                 */
                for (i = 0; i < adapter->num_rx_queues; i++)
                        adapter->rx_ring[i].reg_idx = rbase_offset +
                                                      Q_IDX_82576(i);
                for (i = 0; i < adapter->num_tx_queues; i++)
                        adapter->tx_ring[i].reg_idx = rbase_offset +
                                                      Q_IDX_82576(i);
                break;
        case e1000_82575:
        default:
                for (i = 0; i < adapter->num_rx_queues; i++)
                        adapter->rx_ring[i].reg_idx = i;
                for (i = 0; i < adapter->num_tx_queues; i++)
                        adapter->tx_ring[i].reg_idx = i;
                break;
        }
}
static void igb_free_queues(struct igb_adapter *adapter)
{
        kfree(adapter->tx_ring);
        kfree(adapter->rx_ring);

        adapter->tx_ring = NULL;
        adapter->rx_ring = NULL;

        adapter->num_rx_queues = 0;
        adapter->num_tx_queues = 0;
}
/**
 * igb_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int igb_alloc_queues(struct igb_adapter *adapter)
{
        int i;

        adapter->tx_ring = kcalloc(adapter->num_tx_queues,
                                   sizeof(struct igb_ring), GFP_KERNEL);
        if (!adapter->tx_ring)
                goto err;

        adapter->rx_ring = kcalloc(adapter->num_rx_queues,
                                   sizeof(struct igb_ring), GFP_KERNEL);
        if (!adapter->rx_ring)
                goto err;

        for (i = 0; i < adapter->num_tx_queues; i++) {
                struct igb_ring *ring = &(adapter->tx_ring[i]);
                ring->count = adapter->tx_ring_count;
                ring->queue_index = i;
                ring->pdev = adapter->pdev;
                ring->netdev = adapter->netdev;
                /* For 82575, context index must be unique per ring. */
                if (adapter->hw.mac.type == e1000_82575)
                        ring->flags = IGB_RING_FLAG_TX_CTX_IDX;
        }

        for (i = 0; i < adapter->num_rx_queues; i++) {
                struct igb_ring *ring = &(adapter->rx_ring[i]);
                ring->count = adapter->rx_ring_count;
                ring->queue_index = i;
                ring->pdev = adapter->pdev;
                ring->netdev = adapter->netdev;
                ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
                ring->flags = IGB_RING_FLAG_RX_CSUM; /* enable rx checksum */
                /* set flag indicating ring supports SCTP checksum offload */
                if (adapter->hw.mac.type >= e1000_82576)
                        ring->flags |= IGB_RING_FLAG_RX_SCTP_CSUM;
        }

        igb_cache_ring_register(adapter);

        return 0;

err:
        igb_free_queues(adapter);

        return -ENOMEM;
}
#define IGB_N0_QUEUE -1
static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
{
        u32 msixbm = 0;
        struct igb_adapter *adapter = q_vector->adapter;
        struct e1000_hw *hw = &adapter->hw;
        u32 ivar, index;
        int rx_queue = IGB_N0_QUEUE;
        int tx_queue = IGB_N0_QUEUE;

        if (q_vector->rx_ring)
                rx_queue = q_vector->rx_ring->reg_idx;
        if (q_vector->tx_ring)
                tx_queue = q_vector->tx_ring->reg_idx;

        switch (hw->mac.type) {
        case e1000_82575:
                /* The 82575 assigns vectors using a bitmask, which matches the
                   bitmask for the EICR/EIMS/EIMC registers.  To assign one
                   or more queues to a vector, we write the appropriate bits
                   into the MSIXBM register for that vector. */
                if (rx_queue > IGB_N0_QUEUE)
                        msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
                if (tx_queue > IGB_N0_QUEUE)
                        msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
                array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
                q_vector->eims_value = msixbm;
                break;
        case e1000_82576:
                /* 82576 uses a table-based method for assigning vectors.
                   Each queue has a single entry in the table to which we write
                   a vector number along with a "valid" bit.  Sadly, the layout
                   of the table is somewhat counterintuitive. */
                if (rx_queue > IGB_N0_QUEUE) {
                        index = (rx_queue & 0x7);
                        ivar = array_rd32(E1000_IVAR0, index);
                        if (rx_queue < 8) {
                                /* vector goes into low byte of register */
                                ivar = ivar & 0xFFFFFF00;
                                ivar |= msix_vector | E1000_IVAR_VALID;
                        } else {
                                /* vector goes into third byte of register */
                                ivar = ivar & 0xFF00FFFF;
                                ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
                        }
                        array_wr32(E1000_IVAR0, index, ivar);
                }
                if (tx_queue > IGB_N0_QUEUE) {
                        index = (tx_queue & 0x7);
                        ivar = array_rd32(E1000_IVAR0, index);
                        if (tx_queue < 8) {
                                /* vector goes into second byte of register */
                                ivar = ivar & 0xFFFF00FF;
                                ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
                        } else {
                                /* vector goes into high byte of register */
                                ivar = ivar & 0x00FFFFFF;
                                ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
                        }
                        array_wr32(E1000_IVAR0, index, ivar);
                }
                q_vector->eims_value = 1 << msix_vector;
                break;
        default:
                BUG();
                break;
        }
}
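
/*
 * Worked example for the 82576 table method above: IVAR0 register n
 * (n = queue & 0x7) packs four entries, one per byte:
 *   bits 7:0   rx queue n        bits 15:8  tx queue n
 *   bits 23:16 rx queue n + 8    bits 31:24 tx queue n + 8
 * so rx queue 9 lands in byte 2 of IVAR0 register 1, with
 * E1000_IVAR_VALID set alongside the vector number.  The 82575
 * instead ORs E1000_EICR_RX_QUEUE0/TX_QUEUE0, shifted by the queue
 * index, into a per-vector MSIXBM bitmask.
 */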
/**
 * igb_configure_msix - Configure MSI-X hardware
 *
 * igb_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void igb_configure_msix(struct igb_adapter *adapter)
{
        u32 tmp;
        int i, vector = 0;
        struct e1000_hw *hw = &adapter->hw;

        adapter->eims_enable_mask = 0;

        /* set vector for other causes, i.e. link changes */
        switch (hw->mac.type) {
        case e1000_82575:
                tmp = rd32(E1000_CTRL_EXT);
                /* enable MSI-X PBA support*/
                tmp |= E1000_CTRL_EXT_PBA_CLR;

                /* Auto-Mask interrupts upon ICR read. */
                tmp |= E1000_CTRL_EXT_EIAME;
                tmp |= E1000_CTRL_EXT_IRCA;

                wr32(E1000_CTRL_EXT, tmp);

                /* enable msix_other interrupt */
                array_wr32(E1000_MSIXBM(0), vector++,
                           E1000_EIMS_OTHER);
                adapter->eims_other = E1000_EIMS_OTHER;

                break;

        case e1000_82576:
                /* Turn on MSI-X capability first, or our settings
                 * won't stick.  And it will take days to debug. */
                wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
                                 E1000_GPIE_PBA | E1000_GPIE_EIAME |
                                 E1000_GPIE_NSICR);

                /* enable msix_other interrupt */
                adapter->eims_other = 1 << vector;
                tmp = (vector++ | E1000_IVAR_VALID) << 8;

                wr32(E1000_IVAR_MISC, tmp);
                break;
        default:
                /* do nothing, since nothing else supports MSI-X */
                break;
        } /* switch (hw->mac.type) */

        adapter->eims_enable_mask |= adapter->eims_other;

        for (i = 0; i < adapter->num_q_vectors; i++) {
                struct igb_q_vector *q_vector = adapter->q_vector[i];
                igb_assign_vector(q_vector, vector++);
                adapter->eims_enable_mask |= q_vector->eims_value;
        }

        wrfl();
}
/**
 * igb_request_msix - Initialize MSI-X interrupts
 *
 * igb_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int igb_request_msix(struct igb_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        struct e1000_hw *hw = &adapter->hw;
        int i, err = 0, vector = 0;

        err = request_irq(adapter->msix_entries[vector].vector,
                          &igb_msix_other, 0, netdev->name, adapter);
        if (err)
                goto out;
        vector++;

        for (i = 0; i < adapter->num_q_vectors; i++) {
                struct igb_q_vector *q_vector = adapter->q_vector[i];

                q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);

                if (q_vector->rx_ring && q_vector->tx_ring)
                        sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
                                q_vector->rx_ring->queue_index);
                else if (q_vector->tx_ring)
                        sprintf(q_vector->name, "%s-tx-%u", netdev->name,
                                q_vector->tx_ring->queue_index);
                else if (q_vector->rx_ring)
                        sprintf(q_vector->name, "%s-rx-%u", netdev->name,
                                q_vector->rx_ring->queue_index);
                else
                        sprintf(q_vector->name, "%s-unused", netdev->name);

                err = request_irq(adapter->msix_entries[vector].vector,
                                  &igb_msix_ring, 0, q_vector->name,
                                  q_vector);
                if (err)
                        goto out;
                vector++;
        }

        igb_configure_msix(adapter);
        return 0;
out:
        return err;
}
static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
{
        if (adapter->msix_entries) {
                pci_disable_msix(adapter->pdev);
                kfree(adapter->msix_entries);
                adapter->msix_entries = NULL;
        } else if (adapter->flags & IGB_FLAG_HAS_MSI) {
                pci_disable_msi(adapter->pdev);
        }
}
/**
 * igb_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void igb_free_q_vectors(struct igb_adapter *adapter)
{
        int v_idx;

        for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
                struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
                adapter->q_vector[v_idx] = NULL;
                netif_napi_del(&q_vector->napi);
                kfree(q_vector);
        }
        adapter->num_q_vectors = 0;
}
/**
 * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
 *
 * This function resets the device so that it has 0 rx queues, tx queues, and
 * MSI-X interrupts allocated.
 */
static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
{
        igb_free_queues(adapter);
        igb_free_q_vectors(adapter);
        igb_reset_interrupt_capability(adapter);
}
/**
 * igb_set_interrupt_capability - set MSI or MSI-X if supported
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static void igb_set_interrupt_capability(struct igb_adapter *adapter)
{
        int err;
        int numvecs, i;

        /* Number of supported queues. */
        adapter->num_rx_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
        adapter->num_tx_queues = min_t(u32, IGB_MAX_TX_QUEUES, num_online_cpus());

        /* start with one vector for every rx queue */
        numvecs = adapter->num_rx_queues;

        /* if tx handler is separate add 1 for every tx queue */
        numvecs += adapter->num_tx_queues;

        /* store the number of vectors reserved for queues */
        adapter->num_q_vectors = numvecs;

        /* add 1 vector for link status interrupts */
        numvecs++;
        adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
                                        GFP_KERNEL);
        if (!adapter->msix_entries)
                goto msi_only;

        for (i = 0; i < numvecs; i++)
                adapter->msix_entries[i].entry = i;

        err = pci_enable_msix(adapter->pdev,
                              adapter->msix_entries,
                              numvecs);
        if (err == 0)
                goto out;

        igb_reset_interrupt_capability(adapter);

        /* If we can't do MSI-X, try MSI */
msi_only:
#ifdef CONFIG_PCI_IOV
        /* disable SR-IOV for non MSI-X configurations */
        if (adapter->vf_data) {
                struct e1000_hw *hw = &adapter->hw;
                /* disable iov and allow time for transactions to clear */
                pci_disable_sriov(adapter->pdev);
                msleep(500);

                kfree(adapter->vf_data);
                adapter->vf_data = NULL;
                wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
                msleep(100);
                dev_info(&adapter->pdev->dev, "IOV Disabled\n");
        }
#endif
        adapter->num_rx_queues = 1;
        adapter->num_tx_queues = 1;
        adapter->num_q_vectors = 1;
        if (!pci_enable_msi(adapter->pdev))
                adapter->flags |= IGB_FLAG_HAS_MSI;
out:
        /* Notify the stack of the (possibly) reduced Tx Queue count. */
        adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
        return;
}
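
/*
 * Worked example: on a 4-CPU system (assuming IGB_MAX_RX_QUEUES and
 * IGB_MAX_TX_QUEUES are at least 4) the function above asks for
 * 4 rx + 4 tx q_vectors plus one link-status vector, i.e. 9 MSI-X
 * vectors; if pci_enable_msix() fails it falls back to a single MSI
 * vector driving one rx and one tx queue.
 */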
/**
 * igb_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int igb_alloc_q_vectors(struct igb_adapter *adapter)
{
        struct igb_q_vector *q_vector;
        struct e1000_hw *hw = &adapter->hw;
        int v_idx;

        for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
                q_vector = kzalloc(sizeof(struct igb_q_vector), GFP_KERNEL);
                if (!q_vector)
                        goto err_out;
                q_vector->adapter = adapter;
                q_vector->itr_shift = (hw->mac.type == e1000_82575) ? 16 : 0;
                q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
                q_vector->itr_val = IGB_START_ITR;
                q_vector->set_itr = 1;
                netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
                adapter->q_vector[v_idx] = q_vector;
        }
        return 0;

err_out:
        while (v_idx) {
                v_idx--;
                q_vector = adapter->q_vector[v_idx];
                netif_napi_del(&q_vector->napi);
                kfree(q_vector);
                adapter->q_vector[v_idx] = NULL;
        }
        return -ENOMEM;
}
static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
                                      int ring_idx, int v_idx)
{
        struct igb_q_vector *q_vector;

        q_vector = adapter->q_vector[v_idx];
        q_vector->rx_ring = &adapter->rx_ring[ring_idx];
        q_vector->rx_ring->q_vector = q_vector;
        q_vector->itr_val = adapter->itr;
}

static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
                                      int ring_idx, int v_idx)
{
        struct igb_q_vector *q_vector;

        q_vector = adapter->q_vector[v_idx];
        q_vector->tx_ring = &adapter->tx_ring[ring_idx];
        q_vector->tx_ring->q_vector = q_vector;
        q_vector->itr_val = adapter->itr;
}
/**
 * igb_map_ring_to_vector - maps allocated queues to vectors
 *
 * This function maps the recently allocated queues to vectors.
 **/
static int igb_map_ring_to_vector(struct igb_adapter *adapter)
{
        int i;
        int v_idx = 0;

        if ((adapter->num_q_vectors < adapter->num_rx_queues) ||
            (adapter->num_q_vectors < adapter->num_tx_queues))
                return -ENOMEM;

        if (adapter->num_q_vectors >=
            (adapter->num_rx_queues + adapter->num_tx_queues)) {
                for (i = 0; i < adapter->num_rx_queues; i++)
                        igb_map_rx_ring_to_vector(adapter, i, v_idx++);
                for (i = 0; i < adapter->num_tx_queues; i++)
                        igb_map_tx_ring_to_vector(adapter, i, v_idx++);
        } else {
                for (i = 0; i < adapter->num_rx_queues; i++) {
                        if (i < adapter->num_tx_queues)
                                igb_map_tx_ring_to_vector(adapter, i, v_idx);
                        igb_map_rx_ring_to_vector(adapter, i, v_idx++);
                }
                for (; i < adapter->num_tx_queues; i++)
                        igb_map_tx_ring_to_vector(adapter, i, v_idx++);
        }
        return 0;
}
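
/*
 * Worked example: with 4 rx and 4 tx queues and 8 q_vectors the first
 * branch above gives every ring its own vector (rx rings on vectors
 * 0-3, tx rings on 4-7).  With only 4 q_vectors the second branch
 * makes rx ring i and tx ring i share vector i instead.
 */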
/**
 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
 *
 * This function initializes the interrupts and allocates all of the queues.
 **/
static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
{
        struct pci_dev *pdev = adapter->pdev;
        int err;

        igb_set_interrupt_capability(adapter);

        err = igb_alloc_q_vectors(adapter);
        if (err) {
                dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
                goto err_alloc_q_vectors;
        }

        err = igb_alloc_queues(adapter);
        if (err) {
                dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
                goto err_alloc_queues;
        }

        err = igb_map_ring_to_vector(adapter);
        if (err) {
                dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n");
                goto err_map_queues;
        }

        return 0;
err_map_queues:
        igb_free_queues(adapter);
err_alloc_queues:
        igb_free_q_vectors(adapter);
err_alloc_q_vectors:
        igb_reset_interrupt_capability(adapter);
        return err;
}
/**
 * igb_request_irq - initialize interrupts
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_request_irq(struct igb_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
        struct e1000_hw *hw = &adapter->hw;
        int err = 0;

        if (adapter->msix_entries) {
                err = igb_request_msix(adapter);
                if (!err)
                        goto request_done;
                /* fall back to MSI */
                igb_clear_interrupt_scheme(adapter);
                if (!pci_enable_msi(adapter->pdev))
                        adapter->flags |= IGB_FLAG_HAS_MSI;
                igb_free_all_tx_resources(adapter);
                igb_free_all_rx_resources(adapter);
                adapter->num_tx_queues = 1;
                adapter->num_rx_queues = 1;
                adapter->num_q_vectors = 1;
                err = igb_alloc_q_vectors(adapter);
                if (err) {
                        dev_err(&pdev->dev,
                                "Unable to allocate memory for vectors\n");
                        goto request_done;
                }
                err = igb_alloc_queues(adapter);
                if (err) {
                        dev_err(&pdev->dev,
                                "Unable to allocate memory for queues\n");
                        igb_free_q_vectors(adapter);
                        goto request_done;
                }
                igb_setup_all_tx_resources(adapter);
                igb_setup_all_rx_resources(adapter);
        } else {
                switch (hw->mac.type) {
                case e1000_82575:
                        wr32(E1000_MSIXBM(0),
                             (E1000_EICR_RX_QUEUE0 |
                              E1000_EICR_TX_QUEUE0 |
                              E1000_EIMS_OTHER));
                        break;
                case e1000_82576:
                        wr32(E1000_IVAR0, E1000_IVAR_VALID);
                        break;
                default:
                        break;
                }
        }

        if (adapter->flags & IGB_FLAG_HAS_MSI) {
                err = request_irq(adapter->pdev->irq, &igb_intr_msi, 0,
                                  netdev->name, adapter);
                if (!err)
                        goto request_done;

                /* fall back to legacy interrupts */
                igb_reset_interrupt_capability(adapter);
                adapter->flags &= ~IGB_FLAG_HAS_MSI;
        }

        err = request_irq(adapter->pdev->irq, &igb_intr, IRQF_SHARED,
                          netdev->name, adapter);

        if (err)
                dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n",
                        err);

request_done:
        return err;
}
static void igb_free_irq(struct igb_adapter *adapter)
{
        if (adapter->msix_entries) {
                int vector = 0, i;

                free_irq(adapter->msix_entries[vector++].vector, adapter);

                for (i = 0; i < adapter->num_q_vectors; i++) {
                        struct igb_q_vector *q_vector = adapter->q_vector[i];
                        free_irq(adapter->msix_entries[vector++].vector,
                                 q_vector);
                }
        } else {
                free_irq(adapter->pdev->irq, adapter);
        }
}
/**
 * igb_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void igb_irq_disable(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;

        if (adapter->msix_entries) {
                u32 regval = rd32(E1000_EIAM);
                wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
                wr32(E1000_EIMC, adapter->eims_enable_mask);
                regval = rd32(E1000_EIAC);
                wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
        }

        wr32(E1000_IAM, 0);
        wr32(E1000_IMC, ~0);
        wrfl();
        synchronize_irq(adapter->pdev->irq);
}
/**
 * igb_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void igb_irq_enable(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;

        if (adapter->msix_entries) {
                u32 regval = rd32(E1000_EIAC);
                wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
                regval = rd32(E1000_EIAM);
                wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
                wr32(E1000_EIMS, adapter->eims_enable_mask);
                if (adapter->vfs_allocated_count)
                        wr32(E1000_MBVFIMR, 0xFF);
                wr32(E1000_IMS, (E1000_IMS_LSC | E1000_IMS_VMMB |
                                 E1000_IMS_DOUTSYNC));
        } else {
                wr32(E1000_IMS, IMS_ENABLE_MASK);
                wr32(E1000_IAM, IMS_ENABLE_MASK);
        }
}
static void igb_update_mng_vlan(struct igb_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        u16 vid = adapter->hw.mng_cookie.vlan_id;
        u16 old_vid = adapter->mng_vlan_id;
        if (adapter->vlgrp) {
                if (!vlan_group_get_device(adapter->vlgrp, vid)) {
                        if (adapter->hw.mng_cookie.status &
                            E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
                                igb_vlan_rx_add_vid(netdev, vid);
                                adapter->mng_vlan_id = vid;
                        } else
                                adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;

                        if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
                            (vid != old_vid) &&
                            !vlan_group_get_device(adapter->vlgrp, old_vid))
                                igb_vlan_rx_kill_vid(netdev, old_vid);
                } else
                        adapter->mng_vlan_id = vid;
        }
}
/**
 * igb_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 *
 **/
static void igb_release_hw_control(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        u32 ctrl_ext;

        /* Let firmware take over control of h/w */
        ctrl_ext = rd32(E1000_CTRL_EXT);
        wr32(E1000_CTRL_EXT,
             ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 *
 **/
static void igb_get_hw_control(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        u32 ctrl_ext;

        /* Let firmware know the driver has taken over */
        ctrl_ext = rd32(E1000_CTRL_EXT);
        wr32(E1000_CTRL_EXT,
             ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
}
/**
 * igb_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 **/
static void igb_configure(struct igb_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int i;

        igb_get_hw_control(adapter);
        igb_set_rx_mode(netdev);

        igb_restore_vlan(adapter);

        igb_setup_tctl(adapter);
        igb_setup_mrqc(adapter);
        igb_setup_rctl(adapter);

        igb_configure_tx(adapter);
        igb_configure_rx(adapter);

        igb_rx_fifo_flush_82575(&adapter->hw);

        /* call igb_desc_unused which always leaves
         * at least 1 descriptor unused to make sure
         * next_to_use != next_to_clean */
        for (i = 0; i < adapter->num_rx_queues; i++) {
                struct igb_ring *ring = &adapter->rx_ring[i];
                igb_alloc_rx_buffers_adv(ring, igb_desc_unused(ring));
        }

        adapter->tx_queue_len = netdev->tx_queue_len;
}
/**
 * igb_up - Open the interface and prepare it to handle traffic
 * @adapter: board private structure
 **/
int igb_up(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        int i;

        /* hardware has been reset, we need to reload some things */
        igb_configure(adapter);

        clear_bit(__IGB_DOWN, &adapter->state);

        for (i = 0; i < adapter->num_q_vectors; i++) {
                struct igb_q_vector *q_vector = adapter->q_vector[i];
                napi_enable(&q_vector->napi);
        }
        if (adapter->msix_entries)
                igb_configure_msix(adapter);

        igb_set_vmolr(hw, adapter->vfs_allocated_count);

        /* Clear any pending interrupts. */
        rd32(E1000_ICR);
        igb_irq_enable(adapter);

        /* notify VFs that reset has been completed */
        if (adapter->vfs_allocated_count) {
                u32 reg_data = rd32(E1000_CTRL_EXT);
                reg_data |= E1000_CTRL_EXT_PFRSTD;
                wr32(E1000_CTRL_EXT, reg_data);
        }

        netif_tx_start_all_queues(adapter->netdev);

        /* Fire a link change interrupt to start the watchdog. */
        wr32(E1000_ICS, E1000_ICS_LSC);
        return 0;
}
void igb_down(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;
        u32 tctl, rctl;
        int i;

        /* signal that we're down so the interrupt handler does not
         * reschedule our watchdog timer */
        set_bit(__IGB_DOWN, &adapter->state);

        /* disable receives in the hardware */
        rctl = rd32(E1000_RCTL);
        wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
        /* flush and sleep below */

        netif_tx_stop_all_queues(netdev);

        /* disable transmits in the hardware */
        tctl = rd32(E1000_TCTL);
        tctl &= ~E1000_TCTL_EN;
        wr32(E1000_TCTL, tctl);
        /* flush both disables and wait for them to finish */
        wrfl();
        msleep(10);

        for (i = 0; i < adapter->num_q_vectors; i++) {
                struct igb_q_vector *q_vector = adapter->q_vector[i];
                napi_disable(&q_vector->napi);
        }

        igb_irq_disable(adapter);

        del_timer_sync(&adapter->watchdog_timer);
        del_timer_sync(&adapter->phy_info_timer);

        netdev->tx_queue_len = adapter->tx_queue_len;
        netif_carrier_off(netdev);

        /* record the stats before reset*/
        igb_update_stats(adapter);

        adapter->link_speed = 0;
        adapter->link_duplex = 0;

        if (!pci_channel_offline(adapter->pdev))
                igb_reset(adapter);
        igb_clean_all_tx_rings(adapter);
        igb_clean_all_rx_rings(adapter);
#ifdef CONFIG_IGB_DCA

        /* since we reset the hardware DCA settings were cleared */
        igb_setup_dca(adapter);
#endif
}
void igb_reinit_locked(struct igb_adapter *adapter)
{
        WARN_ON(in_interrupt());
        while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
                msleep(1);
        igb_down(adapter);
        igb_up(adapter);
        clear_bit(__IGB_RESETTING, &adapter->state);
}
void igb_reset(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        struct e1000_mac_info *mac = &hw->mac;
        struct e1000_fc_info *fc = &hw->fc;
        u32 pba = 0, tx_space, min_tx_space, min_rx_space;
        u16 hwm;

        /* Repartition Pba for greater than 9k mtu
         * To take effect CTRL.RST is required.
         */
        switch (mac->type) {
        case e1000_82576:
                pba = E1000_PBA_64K;
                break;
        case e1000_82575:
        default:
                pba = E1000_PBA_34K;
                break;
        }

        if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
            (mac->type < e1000_82576)) {
                /* adjust PBA for jumbo frames */
                wr32(E1000_PBA, pba);

                /* To maintain wire speed transmits, the Tx FIFO should be
                 * large enough to accommodate two full transmit packets,
                 * rounded up to the next 1KB and expressed in KB.  Likewise,
                 * the Rx FIFO should be large enough to accommodate at least
                 * one full receive packet and is similarly rounded up and
                 * expressed in KB. */
                pba = rd32(E1000_PBA);
                /* upper 16 bits has Tx packet buffer allocation size in KB */
                tx_space = pba >> 16;
                /* lower 16 bits has Rx packet buffer allocation size in KB */
                pba &= 0xffff;
                /* the tx fifo also stores 16 bytes of information about the tx
                 * but don't include ethernet FCS because hardware appends it */
                min_tx_space = (adapter->max_frame_size +
                                sizeof(union e1000_adv_tx_desc) -
                                ETH_FCS_LEN) * 2;
                min_tx_space = ALIGN(min_tx_space, 1024);
                min_tx_space >>= 10;
                /* software strips receive CRC, so leave room for it */
                min_rx_space = adapter->max_frame_size;
                min_rx_space = ALIGN(min_rx_space, 1024);
                min_rx_space >>= 10;

                /* If current Tx allocation is less than the min Tx FIFO size,
                 * and the min Tx FIFO size is less than the current Rx FIFO
                 * allocation, take space away from current Rx allocation */
                if (tx_space < min_tx_space &&
                    ((min_tx_space - tx_space) < pba)) {
                        pba = pba - (min_tx_space - tx_space);

                        /* if short on rx space, rx wins and must trump tx
                         * adjustment */
                        if (pba < min_rx_space)
                                pba = min_rx_space;
                }
                wr32(E1000_PBA, pba);
        }

        /* flow control settings */
        /* The high water mark must be low enough to fit one full frame
         * (or the size used for early receive) above it in the Rx FIFO.
         * Set it to the lower of:
         * - 90% of the Rx FIFO size, or
         * - the full Rx FIFO size minus one full frame */
        hwm = min(((pba << 10) * 9 / 10),
                  ((pba << 10) - 2 * adapter->max_frame_size));
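
        /*
         * Worked example: with the 82575's 34 KB PBA and a 1518-byte
         * max frame (1500-byte MTU), 90% of the FIFO is
         * 34816 * 9 / 10 = 31334 bytes and FIFO-minus-two-frames is
         * 34816 - 3036 = 31780 bytes, so hwm = 31334; the 8-byte
         * masking below then yields a high-water mark of 31328.
         */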
        if (mac->type < e1000_82576) {
                fc->high_water = hwm & 0xFFF8;  /* 8-byte granularity */
                fc->low_water = fc->high_water - 8;
        } else {
                fc->high_water = hwm & 0xFFF0;  /* 16-byte granularity */
                fc->low_water = fc->high_water - 16;
        }
        fc->pause_time = 0xFFFF;
        fc->send_xon = 1;
        fc->current_mode = fc->requested_mode;

        /* disable receive for all VFs and wait one second */
        if (adapter->vfs_allocated_count) {
                int i;
                for (i = 0 ; i < adapter->vfs_allocated_count; i++)
                        adapter->vf_data[i].clear_to_send = false;

                /* ping all the active vfs to let them know we are going down */
                igb_ping_all_vfs(adapter);

                /* disable transmits and receives */
                wr32(E1000_VFRE, 0);
                wr32(E1000_VFTE, 0);
        }

        /* Allow time for pending master requests to run */
        adapter->hw.mac.ops.reset_hw(&adapter->hw);
        wr32(E1000_WUC, 0);

        if (adapter->hw.mac.ops.init_hw(&adapter->hw))
                dev_err(&adapter->pdev->dev, "Hardware Error\n");

        igb_update_mng_vlan(adapter);

        /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
        wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);

        igb_reset_adaptive(&adapter->hw);
        igb_get_phy_info(&adapter->hw);
}
static const struct net_device_ops igb_netdev_ops = {
        .ndo_open               = igb_open,
        .ndo_stop               = igb_close,
        .ndo_start_xmit         = igb_xmit_frame_adv,
        .ndo_get_stats          = igb_get_stats,
        .ndo_set_rx_mode        = igb_set_rx_mode,
        .ndo_set_multicast_list = igb_set_rx_mode,
        .ndo_set_mac_address    = igb_set_mac,
        .ndo_change_mtu         = igb_change_mtu,
        .ndo_do_ioctl           = igb_ioctl,
        .ndo_tx_timeout         = igb_tx_timeout,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_vlan_rx_register   = igb_vlan_rx_register,
        .ndo_vlan_rx_add_vid    = igb_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = igb_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = igb_netpoll,
#endif
};
/**
 * igb_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in igb_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * igb_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit igb_probe(struct pci_dev *pdev,
                               const struct pci_device_id *ent)
{
        struct net_device *netdev;
        struct igb_adapter *adapter;
        struct e1000_hw *hw;
        const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
        unsigned long mmio_start, mmio_len;
        int err, pci_using_dac;
        u16 eeprom_data = 0;
        u16 eeprom_apme_mask = IGB_EEPROM_APME;
        u32 part_num;

        err = pci_enable_device_mem(pdev);
        if (err)
                return err;

        pci_using_dac = 0;
        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
        if (!err) {
                err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
                if (!err)
                        pci_using_dac = 1;
        } else {
                err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (err) {
                        err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
                        if (err) {
                                dev_err(&pdev->dev, "No usable DMA "
                                        "configuration, aborting\n");
                                goto err_dma;
                        }
                }
        }

        err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
                                           IORESOURCE_MEM),
                                           igb_driver_name);
        if (err)
                goto err_pci_reg;

        pci_enable_pcie_error_reporting(pdev);

        pci_set_master(pdev);
        pci_save_state(pdev);

        err = -ENOMEM;
        netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
                                   IGB_ABS_MAX_TX_QUEUES);
        if (!netdev)
                goto err_alloc_etherdev;

        SET_NETDEV_DEV(netdev, &pdev->dev);

        pci_set_drvdata(pdev, netdev);
        adapter = netdev_priv(netdev);
        adapter->netdev = netdev;
        adapter->pdev = pdev;
        hw = &adapter->hw;
        hw->back = adapter;
        adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE;

        mmio_start = pci_resource_start(pdev, 0);
        mmio_len = pci_resource_len(pdev, 0);

        err = -EIO;
        hw->hw_addr = ioremap(mmio_start, mmio_len);
        if (!hw->hw_addr)
                goto err_ioremap;

        netdev->netdev_ops = &igb_netdev_ops;
        igb_set_ethtool_ops(netdev);
        netdev->watchdog_timeo = 5 * HZ;

        strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

        netdev->mem_start = mmio_start;
        netdev->mem_end = mmio_start + mmio_len;

        /* PCI config space info */
        hw->vendor_id = pdev->vendor;
        hw->device_id = pdev->device;
        hw->revision_id = pdev->revision;
        hw->subsystem_vendor_id = pdev->subsystem_vendor;
        hw->subsystem_device_id = pdev->subsystem_device;

        /* setup the private structure */

        /* Copy the default MAC, PHY and NVM function pointers */
        memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
        memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
        memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
        /* Initialize skew-specific constants */
        err = ei->get_invariants(hw);
        if (err)
                goto err_sw_init;

#ifdef CONFIG_PCI_IOV
        /* since iov functionality isn't critical to base device function we
         * can accept failure.  If it fails we don't allow iov to be enabled */
        if (hw->mac.type == e1000_82576) {
                /* 82576 supports a maximum of 7 VFs in addition to the PF */
                unsigned int num_vfs = (max_vfs > 7) ? 7 : max_vfs;
                int i;
                unsigned char mac_addr[ETH_ALEN];

                if (num_vfs) {
                        adapter->vf_data = kcalloc(num_vfs,
                                        sizeof(struct vf_data_storage),
                                        GFP_KERNEL);
                        if (!adapter->vf_data) {
                                dev_err(&pdev->dev,
                                        "Could not allocate VF private data - "
                                        "IOV enable failed\n");
                        } else {
                                err = pci_enable_sriov(pdev, num_vfs);
                                if (!err) {
                                        adapter->vfs_allocated_count = num_vfs;
                                        dev_info(&pdev->dev,
                                                 "%d vfs allocated\n",
                                                 num_vfs);
                                        for (i = 0;
                                             i < adapter->vfs_allocated_count;
                                             i++) {
                                                random_ether_addr(mac_addr);
                                                igb_set_vf_mac(adapter, i,
                                                               mac_addr);
                                        }
                                } else {
                                        kfree(adapter->vf_data);
                                        adapter->vf_data = NULL;
                                }
                        }
                }
        }
#endif /* CONFIG_PCI_IOV */

        /* setup the private structure */
        err = igb_sw_init(adapter);
        if (err)
                goto err_sw_init;

        igb_get_bus_info_pcie(hw);

        hw->phy.autoneg_wait_to_complete = false;
        hw->mac.adaptive_ifs = true;

        /* Copper options */
        if (hw->phy.media_type == e1000_media_type_copper) {
                hw->phy.mdix = AUTO_ALL_MODES;
                hw->phy.disable_polarity_correction = false;
                hw->phy.ms_type = e1000_ms_hw_default;
        }

        if (igb_check_reset_block(hw))
                dev_info(&pdev->dev,
                         "PHY reset is blocked due to SOL/IDER session.\n");

        netdev->features = NETIF_F_SG |
                           NETIF_F_IP_CSUM |
                           NETIF_F_HW_VLAN_TX |
                           NETIF_F_HW_VLAN_RX |
                           NETIF_F_HW_VLAN_FILTER;

        netdev->features |= NETIF_F_IPV6_CSUM;
        netdev->features |= NETIF_F_TSO;
        netdev->features |= NETIF_F_TSO6;

        netdev->features |= NETIF_F_GRO;

        netdev->vlan_features |= NETIF_F_TSO;
        netdev->vlan_features |= NETIF_F_TSO6;
        netdev->vlan_features |= NETIF_F_IP_CSUM;
        netdev->vlan_features |= NETIF_F_IPV6_CSUM;
        netdev->vlan_features |= NETIF_F_SG;

        if (pci_using_dac)
                netdev->features |= NETIF_F_HIGHDMA;

        if (adapter->hw.mac.type == e1000_82576)
                netdev->features |= NETIF_F_SCTP_CSUM;

        adapter->en_mng_pt = igb_enable_mng_pass_thru(&adapter->hw);

        /* before reading the NVM, reset the controller to put the device in a
         * known good starting state */
        hw->mac.ops.reset_hw(hw);

        /* make sure the NVM is good */
        if (igb_validate_nvm_checksum(hw) < 0) {
                dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
                err = -EIO;
                goto err_eeprom;
        }

        /* copy the MAC address out of the NVM */
        if (hw->mac.ops.read_mac_addr(hw))
                dev_err(&pdev->dev, "NVM Read Error\n");

        memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
        memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);

        if (!is_valid_ether_addr(netdev->perm_addr)) {
                dev_err(&pdev->dev, "Invalid MAC Address\n");
                err = -EIO;
                goto err_eeprom;
        }

        setup_timer(&adapter->watchdog_timer, &igb_watchdog,
                    (unsigned long) adapter);
        setup_timer(&adapter->phy_info_timer, &igb_update_phy_info,
                    (unsigned long) adapter);

        INIT_WORK(&adapter->reset_task, igb_reset_task);
        INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);

        /* Initialize link properties that are user-changeable */
        adapter->fc_autoneg = true;
        hw->mac.autoneg = true;
        hw->phy.autoneg_advertised = 0x2f;

        hw->fc.requested_mode = e1000_fc_default;
        hw->fc.current_mode = e1000_fc_default;

        adapter->itr_setting = IGB_DEFAULT_ITR;
        adapter->itr = IGB_START_ITR;

        igb_validate_mdi_setting(hw);

        /* Initial Wake on LAN setting If APM wake is enabled in the EEPROM,
         * enable the ACPI Magic Packet filter
         */

        if (hw->bus.func == 0)
                hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
        else if (hw->bus.func == 1)
                hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);

        if (eeprom_data & eeprom_apme_mask)
                adapter->eeprom_wol |= E1000_WUFC_MAG;

        /* now that we have the eeprom settings, apply the special cases where
         * the eeprom may be wrong or the board simply won't support wake on
         * lan on a particular port */
        switch (pdev->device) {
        case E1000_DEV_ID_82575GB_QUAD_COPPER:
                adapter->eeprom_wol = 0;
                break;
        case E1000_DEV_ID_82575EB_FIBER_SERDES:
        case E1000_DEV_ID_82576_FIBER:
        case E1000_DEV_ID_82576_SERDES:
                /* Wake events only supported on port A for dual fiber
                 * regardless of eeprom setting */
                if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
                        adapter->eeprom_wol = 0;
                break;
        case E1000_DEV_ID_82576_QUAD_COPPER:
                /* if quad port adapter, disable WoL on all but port A */
                if (global_quad_port_a != 0)
                        adapter->eeprom_wol = 0;
                else
                        adapter->flags |= IGB_FLAG_QUAD_PORT_A;
                /* Reset for multiple quad port adapters */
                if (++global_quad_port_a == 4)
                        global_quad_port_a = 0;
                break;
        }

        /* initialize the wol settings based on the eeprom settings */
        adapter->wol = adapter->eeprom_wol;
        device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

        /* reset the hardware with the new settings */
        igb_reset(adapter);

        /* let the f/w know that the h/w is now under the control of the
         * driver. */
        igb_get_hw_control(adapter);

        strcpy(netdev->name, "eth%d");
        err = register_netdev(netdev);
        if (err)
                goto err_register;

        /* carrier off reporting is important to ethtool even BEFORE open */
        netif_carrier_off(netdev);

#ifdef CONFIG_IGB_DCA
        if (dca_add_requester(&pdev->dev) == 0) {
                adapter->flags |= IGB_FLAG_DCA_ENABLED;
                dev_info(&pdev->dev, "DCA enabled\n");
                igb_setup_dca(adapter);
        }
#endif

        /*
         * Initialize hardware timer: we keep it running just in case
         * that some program needs it later on.
         */
        memset(&adapter->cycles, 0, sizeof(adapter->cycles));
        adapter->cycles.read = igb_read_clock;
        adapter->cycles.mask = CLOCKSOURCE_MASK(64);
        adapter->cycles.mult = 1;
        adapter->cycles.shift = IGB_TSYNC_SHIFT;
        wr32(E1000_TIMINCA,
             (1 << 24) |
             IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS * IGB_TSYNC_SCALE);

        /*
         * Avoid rollover while we initialize by resetting the time counter.
         */
        wr32(E1000_SYSTIML, 0x00000000);
        wr32(E1000_SYSTIMH, 0x00000000);

        /*
         * Set registers so that rollover occurs soon to test this.
         */
        wr32(E1000_SYSTIML, 0x00000000);
        wr32(E1000_SYSTIMH, 0xFF800000);
        wrfl();

        timecounter_init(&adapter->clock,
                         &adapter->cycles,
                         ktime_to_ns(ktime_get_real()));

        /*
         * Synchronize our NIC clock against system wall clock. NIC
         * time stamp reading requires ~3us per sample, each sample
         * was pretty stable even under load => only require 10
         * samples for each offset comparison.
         */
        memset(&adapter->compare, 0, sizeof(adapter->compare));
        adapter->compare.source = &adapter->clock;
        adapter->compare.target = ktime_get_real;
        adapter->compare.num_samples = 10;
        timecompare_update(&adapter->compare, 0);

#ifdef DEBUG
        {
                char buffer[160];
                printk(KERN_DEBUG
                        "igb: %s: hw %p initialized timer\n",
                        igb_get_time_str(adapter, buffer),
                        &adapter->hw);
        }
#endif

        dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
        /* print bus type/speed/width info */
        dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
                 netdev->name,
                 ((hw->bus.speed == e1000_bus_speed_2500)
                  ? "2.5Gb/s" : "unknown"),
                 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
                  (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
                  (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
                   "unknown"),
                 netdev->dev_addr);

        igb_read_part_num(hw, &part_num);
        dev_info(&pdev->dev, "%s: PBA No: %06x-%03x\n", netdev->name,
                 (part_num >> 8), (part_num & 0xff));

        dev_info(&pdev->dev,
                 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
                 adapter->msix_entries ? "MSI-X" :
                 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
                 adapter->num_rx_queues, adapter->num_tx_queues);

        return 0;

err_register:
        igb_release_hw_control(adapter);
err_eeprom:
        if (!igb_check_reset_block(hw))
                igb_reset_phy(hw);

        if (hw->flash_address)
                iounmap(hw->flash_address);
err_sw_init:
        igb_clear_interrupt_scheme(adapter);
        iounmap(hw->hw_addr);
err_ioremap:
        free_netdev(netdev);
err_alloc_etherdev:
        pci_release_selected_regions(pdev, pci_select_bars(pdev,
                                     IORESOURCE_MEM));
err_pci_reg:
err_dma:
        pci_disable_device(pdev);
        return err;
}
/**
 * igb_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * igb_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit igb_remove(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;

        /* flush_scheduled work may reschedule our watchdog task, so
         * explicitly disable watchdog tasks from being rescheduled */
        set_bit(__IGB_DOWN, &adapter->state);
        del_timer_sync(&adapter->watchdog_timer);
        del_timer_sync(&adapter->phy_info_timer);

        flush_scheduled_work();

#ifdef CONFIG_IGB_DCA
        if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
                dev_info(&pdev->dev, "DCA disabled\n");
                dca_remove_requester(&pdev->dev);
                adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
                wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
        }
#endif

        /* Release control of h/w to f/w.  If f/w is AMT enabled, this
         * would have already happened in close and is redundant. */
        igb_release_hw_control(adapter);

        unregister_netdev(netdev);

        if (!igb_check_reset_block(&adapter->hw))
                igb_reset_phy(&adapter->hw);

        igb_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PCI_IOV
        /* reclaim resources allocated to VFs */
        if (adapter->vf_data) {
                /* disable iov and allow time for transactions to clear */
                pci_disable_sriov(pdev);
                msleep(500);

                kfree(adapter->vf_data);
                adapter->vf_data = NULL;
                wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
                msleep(100);
                dev_info(&pdev->dev, "IOV Disabled\n");
        }
#endif

        iounmap(hw->hw_addr);
        if (hw->flash_address)
                iounmap(hw->flash_address);
        pci_release_selected_regions(pdev, pci_select_bars(pdev,
                                     IORESOURCE_MEM));

        free_netdev(netdev);

        pci_disable_pcie_error_reporting(pdev);

        pci_disable_device(pdev);
}
/**
 * igb_sw_init - Initialize general software structures (struct igb_adapter)
 * @adapter: board private structure to initialize
 *
 * igb_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int __devinit igb_sw_init(struct igb_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;

        pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);

        adapter->tx_ring_count = IGB_DEFAULT_TXD;
        adapter->rx_ring_count = IGB_DEFAULT_RXD;
        adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
        adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;

        /* This call may decrease the number of queues depending on
         * interrupt mode. */
        if (igb_init_interrupt_scheme(adapter)) {
                dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
                return -ENOMEM;
        }

        /* Explicitly disable IRQ since the NIC can be in any state. */
        igb_irq_disable(adapter);

        set_bit(__IGB_DOWN, &adapter->state);
        return 0;
}
/**
 * igb_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int igb_open(struct net_device *netdev)
{
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        int err;
        int i;

        /* disallow open during test */
        if (test_bit(__IGB_TESTING, &adapter->state))
                return -EBUSY;

        netif_carrier_off(netdev);

        /* allocate transmit descriptors */
        err = igb_setup_all_tx_resources(adapter);
        if (err)
                goto err_setup_tx;

        /* allocate receive descriptors */
        err = igb_setup_all_rx_resources(adapter);
        if (err)
                goto err_setup_rx;

        /* e1000_power_up_phy(adapter); */

        adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
        if ((adapter->hw.mng_cookie.status &
             E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
                igb_update_mng_vlan(adapter);

        /* before we allocate an interrupt, we must be ready to handle it.
         * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
         * as soon as we call pci_request_irq, so we have to setup our
         * clean_rx handler before we do so. */
        igb_configure(adapter);

        igb_set_vmolr(hw, adapter->vfs_allocated_count);

        err = igb_request_irq(adapter);
        if (err)
                goto err_req_irq;

        /* From here on the code is the same as igb_up() */
        clear_bit(__IGB_DOWN, &adapter->state);

        for (i = 0; i < adapter->num_q_vectors; i++) {
                struct igb_q_vector *q_vector = adapter->q_vector[i];
                napi_enable(&q_vector->napi);
        }

        /* Clear any pending interrupts. */
        rd32(E1000_ICR);

        igb_irq_enable(adapter);

        /* notify VFs that reset has been completed */
        if (adapter->vfs_allocated_count) {
                u32 reg_data = rd32(E1000_CTRL_EXT);
                reg_data |= E1000_CTRL_EXT_PFRSTD;
                wr32(E1000_CTRL_EXT, reg_data);
        }

        netif_tx_start_all_queues(netdev);

        /* Fire a link status change interrupt to start the watchdog. */
        wr32(E1000_ICS, E1000_ICS_LSC);

        return 0;

err_req_irq:
        igb_release_hw_control(adapter);
        /* e1000_power_down_phy(adapter); */
        igb_free_all_rx_resources(adapter);
err_setup_rx:
        igb_free_all_tx_resources(adapter);
err_setup_tx:
        igb_reset(adapter);

        return err;
}
/**
 * igb_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int igb_close(struct net_device *netdev)
{
        struct igb_adapter *adapter = netdev_priv(netdev);

        WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
        igb_down(adapter);

        igb_free_irq(adapter);

        igb_free_all_tx_resources(adapter);
        igb_free_all_rx_resources(adapter);

        /* kill manageability vlan ID if supported, but not if a vlan with
         * the same ID is registered on the host OS (let 8021q kill it) */
        if ((adapter->hw.mng_cookie.status &
             E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
            !(adapter->vlgrp &&
              vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id)))
                igb_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);

        return 0;
}
/**
 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int igb_setup_tx_resources(struct igb_ring *tx_ring)
{
	struct pci_dev *pdev = tx_ring->pdev;
	int size;

	size = sizeof(struct igb_buffer) * tx_ring->count;
	tx_ring->buffer_info = vmalloc(size);
	if (!tx_ring->buffer_info)
		goto err;
	memset(tx_ring->buffer_info, 0, size);

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
					     &tx_ring->dma);
	if (!tx_ring->desc)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	return 0;

err:
	vfree(tx_ring->buffer_info);
	dev_err(&pdev->dev,
		"Unable to allocate memory for the transmit descriptor ring\n");
	return -ENOMEM;
}
/**
 * igb_setup_all_tx_resources - wrapper to allocate Tx resources
 *				(Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
{
	int i, err = 0;
	int r_idx;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = igb_setup_tx_resources(&adapter->tx_ring[i]);
		if (err) {
			dev_err(&adapter->pdev->dev,
				"Allocation for Tx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
				igb_free_tx_resources(&adapter->tx_ring[i]);
			break;
		}
	}

	for (i = 0; i < IGB_MAX_TX_QUEUES; i++) {
		r_idx = i % adapter->num_tx_queues;
		adapter->multi_tx_table[i] = &adapter->tx_ring[r_idx];
	}
	return err;
}
/**
 * igb_setup_tctl - configure the transmit control registers
 * @adapter: Board private structure
 **/
static void igb_setup_tctl(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 tctl;

	/* disable queue 0 which is enabled by default on 82575 and 82576 */
	wr32(E1000_TXDCTL(0), 0);

	/* Program the Transmit Control Register */
	tctl = rd32(E1000_TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);

	igb_config_collision_dist(hw);

	/* Enable transmits */
	tctl |= E1000_TCTL_EN;

	wr32(E1000_TCTL, tctl);
}
/**
 * igb_configure_tx_ring - Configure transmit ring after Reset
 * @adapter: board private structure
 * @ring: tx ring to configure
 *
 * Configure a transmit ring after a reset.
 **/
static void igb_configure_tx_ring(struct igb_adapter *adapter,
				  struct igb_ring *ring)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 txdctl;
	u64 tdba = ring->dma;
	int reg_idx = ring->reg_idx;

	/* disable the queue */
	txdctl = rd32(E1000_TXDCTL(reg_idx));
	wr32(E1000_TXDCTL(reg_idx),
	     txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);

	wr32(E1000_TDLEN(reg_idx),
	     ring->count * sizeof(union e1000_adv_tx_desc));
	wr32(E1000_TDBAL(reg_idx),
	     tdba & 0x00000000ffffffffULL);
	wr32(E1000_TDBAH(reg_idx), tdba >> 32);

	ring->head = hw->hw_addr + E1000_TDH(reg_idx);
	ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
	writel(0, ring->head);
	writel(0, ring->tail);

	txdctl |= IGB_TX_PTHRESH;
	txdctl |= IGB_TX_HTHRESH << 8;
	txdctl |= IGB_TX_WTHRESH << 16;

	txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
	wr32(E1000_TXDCTL(reg_idx), txdctl);
}
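/*
 * TXDCTL packs the three descriptor prefetch/write-back thresholds into
 * one register, which is why the values above are OR'd in at bit
 * offsets 0, 8 and 16: PTHRESH at 0, HTHRESH at 8 and WTHRESH at 16.
 * On this hardware family each field is only a few bits wide, so the
 * IGB_TX_*THRESH constants are small counts of descriptors, not bytes.
 */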
/**
 * igb_configure_tx - Configure transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void igb_configure_tx(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igb_configure_tx_ring(adapter, &adapter->tx_ring[i]);
}
/**
 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int igb_setup_rx_resources(struct igb_ring *rx_ring)
{
	struct pci_dev *pdev = rx_ring->pdev;
	int size, desc_len;

	size = sizeof(struct igb_buffer) * rx_ring->count;
	rx_ring->buffer_info = vmalloc(size);
	if (!rx_ring->buffer_info)
		goto err;
	memset(rx_ring->buffer_info, 0, size);

	desc_len = sizeof(union e1000_adv_rx_desc);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * desc_len;
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
					     &rx_ring->dma);
	if (!rx_ring->desc)
		goto err;

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;

err:
	vfree(rx_ring->buffer_info);
	dev_err(&pdev->dev, "Unable to allocate memory for "
		"the receive descriptor ring\n");
	return -ENOMEM;
}
/**
 * igb_setup_all_rx_resources - wrapper to allocate Rx resources
 *				(Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = igb_setup_rx_resources(&adapter->rx_ring[i]);
		if (err) {
			dev_err(&adapter->pdev->dev,
				"Allocation for Rx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
				igb_free_rx_resources(&adapter->rx_ring[i]);
			break;
		}
	}

	return err;
}
/**
 * igb_setup_mrqc - configure the multiple receive queue control registers
 * @adapter: Board private structure
 **/
static void igb_setup_mrqc(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 mrqc, rxcsum;
	u32 j, num_rx_queues, shift = 0, shift2 = 0;
	union e1000_reta {
		u32 dword;
		u8  bytes[4];
	} reta;
	static const u8 rsshash[40] = {
		0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
		0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
		0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
		0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };

	/* Fill out hash function seeds */
	for (j = 0; j < 10; j++) {
		u32 rsskey = rsshash[(j * 4)];
		rsskey |= rsshash[(j * 4) + 1] << 8;
		rsskey |= rsshash[(j * 4) + 2] << 16;
		rsskey |= rsshash[(j * 4) + 3] << 24;
		array_wr32(E1000_RSSRK(0), j, rsskey);
	}

	num_rx_queues = adapter->num_rx_queues;

	if (adapter->vfs_allocated_count) {
		/* 82575 and 82576 supports 2 RSS queues for VMDq */
		switch (hw->mac.type) {
		case e1000_82576:
			shift = 3;
			num_rx_queues = 2;
			break;
		case e1000_82575:
			shift = 2;
			shift2 = 6;
		default:
			break;
		}
	} else {
		if (hw->mac.type == e1000_82575)
			shift = 6;
	}

	for (j = 0; j < (32 * 4); j++) {
		reta.bytes[j & 3] = (j % num_rx_queues) << shift;
		if (shift2)
			reta.bytes[j & 3] |= num_rx_queues << shift2;
		if ((j & 3) == 3)
			wr32(E1000_RETA(j >> 2), reta.dword);
	}

	/*
	 * Disable raw packet checksumming so that RSS hash is placed in
	 * descriptor on writeback.  No need to enable TCP/UDP/IP checksum
	 * offloads as they are enabled by default
	 */
	rxcsum = rd32(E1000_RXCSUM);
	rxcsum |= E1000_RXCSUM_PCSD;

	if (adapter->hw.mac.type >= e1000_82576)
		/* Enable Receive Checksum Offload for SCTP */
		rxcsum |= E1000_RXCSUM_CRCOFL;

	/* Don't need to set TUOFL or IPOFL, they default to 1 */
	wr32(E1000_RXCSUM, rxcsum);

	/* If VMDq is enabled then we set the appropriate mode for that, else
	 * we default to RSS so that an RSS hash is calculated per packet even
	 * if we are only using one queue */
	if (adapter->vfs_allocated_count) {
		if (hw->mac.type > e1000_82575) {
			/* Set the default pool for the PF's first queue */
			u32 vtctl = rd32(E1000_VT_CTL);
			vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
				   E1000_VT_CTL_DISABLE_DEF_POOL);
			vtctl |= adapter->vfs_allocated_count <<
				 E1000_VT_CTL_DEFAULT_POOL_SHIFT;
			wr32(E1000_VT_CTL, vtctl);
		}
		if (adapter->num_rx_queues > 1)
			mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
		else
			mrqc = E1000_MRQC_ENABLE_VMDQ;
	} else {
		mrqc = E1000_MRQC_ENABLE_RSS_4Q;
	}
	igb_vmm_control(adapter);

	mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
		 E1000_MRQC_RSS_FIELD_IPV4_TCP);
	mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 |
		 E1000_MRQC_RSS_FIELD_IPV6_TCP);
	mrqc |= (E1000_MRQC_RSS_FIELD_IPV4_UDP |
		 E1000_MRQC_RSS_FIELD_IPV6_UDP);
	mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
		 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);

	wr32(E1000_MRQC, mrqc);
}
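/*
 * RETA layout: the redirection table holds 128 one-byte entries packed
 * four per 32-bit register, so the loop above runs j over 0..127 and
 * flushes reta.dword to E1000_RETA(j >> 2) after every fourth byte.
 * With num_rx_queues == 4, for example, the table just repeats the
 * pattern 0,1,2,3 and RSS hash results spread round-robin across the
 * four queues.
 */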
/**
 * igb_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 **/
static void igb_setup_rctl(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;

	rctl = rd32(E1000_RCTL);

	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
	rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);

	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
		(hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/*
	 * enable stripping of CRC.  It's unlikely this will break BMC
	 * redirection as it did with e1000.  Newer features require
	 * that the HW strips the CRC.
	 */
	rctl |= E1000_RCTL_SECRC;

	/* disable store bad packets and clear size bits. */
	rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);

	/* enable LPE to prevent packets larger than max_frame_size */
	rctl |= E1000_RCTL_LPE;

	/* disable queue 0 to prevent tail write w/o re-config */
	wr32(E1000_RXDCTL(0), 0);

	/* Attention!!!  For SR-IOV PF driver operations you must enable
	 * queue drop for all VF and PF queues to prevent head of line blocking
	 * if an un-trusted VF does not provide descriptors to hardware.
	 */
	if (adapter->vfs_allocated_count) {
		u32 vmolr;

		/* set all queue drop enable bits */
		wr32(E1000_QDE, ALL_QUEUES);

		vmolr = rd32(E1000_VMOLR(adapter->vfs_allocated_count));
		if (rctl & E1000_RCTL_LPE)
			vmolr |= E1000_VMOLR_LPE;
		if (adapter->num_rx_queues > 1)
			vmolr |= E1000_VMOLR_RSSE;
		wr32(E1000_VMOLR(adapter->vfs_allocated_count), vmolr);
	}

	wr32(E1000_RCTL, rctl);
}
/**
 * igb_rlpml_set - set maximum receive packet size
 * @adapter: board private structure
 *
 * Configure maximum receivable packet size.
 **/
static void igb_rlpml_set(struct igb_adapter *adapter)
{
	u32 max_frame_size = adapter->max_frame_size;
	struct e1000_hw *hw = &adapter->hw;
	u16 pf_id = adapter->vfs_allocated_count;

	if (adapter->vlgrp)
		max_frame_size += VLAN_TAG_SIZE;

	/* if vfs are enabled we set RLPML to the largest possible request
	 * size and set the VMOLR RLPML to the size we need */
	if (pf_id) {
		igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
		max_frame_size = MAX_STD_JUMBO_FRAME_SIZE + VLAN_TAG_SIZE;
	}

	wr32(E1000_RLPML, max_frame_size);
}
/**
 * igb_configure_rx_ring - Configure a receive ring after Reset
 * @adapter: board private structure
 * @ring: receive ring to be configured
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void igb_configure_rx_ring(struct igb_adapter *adapter,
				  struct igb_ring *ring)
{
	struct e1000_hw *hw = &adapter->hw;
	u64 rdba = ring->dma;
	int reg_idx = ring->reg_idx;
	u32 srrctl, rxdctl;

	/* disable the queue */
	rxdctl = rd32(E1000_RXDCTL(reg_idx));
	wr32(E1000_RXDCTL(reg_idx),
	     rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);

	/* Set DMA base address registers */
	wr32(E1000_RDBAL(reg_idx),
	     rdba & 0x00000000ffffffffULL);
	wr32(E1000_RDBAH(reg_idx), rdba >> 32);
	wr32(E1000_RDLEN(reg_idx),
	     ring->count * sizeof(union e1000_adv_rx_desc));

	/* initialize head and tail */
	ring->head = hw->hw_addr + E1000_RDH(reg_idx);
	ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
	writel(0, ring->head);
	writel(0, ring->tail);

	/* set descriptor configuration */
	if (ring->rx_buffer_len < IGB_RXBUFFER_1024) {
		srrctl = ALIGN(ring->rx_buffer_len, 64) <<
			 E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
		srrctl |= IGB_RXBUFFER_16384 >>
			  E1000_SRRCTL_BSIZEPKT_SHIFT;
#else
		srrctl |= (PAGE_SIZE / 2) >>
			  E1000_SRRCTL_BSIZEPKT_SHIFT;
#endif
		srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
	} else {
		srrctl = ALIGN(ring->rx_buffer_len, 1024) >>
			 E1000_SRRCTL_BSIZEPKT_SHIFT;
		srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
	}

	wr32(E1000_SRRCTL(reg_idx), srrctl);

	/* enable receive descriptor fetching */
	rxdctl = rd32(E1000_RXDCTL(reg_idx));
	rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
	rxdctl &= 0xFFF00000;
	rxdctl |= IGB_RX_PTHRESH;
	rxdctl |= IGB_RX_HTHRESH << 8;
	rxdctl |= IGB_RX_WTHRESH << 16;
	wr32(E1000_RXDCTL(reg_idx), rxdctl);
}
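/*
 * SRRCTL units: BSIZEPKT is programmed in 1 KB increments, hence the
 * ALIGN to 1024 and the >> E1000_SRRCTL_BSIZEPKT_SHIFT above, while
 * BSIZEHDR lives in a separate byte of the register in 64-byte
 * increments (hence the ALIGN to 64).  So a 2048-byte buffer encodes
 * as BSIZEPKT=2, and a 128-byte header buffer as BSIZEHDR=2 in
 * header-split mode.
 */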
/**
 * igb_configure_rx - Configure receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void igb_configure_rx(struct igb_adapter *adapter)
{
	int i;

	/* set UTA to appropriate mode */
	igb_set_uta(adapter);

	/* set the correct pool for the PF default MAC address in entry 0 */
	igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
			 adapter->vfs_allocated_count);

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	for (i = 0; i < adapter->num_rx_queues; i++)
		igb_configure_rx_ring(adapter, &adapter->rx_ring[i]);
}
/**
 * igb_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void igb_free_tx_resources(struct igb_ring *tx_ring)
{
	igb_clean_tx_ring(tx_ring);

	vfree(tx_ring->buffer_info);
	tx_ring->buffer_info = NULL;

	pci_free_consistent(tx_ring->pdev, tx_ring->size,
			    tx_ring->desc, tx_ring->dma);

	tx_ring->desc = NULL;
}
/**
 * igb_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
static void igb_free_all_tx_resources(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igb_free_tx_resources(&adapter->tx_ring[i]);
}
static void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring,
					   struct igb_buffer *buffer_info)
{
	buffer_info->dma = 0;
	if (buffer_info->skb) {
		skb_dma_unmap(&tx_ring->pdev->dev,
			      buffer_info->skb,
			      DMA_TO_DEVICE);
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
	buffer_info->time_stamp = 0;
	/* buffer_info must be completely set up in the transmit path */
}
/**
 * igb_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/
static void igb_clean_tx_ring(struct igb_ring *tx_ring)
{
	struct igb_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	if (!tx_ring->buffer_info)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
	}

	size = sizeof(struct igb_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	writel(0, tx_ring->head);
	writel(0, tx_ring->tail);
}
/**
 * igb_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igb_clean_tx_ring(&adapter->tx_ring[i]);
}
/**
 * igb_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void igb_free_rx_resources(struct igb_ring *rx_ring)
{
	igb_clean_rx_ring(rx_ring);

	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;

	pci_free_consistent(rx_ring->pdev, rx_ring->size,
			    rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}
/**
 * igb_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
static void igb_free_all_rx_resources(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		igb_free_rx_resources(&adapter->rx_ring[i]);
}
/**
 * igb_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 **/
static void igb_clean_rx_ring(struct igb_ring *rx_ring)
{
	struct igb_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	if (!rx_ring->buffer_info)
		return;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if (buffer_info->dma) {
			pci_unmap_single(rx_ring->pdev,
					 buffer_info->dma,
					 rx_ring->rx_buffer_len,
					 PCI_DMA_FROMDEVICE);
			buffer_info->dma = 0;
		}

		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}
		if (buffer_info->page_dma) {
			pci_unmap_page(rx_ring->pdev,
				       buffer_info->page_dma,
				       PAGE_SIZE / 2,
				       PCI_DMA_FROMDEVICE);
			buffer_info->page_dma = 0;
		}
		if (buffer_info->page) {
			put_page(buffer_info->page);
			buffer_info->page = NULL;
			buffer_info->page_offset = 0;
		}
	}

	size = sizeof(struct igb_buffer) * rx_ring->count;
	memset(rx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	writel(0, rx_ring->head);
	writel(0, rx_ring->tail);
}
/**
 * igb_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		igb_clean_rx_ring(&adapter->rx_ring[i]);
}
/**
 * igb_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int igb_set_mac(struct net_device *netdev, void *p)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	/* set the correct pool for the new PF MAC address in entry 0 */
	igb_rar_set_qsel(adapter, hw->mac.addr, 0,
			 adapter->vfs_allocated_count);

	return 0;
}
/**
 * igb_write_mc_addr_list - write multicast addresses to MTA
 * @netdev: network interface device structure
 *
 * Writes multicast address list to the MTA hash table.
 * Returns: -ENOMEM on failure
 *          0 on no addresses written
 *          X on writing X addresses to MTA
 **/
static int igb_write_mc_addr_list(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct dev_mc_list *mc_ptr = netdev->mc_list;
	u8  *mta_list;
	u32 vmolr = 0;
	int i;

	if (!netdev->mc_count) {
		/* nothing to program, so clear mc list */
		igb_update_mc_addr_list(hw, NULL, 0);
		igb_restore_vf_multicasts(adapter);
		return 0;
	}

	mta_list = kzalloc(netdev->mc_count * 6, GFP_ATOMIC);
	if (!mta_list)
		return -ENOMEM;

	/* set vmolr receive overflow multicast bit */
	vmolr |= E1000_VMOLR_ROMPE;

	/* The shared function expects a packed array of only addresses. */
	mc_ptr = netdev->mc_list;

	for (i = 0; i < netdev->mc_count; i++) {
		if (!mc_ptr)
			break;
		memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
		mc_ptr = mc_ptr->next;
	}
	igb_update_mc_addr_list(hw, mta_list, i);
	kfree(mta_list);

	return netdev->mc_count;
}
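/*
 * The mta_list buffer is sized at 6 bytes per entry because the shared
 * code consumes a packed array of raw MAC addresses; the literal 6 is
 * just ETH_ALEN spelled out, so kzalloc(netdev->mc_count * ETH_ALEN,
 * GFP_ATOMIC) would be the clearer equivalent.
 */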
/**
 * igb_write_uc_addr_list - write unicast addresses to RAR table
 * @netdev: network interface device structure
 *
 * Writes unicast address list to the RAR table.
 * Returns: -ENOMEM on failure/insufficient address space
 *          0 on no addresses written
 *          X on writing X addresses to the RAR table
 **/
static int igb_write_uc_addr_list(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	unsigned int vfn = adapter->vfs_allocated_count;
	unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
	int count = 0;

	/* return ENOMEM indicating insufficient memory for addresses */
	if (netdev->uc.count > rar_entries)
		return -ENOMEM;

	if (netdev->uc.count && rar_entries) {
		struct netdev_hw_addr *ha;
		list_for_each_entry(ha, &netdev->uc.list, list) {
			if (!rar_entries)
				break;
			igb_rar_set_qsel(adapter, ha->addr,
					 rar_entries--,
					 vfn);
			count++;
		}
	}
	/* write the addresses in reverse order to avoid write combining */
	for (; rar_entries > 0 ; rar_entries--) {
		wr32(E1000_RAH(rar_entries), 0);
		wr32(E1000_RAL(rar_entries), 0);
	}

	return count;
}
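/*
 * RAR budgeting: the (vfn + 1) subtracted from rar_entry_count sets
 * aside entry 0 for the PF's default MAC address plus one entry per
 * allocated VF, so only the remainder is available for extra unicast
 * filters.  Lists that do not fit make this function return -ENOMEM,
 * which the caller turns into unicast promiscuous mode.
 */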
/**
 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_rx_mode entry point is called whenever the unicast or multicast
 * address lists or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper unicast, multicast,
 * promiscuous mode, and all-multi behavior.
 **/
static void igb_set_rx_mode(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	unsigned int vfn = adapter->vfs_allocated_count;
	u32 rctl, vmolr = 0;
	int count;

	/* Check for Promiscuous and All Multicast modes */
	rctl = rd32(E1000_RCTL);

	/* clear the affected bits */
	rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);

	if (netdev->flags & IFF_PROMISC) {
		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
		vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			rctl |= E1000_RCTL_MPE;
			vmolr |= E1000_VMOLR_MPME;
		} else {
			/*
			 * Write addresses to the MTA, if the attempt fails
			 * then we should just turn on promiscuous mode so
			 * that we can at least receive multicast traffic
			 */
			count = igb_write_mc_addr_list(netdev);
			if (count < 0) {
				rctl |= E1000_RCTL_MPE;
				vmolr |= E1000_VMOLR_MPME;
			} else if (count) {
				vmolr |= E1000_VMOLR_ROMPE;
			}
		}
		/*
		 * Write addresses to available RAR registers, if there is not
		 * sufficient space to store all the addresses then enable
		 * unicast promiscuous mode
		 */
		count = igb_write_uc_addr_list(netdev);
		if (count < 0) {
			rctl |= E1000_RCTL_UPE;
			vmolr |= E1000_VMOLR_ROPE;
		}
		rctl |= E1000_RCTL_VFE;
	}
	wr32(E1000_RCTL, rctl);

	/*
	 * In order to support SR-IOV and eventually VMDq it is necessary to set
	 * the VMOLR to enable the appropriate modes.  Without this workaround
	 * we will have issues with VLAN tag stripping not being done for frames
	 * that are only arriving because we are the default pool
	 */
	if (hw->mac.type < e1000_82576)
		return;

	vmolr |= rd32(E1000_VMOLR(vfn)) &
		 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
	wr32(E1000_VMOLR(vfn), vmolr);
	igb_restore_vf_multicasts(adapter);
}
/* Need to wait a few seconds after link up to get diagnostic information from
 * the phy */
static void igb_update_phy_info(unsigned long data)
{
	struct igb_adapter *adapter = (struct igb_adapter *) data;
	igb_get_phy_info(&adapter->hw);
}
/**
 * igb_has_link - check shared code for link and determine up/down
 * @adapter: pointer to driver private info
 **/
static bool igb_has_link(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	bool link_active = false;
	s32 ret_val = 0;

	/* get_link_status is set on LSC (link status) interrupt or
	 * rx sequence error interrupt.  get_link_status will stay
	 * false until the e1000_check_for_link establishes link
	 * for copper adapters ONLY
	 */
	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		if (hw->mac.get_link_status) {
			ret_val = hw->mac.ops.check_for_link(hw);
			link_active = !hw->mac.get_link_status;
		} else {
			link_active = true;
		}
		break;
	case e1000_media_type_internal_serdes:
		ret_val = hw->mac.ops.check_for_link(hw);
		link_active = hw->mac.serdes_has_link;
		break;
	case e1000_media_type_unknown:
	default:
		break;
	}

	return link_active;
}
/**
 * igb_watchdog - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void igb_watchdog(unsigned long data)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	/* Do the rest outside of interrupt context */
	schedule_work(&adapter->watchdog_task);
}
static void igb_watchdog_task(struct work_struct *work)
{
	struct igb_adapter *adapter = container_of(work,
						   struct igb_adapter,
						   watchdog_task);
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct igb_ring *tx_ring = adapter->tx_ring;
	u32 link;
	int i;

	link = igb_has_link(adapter);
	if ((netif_carrier_ok(netdev)) && link)
		goto link_up;

	if (link) {
		if (!netif_carrier_ok(netdev)) {
			u32 ctrl;
			hw->mac.ops.get_speed_and_duplex(&adapter->hw,
							 &adapter->link_speed,
							 &adapter->link_duplex);

			ctrl = rd32(E1000_CTRL);
			/* Links status message must follow this format */
			printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, "
			       "Flow Control: %s\n",
			       netdev->name,
			       adapter->link_speed,
			       adapter->link_duplex == FULL_DUPLEX ?
				 "Full Duplex" : "Half Duplex",
			       ((ctrl & E1000_CTRL_TFCE) && (ctrl &
			         E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
			         E1000_CTRL_RFCE) ? "RX" : ((ctrl &
			         E1000_CTRL_TFCE) ? "TX" : "None")));

			/* tweak tx_queue_len according to speed/duplex and
			 * adjust the timeout factor */
			netdev->tx_queue_len = adapter->tx_queue_len;
			adapter->tx_timeout_factor = 1;
			switch (adapter->link_speed) {
			case SPEED_10:
				netdev->tx_queue_len = 10;
				adapter->tx_timeout_factor = 14;
				break;
			case SPEED_100:
				netdev->tx_queue_len = 100;
				/* maybe add some timeout factor ? */
				break;
			}

			netif_carrier_on(netdev);

			igb_ping_all_vfs(adapter);

			/* link state has changed, schedule phy info update */
			if (!test_bit(__IGB_DOWN, &adapter->state))
				mod_timer(&adapter->phy_info_timer,
					  round_jiffies(jiffies + 2 * HZ));
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			adapter->link_speed = 0;
			adapter->link_duplex = 0;
			/* Links status message must follow this format */
			printk(KERN_INFO "igb: %s NIC Link is Down\n",
			       netdev->name);
			netif_carrier_off(netdev);

			igb_ping_all_vfs(adapter);

			/* link state has changed, schedule phy info update */
			if (!test_bit(__IGB_DOWN, &adapter->state))
				mod_timer(&adapter->phy_info_timer,
					  round_jiffies(jiffies + 2 * HZ));
		}
	}

link_up:
	igb_update_stats(adapter);

	hw->mac.tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
	adapter->tpt_old = adapter->stats.tpt;
	hw->mac.collision_delta = adapter->stats.colc - adapter->colc_old;
	adapter->colc_old = adapter->stats.colc;

	adapter->gorc = adapter->stats.gorc - adapter->gorc_old;
	adapter->gorc_old = adapter->stats.gorc;
	adapter->gotc = adapter->stats.gotc - adapter->gotc_old;
	adapter->gotc_old = adapter->stats.gotc;

	igb_update_adaptive(&adapter->hw);

	if (!netif_carrier_ok(netdev)) {
		if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context). */
			adapter->tx_timeout_count++;
			schedule_work(&adapter->reset_task);
			/* return immediately since reset is imminent */
			return;
		}
	}

	/* Cause software interrupt to ensure rx ring is cleaned */
	if (adapter->msix_entries) {
		u32 eics = 0;
		for (i = 0; i < adapter->num_q_vectors; i++) {
			struct igb_q_vector *q_vector = adapter->q_vector[i];
			eics |= q_vector->eims_value;
		}
		wr32(E1000_EICS, eics);
	} else {
		wr32(E1000_ICS, E1000_ICS_RXDMT0);
	}

	/* Force detection of hung controller every watchdog period */
	tx_ring->detect_tx_hung = true;

	/* Reset the timer */
	if (!test_bit(__IGB_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer,
			  round_jiffies(jiffies + 2 * HZ));
}
enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};
/**
 * igb_update_ring_itr - update the dynamic ITR value based on packet size
 *
 *      Stores a new ITR value based strictly on packet size.  This
 *      algorithm is less sophisticated than that used in igb_update_itr,
 *      due to the difficulty of synchronizing statistics across multiple
 *      receive rings.  The divisors and thresholds used by this function
 *      were determined based on theoretical maximum wire speed and testing
 *      data, in order to minimize response time while increasing bulk
 *      throughput.
 *      This functionality is controlled by the InterruptThrottleRate module
 *      parameter (see igb_param.c)
 *      NOTE:  This function is called only when operating in a multiqueue
 *             receive environment.
 * @q_vector: pointer to q_vector
 **/
static void igb_update_ring_itr(struct igb_q_vector *q_vector)
{
	int new_val = q_vector->itr_val;
	int avg_wire_size = 0;
	struct igb_adapter *adapter = q_vector->adapter;

	/* For non-gigabit speeds, just fix the interrupt rate at 4000
	 * ints/sec - ITR timer value of 120 ticks.
	 */
	if (adapter->link_speed != SPEED_1000) {
		new_val = 120;
		goto set_itr_val;
	}

	if (q_vector->rx_ring && q_vector->rx_ring->total_packets) {
		struct igb_ring *ring = q_vector->rx_ring;
		avg_wire_size = ring->total_bytes / ring->total_packets;
	}

	if (q_vector->tx_ring && q_vector->tx_ring->total_packets) {
		struct igb_ring *ring = q_vector->tx_ring;
		avg_wire_size = max_t(u32, avg_wire_size,
				      (ring->total_bytes /
				       ring->total_packets));
	}

	/* if avg_wire_size isn't set no work was done */
	if (!avg_wire_size)
		goto clear_counts;

	/* Add 24 bytes to size to account for CRC, preamble, and gap */
	avg_wire_size += 24;

	/* Don't starve jumbo frames */
	avg_wire_size = min(avg_wire_size, 3000);

	/* Give a little boost to mid-size frames */
	if ((avg_wire_size > 300) && (avg_wire_size < 1200))
		new_val = avg_wire_size / 3;
	else
		new_val = avg_wire_size / 2;

set_itr_val:
	if (new_val != q_vector->itr_val) {
		q_vector->itr_val = new_val;
		q_vector->set_itr = 1;
	}
clear_counts:
	if (q_vector->rx_ring) {
		q_vector->rx_ring->total_bytes = 0;
		q_vector->rx_ring->total_packets = 0;
	}
	if (q_vector->tx_ring) {
		q_vector->tx_ring->total_bytes = 0;
		q_vector->tx_ring->total_packets = 0;
	}
}
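/*
 * Worked example of the heuristic above: 1500-byte frames average out
 * to avg_wire_size = 1524 after the +24 adjustment, which falls outside
 * the 300..1200 mid-size boost window, so new_val = 1524 / 2 = 762.  A
 * 64-byte request/response load gives (64 + 24) / 2 = 44, i.e. a much
 * shorter interrupt interval for latency-sensitive traffic.
 */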
/**
 * igb_update_itr - update the dynamic ITR value based on statistics
 *      Stores a new ITR value based on packets and byte
 *      counts during the last interrupt.  The advantage of per interrupt
 *      computation is faster updates and more accurate ITR for the current
 *      traffic pattern.  Constants in this function were computed
 *      based on theoretical maximum wire speed and thresholds were set based
 *      on testing data as well as attempting to minimize response time
 *      while increasing bulk throughput.
 *      this functionality is controlled by the InterruptThrottleRate module
 *      parameter (see igb_param.c)
 *      NOTE:  These calculations are only valid when operating in a single-
 *             queue environment.
 * @adapter: pointer to adapter
 * @itr_setting: current q_vector->itr_val
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 **/
static unsigned int igb_update_itr(struct igb_adapter *adapter, u16 itr_setting,
				   int packets, int bytes)
{
	unsigned int retval = itr_setting;

	if (packets == 0)
		goto update_itr_done;

	switch (itr_setting) {
	case lowest_latency:
		/* handle TSO and jumbo frames */
		if (bytes/packets > 8000)
			retval = bulk_latency;
		else if ((packets < 5) && (bytes > 512))
			retval = low_latency;
		break;
	case low_latency:  /* 50 usec aka 20000 ints/s */
		if (bytes > 10000) {
			/* this if handles the TSO accounting */
			if (bytes/packets > 8000) {
				retval = bulk_latency;
			} else if ((packets < 10) || ((bytes/packets) > 1200)) {
				retval = bulk_latency;
			} else if ((packets > 35)) {
				retval = lowest_latency;
			}
		} else if (bytes/packets > 2000) {
			retval = bulk_latency;
		} else if (packets <= 2 && bytes < 512) {
			retval = lowest_latency;
		}
		break;
	case bulk_latency: /* 250 usec aka 4000 ints/s */
		if (bytes > 25000) {
			if (packets > 35)
				retval = low_latency;
		} else if (bytes < 1500) {
			retval = low_latency;
		}
		break;
	}

update_itr_done:
	return retval;
}
static void igb_set_itr(struct igb_adapter *adapter)
{
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	u16 current_itr;
	u32 new_itr = q_vector->itr_val;

	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
	if (adapter->link_speed != SPEED_1000) {
		current_itr = 0;
		new_itr = 4000;
		goto set_itr_now;
	}

	adapter->rx_itr = igb_update_itr(adapter,
					 adapter->rx_itr,
					 adapter->rx_ring->total_packets,
					 adapter->rx_ring->total_bytes);

	adapter->tx_itr = igb_update_itr(adapter,
					 adapter->tx_itr,
					 adapter->tx_ring->total_packets,
					 adapter->tx_ring->total_bytes);
	current_itr = max(adapter->rx_itr, adapter->tx_itr);

	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (adapter->itr_setting == 3 && current_itr == lowest_latency)
		current_itr = low_latency;

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = 56;  /* aka 70,000 ints/sec */
		break;
	case low_latency:
		new_itr = 196; /* aka 20,000 ints/sec */
		break;
	case bulk_latency:
		new_itr = 980; /* aka 4,000 ints/sec */
		break;
	default:
		break;
	}

set_itr_now:
	adapter->rx_ring->total_bytes = 0;
	adapter->rx_ring->total_packets = 0;
	adapter->tx_ring->total_bytes = 0;
	adapter->tx_ring->total_packets = 0;

	if (new_itr != q_vector->itr_val) {
		/* this attempts to bias the interrupt rate towards Bulk
		 * by adding intermediate steps when interrupt rate is
		 * increasing */
		new_itr = new_itr > q_vector->itr_val ?
			     max((new_itr * q_vector->itr_val) /
				 (new_itr + (q_vector->itr_val >> 2)),
				 new_itr) :
			     new_itr;
		/* Don't write the value here; it resets the adapter's
		 * internal timer, and causes us to delay far longer than
		 * we should between interrupts.  Instead, we write the ITR
		 * value at the beginning of the next interrupt so the timing
		 * ends up being correct.
		 */
		q_vector->itr_val = new_itr;
		q_vector->set_itr = 1;
	}

	return;
}
#define IGB_TX_FLAGS_CSUM		0x00000001
#define IGB_TX_FLAGS_VLAN		0x00000002
#define IGB_TX_FLAGS_TSO		0x00000004
#define IGB_TX_FLAGS_IPV4		0x00000008
#define IGB_TX_FLAGS_TSTAMP		0x00000010
#define IGB_TX_FLAGS_VLAN_MASK		0xffff0000
#define IGB_TX_FLAGS_VLAN_SHIFT		16
static inline int igb_tso_adv(struct igb_ring *tx_ring,
			      struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
{
	struct e1000_adv_tx_context_desc *context_desc;
	unsigned int i;
	int err;
	struct igb_buffer *buffer_info;
	u32 info = 0, tu_cmd = 0;
	u32 mss_l4len_idx, l4len;

	if (skb_header_cloned(skb)) {
		err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (err)
			return err;
	}

	l4len = tcp_hdrlen(skb);
	*hdr_len += l4len;

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);
		iph->tot_len = 0;
		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
							 iph->daddr, 0,
							 IPPROTO_TCP,
							 0);
	} else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						       &ipv6_hdr(skb)->daddr,
						       0, IPPROTO_TCP, 0);
	}

	i = tx_ring->next_to_use;

	buffer_info = &tx_ring->buffer_info[i];
	context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
	/* VLAN MACLEN IPLEN */
	if (tx_flags & IGB_TX_FLAGS_VLAN)
		info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
	info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
	*hdr_len += skb_network_offset(skb);
	info |= skb_network_header_len(skb);
	*hdr_len += skb_network_header_len(skb);
	context_desc->vlan_macip_lens = cpu_to_le32(info);

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);

	if (skb->protocol == htons(ETH_P_IP))
		tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
	tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;

	context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);

	/* MSS L4LEN IDX */
	mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
	mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);

	/* For 82575, context index must be unique per ring. */
	if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
		mss_l4len_idx |= tx_ring->reg_idx << 4;

	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
	context_desc->seqnum_seed = 0;

	buffer_info->time_stamp = jiffies;
	buffer_info->next_to_watch = i;
	buffer_info->dma = 0;
	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	return true;
}
static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring,
				   struct sk_buff *skb, u32 tx_flags)
{
	struct e1000_adv_tx_context_desc *context_desc;
	struct pci_dev *pdev = tx_ring->pdev;
	struct igb_buffer *buffer_info;
	u32 info = 0, tu_cmd = 0;
	unsigned int i;

	if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
	    (tx_flags & IGB_TX_FLAGS_VLAN)) {
		i = tx_ring->next_to_use;
		buffer_info = &tx_ring->buffer_info[i];
		context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);

		if (tx_flags & IGB_TX_FLAGS_VLAN)
			info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
		info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			info |= skb_network_header_len(skb);

		context_desc->vlan_macip_lens = cpu_to_le32(info);

		tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			__be16 protocol;

			if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
				const struct vlan_ethhdr *vhdr =
					(const struct vlan_ethhdr *)skb->data;

				protocol = vhdr->h_vlan_encapsulated_proto;
			} else {
				protocol = skb->protocol;
			}

			switch (protocol) {
			case cpu_to_be16(ETH_P_IP):
				tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
				else if (ip_hdr(skb)->protocol == IPPROTO_SCTP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
				break;
			case cpu_to_be16(ETH_P_IPV6):
				/* XXX what about other V6 headers?? */
				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
				else if (ipv6_hdr(skb)->nexthdr == IPPROTO_SCTP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
				break;
			default:
				if (unlikely(net_ratelimit()))
					dev_warn(&pdev->dev,
					    "partial checksum but proto=%x!\n",
					    skb->protocol);
				break;
			}
		}

		context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
		context_desc->seqnum_seed = 0;
		if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
			context_desc->mss_l4len_idx =
				cpu_to_le32(tx_ring->reg_idx << 4);

		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = i;
		buffer_info->dma = 0;

		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}
	return false;
}
#define IGB_MAX_TXD_PWR	16
#define IGB_MAX_DATA_PER_TXD	(1<<IGB_MAX_TXD_PWR)
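/*
 * With IGB_MAX_TXD_PWR of 16, IGB_MAX_DATA_PER_TXD evaluates to
 * 1 << 16 = 65536: a single advanced Tx descriptor carries at most
 * 64 KB of data, which is what the BUG_ON(len >= IGB_MAX_DATA_PER_TXD)
 * checks in igb_tx_map_adv() below enforce per mapped segment.
 */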
static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
				 unsigned int first)
{
	struct igb_buffer *buffer_info;
	struct pci_dev *pdev = tx_ring->pdev;
	unsigned int len = skb_headlen(skb);
	unsigned int count = 0, i;
	unsigned int f;
	dma_addr_t *map;

	i = tx_ring->next_to_use;

	if (skb_dma_map(&pdev->dev, skb, DMA_TO_DEVICE)) {
		dev_err(&pdev->dev, "TX DMA map failed\n");
		return 0;
	}

	map = skb_shinfo(skb)->dma_maps;

	buffer_info = &tx_ring->buffer_info[i];
	BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
	buffer_info->length = len;
	/* set time_stamp *before* dma to help avoid a possible race */
	buffer_info->time_stamp = jiffies;
	buffer_info->next_to_watch = i;
	buffer_info->dma = skb_shinfo(skb)->dma_head;

	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
		struct skb_frag_struct *frag;

		i++;
		if (i == tx_ring->count)
			i = 0;

		frag = &skb_shinfo(skb)->frags[f];
		len = frag->size;

		buffer_info = &tx_ring->buffer_info[i];
		BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
		buffer_info->length = len;
		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = i;
		buffer_info->dma = map[count];
		count++;
	}

	tx_ring->buffer_info[i].skb = skb;
	tx_ring->buffer_info[first].next_to_watch = i;

	return count + 1;
}
static inline void igb_tx_queue_adv(struct igb_ring *tx_ring,
				    int tx_flags, int count, u32 paylen,
				    u8 hdr_len)
{
	union e1000_adv_tx_desc *tx_desc = NULL;
	struct igb_buffer *buffer_info;
	u32 olinfo_status = 0, cmd_type_len;
	unsigned int i;

	cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
			E1000_ADVTXD_DCMD_DEXT);

	if (tx_flags & IGB_TX_FLAGS_VLAN)
		cmd_type_len |= E1000_ADVTXD_DCMD_VLE;

	if (tx_flags & IGB_TX_FLAGS_TSTAMP)
		cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;

	if (tx_flags & IGB_TX_FLAGS_TSO) {
		cmd_type_len |= E1000_ADVTXD_DCMD_TSE;

		/* insert tcp checksum */
		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;

		/* insert ip checksum */
		if (tx_flags & IGB_TX_FLAGS_IPV4)
			olinfo_status |= E1000_TXD_POPTS_IXSM << 8;

	} else if (tx_flags & IGB_TX_FLAGS_CSUM) {
		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
	}

	if ((tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX) &&
	    (tx_flags & (IGB_TX_FLAGS_CSUM |
			 IGB_TX_FLAGS_TSO |
			 IGB_TX_FLAGS_VLAN)))
		olinfo_status |= tx_ring->reg_idx << 4;

	olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);

	i = tx_ring->next_to_use;
	while (count--) {
		buffer_info = &tx_ring->buffer_info[i];
		tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
		tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
		tx_desc->read.cmd_type_len =
			cpu_to_le32(cmd_type_len | buffer_info->length);
		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	tx_desc->read.cmd_type_len |= cpu_to_le32(IGB_ADVTXD_DCMD);
	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64). */
	wmb();

	tx_ring->next_to_use = i;
	writel(i, tx_ring->tail);
	/* we need this if more than one processor can write to our tail
	 * at a time, it synchronizes IO on IA64/Altix systems */
	mmiowb();
}
static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
{
	struct net_device *netdev = tx_ring->netdev;

	netif_stop_subqueue(netdev, tx_ring->queue_index);

	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available. */
	if (igb_desc_unused(tx_ring) < size)
		return -EBUSY;

	/* A reprieve! */
	netif_wake_subqueue(netdev, tx_ring->queue_index);
	tx_ring->tx_stats.restart_queue++;
	return 0;
}
static int igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
{
	if (igb_desc_unused(tx_ring) >= size)
		return 0;
	return __igb_maybe_stop_tx(tx_ring, size);
}
static netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
					   struct igb_ring *tx_ring)
{
	struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
	unsigned int first;
	unsigned int tx_flags = 0;
	u8 hdr_len = 0;
	int count = 0;
	int tso = 0;
	union skb_shared_tx *shtx;

	if (test_bit(__IGB_DOWN, &adapter->state)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* need: 1 descriptor per page,
	 *       + 2 desc gap to keep tail from touching head,
	 *       + 1 desc for skb->data,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time */
	if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
		/* this is a hard error */
		return NETDEV_TX_BUSY;
	}

	/*
	 * TODO: check that there currently is no other packet with
	 * time stamping in the queue
	 *
	 * When doing time stamping, keep the connection to the socket
	 * a while longer: it is still needed by skb_hwtstamp_tx(),
	 * called either in igb_tx_hwtstamp() or by our caller when
	 * doing software time stamping.
	 */
	shtx = skb_tx(skb);
	if (unlikely(shtx->hardware)) {
		shtx->in_progress = 1;
		tx_flags |= IGB_TX_FLAGS_TSTAMP;
	}

	if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
		tx_flags |= IGB_TX_FLAGS_VLAN;
		tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
	}

	if (skb->protocol == htons(ETH_P_IP))
		tx_flags |= IGB_TX_FLAGS_IPV4;

	first = tx_ring->next_to_use;
	if (skb_is_gso(skb)) {
		tso = igb_tso_adv(tx_ring, skb, tx_flags, &hdr_len);
		if (tso < 0) {
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}

	if (tso)
		tx_flags |= IGB_TX_FLAGS_TSO;
	else if (igb_tx_csum_adv(tx_ring, skb, tx_flags) &&
		 (skb->ip_summed == CHECKSUM_PARTIAL))
		tx_flags |= IGB_TX_FLAGS_CSUM;

	/*
	 * count reflects descriptors mapped, if 0 then mapping error
	 * has occurred and we need to rewind the descriptor queue
	 */
	count = igb_tx_map_adv(tx_ring, skb, first);
	if (!count) {
		dev_kfree_skb_any(skb);
		tx_ring->buffer_info[first].time_stamp = 0;
		tx_ring->next_to_use = first;
		return NETDEV_TX_OK;
	}

	igb_tx_queue_adv(tx_ring, tx_flags, count, skb->len, hdr_len);

	/* Make sure there is space in the ring for the next send. */
	igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);

	return NETDEV_TX_OK;
}
static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
				      struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct igb_ring *tx_ring;
	int r_idx;

	r_idx = skb->queue_mapping & (IGB_ABS_MAX_TX_QUEUES - 1);
	tx_ring = adapter->multi_tx_table[r_idx];

	/* This goes back to the question of how to logically map a tx queue
	 * to a flow.  Right now, performance is impacted slightly negatively
	 * if using multiple tx queues.  If the stack breaks away from a
	 * single qdisc implementation, we can look at this again. */
	return igb_xmit_frame_ring_adv(skb, tx_ring);
}
/**
 * igb_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void igb_tx_timeout(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/* Do the reset outside of interrupt context */
	adapter->tx_timeout_count++;
	schedule_work(&adapter->reset_task);
	wr32(E1000_EICS,
	     (adapter->eims_enable_mask & ~adapter->eims_other));
}
static void igb_reset_task(struct work_struct *work)
{
	struct igb_adapter *adapter;
	adapter = container_of(work, struct igb_adapter, reset_task);

	igb_reinit_locked(adapter);
}
/**
 * igb_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the timer callback.
 **/
static struct net_device_stats *igb_get_stats(struct net_device *netdev)
{
	/* only return the current stats */
	return &netdev->stats;
}
/**
 * igb_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int igb_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
	u32 rx_buffer_len, i;

	if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
	    (max_frame > MAX_JUMBO_FRAME_SIZE)) {
		dev_err(&adapter->pdev->dev, "Invalid MTU setting\n");
		return -EINVAL;
	}

	if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
		dev_err(&adapter->pdev->dev, "MTU > 9216 not supported.\n");
		return -EINVAL;
	}

	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		msleep(1);

	/* igb_down has a dependency on max_frame_size */
	adapter->max_frame_size = max_frame;

	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
	 * means we reserve 2 more, this pushes us to allocate from the next
	 * larger slab size.
	 * i.e. RXBUFFER_2048 --> size-4096 slab
	 */
	if (max_frame <= IGB_RXBUFFER_1024)
		rx_buffer_len = IGB_RXBUFFER_1024;
	else if (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)
		rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
	else
		rx_buffer_len = IGB_RXBUFFER_128;

	if (netif_running(netdev))
		igb_down(adapter);

	dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

	for (i = 0; i < adapter->num_rx_queues; i++)
		adapter->rx_ring[i].rx_buffer_len = rx_buffer_len;

	if (netif_running(netdev))
		igb_up(adapter);
	else
		igb_reset(adapter);

	clear_bit(__IGB_RESETTING, &adapter->state);

	return 0;
}
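/*
 * Resulting buffer selection, for example: a standard 1500 MTU gives
 * max_frame = 1518, which is above IGB_RXBUFFER_1024 but within
 * MAXIMUM_ETHERNET_VLAN_SIZE, so rx_buffer_len lands on the VLAN frame
 * size.  Jumbo MTUs fall through to IGB_RXBUFFER_128; since that is
 * below IGB_RXBUFFER_1024, igb_configure_rx_ring() then enables
 * header-split mode with the payload placed in half-page buffers.
 */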
/**
 * igb_update_stats - Update the board statistics counters
 * @adapter: board private structure
 **/
void igb_update_stats(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	u16 phy_tmp;
	int i;

#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF

	/*
	 * Prevent stats update while adapter is being reset, or if the pci
	 * connection is down.
	 */
	if (adapter->link_speed == 0)
		return;
	if (pci_channel_offline(pdev))
		return;

	adapter->stats.crcerrs += rd32(E1000_CRCERRS);
	adapter->stats.gprc += rd32(E1000_GPRC);
	adapter->stats.gorc += rd32(E1000_GORCL);
	rd32(E1000_GORCH); /* clear GORCL */
	adapter->stats.bprc += rd32(E1000_BPRC);
	adapter->stats.mprc += rd32(E1000_MPRC);
	adapter->stats.roc += rd32(E1000_ROC);

	adapter->stats.prc64 += rd32(E1000_PRC64);
	adapter->stats.prc127 += rd32(E1000_PRC127);
	adapter->stats.prc255 += rd32(E1000_PRC255);
	adapter->stats.prc511 += rd32(E1000_PRC511);
	adapter->stats.prc1023 += rd32(E1000_PRC1023);
	adapter->stats.prc1522 += rd32(E1000_PRC1522);
	adapter->stats.symerrs += rd32(E1000_SYMERRS);
	adapter->stats.sec += rd32(E1000_SEC);

	adapter->stats.mpc += rd32(E1000_MPC);
	adapter->stats.scc += rd32(E1000_SCC);
	adapter->stats.ecol += rd32(E1000_ECOL);
	adapter->stats.mcc += rd32(E1000_MCC);
	adapter->stats.latecol += rd32(E1000_LATECOL);
	adapter->stats.dc += rd32(E1000_DC);
	adapter->stats.rlec += rd32(E1000_RLEC);
	adapter->stats.xonrxc += rd32(E1000_XONRXC);
	adapter->stats.xontxc += rd32(E1000_XONTXC);
	adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
	adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
	adapter->stats.fcruc += rd32(E1000_FCRUC);
	adapter->stats.gptc += rd32(E1000_GPTC);
	adapter->stats.gotc += rd32(E1000_GOTCL);
	rd32(E1000_GOTCH); /* clear GOTCL */
	adapter->stats.rnbc += rd32(E1000_RNBC);
	adapter->stats.ruc += rd32(E1000_RUC);
	adapter->stats.rfc += rd32(E1000_RFC);
	adapter->stats.rjc += rd32(E1000_RJC);
	adapter->stats.tor += rd32(E1000_TORH);
	adapter->stats.tot += rd32(E1000_TOTH);
	adapter->stats.tpr += rd32(E1000_TPR);

	adapter->stats.ptc64 += rd32(E1000_PTC64);
	adapter->stats.ptc127 += rd32(E1000_PTC127);
	adapter->stats.ptc255 += rd32(E1000_PTC255);
	adapter->stats.ptc511 += rd32(E1000_PTC511);
	adapter->stats.ptc1023 += rd32(E1000_PTC1023);
	adapter->stats.ptc1522 += rd32(E1000_PTC1522);

	adapter->stats.mptc += rd32(E1000_MPTC);
	adapter->stats.bptc += rd32(E1000_BPTC);

	/* used for adaptive IFS */
	hw->mac.tx_packet_delta = rd32(E1000_TPT);
	adapter->stats.tpt += hw->mac.tx_packet_delta;
	hw->mac.collision_delta = rd32(E1000_COLC);
	adapter->stats.colc += hw->mac.collision_delta;

	adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
	adapter->stats.rxerrc += rd32(E1000_RXERRC);
	adapter->stats.tncrs += rd32(E1000_TNCRS);
	adapter->stats.tsctc += rd32(E1000_TSCTC);
	adapter->stats.tsctfc += rd32(E1000_TSCTFC);

	adapter->stats.iac += rd32(E1000_IAC);
	adapter->stats.icrxoc += rd32(E1000_ICRXOC);
	adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
	adapter->stats.icrxatc += rd32(E1000_ICRXATC);
	adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
	adapter->stats.ictxatc += rd32(E1000_ICTXATC);
	adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
	adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
	adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);

	/* Fill out the OS statistics structure */
	netdev->stats.multicast = adapter->stats.mprc;
	netdev->stats.collisions = adapter->stats.colc;

	/* Rx Errors */
	if (hw->mac.type != e1000_82575) {
		u32 rqdpc_tmp;
		u64 rqdpc_total = 0;
		/* Read out drops stats per RX queue.  Notice RQDPC (Receive
		 * Queue Drop Packet Count) stats only gets incremented, if
		 * the DROP_EN bit is set (in the SRRCTL register for that
		 * queue).  If DROP_EN bit is NOT set, then the somewhat
		 * equivalent count is stored in RNBC (not per queue basis).
		 * Also note the drop count is due to lack of available
		 * descriptors.
		 */
		for (i = 0; i < adapter->num_rx_queues; i++) {
			rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0xFFF;
			adapter->rx_ring[i].rx_stats.drops += rqdpc_tmp;
			rqdpc_total += adapter->rx_ring[i].rx_stats.drops;
		}
		netdev->stats.rx_fifo_errors = rqdpc_total;
	}

	/* Note RNBC (Receive No Buffers Count) is not an exact
	 * drop count as the hardware FIFO might save the day.  That's
	 * one of the reasons for saving it in rx_fifo_errors, as it is
	 * potentially not a true drop.
	 */
	netdev->stats.rx_fifo_errors += adapter->stats.rnbc;

	/* RLEC on some newer hardware can be incorrect so build
	 * our own version based on RUC and ROC */
	netdev->stats.rx_errors = adapter->stats.rxerrc +
		adapter->stats.crcerrs + adapter->stats.algnerrc +
		adapter->stats.ruc + adapter->stats.roc +
		adapter->stats.cexterr;
	netdev->stats.rx_length_errors = adapter->stats.ruc +
					 adapter->stats.roc;
	netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
	netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
	netdev->stats.rx_missed_errors = adapter->stats.mpc;

	/* Tx Errors */
	netdev->stats.tx_errors = adapter->stats.ecol +
				  adapter->stats.latecol;
	netdev->stats.tx_aborted_errors = adapter->stats.ecol;
	netdev->stats.tx_window_errors = adapter->stats.latecol;
	netdev->stats.tx_carrier_errors = adapter->stats.tncrs;

	/* Tx Dropped needs to be maintained elsewhere */

	/* Phy Stats */
	if (hw->phy.media_type == e1000_media_type_copper) {
		if ((adapter->link_speed == SPEED_1000) &&
		   (!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
			phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
			adapter->phy_stats.idle_errors += phy_tmp;
		}
	}

	/* Management Stats */
	adapter->stats.mgptc += rd32(E1000_MGTPTC);
	adapter->stats.mgprc += rd32(E1000_MGTPRC);
	adapter->stats.mgpdc += rd32(E1000_MGTPDC);
}
static irqreturn_t igb_msix_other(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = rd32(E1000_ICR);
	/* reading ICR causes bit 31 of EICR to be cleared */

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	/* Check for a mailbox event */
	if (icr & E1000_ICR_VMMB)
		igb_msg_task(adapter);

	if (icr & E1000_ICR_LSC) {
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_VMMB);
	wr32(E1000_EIMS, adapter->eims_other);

	return IRQ_HANDLED;
}
static void igb_write_itr(struct igb_q_vector *q_vector)
{
	u32 itr_val = q_vector->itr_val & 0x7FFC;

	if (!q_vector->set_itr)
		return;

	if (!itr_val)
		itr_val = 0x4;

	if (q_vector->itr_shift)
		itr_val |= itr_val << q_vector->itr_shift;
	else
		itr_val |= 0x8000000;

	writel(itr_val, q_vector->itr_register);
	q_vector->set_itr = 0;
}
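/*
 * The write is intentionally deferred behind set_itr: as noted in
 * igb_set_itr(), writing the EITR register restarts the adapter's
 * internal interval timer, so the new value is only pushed out here,
 * at the start of the next interrupt, to keep inter-interrupt timing
 * correct.  The & 0x7FFC mask keeps only the bits the register's
 * interval field accepts.
 */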
static irqreturn_t igb_msix_ring(int irq, void *data)
{
	struct igb_q_vector *q_vector = data;

	/* Write the ITR value calculated from the previous interrupt. */
	igb_write_itr(q_vector);

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}
#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	int cpu = get_cpu();

	if (q_vector->cpu == cpu)
		goto out_no_update;

	if (q_vector->tx_ring) {
		int q = q_vector->tx_ring->reg_idx;
		u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
		if (hw->mac.type == e1000_82575) {
			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
			dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		} else {
			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
			dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
				      E1000_DCA_TXCTRL_CPUID_SHIFT;
		}
		dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
		wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
	}
	if (q_vector->rx_ring) {
		int q = q_vector->rx_ring->reg_idx;
		u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
		if (hw->mac.type == e1000_82575) {
			dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
			dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		} else {
			dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
			dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
				      E1000_DCA_RXCTRL_CPUID_SHIFT;
		}
		dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
		dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
		dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
		wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
	}
	q_vector->cpu = cpu;
out_no_update:
	put_cpu();
}
static void igb_setup_dca(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
		return;

	/* Always use CB2 mode, difference is masked in the CB driver. */
	wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		q_vector->cpu = -1;
		igb_update_dca(q_vector);
	}
}
static int __igb_notify_dca(struct device *dev, void *data)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	unsigned long event = *(unsigned long *)data;

	switch (event) {
	case DCA_PROVIDER_ADD:
		/* if already enabled, don't do it again */
		if (adapter->flags & IGB_FLAG_DCA_ENABLED)
			break;
		/* Always use CB2 mode, difference is masked
		 * in the CB driver. */
		wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
		if (dca_add_requester(dev) == 0) {
			adapter->flags |= IGB_FLAG_DCA_ENABLED;
			dev_info(&adapter->pdev->dev, "DCA enabled\n");
			igb_setup_dca(adapter);
			break;
		}
		/* Fall Through since DCA is disabled. */
	case DCA_PROVIDER_REMOVE:
		if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
			/* without this a class_device is left
			 * hanging around in the sysfs model */
			dca_remove_requester(dev);
			dev_info(&adapter->pdev->dev, "DCA disabled\n");
			adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
			wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
		}
		break;
	}

	return 0;
}

static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
                          void *p)
{
	int ret_val;

	ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
	                                 __igb_notify_dca);

	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}
#endif /* CONFIG_IGB_DCA */
static void igb_ping_all_vfs(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ping;
	int i;

	for (i = 0 ; i < adapter->vfs_allocated_count; i++) {
		ping = E1000_PF_CONTROL_MSG;
		if (adapter->vf_data[i].clear_to_send)
			ping |= E1000_VT_MSGTYPE_CTS;
		igb_write_mbx(hw, &ping, 1, i);
	}
}
static int igb_set_vf_multicasts(struct igb_adapter *adapter,
                                 u32 *msgbuf, u32 vf)
{
	int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
	u16 *hash_list = (u16 *)&msgbuf[1];
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	int i;

	/* only up to 30 hash values supported */
	if (n > 30)
		n = 30;

	/* salt away the number of multicast addresses assigned
	 * to this VF for later use to restore when the PF multicast
	 * list changes
	 */
	vf_data->num_vf_mc_hashes = n;

	/* VFs are limited to using the MTA hash table for their multicast
	 * addresses */
	for (i = 0; i < n; i++)
		vf_data->vf_mc_hashes[i] = hash_list[i];

	/* Flush and reset the mta with the new values */
	igb_set_rx_mode(adapter->netdev);

	return 0;
}
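/*
 * Illustration only: the MTA is a hash-indexed bit array, so a VF's
 * "multicast list" is really a list of hash values.  A sketch of the kind
 * of bit selection involved, assuming the default filter type that uses
 * the upper 12 bits of the address; the authoritative version is
 * igb_hash_mc_addr()/igb_mta_set() in the shared MAC code.
 */
#if 0
static u16 example_mta_hash(const u8 *mc_addr)
{
	/* 12 bits spanning the two most significant address bytes */
	return (((u16)mc_addr[5] << 4) | (mc_addr[4] >> 4)) & 0xFFF;
}
#endif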
static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data;
	int i, j;

	for (i = 0; i < adapter->vfs_allocated_count; i++) {
		vf_data = &adapter->vf_data[i];
		for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
			igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
	}
}
static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 pool_mask, reg, vid;
	int i;

	pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);

	/* Find the vlan filter for this id */
	for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
		reg = rd32(E1000_VLVF(i));

		/* remove the vf from the pool */
		reg &= ~pool_mask;

		/* if pool is empty then remove entry from vfta */
		if (!(reg & E1000_VLVF_POOLSEL_MASK) &&
		    (reg & E1000_VLVF_VLANID_ENABLE)) {
			vid = reg & E1000_VLVF_VLANID_MASK;
			igb_vfta_set(hw, vid, false);
			reg = 0;
		}

		wr32(E1000_VLVF(i), reg);
	}

	adapter->vf_data[vf].vlans_enabled = 0;
}
static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 reg, i;

	/* It is an error to call this function when VFs are not enabled */
	if (!adapter->vfs_allocated_count)
		return -1;

	/* Find the vlan filter for this id */
	for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
		reg = rd32(E1000_VLVF(i));
		if ((reg & E1000_VLVF_VLANID_ENABLE) &&
		    vid == (reg & E1000_VLVF_VLANID_MASK))
			break;
	}

	if (add) {
		if (i == E1000_VLVF_ARRAY_SIZE) {
			/* Did not find a matching VLAN ID entry that was
			 * enabled.  Search for a free filter entry, i.e.
			 * one without the enable bit set
			 */
			for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
				reg = rd32(E1000_VLVF(i));
				if (!(reg & E1000_VLVF_VLANID_ENABLE))
					break;
			}
		}
		if (i < E1000_VLVF_ARRAY_SIZE) {
			/* Found an enabled/available entry */
			reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);

			/* if !enabled we need to set this up in vfta */
			if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
				/* add VID to filter table, if bit already set
				 * PF must have added it outside of table */
				if (igb_vfta_set(hw, vid, true))
					reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT +
						adapter->vfs_allocated_count);
				reg |= E1000_VLVF_VLANID_ENABLE;
			}
			reg &= ~E1000_VLVF_VLANID_MASK;
			reg |= vid;

			wr32(E1000_VLVF(i), reg);

			/* do not modify RLPML for PF devices */
			if (vf >= adapter->vfs_allocated_count)
				return 0;

			if (!adapter->vf_data[vf].vlans_enabled) {
				u32 size;
				reg = rd32(E1000_VMOLR(vf));
				size = reg & E1000_VMOLR_RLPML_MASK;
				size += 4;
				reg &= ~E1000_VMOLR_RLPML_MASK;
				reg |= size;
				wr32(E1000_VMOLR(vf), reg);
			}
			adapter->vf_data[vf].vlans_enabled++;

			return 0;
		}
	} else {
		if (i < E1000_VLVF_ARRAY_SIZE) {
			/* remove vf from the pool */
			reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf));
			/* if pool is empty then remove entry from vfta */
			if (!(reg & E1000_VLVF_POOLSEL_MASK)) {
				reg = 0;
				igb_vfta_set(hw, vid, false);
			}
			wr32(E1000_VLVF(i), reg);

			/* do not modify RLPML for PF devices */
			if (vf >= adapter->vfs_allocated_count)
				return 0;

			adapter->vf_data[vf].vlans_enabled--;
			if (!adapter->vf_data[vf].vlans_enabled) {
				u32 size;
				reg = rd32(E1000_VMOLR(vf));
				size = reg & E1000_VMOLR_RLPML_MASK;
				size -= 4;
				reg &= ~E1000_VMOLR_RLPML_MASK;
				reg |= size;
				wr32(E1000_VMOLR(vf), reg);
			}
			return 0;
		}
	}

	return -1;
}
static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
{
	int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
	int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);

	return igb_vlvf_set(adapter, vid, add, vf);
}
static inline void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;

	/* disable mailbox functionality for vf */
	adapter->vf_data[vf].clear_to_send = false;

	/* reset offloads to defaults */
	igb_set_vmolr(hw, vf);

	/* reset vlans for device */
	igb_clear_vf_vfta(adapter, vf);

	/* reset multicast table array for vf */
	adapter->vf_data[vf].num_vf_mc_hashes = 0;

	/* Flush and reset the mta with the new values */
	igb_set_rx_mode(adapter->netdev);
}
static inline void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
	int rar_entry = hw->mac.rar_entry_count - (vf + 1);
	u32 reg, msgbuf[3];
	u8 *addr = (u8 *)(&msgbuf[1]);

	/* process all the same items cleared in a function level reset */
	igb_vf_reset_event(adapter, vf);

	/* set vf mac address */
	igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);

	/* enable transmit and receive for vf */
	reg = rd32(E1000_VFTE);
	wr32(E1000_VFTE, reg | (1 << vf));
	reg = rd32(E1000_VFRE);
	wr32(E1000_VFRE, reg | (1 << vf));

	/* enable mailbox functionality for vf */
	adapter->vf_data[vf].clear_to_send = true;

	/* reply to reset with ack and vf mac address */
	msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
	memcpy(addr, vf_mac, 6);
	igb_write_mbx(hw, msgbuf, 3, vf);
}
static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
{
	unsigned char *addr = (char *)&msg[1];
	int err = -1;

	if (is_valid_ether_addr(addr))
		err = igb_set_vf_mac(adapter, vf, addr);

	return err;
}
static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 msg = E1000_VT_MSGTYPE_NACK;

	/* if device isn't clear to send it shouldn't be reading either */
	if (!adapter->vf_data[vf].clear_to_send)
		igb_write_mbx(hw, &msg, 1, vf);
}
static void igb_msg_task(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vf;

	for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
		/* process any reset requests */
		if (!igb_check_for_rst(hw, vf)) {
			adapter->vf_data[vf].clear_to_send = false;
			igb_vf_reset_event(adapter, vf);
		}

		/* process any messages pending */
		if (!igb_check_for_msg(hw, vf))
			igb_rcv_msg_from_vf(adapter, vf);

		/* process any acks */
		if (!igb_check_for_ack(hw, vf))
			igb_rcv_ack_from_vf(adapter, vf);
	}
}
static int igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
{
	u32 mbx_size = E1000_VFMAILBOX_SIZE;
	u32 msgbuf[mbx_size];
	struct e1000_hw *hw = &adapter->hw;
	s32 retval;

	retval = igb_read_mbx(hw, msgbuf, mbx_size, vf);

	if (retval)
		dev_err(&adapter->pdev->dev,
		        "Error receiving message from VF\n");

	/* this is a message we already processed, do nothing */
	if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
		return retval;

	/*
	 * until the vf completes a reset it should not be
	 * allowed to start any configuration.
	 */

	if (msgbuf[0] == E1000_VF_RESET) {
		igb_vf_reset_msg(adapter, vf);
		return retval;
	}

	if (!adapter->vf_data[vf].clear_to_send) {
		msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
		igb_write_mbx(hw, msgbuf, 1, vf);
		return retval;
	}

	switch ((msgbuf[0] & 0xFFFF)) {
	case E1000_VF_SET_MAC_ADDR:
		retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_MULTICAST:
		retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_LPE:
		retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
		break;
	case E1000_VF_SET_VLAN:
		retval = igb_set_vf_vlan(adapter, msgbuf, vf);
		break;
	default:
		dev_err(&adapter->pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
		retval = -E1000_ERR_MBX;
		break;
	}

	/* notify the VF of the results of what it sent us */
	if (retval)
		msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
	else
		msgbuf[0] |= E1000_VT_MSGTYPE_ACK;

	msgbuf[0] |= E1000_VT_MSGTYPE_CTS;

	igb_write_mbx(hw, msgbuf, 1, vf);

	return retval;
}
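/*
 * Illustration only: what a VF-side request that lands in the switch above
 * looks like.  Word 0 carries the message type in its low bits plus extra
 * info above E1000_VT_MSGINFO_SHIFT, and later words carry the payload;
 * the exact field widths live in the mailbox header and are assumed here.
 */
#if 0
	u32 msgbuf[2];

	msgbuf[0] = E1000_VF_SET_VLAN | (1 << E1000_VT_MSGINFO_SHIFT); /* add */
	msgbuf[1] = vid;		/* VLAN id to enable for this VF */
	igb_write_mbx(hw, msgbuf, 2, vf);
#endif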
/**
 * igb_set_uta - Set unicast filter table address
 * @adapter: board private structure
 *
 * The unicast table address is a register array of 32-bit registers.
 * The table is meant to be used in a way similar to how the MTA is used
 * however due to certain limitations in the hardware it is necessary to
 * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
 * enable bit to allow vlan tag stripping when promiscuous mode is enabled
 **/
static void igb_set_uta(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	/* The UTA table only exists on 82576 hardware and newer */
	if (hw->mac.type < e1000_82576)
		return;

	/* we only need to do this if VMDq is enabled */
	if (!adapter->vfs_allocated_count)
		return;

	for (i = 0; i < hw->mac.uta_reg_count; i++)
		array_wr32(E1000_UTA, i, ~0);
}
/**
 * igb_intr_msi - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t igb_intr_msi(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	struct e1000_hw *hw = &adapter->hw;
	/* read ICR disables interrupts using IAM */
	u32 icr = rd32(E1000_ICR);

	igb_write_itr(q_vector);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}
/**
 * igb_intr - Legacy Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t igb_intr(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	struct e1000_hw *hw = &adapter->hw;
	/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked.  No
	 * need for the IMC write */
	u32 icr = rd32(E1000_ICR);
	if (!icr)
		return IRQ_NONE;  /* Not our interrupt */

	igb_write_itr(q_vector);

	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
	 * not set, then the adapter didn't send an interrupt */
	if (!(icr & E1000_ICR_INT_ASSERTED))
		return IRQ_NONE;

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}
static inline void igb_ring_irq_enable(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->itr_setting & 3) {
		if (!adapter->msix_entries)
			igb_set_itr(adapter);
		else
			igb_update_ring_itr(q_vector);
	}

	if (!test_bit(__IGB_DOWN, &adapter->state)) {
		if (adapter->msix_entries)
			wr32(E1000_EIMS, q_vector->eims_value);
		else
			igb_irq_enable(adapter);
	}
}
/**
 * igb_poll - NAPI Rx polling callback
 * @napi: napi polling structure
 * @budget: count of how many packets we should handle
 **/
static int igb_poll(struct napi_struct *napi, int budget)
{
	struct igb_q_vector *q_vector = container_of(napi,
	                                             struct igb_q_vector,
	                                             napi);
	int tx_clean_complete = 1, work_done = 0;

#ifdef CONFIG_IGB_DCA
	if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
		igb_update_dca(q_vector);
#endif
	if (q_vector->tx_ring)
		tx_clean_complete = igb_clean_tx_irq(q_vector);

	if (q_vector->rx_ring)
		igb_clean_rx_irq_adv(q_vector, &work_done, budget);

	if (!tx_clean_complete)
		work_done = budget;

	/* If not enough Rx work done, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		igb_ring_irq_enable(q_vector);
	}

	return work_done;
}
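/*
 * Illustration only: igb_poll() follows the standard NAPI contract, which
 * in its minimal form (assumed names, era-appropriate API) looks like the
 * sketch below.  Returning less than 'budget' with napi_complete() is what
 * lets the core stop polling and the driver re-enable its interrupt.
 */
#if 0
static int example_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;

	/* clean at most 'budget' rx descriptors, counting into work_done */

	if (work_done < budget) {
		napi_complete(napi);
		/* re-arm the queue's interrupt here */
	}
	return work_done;
}
#endif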
/**
 * igb_tx_hwtstamp - utility function which checks for TX time stamp
 * @adapter: board private structure
 * @skb: packet that was just sent
 *
 * If we were asked to do hardware stamping and such a time stamp is
 * available, then it must have been for this skb here because we allow
 * only one such packet into the queue.
 */
static void igb_tx_hwtstamp(struct igb_adapter *adapter, struct sk_buff *skb)
{
	union skb_shared_tx *shtx = skb_tx(skb);
	struct e1000_hw *hw = &adapter->hw;

	if (unlikely(shtx->hardware)) {
		u32 valid = rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID;
		if (valid) {
			u64 regval = rd32(E1000_TXSTMPL);
			u64 ns;
			struct skb_shared_hwtstamps shhwtstamps;

			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
			regval |= (u64)rd32(E1000_TXSTMPH) << 32;
			ns = timecounter_cyc2time(&adapter->clock,
			                          regval);
			timecompare_update(&adapter->compare, ns);
			shhwtstamps.hwtstamp = ns_to_ktime(ns);
			shhwtstamps.syststamp =
				timecompare_transform(&adapter->compare, ns);
			skb_tstamp_tx(skb, &shhwtstamps);
		}
	}
}
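/*
 * Illustration only (user space): the consumer side of the stamp delivered
 * by skb_tstamp_tx() above.  A socket opts in with SO_TIMESTAMPING and then
 * receives the hardware stamp as a cmsg on the socket's error queue;
 * headers defining these constants are assumed to match the running kernel.
 */
#if 0
#include <sys/socket.h>
#include <linux/net_tstamp.h>

static int enable_tx_timestamps(int sock)
{
	int flags = SOF_TIMESTAMPING_TX_HARDWARE |
		    SOF_TIMESTAMPING_RAW_HARDWARE;

	return setsockopt(sock, SOL_SOCKET, SO_TIMESTAMPING,
			  &flags, sizeof(flags));
}
#endif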
/**
 * igb_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: pointer to q_vector containing needed info
 * returns true if ring is completely cleaned
 **/
static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct igb_ring *tx_ring = q_vector->tx_ring;
	struct net_device *netdev = tx_ring->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct igb_buffer *buffer_info;
	struct sk_buff *skb;
	union e1000_adv_tx_desc *tx_desc, *eop_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int i, eop, count = 0;
	bool cleaned = false;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);

	while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
	       (count < tx_ring->count)) {
		for (cleaned = false; !cleaned; count++) {
			tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			cleaned = (i == eop);
			skb = buffer_info->skb;

			if (skb) {
				unsigned int segs, bytecount;
				/* gso_segs is currently only valid for tcp */
				segs = skb_shinfo(skb)->gso_segs ?: 1;
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * skb_headlen(skb)) +
				            skb->len;
				total_packets += segs;
				total_bytes += bytecount;

				igb_tx_hwtstamp(adapter, skb);
			}

			igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
			tx_desc->wb.status = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}
		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

	if (unlikely(count &&
		     netif_carrier_ok(netdev) &&
		     igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
		    !(test_bit(__IGB_DOWN, &adapter->state))) {
			netif_wake_subqueue(netdev, tx_ring->queue_index);
			tx_ring->tx_stats.restart_queue++;
		}
	}

	if (tx_ring->detect_tx_hung) {
		/* Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i */
		tx_ring->detect_tx_hung = false;
		if (tx_ring->buffer_info[i].time_stamp &&
		    time_after(jiffies, tx_ring->buffer_info[i].time_stamp +
			       (adapter->tx_timeout_factor * HZ))
		    && !(rd32(E1000_STATUS) &
			 E1000_STATUS_TXOFF)) {

			/* detected Tx unit hang */
			dev_err(&tx_ring->pdev->dev,
				"Detected Tx Unit Hang\n"
				"  Tx Queue             <%d>\n"
				"  TDH                  <%x>\n"
				"  TDT                  <%x>\n"
				"  next_to_use          <%x>\n"
				"  next_to_clean        <%x>\n"
				"buffer_info[next_to_clean]\n"
				"  time_stamp           <%lx>\n"
				"  next_to_watch        <%x>\n"
				"  jiffies              <%lx>\n"
				"  desc.status          <%x>\n",
				tx_ring->queue_index,
				readl(tx_ring->head),
				readl(tx_ring->tail),
				tx_ring->next_to_use,
				tx_ring->next_to_clean,
				tx_ring->buffer_info[i].time_stamp,
				eop,
				jiffies,
				eop_desc->wb.status);
			netif_stop_subqueue(netdev, tx_ring->queue_index);
		}
	}
	tx_ring->total_bytes += total_bytes;
	tx_ring->total_packets += total_packets;
	tx_ring->tx_stats.bytes += total_bytes;
	tx_ring->tx_stats.packets += total_packets;
	netdev->stats.tx_bytes += total_bytes;
	netdev->stats.tx_packets += total_packets;
	return (count < tx_ring->count);
}
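/*
 * Worked example for the byte accounting above (illustrative numbers): a
 * TSO skb with gso_segs = 3, skb_headlen() = 54 (Ethernet+IP+TCP) and
 * skb->len = 4398 counts bytecount = (3 - 1) * 54 + 4398 = 4506, i.e. the
 * on-wire total once each of the three segments carries its own headers.
 */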
/**
 * igb_receive_skb - helper function to handle rx indications
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 * @vlan_tag: vlan tag for packet
 **/
static void igb_receive_skb(struct igb_q_vector *q_vector,
                            struct sk_buff *skb,
                            u16 vlan_tag)
{
	struct igb_adapter *adapter = q_vector->adapter;

	if (vlan_tag)
		vlan_gro_receive(&q_vector->napi, adapter->vlgrp,
		                 vlan_tag, skb);
	else
		napi_gro_receive(&q_vector->napi, skb);
}
static inline void igb_rx_checksum_adv(struct igb_ring *ring,
                                       u32 status_err, struct sk_buff *skb)
{
	skb->ip_summed = CHECKSUM_NONE;

	/* Ignore Checksum bit is set or checksum is disabled through ethtool */
	if (!(ring->flags & IGB_RING_FLAG_RX_CSUM) ||
	     (status_err & E1000_RXD_STAT_IXSM))
		return;

	/* TCP/UDP checksum error bit is set */
	if (status_err &
	    (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
		/*
		 * work around errata with sctp packets where the TCPE aka
		 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
		 * packets, (aka let the stack check the crc32c)
		 */
		if ((skb->len == 60) &&
		    (ring->flags & IGB_RING_FLAG_RX_SCTP_CSUM))
			ring->rx_stats.csum_err++;

		/* let the stack verify checksum errors */
		return;
	}
	/* It must be a TCP or UDP packet with a valid checksum */
	if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	dev_dbg(&ring->pdev->dev, "cksum success: bits %08X\n", status_err);
}
static inline u16 igb_get_hlen(struct igb_ring *rx_ring,
                               union e1000_adv_rx_desc *rx_desc)
{
	/* HW will not DMA in data larger than the given buffer, even if it
	 * parses the (NFS, of course) header to be larger.  In that case, it
	 * fills the header buffer and spills the rest into the page.
	 */
	u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
	           E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
	if (hlen > rx_ring->rx_buffer_len)
		hlen = rx_ring->rx_buffer_len;
	return hlen;
}
static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
                                 int *work_done, int budget)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct igb_ring *rx_ring = q_vector->rx_ring;
	struct net_device *netdev = rx_ring->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = rx_ring->pdev;
	union e1000_adv_rx_desc *rx_desc, *next_rxd;
	struct igb_buffer *buffer_info, *next_buffer;
	struct sk_buff *skb;
	bool cleaned = false;
	int cleaned_count = 0;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int i;
	u32 staterr;
	u16 length;
	u16 vlan_tag;

	i = rx_ring->next_to_clean;
	buffer_info = &rx_ring->buffer_info[i];
	rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

	while (staterr & E1000_RXD_STAT_DD) {
		if (*work_done >= budget)
			break;
		(*work_done)++;

		skb = buffer_info->skb;
		prefetch(skb->data - NET_IP_ALIGN);
		buffer_info->skb = NULL;

		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC_ADV(*rx_ring, i);
		prefetch(next_rxd);
		next_buffer = &rx_ring->buffer_info[i];

		length = le16_to_cpu(rx_desc->wb.upper.length);
		cleaned = true;
		cleaned_count++;

		if (buffer_info->dma) {
			pci_unmap_single(pdev, buffer_info->dma,
					 rx_ring->rx_buffer_len,
					 PCI_DMA_FROMDEVICE);
			buffer_info->dma = 0;
			if (rx_ring->rx_buffer_len >= IGB_RXBUFFER_1024) {
				skb_put(skb, length);
				goto send_up;
			}
			skb_put(skb, igb_get_hlen(rx_ring, rx_desc));
		}

		if (length) {
			pci_unmap_page(pdev, buffer_info->page_dma,
				       PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
			buffer_info->page_dma = 0;

			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
						buffer_info->page,
						buffer_info->page_offset,
						length);

			if (page_count(buffer_info->page) != 1)
				buffer_info->page = NULL;
			else
				get_page(buffer_info->page);

			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
		}

		if (!(staterr & E1000_RXD_STAT_EOP)) {
			buffer_info->skb = next_buffer->skb;
			buffer_info->dma = next_buffer->dma;
			next_buffer->skb = skb;
			next_buffer->dma = 0;
			goto next_desc;
		}
send_up:
		/*
		 * If this bit is set, then the RX registers contain
		 * the time stamp. No other packet will be time
		 * stamped until we read these registers, so read the
		 * registers to make them available again. Because
		 * only one packet can be time stamped at a time, we
		 * know that the register values must belong to this
		 * one here and therefore we don't need to compare
		 * any of the additional attributes stored for it.
		 *
		 * If nothing went wrong, then it should have a
		 * skb_shared_tx that we can turn into a
		 * skb_shared_hwtstamps.
		 *
		 * TODO: can time stamping be triggered (thus locking
		 * the registers) without the packet reaching this point
		 * here? In that case RX time stamping would get stuck.
		 *
		 * TODO: in "time stamp all packets" mode this bit is
		 * not set. Need a global flag for this mode and then
		 * always read the registers. Cannot be done without
		 * a race condition.
		 */
		if (unlikely(staterr & E1000_RXD_STAT_TS)) {
			u64 regval;
			u64 ns;
			struct skb_shared_hwtstamps *shhwtstamps =
				skb_hwtstamps(skb);

			WARN(!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID),
			     "igb: no RX time stamp available for time stamped packet");
			regval = rd32(E1000_RXSTMPL);
			regval |= (u64)rd32(E1000_RXSTMPH) << 32;
			ns = timecounter_cyc2time(&adapter->clock, regval);
			timecompare_update(&adapter->compare, ns);
			memset(shhwtstamps, 0, sizeof(*shhwtstamps));
			shhwtstamps->hwtstamp = ns_to_ktime(ns);
			shhwtstamps->syststamp =
				timecompare_transform(&adapter->compare, ns);
		}

		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		total_bytes += skb->len;
		total_packets++;

		igb_rx_checksum_adv(rx_ring, staterr, skb);

		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, rx_ring->queue_index);

		vlan_tag = ((staterr & E1000_RXD_STAT_VP) ?
		            le16_to_cpu(rx_desc->wb.upper.vlan) : 0);

		igb_receive_skb(q_vector, skb, vlan_tag);

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
			igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = igb_desc_unused(rx_ring);

	if (cleaned_count)
		igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);

	rx_ring->total_packets += total_packets;
	rx_ring->total_bytes += total_bytes;
	rx_ring->rx_stats.packets += total_packets;
	rx_ring->rx_stats.bytes += total_bytes;
	netdev->stats.rx_bytes += total_bytes;
	netdev->stats.rx_packets += total_packets;
	return cleaned;
}
/**
 * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 **/
static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
                                     int cleaned_count)
{
	struct net_device *netdev = rx_ring->netdev;
	union e1000_adv_rx_desc *rx_desc;
	struct igb_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	int bufsz;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	bufsz = rx_ring->rx_buffer_len;

	while (cleaned_count--) {
		rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);

		if ((bufsz < IGB_RXBUFFER_1024) && !buffer_info->page_dma) {
			if (!buffer_info->page) {
				buffer_info->page = alloc_page(GFP_ATOMIC);
				if (!buffer_info->page) {
					rx_ring->rx_stats.alloc_failed++;
					goto no_buffers;
				}
				buffer_info->page_offset = 0;
			} else {
				buffer_info->page_offset ^= PAGE_SIZE / 2;
			}
			buffer_info->page_dma =
				pci_map_page(rx_ring->pdev, buffer_info->page,
					     buffer_info->page_offset,
					     PAGE_SIZE / 2,
					     PCI_DMA_FROMDEVICE);
		}

		if (!buffer_info->skb) {
			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
			if (!skb) {
				rx_ring->rx_stats.alloc_failed++;
				goto no_buffers;
			}

			buffer_info->skb = skb;
			buffer_info->dma = pci_map_single(rx_ring->pdev,
			                                  skb->data,
			                                  bufsz,
			                                  PCI_DMA_FROMDEVICE);
		}
		/* Refresh the desc even if buffer_addrs didn't change because
		 * each write-back erases this info. */
		if (bufsz < IGB_RXBUFFER_1024) {
			rx_desc->read.pkt_addr =
			     cpu_to_le64(buffer_info->page_dma);
			rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
		} else {
			rx_desc->read.pkt_addr =
			     cpu_to_le64(buffer_info->dma);
			rx_desc->read.hdr_addr = 0;
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		if (i == 0)
			i = (rx_ring->count - 1);
		else
			i--;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64). */
		wmb();
		writel(i, rx_ring->tail);
	}
}
static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct mii_ioctl_data *data = if_mii(ifr);

	if (adapter->hw.phy.media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = adapter->hw.phy.addr;
		break;
	case SIOCGMIIREG:
		if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
		                     &data->val_out))
			return -EIO;
		break;
	case SIOCSMIIREG:
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
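/*
 * Illustration only (user space): exercising the MII path above, in the
 * style of mii-tool.  Reads the PHY's BMSR (register 1); the interface
 * name is an assumed example.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/mii.h>
#include <linux/sockios.h>

int main(void)
{
	struct ifreq ifr;
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", sizeof(ifr.ifr_name) - 1);

	if (ioctl(fd, SIOCGMIIPHY, &ifr) < 0)	/* fills mii->phy_id */
		return 1;
	mii->reg_num = MII_BMSR;
	if (ioctl(fd, SIOCGMIIREG, &ifr) < 0)
		return 1;
	printf("BMSR: 0x%04x\n", mii->val_out);
	return 0;
}
#endif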
/**
 * igb_hwtstamp_ioctl - control hardware time stamping
 * @netdev: network interface device structure
 * @ifr: interface request data
 * @cmd: ioctl command
 *
 * Outgoing time stamping can be enabled and disabled. Play nice and
 * disable it when requested, although it shouldn't cause any overhead
 * when no packet needs it. At most one packet in the queue may be
 * marked for time stamping, otherwise it would be impossible to tell
 * for sure to which packet the hardware time stamp belongs.
 *
 * Incoming time stamping has to be configured via the hardware
 * filters. Not all combinations are supported, in particular event
 * type has to be specified. Matching the kind of event packet is
 * not supported, with the exception of "all V2 events regardless of
 * level 2 or 4".
 **/
static int igb_hwtstamp_ioctl(struct net_device *netdev,
			      struct ifreq *ifr, int cmd)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct hwtstamp_config config;
	u32 tsync_tx_ctl_bit = E1000_TSYNCTXCTL_ENABLED;
	u32 tsync_rx_ctl_bit = E1000_TSYNCRXCTL_ENABLED;
	u32 tsync_rx_ctl_type = 0;
	u32 tsync_rx_cfg = 0;
	int is_l4 = 0;
	int is_l2 = 0;
	short port = 319; /* PTP */
	u32 regval;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		tsync_tx_ctl_bit = 0;
		break;
	case HWTSTAMP_TX_ON:
		tsync_tx_ctl_bit = E1000_TSYNCTXCTL_ENABLED;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		tsync_rx_ctl_bit = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_ALL:
		/*
		 * register TSYNCRXCFG must be set, therefore it is not
		 * possible to time stamp both Sync and Delay_Req messages
		 * => fall back to time stamping all packets
		 */
		tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_ALL;
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L4_V1;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
		is_l4 = 1;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L4_V1;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
		is_l4 = 1;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
		tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
		is_l2 = 1;
		is_l4 = 1;
		config.rx_filter = HWTSTAMP_FILTER_SOME;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
		is_l2 = 1;
		is_l4 = 1;
		config.rx_filter = HWTSTAMP_FILTER_SOME;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_EVENT_V2;
		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		is_l2 = 1;
		break;
	default:
		return -ERANGE;
	}

	/* enable/disable TX */
	regval = rd32(E1000_TSYNCTXCTL);
	regval = (regval & ~E1000_TSYNCTXCTL_ENABLED) | tsync_tx_ctl_bit;
	wr32(E1000_TSYNCTXCTL, regval);

	/* enable/disable RX, define which PTP packets are time stamped */
	regval = rd32(E1000_TSYNCRXCTL);
	regval = (regval & ~E1000_TSYNCRXCTL_ENABLED) | tsync_rx_ctl_bit;
	regval = (regval & ~0xE) | tsync_rx_ctl_type;
	wr32(E1000_TSYNCRXCTL, regval);
	wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);

	/*
	 * Ethertype Filter Queue Filter[0][15:0] = 0x88F7
	 *                                          (Ethertype to filter on)
	 * Ethertype Filter Queue Filter[0][26] = 0x1 (Enable filter)
	 * Ethertype Filter Queue Filter[0][30] = 0x1 (Enable Timestamping)
	 */
	wr32(E1000_ETQF0, is_l2 ? 0x440088f7 : 0);

	/* L4 Queue Filter[0]: only filter by source and destination port */
	wr32(E1000_SPQF0, htons(port));
	wr32(E1000_IMIREXT(0), is_l4 ?
	     ((1<<12) | (1<<19) /* bypass size and control flags */) : 0);
	wr32(E1000_IMIR(0), is_l4 ?
	     (htons(port)
	      | (0<<16) /* immediate interrupt disabled */
	      | 0 /* (1<<17) bit cleared: do not bypass
		     destination port check */)
	     : 0);
	wr32(E1000_FTQF0, is_l4 ?
	     (0x11 /* UDP */
	      | (1<<15) /* VF not compared */
	      | (1<<27) /* Enable Timestamping */
	      | (7<<28) /* only source port filter enabled,
			   source/target address and protocol
			   masked */)
	     : ((1<<15) | (15<<28) /* all mask bits set = filter not
				      enabled */));

	wrfl();

	adapter->hwtstamp_config = config;

	/* clear TX/RX time stamp registers, just to be sure */
	regval = rd32(E1000_TXSTMPH);
	regval = rd32(E1000_RXSTMPH);

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}
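/*
 * Illustration only (user space): driving the ioctl above.  Note the
 * driver may widen the request (the doc comment explains why), so the
 * granted config is read back from the same struct; "eth0" is an assumed
 * example name.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;
	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", sizeof(ifr.ifr_name) - 1);
	ifr.ifr_data = (void *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		return 1;
	/* e.g. a V2 SYNC request comes back as HWTSTAMP_FILTER_SOME here */
	printf("granted tx_type %d, rx_filter %d\n", cfg.tx_type, cfg.rx_filter);
	return 0;
}
#endif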
static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return igb_mii_ioctl(netdev, ifr, cmd);
	case SIOCSHWTSTAMP:
		return igb_hwtstamp_ioctl(netdev, ifr, cmd);
	default:
		return -EOPNOTSUPP;
	}
}
s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
	struct igb_adapter *adapter = hw->back;
	u16 cap_offset;

	cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
	if (!cap_offset)
		return -E1000_ERR_CONFIG;

	pci_read_config_word(adapter->pdev, cap_offset + reg, value);

	return 0;
}

s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
	struct igb_adapter *adapter = hw->back;
	u16 cap_offset;

	cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
	if (!cap_offset)
		return -E1000_ERR_CONFIG;

	pci_write_config_word(adapter->pdev, cap_offset + reg, *value);

	return 0;
}
static void igb_vlan_rx_register(struct net_device *netdev,
                                 struct vlan_group *grp)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl;

	igb_irq_disable(adapter);
	adapter->vlgrp = grp;

	if (grp) {
		/* enable VLAN tag insert/strip */
		ctrl = rd32(E1000_CTRL);
		ctrl |= E1000_CTRL_VME;
		wr32(E1000_CTRL, ctrl);

		/* enable VLAN receive filtering */
		rctl = rd32(E1000_RCTL);
		rctl &= ~E1000_RCTL_CFIEN;
		wr32(E1000_RCTL, rctl);
		igb_update_mng_vlan(adapter);
	} else {
		/* disable VLAN tag insert/strip */
		ctrl = rd32(E1000_CTRL);
		ctrl &= ~E1000_CTRL_VME;
		wr32(E1000_CTRL, ctrl);

		if (adapter->mng_vlan_id != (u16)IGB_MNG_VLAN_NONE) {
			igb_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
			adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
		}
	}

	igb_rlpml_set(adapter);

	if (!test_bit(__IGB_DOWN, &adapter->state))
		igb_irq_enable(adapter);
}
static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int pf_id = adapter->vfs_allocated_count;

	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
	    (vid == adapter->mng_vlan_id))
		return;

	/* add vid to vlvf if sr-iov is enabled,
	 * if that fails add directly to filter table */
	if (igb_vlvf_set(adapter, vid, true, pf_id))
		igb_vfta_set(hw, vid, true);
}
static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int pf_id = adapter->vfs_allocated_count;

	igb_irq_disable(adapter);
	vlan_group_set_device(adapter->vlgrp, vid, NULL);

	if (!test_bit(__IGB_DOWN, &adapter->state))
		igb_irq_enable(adapter);

	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
	    (vid == adapter->mng_vlan_id)) {
		/* release control to f/w */
		igb_release_hw_control(adapter);
		return;
	}

	/* remove vid from vlvf if sr-iov is enabled,
	 * if not in vlvf remove from vfta */
	if (igb_vlvf_set(adapter, vid, false, pf_id))
		igb_vfta_set(hw, vid, false);
}
static void igb_restore_vlan(struct igb_adapter *adapter)
{
	igb_vlan_rx_register(adapter->netdev, adapter->vlgrp);

	if (adapter->vlgrp) {
		u16 vid;
		for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
			if (!vlan_group_get_device(adapter->vlgrp, vid))
				continue;
			igb_vlan_rx_add_vid(adapter->netdev, vid);
		}
	}
}
int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
{
	struct e1000_mac_info *mac = &adapter->hw.mac;

	mac->autoneg = 0;

	switch (spddplx) {
	case SPEED_10 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_10_HALF;
		break;
	case SPEED_10 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_10_FULL;
		break;
	case SPEED_100 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_100_HALF;
		break;
	case SPEED_100 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_100_FULL;
		break;
	case SPEED_1000 + DUPLEX_FULL:
		mac->autoneg = 1;
		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case SPEED_1000 + DUPLEX_HALF: /* not supported */
	default:
		dev_err(&adapter->pdev->dev,
			"Unsupported Speed/Duplex configuration\n");
		return -EINVAL;
	}
	return 0;
}
static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl, status;
	u32 wufc = adapter->wol;
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev))
		igb_close(netdev);

	igb_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;
#endif

	status = rd32(E1000_STATUS);
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;

	if (wufc) {
		igb_setup_rctl(adapter);
		igb_set_rx_mode(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & E1000_WUFC_MC) {
			rctl = rd32(E1000_RCTL);
			rctl |= E1000_RCTL_MPE;
			wr32(E1000_RCTL, rctl);
		}

		ctrl = rd32(E1000_CTRL);
		/* advertise wake from D3Cold */
		#define E1000_CTRL_ADVD3WUC 0x00100000
		/* phy power management enable */
		#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
		ctrl |= E1000_CTRL_ADVD3WUC;
		wr32(E1000_CTRL, ctrl);

		/* Allow time for pending master requests to run */
		igb_disable_pcie_master(&adapter->hw);

		wr32(E1000_WUC, E1000_WUC_PME_EN);
		wr32(E1000_WUFC, wufc);
	} else {
		wr32(E1000_WUC, 0);
		wr32(E1000_WUFC, 0);
	}

	*enable_wake = wufc || adapter->en_mng_pt;
	if (!*enable_wake)
		igb_shutdown_serdes_link_82575(hw);

	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
	 * would have already happened in close and is redundant. */
	igb_release_hw_control(adapter);

	pci_disable_device(pdev);

	return 0;
}
#ifdef CONFIG_PM
static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int retval;
	bool wake;

	retval = __igb_shutdown(pdev, &wake);
	if (retval)
		return retval;

	if (wake) {
		pci_prepare_to_sleep(pdev);
	} else {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}
static int igb_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"igb: Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (igb_init_interrupt_scheme(adapter)) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	/* e1000_power_up_phy(adapter); */

	igb_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);

	wr32(E1000_WUS, ~0);

	if (netif_running(netdev)) {
		err = igb_open(netdev);
		if (err)
			return err;
	}

	netif_device_attach(netdev);

	return 0;
}
#endif /* CONFIG_PM */

static void igb_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__igb_shutdown(pdev, &wake);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void igb_netpoll(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int i;

	if (!adapter->msix_entries) {
		struct igb_q_vector *q_vector = adapter->q_vector[0];
		igb_irq_disable(adapter);
		napi_schedule(&q_vector->napi);
		return;
	}

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		wr32(E1000_EIMC, q_vector->eims_value);
		napi_schedule(&q_vector->napi);
	}
}
#endif /* CONFIG_NET_POLL_CONTROLLER */
/**
 * igb_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		igb_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * igb_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the igb_resume routine.
 */
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	pci_ers_result_t result;
	int err;

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);

		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);

		igb_reset(adapter);
		wr32(E1000_WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status "
		        "failed 0x%0x\n", err);
		/* non-fatal, continue */
	}

	return result;
}
/**
 * igb_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the igb_resume routine.
 */
static void igb_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (igb_up(adapter)) {
			dev_err(&pdev->dev, "igb_up failed after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);
}
static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
                             u8 qsel)
{
	u32 rar_low, rar_high;
	struct e1000_hw *hw = &adapter->hw;

	/* HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
	          ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));

	/* Indicate to hardware the Address is Valid. */
	rar_high |= E1000_RAH_AV;

	if (hw->mac.type == e1000_82575)
		rar_high |= E1000_RAH_POOL_1 * qsel;
	else
		rar_high |= E1000_RAH_POOL_1 << qsel;

	wr32(E1000_RAL(index), rar_low);
	wrfl();
	wr32(E1000_RAH(index), rar_high);
	wrfl();
}
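/*
 * Worked example for the packing above: for the (made up) address
 * 00:1b:21:aa:bb:cc, addr[0..3] give rar_low = 0xaa211b00 and addr[4..5]
 * give rar_high = 0x0000ccbb, which becomes 0x8000ccbb once E1000_RAH_AV
 * (bit 31) marks the entry valid.
 */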
static int igb_set_vf_mac(struct igb_adapter *adapter,
                          int vf, unsigned char *mac_addr)
{
	struct e1000_hw *hw = &adapter->hw;
	/* VF MAC addresses start at end of receive addresses and move
	 * towards the first, as a result a collision should not be possible */
	int rar_entry = hw->mac.rar_entry_count - (vf + 1);

	memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);

	igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf);

	return 0;
}
static void igb_vmm_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 reg;

	/* replication is not supported for 82575 */
	if (hw->mac.type == e1000_82575)
		return;

	/* enable replication vlan tag stripping */
	reg = rd32(E1000_RPLOLR);
	reg |= E1000_RPLOLR_STRVLAN;
	wr32(E1000_RPLOLR, reg);

	/* notify HW that the MAC is adding vlan tags */
	reg = rd32(E1000_DTXCTL);
	reg |= E1000_DTXCTL_VLAN_ADDED;
	wr32(E1000_DTXCTL, reg);

	if (adapter->vfs_allocated_count) {
		igb_vmdq_set_loopback_pf(hw, true);
		igb_vmdq_set_replication_pf(hw, true);
	} else {
		igb_vmdq_set_loopback_pf(hw, false);
		igb_vmdq_set_replication_pf(hw, false);