/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007-2009 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/net_tstamp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/aer.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
#include "igb.h"

#define DRV_VERSION "2.1.0-k2"
char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
				"Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] = "Copyright (c) 2007-2009 Intel Corporation.";

static const struct e1000_info *igb_info_tbl[] = {
	[board_82575] = &e1000_82575_info,
};

static struct pci_device_id igb_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
	/* required last entry */
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, igb_pci_tbl);

void igb_reset(struct igb_adapter *);
static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
static void igb_setup_mrqc(struct igb_adapter *);
void igb_update_stats(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void __devexit igb_remove(struct pci_dev *pdev);
static int igb_sw_init(struct igb_adapter *);
static int igb_open(struct net_device *);
static int igb_close(struct net_device *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
static void igb_clean_tx_ring(struct igb_ring *);
static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_rx_mode(struct net_device *);
static void igb_update_phy_info(unsigned long);
static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *);
static struct net_device_stats *igb_get_stats(struct net_device *);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static void igb_set_uta(struct igb_adapter *adapter);
static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
static irqreturn_t igb_msix_ring(int irq, void *);
#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *);
static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_IGB_DCA */
static bool igb_clean_tx_irq(struct igb_q_vector *);
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_rx_irq_adv(struct igb_q_vector *, int *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_rx_register(struct net_device *, struct vlan_group *);
static void igb_vlan_rx_add_vid(struct net_device *, u16);
static void igb_vlan_rx_kill_vid(struct net_device *, u16);
static void igb_restore_vlan(struct igb_adapter *);
static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32, u8);
static void igb_ping_all_vfs(struct igb_adapter *);
static void igb_msg_task(struct igb_adapter *);
static void igb_vmm_control(struct igb_adapter *);
static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);

#ifdef CONFIG_PM
static int igb_suspend(struct pci_dev *, pm_message_t);
static int igb_resume(struct pci_dev *);
#endif
static void igb_shutdown(struct pci_dev *);
#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
	.notifier_call	= igb_notify_dca,
	.next		= NULL,
	.priority	= 0
};
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void igb_netpoll(struct net_device *);
#endif
#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs = 0;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
                 "per physical function");
#endif /* CONFIG_PCI_IOV */

static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
		     pci_channel_state_t);
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
static void igb_io_resume(struct pci_dev *);

static struct pci_error_handlers igb_err_handler = {
	.error_detected = igb_io_error_detected,
	.slot_reset = igb_io_slot_reset,
	.resume = igb_io_resume,
};

static struct pci_driver igb_driver = {
	.name     = igb_driver_name,
	.id_table = igb_pci_tbl,
	.probe    = igb_probe,
	.remove   = __devexit_p(igb_remove),
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend  = igb_suspend,
	.resume   = igb_resume,
#endif
	.shutdown = igb_shutdown,
	.err_handler = &igb_err_handler
};

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/**
 * igb_read_clock - read raw cycle counter (to be used by time counter)
 */
static cycle_t igb_read_clock(const struct cyclecounter *tc)
{
	struct igb_adapter *adapter =
		container_of(tc, struct igb_adapter, cycles);
	struct e1000_hw *hw = &adapter->hw;
	u64 stamp = 0;
	int shift = 0;

	/*
	 * The timestamp latches on lowest register read. For the 82580
	 * the lowest register is SYSTIMR instead of SYSTIML.  However we never
	 * adjusted TIMINCA so SYSTIMR will just read as all 0s so ignore it.
	 */
	if (hw->mac.type == e1000_82580) {
		stamp = rd32(E1000_SYSTIMR) >> 8;
		shift = IGB_82580_TSYNC_SHIFT;
	}

	stamp |= (u64)rd32(E1000_SYSTIML) << shift;
	stamp |= (u64)rd32(E1000_SYSTIMH) << (shift + 32);
	return stamp;
}

/**
 * igb_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *igb_get_hw_dev_name(struct e1000_hw *hw)
{
	struct igb_adapter *adapter = hw->back;
	return adapter->netdev->name;
}

/**
 * igb_get_time_str - format current NIC and system time as string
 **/
static char *igb_get_time_str(struct igb_adapter *adapter,
			      char buffer[160])
{
	cycle_t hw = adapter->cycles.read(&adapter->cycles);
	struct timespec nic = ns_to_timespec(timecounter_read(&adapter->clock));
	struct timespec sys;
	struct timespec delta;
	getnstimeofday(&sys);

	delta = timespec_sub(nic, sys);

	sprintf(buffer,
		"HW %llu, NIC %ld.%09lus, SYS %ld.%09lus, NIC-SYS %lds + %09luns",
		(unsigned long long)hw,
		(long)nic.tv_sec, nic.tv_nsec,
		(long)sys.tv_sec, sys.tv_nsec,
		(long)delta.tv_sec, delta.tv_nsec);

	return buffer;
}

/**
 * igb_init_module - Driver Registration Routine
 *
 * igb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init igb_init_module(void)
{
	int ret;
	printk(KERN_INFO "%s - version %s\n",
	       igb_driver_string, igb_driver_version);

	printk(KERN_INFO "%s\n", igb_copyright);

#ifdef CONFIG_IGB_DCA
	dca_register_notify(&dca_notifier);
#endif
	ret = pci_register_driver(&igb_driver);
	return ret;
}

module_init(igb_init_module);

/**
 * igb_exit_module - Driver Exit Cleanup Routine
 *
 * igb_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit igb_exit_module(void)
{
#ifdef CONFIG_IGB_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&igb_driver);
}

module_exit(igb_exit_module);

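/* Maps a logical queue index onto an 82576 hardware queue index: even
 * logical queues land in the low bank (0-7) and odd ones in the high bank
 * (8-15), e.g. 0 -> 0, 1 -> 8, 2 -> 1, 3 -> 9.  This interleaving matches
 * the VF queue layout described in igb_cache_ring_register() below. */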
#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))

/**
 * igb_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 **/
static void igb_cache_ring_register(struct igb_adapter *adapter)
{
	int i = 0, j = 0;
	u32 rbase_offset = adapter->vfs_allocated_count;

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		/* The queues are allocated for virtualization such that VF 0
		 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
		 * In order to avoid collision we start at the first free queue
		 * and continue consuming queues in the same sequence
		 */
		if (adapter->vfs_allocated_count) {
			for (; i < adapter->rss_queues; i++)
				adapter->rx_ring[i].reg_idx = rbase_offset +
				                              Q_IDX_82576(i);
			for (; j < adapter->rss_queues; j++)
				adapter->tx_ring[j].reg_idx = rbase_offset +
				                              Q_IDX_82576(j);
		}
		/* fall through: remaining queues are mapped linearly below */
	case e1000_82575:
	case e1000_82580:
	default:
		for (; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i].reg_idx = rbase_offset + i;
		for (; j < adapter->num_tx_queues; j++)
			adapter->tx_ring[j].reg_idx = rbase_offset + j;
		break;
	}
}

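/**
 * igb_free_queues - free memory for all rings
 * @adapter: board private structure to initialize
 *
 * Releases the Tx/Rx ring arrays allocated by igb_alloc_queues() and zeroes
 * the queue counts so stale rings cannot be referenced afterwards.
 **/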
static void igb_free_queues(struct igb_adapter *adapter)
{
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	adapter->tx_ring = NULL;
	adapter->rx_ring = NULL;

	adapter->num_rx_queues = 0;
	adapter->num_tx_queues = 0;
}

/**
 * igb_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int igb_alloc_queues(struct igb_adapter *adapter)
{
	int i;

	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
	                           sizeof(struct igb_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		goto err;

	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
	                           sizeof(struct igb_ring), GFP_KERNEL);
	if (!adapter->rx_ring)
		goto err;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *ring = &(adapter->tx_ring[i]);
		ring->count = adapter->tx_ring_count;
		ring->queue_index = i;
		ring->pdev = adapter->pdev;
		ring->netdev = adapter->netdev;
		/* For 82575, context index must be unique per ring. */
		if (adapter->hw.mac.type == e1000_82575)
			ring->flags = IGB_RING_FLAG_TX_CTX_IDX;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = &(adapter->rx_ring[i]);
		ring->count = adapter->rx_ring_count;
		ring->queue_index = i;
		ring->pdev = adapter->pdev;
		ring->netdev = adapter->netdev;
		ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
		ring->flags = IGB_RING_FLAG_RX_CSUM; /* enable rx checksum */
		/* set flag indicating ring supports SCTP checksum offload */
		if (adapter->hw.mac.type >= e1000_82576)
			ring->flags |= IGB_RING_FLAG_RX_SCTP_CSUM;
	}

	igb_cache_ring_register(adapter);

	return 0;

err:
	igb_free_queues(adapter);

	return -ENOMEM;
}

#define IGB_N0_QUEUE -1

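/**
 * igb_assign_vector - route a q_vector's queue interrupts to one MSI-X vector
 * @q_vector: vector the rings are attached to
 * @msix_vector: hardware vector number to program into the routing registers
 *
 * Writes MSIXBM (82575) or the IVAR table (82576/82580) so that the rings
 * owned by this q_vector raise the given MSI-X vector, and records the
 * matching EIMS bit in q_vector->eims_value.
 **/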
static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	u32 msixbm = 0;
	u32 ivar, index;
	int rx_queue = IGB_N0_QUEUE;
	int tx_queue = IGB_N0_QUEUE;

	if (q_vector->rx_ring)
		rx_queue = q_vector->rx_ring->reg_idx;
	if (q_vector->tx_ring)
		tx_queue = q_vector->tx_ring->reg_idx;

	switch (hw->mac.type) {
	case e1000_82575:
		/* The 82575 assigns vectors using a bitmask, which matches the
		   bitmask for the EICR/EIMS/EIMC registers.  To assign one
		   or more queues to a vector, we write the appropriate bits
		   into the MSIXBM register for that vector. */
		if (rx_queue > IGB_N0_QUEUE)
			msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
		if (tx_queue > IGB_N0_QUEUE)
			msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
		if (!adapter->msix_entries && msix_vector == 0)
			msixbm |= E1000_EIMS_OTHER;
		array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
		q_vector->eims_value = msixbm;
		break;
	case e1000_82576:
		/* 82576 uses a table-based method for assigning vectors.
		   Each queue has a single entry in the table to which we write
		   a vector number along with a "valid" bit.  Sadly, the layout
		   of the table is somewhat counterintuitive. */
		if (rx_queue > IGB_N0_QUEUE) {
			index = (rx_queue & 0x7);
			ivar = array_rd32(E1000_IVAR0, index);
			if (rx_queue < 8) {
				/* vector goes into low byte of register */
				ivar = ivar & 0xFFFFFF00;
				ivar |= msix_vector | E1000_IVAR_VALID;
			} else {
				/* vector goes into third byte of register */
				ivar = ivar & 0xFF00FFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
			}
			array_wr32(E1000_IVAR0, index, ivar);
		}
		if (tx_queue > IGB_N0_QUEUE) {
			index = (tx_queue & 0x7);
			ivar = array_rd32(E1000_IVAR0, index);
			if (tx_queue < 8) {
				/* vector goes into second byte of register */
				ivar = ivar & 0xFFFF00FF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
			} else {
				/* vector goes into high byte of register */
				ivar = ivar & 0x00FFFFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
			}
			array_wr32(E1000_IVAR0, index, ivar);
		}
		q_vector->eims_value = 1 << msix_vector;
		break;
	case e1000_82580:
		/* 82580 uses the same table-based approach as 82576 but has
		   fewer entries; as a result we carry over for queues greater
		   than 4. */
		if (rx_queue > IGB_N0_QUEUE) {
			index = (rx_queue >> 1);
			ivar = array_rd32(E1000_IVAR0, index);
			if (rx_queue & 0x1) {
				/* vector goes into third byte of register */
				ivar = ivar & 0xFF00FFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
			} else {
				/* vector goes into low byte of register */
				ivar = ivar & 0xFFFFFF00;
				ivar |= msix_vector | E1000_IVAR_VALID;
			}
			array_wr32(E1000_IVAR0, index, ivar);
		}
		if (tx_queue > IGB_N0_QUEUE) {
			index = (tx_queue >> 1);
			ivar = array_rd32(E1000_IVAR0, index);
			if (tx_queue & 0x1) {
				/* vector goes into high byte of register */
				ivar = ivar & 0x00FFFFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
			} else {
				/* vector goes into second byte of register */
				ivar = ivar & 0xFFFF00FF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
			}
			array_wr32(E1000_IVAR0, index, ivar);
		}
		q_vector->eims_value = 1 << msix_vector;
		break;
	default:
		BUG();
		break;
	}

	/* add q_vector eims value to global eims_enable_mask */
	adapter->eims_enable_mask |= q_vector->eims_value;

	/* configure q_vector to set itr on first interrupt */
	q_vector->set_itr = 1;
}

/**
 * igb_configure_msix - Configure MSI-X hardware
 *
 * igb_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void igb_configure_msix(struct igb_adapter *adapter)
{
	u32 tmp;
	int i, vector = 0;
	struct e1000_hw *hw = &adapter->hw;

	adapter->eims_enable_mask = 0;

	/* set vector for other causes, i.e. link changes */
	switch (hw->mac.type) {
	case e1000_82575:
		tmp = rd32(E1000_CTRL_EXT);
		/* enable MSI-X PBA support*/
		tmp |= E1000_CTRL_EXT_PBA_CLR;

		/* Auto-Mask interrupts upon ICR read. */
		tmp |= E1000_CTRL_EXT_EIAME;
		tmp |= E1000_CTRL_EXT_IRCA;

		wr32(E1000_CTRL_EXT, tmp);

		/* enable msix_other interrupt */
		array_wr32(E1000_MSIXBM(0), vector++,
		           E1000_EIMS_OTHER);
		adapter->eims_other = E1000_EIMS_OTHER;

		break;

	case e1000_82576:
	case e1000_82580:
		/* Turn on MSI-X capability first, or our settings
		 * won't stick.  And it will take days to debug. */
		wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
		     E1000_GPIE_PBA | E1000_GPIE_EIAME |
		     E1000_GPIE_NSICR);

		/* enable msix_other interrupt */
		adapter->eims_other = 1 << vector;
		tmp = (vector++ | E1000_IVAR_VALID) << 8;

		wr32(E1000_IVAR_MISC, tmp);
		break;
	default:
		/* do nothing, since nothing else supports MSI-X */
		break;
	} /* switch (hw->mac.type) */

	adapter->eims_enable_mask |= adapter->eims_other;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		igb_assign_vector(q_vector, vector++);
		adapter->eims_enable_mask |= q_vector->eims_value;
	}

	wrfl();
}

/**
 * igb_request_msix - Initialize MSI-X interrupts
 *
 * igb_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int igb_request_msix(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	int i, err = 0, vector = 0;

	err = request_irq(adapter->msix_entries[vector].vector,
	                  igb_msix_other, 0, netdev->name, adapter);
	if (err)
		goto out;
	vector++;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];

		q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);

		if (q_vector->rx_ring && q_vector->tx_ring)
			sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
			        q_vector->rx_ring->queue_index);
		else if (q_vector->tx_ring)
			sprintf(q_vector->name, "%s-tx-%u", netdev->name,
			        q_vector->tx_ring->queue_index);
		else if (q_vector->rx_ring)
			sprintf(q_vector->name, "%s-rx-%u", netdev->name,
			        q_vector->rx_ring->queue_index);
		else
			sprintf(q_vector->name, "%s-unused", netdev->name);

		err = request_irq(adapter->msix_entries[vector].vector,
		                  igb_msix_ring, 0, q_vector->name,
		                  q_vector);
		if (err)
			goto out;
		vector++;
	}

	igb_configure_msix(adapter);
	return 0;
out:
	return err;
}

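/**
 * igb_reset_interrupt_capability - fall back to legacy interrupt mode
 * @adapter: board private structure
 *
 * Releases any MSI-X or MSI resources held by the driver so that the
 * device reverts to a single legacy INTx interrupt.
 **/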
static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IGB_FLAG_HAS_MSI) {
		pci_disable_msi(adapter->pdev);
	}
}

/**
 * igb_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void igb_free_q_vectors(struct igb_adapter *adapter)
{
	int v_idx;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
		adapter->q_vector[v_idx] = NULL;
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
	}
	adapter->num_q_vectors = 0;
}

/**
 * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
 *
 * This function resets the device so that it has 0 rx queues, tx queues, and
 * MSI-X interrupts allocated.
 */
static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
{
	igb_free_queues(adapter);
	igb_free_q_vectors(adapter);
	igb_reset_interrupt_capability(adapter);
}

/**
 * igb_set_interrupt_capability - set MSI or MSI-X if supported
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static void igb_set_interrupt_capability(struct igb_adapter *adapter)
{
	int err;
	int numvecs, i;

	/* Number of supported queues. */
	adapter->num_rx_queues = adapter->rss_queues;
	adapter->num_tx_queues = adapter->rss_queues;

	/* start with one vector for every rx queue */
	numvecs = adapter->num_rx_queues;

	/* if tx handler is separate add 1 for every tx queue */
	if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
		numvecs += adapter->num_tx_queues;

	/* store the number of vectors reserved for queues */
	adapter->num_q_vectors = numvecs;

	/* add 1 vector for link status interrupts */
	numvecs++;
	adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
	                                GFP_KERNEL);
	if (!adapter->msix_entries)
		goto msi_only;

	for (i = 0; i < numvecs; i++)
		adapter->msix_entries[i].entry = i;

	err = pci_enable_msix(adapter->pdev,
	                      adapter->msix_entries,
	                      numvecs);
	if (err == 0)
		goto out;

	igb_reset_interrupt_capability(adapter);

	/* If we can't do MSI-X, try MSI */
msi_only:
#ifdef CONFIG_PCI_IOV
	/* disable SR-IOV for non MSI-X configurations */
	if (adapter->vf_data) {
		struct e1000_hw *hw = &adapter->hw;
		/* disable iov and allow time for transactions to clear */
		pci_disable_sriov(adapter->pdev);
		msleep(500);

		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		msleep(100);
		dev_info(&adapter->pdev->dev, "IOV Disabled\n");
	}
#endif
	adapter->vfs_allocated_count = 0;
	adapter->rss_queues = 1;
	adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_q_vectors = 1;
	if (!pci_enable_msi(adapter->pdev))
		adapter->flags |= IGB_FLAG_HAS_MSI;
out:
	/* Notify the stack of the (possibly) reduced Tx Queue count. */
	adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
}

/**
 * igb_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int igb_alloc_q_vectors(struct igb_adapter *adapter)
{
	struct igb_q_vector *q_vector;
	struct e1000_hw *hw = &adapter->hw;
	int v_idx;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		q_vector = kzalloc(sizeof(struct igb_q_vector), GFP_KERNEL);
		if (!q_vector)
			goto err_out;
		q_vector->adapter = adapter;
		q_vector->itr_shift = (hw->mac.type == e1000_82575) ? 16 : 0;
		q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
		q_vector->itr_val = IGB_START_ITR;
		q_vector->set_itr = 1;
		netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
		adapter->q_vector[v_idx] = q_vector;
	}

	return 0;

err_out:
	for (v_idx--; v_idx >= 0; v_idx--) {
		q_vector = adapter->q_vector[v_idx];
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
		adapter->q_vector[v_idx] = NULL;
	}

	return -ENOMEM;
}

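/*
 * The two helpers below attach one rx (or tx) ring to a q_vector and seed the
 * vector's ITR from the user-configurable rx/tx_itr_setting; settings of 1-3
 * select a dynamic ITR mode, so the hardware is started at IGB_START_ITR
 * until the first adjustment.
 */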
static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
                                      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector;

	q_vector = adapter->q_vector[v_idx];
	q_vector->rx_ring = &adapter->rx_ring[ring_idx];
	q_vector->rx_ring->q_vector = q_vector;
	q_vector->itr_val = adapter->rx_itr_setting;
	if (q_vector->itr_val && q_vector->itr_val <= 3)
		q_vector->itr_val = IGB_START_ITR;
}

static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
                                      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector;

	q_vector = adapter->q_vector[v_idx];
	q_vector->tx_ring = &adapter->tx_ring[ring_idx];
	q_vector->tx_ring->q_vector = q_vector;
	q_vector->itr_val = adapter->tx_itr_setting;
	if (q_vector->itr_val && q_vector->itr_val <= 3)
		q_vector->itr_val = IGB_START_ITR;
}

/**
 * igb_map_ring_to_vector - maps allocated queues to vectors
 *
 * This function maps the recently allocated queues to vectors.
 **/
static int igb_map_ring_to_vector(struct igb_adapter *adapter)
{
	int i;
	int v_idx = 0;

	if ((adapter->num_q_vectors < adapter->num_rx_queues) ||
	    (adapter->num_q_vectors < adapter->num_tx_queues))
		return -ENOMEM;

	if (adapter->num_q_vectors >=
	    (adapter->num_rx_queues + adapter->num_tx_queues)) {
		for (i = 0; i < adapter->num_rx_queues; i++)
			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
		for (i = 0; i < adapter->num_tx_queues; i++)
			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
	} else {
		for (i = 0; i < adapter->num_rx_queues; i++) {
			if (i < adapter->num_tx_queues)
				igb_map_tx_ring_to_vector(adapter, i, v_idx);
			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
		}
		for (; i < adapter->num_tx_queues; i++)
			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
	}
	return 0;
}

/**
 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
 *
 * This function initializes the interrupts and allocates all of the queues.
 **/
static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int err;

	igb_set_interrupt_capability(adapter);

	err = igb_alloc_q_vectors(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
		goto err_alloc_q_vectors;
	}

	err = igb_alloc_queues(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	err = igb_map_ring_to_vector(adapter);
	if (err) {
		dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n");
		goto err_map_queues;
	}

	return 0;
err_map_queues:
	igb_free_queues(adapter);
err_alloc_queues:
	igb_free_q_vectors(adapter);
err_alloc_q_vectors:
	igb_reset_interrupt_capability(adapter);
	return err;
}

/**
 * igb_request_irq - initialize interrupts
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_request_irq(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	int err = 0;

	if (adapter->msix_entries) {
		err = igb_request_msix(adapter);
		if (!err)
			goto request_done;
		/* fall back to MSI */
		igb_clear_interrupt_scheme(adapter);
		if (!pci_enable_msi(adapter->pdev))
			adapter->flags |= IGB_FLAG_HAS_MSI;
		igb_free_all_tx_resources(adapter);
		igb_free_all_rx_resources(adapter);
		adapter->num_tx_queues = 1;
		adapter->num_rx_queues = 1;
		adapter->num_q_vectors = 1;
		err = igb_alloc_q_vectors(adapter);
		if (err) {
			dev_err(&pdev->dev,
			        "Unable to allocate memory for vectors\n");
			goto request_done;
		}
		err = igb_alloc_queues(adapter);
		if (err) {
			dev_err(&pdev->dev,
			        "Unable to allocate memory for queues\n");
			igb_free_q_vectors(adapter);
			goto request_done;
		}
		igb_setup_all_tx_resources(adapter);
		igb_setup_all_rx_resources(adapter);
	} else {
		igb_assign_vector(adapter->q_vector[0], 0);
	}

	if (adapter->flags & IGB_FLAG_HAS_MSI) {
		err = request_irq(adapter->pdev->irq, igb_intr_msi, 0,
		                  netdev->name, adapter);
		if (!err)
			goto request_done;

		/* fall back to legacy interrupts */
		igb_reset_interrupt_capability(adapter);
		adapter->flags &= ~IGB_FLAG_HAS_MSI;
	}

	err = request_irq(adapter->pdev->irq, igb_intr, IRQF_SHARED,
	                  netdev->name, adapter);

	if (err)
		dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n",
		        err);

request_done:
	return err;
}

static void igb_free_irq(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		int vector = 0, i;

		free_irq(adapter->msix_entries[vector++].vector, adapter);

		for (i = 0; i < adapter->num_q_vectors; i++) {
			struct igb_q_vector *q_vector = adapter->q_vector[i];
			free_irq(adapter->msix_entries[vector++].vector,
			         q_vector);
		}
	} else {
		free_irq(adapter->pdev->irq, adapter);
	}
}

/**
 * igb_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void igb_irq_disable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/*
	 * we need to be careful when disabling interrupts.  The VFs are also
	 * mapped into these registers and so clearing the bits can cause
	 * issues on the VF drivers so we only need to clear what we set
	 */
	if (adapter->msix_entries) {
		u32 regval = rd32(E1000_EIAM);
		wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
		wr32(E1000_EIMC, adapter->eims_enable_mask);
		regval = rd32(E1000_EIAC);
		wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
	}

	wr32(E1000_IAM, 0);
	wr32(E1000_IMC, ~0);
	wrfl();
	synchronize_irq(adapter->pdev->irq);
}

/**
 * igb_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void igb_irq_enable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->msix_entries) {
		u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC;
		u32 regval = rd32(E1000_EIAC);
		wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
		regval = rd32(E1000_EIAM);
		wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
		wr32(E1000_EIMS, adapter->eims_enable_mask);
		if (adapter->vfs_allocated_count) {
			wr32(E1000_MBVFIMR, 0xFF);
			ims |= E1000_IMS_VMMB;
		}
		if (adapter->hw.mac.type == e1000_82580)
			ims |= E1000_IMS_DRSTA;

		wr32(E1000_IMS, ims);
	} else {
		wr32(E1000_IMS, IMS_ENABLE_MASK |
		     E1000_IMS_DRSTA);
		wr32(E1000_IAM, IMS_ENABLE_MASK |
		     E1000_IMS_DRSTA);
	}
}

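/*
 * Keep the VLAN filter table in sync with the manageability firmware's DHCP
 * cookie: add the cookie's VLAN id and remove the previously cached id once
 * no software VLAN device references it anymore.
 */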
static void igb_update_mng_vlan(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 vid = adapter->hw.mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;

	if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
		/* add VID to filter table */
		igb_vfta_set(hw, vid, true);
		adapter->mng_vlan_id = vid;
	} else {
		adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
	}

	if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
	    (vid != old_vid) &&
	    !vlan_group_get_device(adapter->vlgrp, old_vid)) {
		/* remove VID from filter table */
		igb_vfta_set(hw, old_vid, false);
	}
}

/**
 * igb_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 *
 **/
static void igb_release_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
	     ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 *
 **/
static void igb_get_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
	     ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 **/
static void igb_configure(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	igb_get_hw_control(adapter);
	igb_set_rx_mode(netdev);

	igb_restore_vlan(adapter);

	igb_setup_tctl(adapter);
	igb_setup_mrqc(adapter);
	igb_setup_rctl(adapter);

	igb_configure_tx(adapter);
	igb_configure_rx(adapter);

	igb_rx_fifo_flush_82575(&adapter->hw);

	/* call igb_desc_unused which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = &adapter->rx_ring[i];
		igb_alloc_rx_buffers_adv(ring, igb_desc_unused(ring));
	}

	adapter->tx_queue_len = netdev->tx_queue_len;
}

/**
 * igb_up - Open the interface and prepare it to handle traffic
 * @adapter: board private structure
 **/
int igb_up(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	/* hardware has been reset, we need to reload some things */
	igb_configure(adapter);

	clear_bit(__IGB_DOWN, &adapter->state);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		napi_enable(&q_vector->napi);
	}
	if (adapter->msix_entries)
		igb_configure_msix(adapter);
	else
		igb_assign_vector(adapter->q_vector[0], 0);

	/* Clear any pending interrupts. */
	rd32(E1000_ICR);
	igb_irq_enable(adapter);

	/* notify VFs that reset has been completed */
	if (adapter->vfs_allocated_count) {
		u32 reg_data = rd32(E1000_CTRL_EXT);
		reg_data |= E1000_CTRL_EXT_PFRSTD;
		wr32(E1000_CTRL_EXT, reg_data);
	}

	netif_tx_start_all_queues(adapter->netdev);

	/* start the watchdog. */
	hw->mac.get_link_status = 1;
	schedule_work(&adapter->watchdog_task);

	return 0;
}

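/**
 * igb_down - Close the interface and stop all Tx/Rx activity
 * @adapter: board private structure
 **/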
void igb_down(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	u32 tctl, rctl;
	int i;

	/* signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer */
	set_bit(__IGB_DOWN, &adapter->state);

	/* disable receives in the hardware */
	rctl = rd32(E1000_RCTL);
	wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_tx_stop_all_queues(netdev);

	/* disable transmits in the hardware */
	tctl = rd32(E1000_TCTL);
	tctl &= ~E1000_TCTL_EN;
	wr32(E1000_TCTL, tctl);
	/* flush both disables and wait for them to finish */
	wrfl();
	msleep(10);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		napi_disable(&q_vector->napi);
	}

	igb_irq_disable(adapter);

	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	netdev->tx_queue_len = adapter->tx_queue_len;
	netif_carrier_off(netdev);

	/* record the stats before reset*/
	igb_update_stats(adapter);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	if (!pci_channel_offline(adapter->pdev))
		igb_reset(adapter);
	igb_clean_all_tx_rings(adapter);
	igb_clean_all_rx_rings(adapter);
#ifdef CONFIG_IGB_DCA
	/* since we reset the hardware DCA settings were cleared */
	igb_setup_dca(adapter);
#endif
}

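/**
 * igb_reinit_locked - take the interface down and back up
 * @adapter: board private structure
 *
 * Serialized against concurrent resets via the __IGB_RESETTING state bit.
 **/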
void igb_reinit_locked(struct igb_adapter *adapter)
{
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		msleep(1);
	igb_down(adapter);
	igb_up(adapter);
	clear_bit(__IGB_RESETTING, &adapter->state);
}

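/**
 * igb_reset - bring the hardware into a known good state
 * @adapter: board private structure
 *
 * Repartitions the packet buffer between Tx and Rx, recomputes the flow
 * control watermarks, resets and re-initializes the MAC, and restores
 * manageability VLAN and PHY state.
 **/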
void igb_reset(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_mac_info *mac = &hw->mac;
	struct e1000_fc_info *fc = &hw->fc;
	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
	u16 hwm;

	/* Repartition Pba for greater than 9k mtu
	 * To take effect CTRL.RST is required.
	 */
	switch (mac->type) {
	case e1000_82580:
		pba = rd32(E1000_RXPBS);
		pba = igb_rxpbs_adjust_82580(pba);
		break;
	case e1000_82576:
		pba = rd32(E1000_RXPBS);
		pba &= E1000_RXPBS_SIZE_MASK_82576;
		break;
	case e1000_82575:
	default:
		pba = E1000_PBA_34K;
		break;
	}

	if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
	    (mac->type < e1000_82576)) {
		/* adjust PBA for jumbo frames */
		wr32(E1000_PBA, pba);

		/* To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB.  Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB. */
		pba = rd32(E1000_PBA);
		/* upper 16 bits has Tx packet buffer allocation size in KB */
		tx_space = pba >> 16;
		/* lower 16 bits has Rx packet buffer allocation size in KB */
		pba &= 0xffff;
		/* the tx fifo also stores 16 bytes of information about the tx
		 * but don't include ethernet FCS because hardware appends it */
		min_tx_space = (adapter->max_frame_size +
				sizeof(union e1000_adv_tx_desc) -
				ETH_FCS_LEN) * 2;
		min_tx_space = ALIGN(min_tx_space, 1024);
		min_tx_space >>= 10;
		/* software strips receive CRC, so leave room for it */
		min_rx_space = adapter->max_frame_size;
		min_rx_space = ALIGN(min_rx_space, 1024);
		min_rx_space >>= 10;

		/* If current Tx allocation is less than the min Tx FIFO size,
		 * and the min Tx FIFO size is less than the current Rx FIFO
		 * allocation, take space away from current Rx allocation */
		if (tx_space < min_tx_space &&
		    ((min_tx_space - tx_space) < pba)) {
			pba = pba - (min_tx_space - tx_space);

			/* if short on rx space, rx wins and must trump tx
			 * adjustment */
			if (pba < min_rx_space)
				pba = min_rx_space;
		}
		wr32(E1000_PBA, pba);
	}

	/* flow control settings */
	/* The high water mark must be low enough to fit one full frame
	 * (or the size used for early receive) above it in the Rx FIFO.
	 * Set it to the lower of:
	 * - 90% of the Rx FIFO size, or
	 * - the full Rx FIFO size minus one full frame */
	hwm = min(((pba << 10) * 9 / 10),
		  ((pba << 10) - 2 * adapter->max_frame_size));

	fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */
	fc->low_water = fc->high_water - 16;
	fc->pause_time = 0xFFFF;
	fc->send_xon = 1;
	fc->current_mode = fc->requested_mode;

	/* disable receive for all VFs and wait one second */
	if (adapter->vfs_allocated_count) {
		int i;
		for (i = 0 ; i < adapter->vfs_allocated_count; i++)
			adapter->vf_data[i].flags = 0;

		/* ping all the active vfs to let them know we are going down */
		igb_ping_all_vfs(adapter);

		/* disable transmits and receives */
		wr32(E1000_VFRE, 0);
		wr32(E1000_VFTE, 0);
	}

	/* Allow time for pending master requests to run */
	hw->mac.ops.reset_hw(hw);
	wr32(E1000_WUC, 0);

	if (hw->mac.ops.init_hw(hw))
		dev_err(&pdev->dev, "Hardware Error\n");

	if (hw->mac.type == e1000_82580) {
		u32 reg = rd32(E1000_PCIEMISC);
		wr32(E1000_PCIEMISC,
		     reg & ~E1000_PCIEMISC_LX_DECISION);
	}
	igb_update_mng_vlan(adapter);

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);

	igb_reset_adaptive(hw);
	igb_get_phy_info(hw);
}

static const struct net_device_ops igb_netdev_ops = {
	.ndo_open		= igb_open,
	.ndo_stop		= igb_close,
	.ndo_start_xmit		= igb_xmit_frame_adv,
	.ndo_get_stats		= igb_get_stats,
	.ndo_set_rx_mode	= igb_set_rx_mode,
	.ndo_set_multicast_list	= igb_set_rx_mode,
	.ndo_set_mac_address	= igb_set_mac,
	.ndo_change_mtu		= igb_change_mtu,
	.ndo_do_ioctl		= igb_ioctl,
	.ndo_tx_timeout		= igb_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_register	= igb_vlan_rx_register,
	.ndo_vlan_rx_add_vid	= igb_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= igb_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= igb_netpoll,
#endif
};

/**
 * igb_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in igb_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * igb_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit igb_probe(struct pci_dev *pdev,
			       const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct igb_adapter *adapter;
	struct e1000_hw *hw;
	u16 eeprom_data = 0;
	static int global_quad_port_a; /* global quad port a indication */
	const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
	unsigned long mmio_start, mmio_len;
	int err, pci_using_dac;
	u16 eeprom_apme_mask = IGB_EEPROM_APME;
	u32 part_num;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	pci_using_dac = 0;
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!err) {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (!err)
			pci_using_dac = 1;
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
			if (err) {
				dev_err(&pdev->dev, "No usable DMA "
					"configuration, aborting\n");
				goto err_dma;
			}
		}
	}

	err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
	                                   IORESOURCE_MEM),
	                                   igb_driver_name);
	if (err)
		goto err_pci_reg;

	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);
	pci_save_state(pdev);

	err = -ENOMEM;
	netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
	                           IGB_ABS_MAX_TX_QUEUES);
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE;

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);

	err = -EIO;
	hw->hw_addr = ioremap(mmio_start, mmio_len);
	if (!hw->hw_addr)
		goto err_ioremap;

	netdev->netdev_ops = &igb_netdev_ops;
	igb_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;

	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	netdev->mem_start = mmio_start;
	netdev->mem_end = mmio_start + mmio_len;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	/* Copy the default MAC, PHY and NVM function pointers */
	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
	memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
	/* Initialize skew-specific constants */
	err = ei->get_invariants(hw);
	if (err)
		goto err_sw_init;

	/* setup the private structure */
	err = igb_sw_init(adapter);
	if (err)
		goto err_sw_init;

	igb_get_bus_info_pcie(hw);

	hw->phy.autoneg_wait_to_complete = false;
	hw->mac.adaptive_ifs = true;

	/* Copper options */
	if (hw->phy.media_type == e1000_media_type_copper) {
		hw->phy.mdix = AUTO_ALL_MODES;
		hw->phy.disable_polarity_correction = false;
		hw->phy.ms_type = e1000_ms_hw_default;
	}

	if (igb_check_reset_block(hw))
		dev_info(&pdev->dev,
			"PHY reset is blocked due to SOL/IDER session.\n");

	netdev->features = NETIF_F_SG |
			   NETIF_F_IP_CSUM |
			   NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_RX |
			   NETIF_F_HW_VLAN_FILTER;

	netdev->features |= NETIF_F_IPV6_CSUM;
	netdev->features |= NETIF_F_TSO;
	netdev->features |= NETIF_F_TSO6;
	netdev->features |= NETIF_F_GRO;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	if (hw->mac.type >= e1000_82576)
		netdev->features |= NETIF_F_SCTP_CSUM;

	adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);

	/* before reading the NVM, reset the controller to put the device in a
	 * known good starting state */
	hw->mac.ops.reset_hw(hw);

	/* make sure the NVM is good */
	if (igb_validate_nvm_checksum(hw) < 0) {
		dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
		err = -EIO;
		goto err_eeprom;
	}

	/* copy the MAC address out of the NVM */
	if (hw->mac.ops.read_mac_addr(hw))
		dev_err(&pdev->dev, "NVM Read Error\n");

	memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
	memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr)) {
		dev_err(&pdev->dev, "Invalid MAC Address\n");
		err = -EIO;
		goto err_eeprom;
	}

	setup_timer(&adapter->watchdog_timer, &igb_watchdog,
	            (unsigned long) adapter);
	setup_timer(&adapter->phy_info_timer, &igb_update_phy_info,
	            (unsigned long) adapter);

	INIT_WORK(&adapter->reset_task, igb_reset_task);
	INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);

	/* Initialize link properties that are user-changeable */
	adapter->fc_autoneg = true;
	hw->mac.autoneg = true;
	hw->phy.autoneg_advertised = 0x2f;

	hw->fc.requested_mode = e1000_fc_default;
	hw->fc.current_mode = e1000_fc_default;

	igb_validate_mdi_setting(hw);

	/* Initial Wake on LAN setting If APM wake is enabled in the EEPROM,
	 * enable the ACPI Magic Packet filter
	 */

	if (hw->bus.func == 0)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
	else if (hw->mac.type == e1000_82580)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
		                 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
		                 &eeprom_data);
	else if (hw->bus.func == 1)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);

	if (eeprom_data & eeprom_apme_mask)
		adapter->eeprom_wol |= E1000_WUFC_MAG;

	/* now that we have the eeprom settings, apply the special cases where
	 * the eeprom may be wrong or the board simply won't support wake on
	 * lan on a particular port */
	switch (pdev->device) {
	case E1000_DEV_ID_82575GB_QUAD_COPPER:
		adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82575EB_FIBER_SERDES:
	case E1000_DEV_ID_82576_FIBER:
	case E1000_DEV_ID_82576_SERDES:
		/* Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting */
		if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
			adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82576_QUAD_COPPER:
		/* if quad port adapter, disable WoL on all but port A */
		if (global_quad_port_a != 0)
			adapter->eeprom_wol = 0;
		else
			adapter->flags |= IGB_FLAG_QUAD_PORT_A;
		/* Reset for multiple quad port adapters */
		if (++global_quad_port_a == 4)
			global_quad_port_a = 0;
		break;
	}

	/* initialize the wol settings based on the eeprom settings */
	adapter->wol = adapter->eeprom_wol;
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* reset the hardware with the new settings */
	igb_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

#ifdef CONFIG_IGB_DCA
	if (dca_add_requester(&pdev->dev) == 0) {
		adapter->flags |= IGB_FLAG_DCA_ENABLED;
		dev_info(&pdev->dev, "DCA enabled\n");
		igb_setup_dca(adapter);
	}

#endif
	dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
	/* print bus type/speed/width info */
	dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
		 netdev->name,
		 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
		                                            "unknown"),
		 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
		  (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
		  (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
		   "unknown"),
		 netdev->dev_addr);

	igb_read_part_num(hw, &part_num);
	dev_info(&pdev->dev, "%s: PBA No: %06x-%03x\n", netdev->name,
		(part_num >> 8), (part_num & 0xff));

	dev_info(&pdev->dev,
		"Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
		adapter->msix_entries ? "MSI-X" :
		(adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
		adapter->num_rx_queues, adapter->num_tx_queues);

	return 0;

err_register:
	igb_release_hw_control(adapter);
err_eeprom:
	if (!igb_check_reset_block(hw))
		igb_reset_phy(hw);

	if (hw->flash_address)
		iounmap(hw->flash_address);
err_sw_init:
	igb_clear_interrupt_scheme(adapter);
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev,
	                             pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * igb_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * igb_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit igb_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/* flush_scheduled_work() may reschedule our watchdog task, so
	 * explicitly disable watchdog tasks from being rescheduled */
	set_bit(__IGB_DOWN, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	flush_scheduled_work();

#ifdef CONFIG_IGB_DCA
	if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
		dev_info(&pdev->dev, "DCA disabled\n");
		dca_remove_requester(&pdev->dev);
		adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
		wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
	}
#endif

	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
	 * would have already happened in close and is redundant. */
	igb_release_hw_control(adapter);

	unregister_netdev(netdev);

	if (!igb_check_reset_block(hw))
		igb_reset_phy(hw);

	igb_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PCI_IOV
	/* reclaim resources allocated to VFs */
	if (adapter->vf_data) {
		/* disable iov and allow time for transactions to clear */
		pci_disable_sriov(pdev);
		msleep(500);

		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		msleep(100);
		dev_info(&pdev->dev, "IOV Disabled\n");
	}
#endif

	iounmap(hw->hw_addr);
	if (hw->flash_address)
		iounmap(hw->flash_address);
	pci_release_selected_regions(pdev,
	                             pci_select_bars(pdev, IORESOURCE_MEM));

	free_netdev(netdev);

	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}

/**
 * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
 * @adapter: board private structure to initialize
 *
 * This function initializes the vf specific data storage and then attempts to
 * allocate the VFs.  The reason for ordering it this way is because it is much
 * more expensive time wise to disable SR-IOV than it is to allocate and free
 * the memory for the VFs.
 **/
static void __devinit igb_probe_vfs(struct igb_adapter * adapter)
{
#ifdef CONFIG_PCI_IOV
	struct pci_dev *pdev = adapter->pdev;

	if (adapter->vfs_allocated_count > 7)
		adapter->vfs_allocated_count = 7;

	if (adapter->vfs_allocated_count) {
		adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
		                           sizeof(struct vf_data_storage),
		                           GFP_KERNEL);
		/* if allocation failed then we do not support SR-IOV */
		if (!adapter->vf_data) {
			adapter->vfs_allocated_count = 0;
			dev_err(&pdev->dev, "Unable to allocate memory for VF "
			        "Data Storage\n");
		}
	}

	if (pci_enable_sriov(pdev, adapter->vfs_allocated_count)) {
		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
#endif /* CONFIG_PCI_IOV */
		adapter->vfs_allocated_count = 0;
#ifdef CONFIG_PCI_IOV
	} else {
		unsigned char mac_addr[ETH_ALEN];
		int i;
		dev_info(&pdev->dev, "%d vfs allocated\n",
		         adapter->vfs_allocated_count);
		for (i = 0; i < adapter->vfs_allocated_count; i++) {
			random_ether_addr(mac_addr);
			igb_set_vf_mac(adapter, i, mac_addr);
		}
	}
#endif /* CONFIG_PCI_IOV */
}

/**
 * igb_init_hw_timer - Initialize hardware timer used with IEEE 1588 timestamp
 * @adapter: board private structure to initialize
 *
 * igb_init_hw_timer initializes the function pointer and values for the hw
 * timer found in hardware.
 **/
static void igb_init_hw_timer(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	switch (hw->mac.type) {
	case e1000_82580:
		memset(&adapter->cycles, 0, sizeof(adapter->cycles));
		adapter->cycles.read = igb_read_clock;
		adapter->cycles.mask = CLOCKSOURCE_MASK(64);
		adapter->cycles.mult = 1;
		/*
		 * The 82580 timesync updates the system timer every 8ns by 8ns
		 * and the value cannot be shifted.  Instead we need to shift
		 * the registers to generate a 64bit timer value.  As a result
		 * SYSTIMR/L/H, TXSTMPL/H, RXSTMPL/H all have to be shifted by
		 * 24 in order to generate a larger value for synchronization.
		 */
		adapter->cycles.shift = IGB_82580_TSYNC_SHIFT;
		/* disable system timer temporarily by setting bit 31 */
		wr32(E1000_TSAUXC, 0x80000000);
		wrfl();

		/* Set registers so that rollover occurs soon to test this. */
		wr32(E1000_SYSTIMR, 0x00000000);
		wr32(E1000_SYSTIML, 0x80000000);
		wr32(E1000_SYSTIMH, 0x000000FF);
		wrfl();

		/* enable system timer by clearing bit 31 */
		wr32(E1000_TSAUXC, 0x0);
		wrfl();

		timecounter_init(&adapter->clock,
		                 &adapter->cycles,
		                 ktime_to_ns(ktime_get_real()));
		/*
		 * Synchronize our NIC clock against system wall clock. NIC
		 * time stamp reading requires ~3us per sample, each sample
		 * was pretty stable even under load => only require 10
		 * samples for each offset comparison.
		 */
		memset(&adapter->compare, 0, sizeof(adapter->compare));
		adapter->compare.source = &adapter->clock;
		adapter->compare.target = ktime_get_real;
		adapter->compare.num_samples = 10;
		timecompare_update(&adapter->compare, 0);
		break;
	case e1000_82576:
		/*
		 * Initialize hardware timer: we keep it running just in case
		 * that some program needs it later on.
		 */
		memset(&adapter->cycles, 0, sizeof(adapter->cycles));
		adapter->cycles.read = igb_read_clock;
		adapter->cycles.mask = CLOCKSOURCE_MASK(64);
		adapter->cycles.mult = 1;
		/*
		 * Scale the NIC clock cycle by a large factor so that
		 * relatively small clock corrections can be added or
		 * subtracted at each clock tick. The drawbacks of a large
		 * factor are a) that the clock register overflows more quickly
		 * (not such a big deal) and b) that the increment per tick has
		 * to fit into 24 bits.  As a result we need to use a shift of
		 * 19 so we can fit a value of 16 into the TIMINCA register.
		 */
		adapter->cycles.shift = IGB_82576_TSYNC_SHIFT;
		wr32(E1000_TIMINCA,
		     (1 << E1000_TIMINCA_16NS_SHIFT) |
		     (16 << IGB_82576_TSYNC_SHIFT));
		wrfl();
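		/*
		 * With this programming each 16 ns tick adds 16 << 19 to
		 * SYSTIM, so one nanosecond corresponds to 2^19 counter units
		 * and the cyclecounter's shift of 19 converts the value read
		 * by igb_read_clock() back to nanoseconds.
		 */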
		/* Set registers so that rollover occurs soon to test this. */
		wr32(E1000_SYSTIML, 0x00000000);
		wr32(E1000_SYSTIMH, 0xFF800000);
		wrfl();

		timecounter_init(&adapter->clock,
		                 &adapter->cycles,
		                 ktime_to_ns(ktime_get_real()));
		/*
		 * Synchronize our NIC clock against system wall clock. NIC
		 * time stamp reading requires ~3us per sample, each sample
		 * was pretty stable even under load => only require 10
		 * samples for each offset comparison.
		 */
		memset(&adapter->compare, 0, sizeof(adapter->compare));
		adapter->compare.source = &adapter->clock;
		adapter->compare.target = ktime_get_real;
		adapter->compare.num_samples = 10;
		timecompare_update(&adapter->compare, 0);
		break;
	case e1000_82575:
		/* 82575 does not support timesync */
	default:
		break;
	}
}

/**
 * igb_sw_init - Initialize general software structures (struct igb_adapter)
 * @adapter: board private structure to initialize
 *
 * igb_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int __devinit igb_sw_init(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);

	adapter->tx_ring_count = IGB_DEFAULT_TXD;
	adapter->rx_ring_count = IGB_DEFAULT_RXD;
	adapter->rx_itr_setting = IGB_DEFAULT_ITR;
	adapter->tx_itr_setting = IGB_DEFAULT_ITR;
	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;

#ifdef CONFIG_PCI_IOV
	if (hw->mac.type == e1000_82576)
		adapter->vfs_allocated_count = max_vfs;

#endif /* CONFIG_PCI_IOV */
	adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());

	/*
	 * if rss_queues > 4 or vfs are going to be allocated with rss_queues
	 * then we should combine the queues into a queue pair in order to
	 * conserve interrupts due to limited supply
	 */
	if ((adapter->rss_queues > 4) ||
	    ((adapter->rss_queues > 1) && (adapter->vfs_allocated_count > 6)))
		adapter->flags |= IGB_FLAG_QUEUE_PAIRS;

	/* This call may decrease the number of queues */
	if (igb_init_interrupt_scheme(adapter)) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	igb_init_hw_timer(adapter);
	igb_probe_vfs(adapter);

	/* Explicitly disable IRQ since the NIC can be in any state. */
	igb_irq_disable(adapter);

	set_bit(__IGB_DOWN, &adapter->state);
	return 0;
}
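/*
 * Illustrative note on the queue-pair decision above (not from the original
 * source): on a machine with, say, 8 online CPUs, rss_queues is 8, which
 * exceeds 4, so each Tx ring is paired with an Rx ring on a shared q_vector;
 * 8 paired queues then consume roughly half the MSI-X vectors that 8 Tx plus
 * 8 Rx queues would otherwise require.
 */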
/**
 * igb_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int igb_open(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;
	int i;

	/* disallow open during test */
	if (test_bit(__IGB_TESTING, &adapter->state))
		return -EBUSY;

	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = igb_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = igb_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	/* e1000_power_up_phy(adapter); */

	/* before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call pci_request_irq, so we have to setup our
	 * clean_rx handler before we do so. */
	igb_configure(adapter);

	err = igb_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/* From here on the code is the same as igb_up() */
	clear_bit(__IGB_DOWN, &adapter->state);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		napi_enable(&q_vector->napi);
	}

	/* Clear any pending interrupts. */
	rd32(E1000_ICR);

	igb_irq_enable(adapter);

	/* notify VFs that reset has been completed */
	if (adapter->vfs_allocated_count) {
		u32 reg_data = rd32(E1000_CTRL_EXT);
		reg_data |= E1000_CTRL_EXT_PFRSTD;
		wr32(E1000_CTRL_EXT, reg_data);
	}

	netif_tx_start_all_queues(netdev);

	/* start the watchdog. */
	hw->mac.get_link_status = 1;
	schedule_work(&adapter->watchdog_task);

	return 0;

err_req_irq:
	igb_release_hw_control(adapter);
	/* e1000_power_down_phy(adapter); */
	igb_free_all_rx_resources(adapter);
err_setup_rx:
	igb_free_all_tx_resources(adapter);
err_setup_tx:
	igb_reset(adapter);

	return err;
}
/**
 * igb_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int igb_close(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
	igb_down(adapter);

	igb_free_irq(adapter);

	igb_free_all_tx_resources(adapter);
	igb_free_all_rx_resources(adapter);

	return 0;
}
/**
 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int igb_setup_tx_resources(struct igb_ring *tx_ring)
{
	struct pci_dev *pdev = tx_ring->pdev;
	int size;

	size = sizeof(struct igb_buffer) * tx_ring->count;
	tx_ring->buffer_info = vmalloc(size);
	if (!tx_ring->buffer_info)
		goto err;
	memset(tx_ring->buffer_info, 0, size);

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = pci_alloc_consistent(pdev,
	                                     tx_ring->size,
	                                     &tx_ring->dma);

	if (!tx_ring->desc)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	return 0;

err:
	vfree(tx_ring->buffer_info);
	dev_err(&pdev->dev,
		"Unable to allocate memory for the transmit descriptor ring\n");
	return -ENOMEM;
}
/**
 * igb_setup_all_tx_resources - wrapper to allocate Tx resources
 *				  (Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = igb_setup_tx_resources(&adapter->tx_ring[i]);
		if (err) {
			dev_err(&pdev->dev,
				"Allocation for Tx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
				igb_free_tx_resources(&adapter->tx_ring[i]);
			break;
		}
	}

	for (i = 0; i < IGB_ABS_MAX_TX_QUEUES; i++) {
		int r_idx = i % adapter->num_tx_queues;
		adapter->multi_tx_table[i] = &adapter->tx_ring[r_idx];
	}
	return err;
}
/**
 * igb_setup_tctl - configure the transmit control registers
 * @adapter: Board private structure
 **/
void igb_setup_tctl(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 tctl;

	/* disable queue 0 which is enabled by default on 82575 and 82576 */
	wr32(E1000_TXDCTL(0), 0);

	/* Program the Transmit Control Register */
	tctl = rd32(E1000_TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);

	igb_config_collision_dist(hw);

	/* Enable transmits */
	tctl |= E1000_TCTL_EN;

	wr32(E1000_TCTL, tctl);
}
/**
 * igb_configure_tx_ring - Configure transmit ring after Reset
 * @adapter: board private structure
 * @ring: tx ring to configure
 *
 * Configure a transmit ring after a reset.
 **/
void igb_configure_tx_ring(struct igb_adapter *adapter,
                           struct igb_ring *ring)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 txdctl;
	u64 tdba = ring->dma;
	int reg_idx = ring->reg_idx;

	/* disable the queue */
	txdctl = rd32(E1000_TXDCTL(reg_idx));
	wr32(E1000_TXDCTL(reg_idx),
	                txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
	wrfl();
	mdelay(10);

	wr32(E1000_TDLEN(reg_idx),
	                ring->count * sizeof(union e1000_adv_tx_desc));
	wr32(E1000_TDBAL(reg_idx),
	                tdba & 0x00000000ffffffffULL);
	wr32(E1000_TDBAH(reg_idx), tdba >> 32);

	ring->head = hw->hw_addr + E1000_TDH(reg_idx);
	ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
	writel(0, ring->head);
	writel(0, ring->tail);

	txdctl |= IGB_TX_PTHRESH;
	txdctl |= IGB_TX_HTHRESH << 8;
	txdctl |= IGB_TX_WTHRESH << 16;

	txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
	wr32(E1000_TXDCTL(reg_idx), txdctl);
}
/**
 * igb_configure_tx - Configure transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void igb_configure_tx(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igb_configure_tx_ring(adapter, &adapter->tx_ring[i]);
}
/**
 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int igb_setup_rx_resources(struct igb_ring *rx_ring)
{
	struct pci_dev *pdev = rx_ring->pdev;
	int size, desc_len;

	size = sizeof(struct igb_buffer) * rx_ring->count;
	rx_ring->buffer_info = vmalloc(size);
	if (!rx_ring->buffer_info)
		goto err;
	memset(rx_ring->buffer_info, 0, size);

	desc_len = sizeof(union e1000_adv_rx_desc);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * desc_len;
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
	                                     &rx_ring->dma);

	if (!rx_ring->desc)
		goto err;

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;

err:
	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;
	dev_err(&pdev->dev, "Unable to allocate memory for "
		"the receive descriptor ring\n");
	return -ENOMEM;
}
/**
 * igb_setup_all_rx_resources - wrapper to allocate Rx resources
 *				  (Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = igb_setup_rx_resources(&adapter->rx_ring[i]);
		if (err) {
			dev_err(&pdev->dev,
				"Allocation for Rx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
				igb_free_rx_resources(&adapter->rx_ring[i]);
			break;
		}
	}

	return err;
}
/**
 * igb_setup_mrqc - configure the multiple receive queue control registers
 * @adapter: Board private structure
 **/
static void igb_setup_mrqc(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 mrqc, rxcsum;
	u32 j, num_rx_queues, shift = 0, shift2 = 0;
	union e1000_reta {
		u32 dword;
		u8  bytes[4];
	} reta;
	static const u8 rsshash[40] = {
		0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
		0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
		0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
		0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };

	/* Fill out hash function seeds */
	for (j = 0; j < 10; j++) {
		u32 rsskey = rsshash[(j * 4)];
		rsskey |= rsshash[(j * 4) + 1] << 8;
		rsskey |= rsshash[(j * 4) + 2] << 16;
		rsskey |= rsshash[(j * 4) + 3] << 24;
		array_wr32(E1000_RSSRK(0), j, rsskey);
	}
	num_rx_queues = adapter->rss_queues;

	if (adapter->vfs_allocated_count) {
		/* 82575 and 82576 supports 2 RSS queues for VMDq */
		switch (hw->mac.type) {
		case e1000_82580:
			num_rx_queues = 1;
			shift = 0;
			break;
		case e1000_82576:
			shift = 3;
			num_rx_queues = 2;
			break;
		case e1000_82575:
			shift = 2;
			shift2 = 6;
		default:
			break;
		}
	} else {
		if (hw->mac.type == e1000_82575)
			shift = 6;
	}

	for (j = 0; j < (32 * 4); j++) {
		reta.bytes[j & 3] = (j % num_rx_queues) << shift;
		if (shift2)
			reta.bytes[j & 3] |= num_rx_queues << shift2;
		if ((j & 3) == 3)
			wr32(E1000_RETA(j >> 2), reta.dword);
	}
	/*
	 * Disable raw packet checksumming so that RSS hash is placed in
	 * descriptor on writeback.  No need to enable TCP/UDP/IP checksum
	 * offloads as they are enabled by default
	 */
	rxcsum = rd32(E1000_RXCSUM);
	rxcsum |= E1000_RXCSUM_PCSD;

	if (adapter->hw.mac.type >= e1000_82576)
		/* Enable Receive Checksum Offload for SCTP */
		rxcsum |= E1000_RXCSUM_CRCOFL;

	/* Don't need to set TUOFL or IPOFL, they default to 1 */
	wr32(E1000_RXCSUM, rxcsum);

	/* If VMDq is enabled then we set the appropriate mode for that, else
	 * we default to RSS so that an RSS hash is calculated per packet even
	 * if we are only using one queue */
	if (adapter->vfs_allocated_count) {
		if (hw->mac.type > e1000_82575) {
			/* Set the default pool for the PF's first queue */
			u32 vtctl = rd32(E1000_VT_CTL);
			vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
				   E1000_VT_CTL_DISABLE_DEF_POOL);
			vtctl |= adapter->vfs_allocated_count <<
				E1000_VT_CTL_DEFAULT_POOL_SHIFT;
			wr32(E1000_VT_CTL, vtctl);
		}
		if (adapter->rss_queues > 1)
			mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
		else
			mrqc = E1000_MRQC_ENABLE_VMDQ;
	} else {
		mrqc = E1000_MRQC_ENABLE_RSS_4Q;
	}
	igb_vmm_control(adapter);

	mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
		 E1000_MRQC_RSS_FIELD_IPV4_TCP);
	mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 |
		 E1000_MRQC_RSS_FIELD_IPV6_TCP);
	mrqc |= (E1000_MRQC_RSS_FIELD_IPV4_UDP |
		 E1000_MRQC_RSS_FIELD_IPV6_UDP);
	mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
		 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);

	wr32(E1000_MRQC, mrqc);
}
/**
 * igb_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 **/
void igb_setup_rctl(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;

	rctl = rd32(E1000_RCTL);

	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
	rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);

	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
		(hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/*
	 * enable stripping of CRC. It's unlikely this will break BMC
	 * redirection as it did with e1000. Newer features require
	 * that the HW strips the CRC.
	 */
	rctl |= E1000_RCTL_SECRC;

	/* disable store bad packets and clear size bits. */
	rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);

	/* enable LPE to prevent packets larger than max_frame_size */
	rctl |= E1000_RCTL_LPE;

	/* disable queue 0 to prevent tail write w/o re-config */
	wr32(E1000_RXDCTL(0), 0);

	/* Attention!!!  For SR-IOV PF driver operations you must enable
	 * queue drop for all VF and PF queues to prevent head of line blocking
	 * if an un-trusted VF does not provide descriptors to hardware.
	 */
	if (adapter->vfs_allocated_count) {
		/* set all queue drop enable bits */
		wr32(E1000_QDE, ALL_QUEUES);
	}

	wr32(E1000_RCTL, rctl);
}
static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
                                   int vfn)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vmolr;

	/* if it isn't the PF check to see if VFs are enabled and
	 * increase the size to support vlan tags */
	if (vfn < adapter->vfs_allocated_count &&
	    adapter->vf_data[vfn].vlans_enabled)
		size += VLAN_TAG_SIZE;

	vmolr = rd32(E1000_VMOLR(vfn));
	vmolr &= ~E1000_VMOLR_RLPML_MASK;
	vmolr |= size | E1000_VMOLR_LPE;
	wr32(E1000_VMOLR(vfn), vmolr);

	return 0;
}
/**
 * igb_rlpml_set - set maximum receive packet size
 * @adapter: board private structure
 *
 * Configure maximum receivable packet size.
 **/
static void igb_rlpml_set(struct igb_adapter *adapter)
{
	u32 max_frame_size = adapter->max_frame_size;
	struct e1000_hw *hw = &adapter->hw;
	u16 pf_id = adapter->vfs_allocated_count;

	if (adapter->vlgrp)
		max_frame_size += VLAN_TAG_SIZE;

	/* if vfs are enabled we set RLPML to the largest possible request
	 * size and set the VMOLR RLPML to the size we need */
	if (pf_id) {
		igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
		max_frame_size = MAX_JUMBO_FRAME_SIZE;
	}

	wr32(E1000_RLPML, max_frame_size);
}
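/*
 * Sizing example for igb_rlpml_set() (illustrative): with a standard
 * 1500-byte MTU, max_frame_size is 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN)
 * = 1518, and a configured VLAN group adds a 4-byte tag, so RLPML is
 * programmed to 1522 on a non-SR-IOV setup.
 */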
static inline void igb_set_vmolr(struct igb_adapter *adapter, int vfn)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vmolr;

	/*
	 * This register exists only on 82576 and newer so if we are older then
	 * we should exit and do nothing
	 */
	if (hw->mac.type < e1000_82576)
		return;

	vmolr = rd32(E1000_VMOLR(vfn));
	vmolr |= E1000_VMOLR_AUPE |        /* Accept untagged packets */
	         E1000_VMOLR_STRVLAN;      /* Strip vlan tags */

	/* clear all bits that might not be set */
	vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);

	if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
		vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
	/*
	 * for VMDq only allow the VFs and pool 0 to accept broadcast and
	 * multicast packets
	 */
	if (vfn <= adapter->vfs_allocated_count)
		vmolr |= E1000_VMOLR_BAM;  /* Accept broadcast */

	wr32(E1000_VMOLR(vfn), vmolr);
}
/**
 * igb_configure_rx_ring - Configure a receive ring after Reset
 * @adapter: board private structure
 * @ring: receive ring to be configured
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
void igb_configure_rx_ring(struct igb_adapter *adapter,
                           struct igb_ring *ring)
{
	struct e1000_hw *hw = &adapter->hw;
	u64 rdba = ring->dma;
	int reg_idx = ring->reg_idx;
	u32 srrctl, rxdctl;

	/* disable the queue */
	rxdctl = rd32(E1000_RXDCTL(reg_idx));
	wr32(E1000_RXDCTL(reg_idx),
	                rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);

	/* Set DMA base address registers */
	wr32(E1000_RDBAL(reg_idx),
	     rdba & 0x00000000ffffffffULL);
	wr32(E1000_RDBAH(reg_idx), rdba >> 32);
	wr32(E1000_RDLEN(reg_idx),
	               ring->count * sizeof(union e1000_adv_rx_desc));

	/* initialize head and tail */
	ring->head = hw->hw_addr + E1000_RDH(reg_idx);
	ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
	writel(0, ring->head);
	writel(0, ring->tail);

	/* set descriptor configuration */
	if (ring->rx_buffer_len < IGB_RXBUFFER_1024) {
		srrctl = ALIGN(ring->rx_buffer_len, 64) <<
		         E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
		srrctl |= IGB_RXBUFFER_16384 >>
		          E1000_SRRCTL_BSIZEPKT_SHIFT;
#else
		srrctl |= (PAGE_SIZE / 2) >>
		          E1000_SRRCTL_BSIZEPKT_SHIFT;
#endif
		srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
	} else {
		srrctl = ALIGN(ring->rx_buffer_len, 1024) >>
		         E1000_SRRCTL_BSIZEPKT_SHIFT;
		srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
	}

	wr32(E1000_SRRCTL(reg_idx), srrctl);

	/* set filtering for VMDQ pools */
	igb_set_vmolr(adapter, reg_idx & 0x7);

	/* enable receive descriptor fetching */
	rxdctl = rd32(E1000_RXDCTL(reg_idx));
	rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
	rxdctl &= 0xFFF00000;
	rxdctl |= IGB_RX_PTHRESH;
	rxdctl |= IGB_RX_HTHRESH << 8;
	rxdctl |= IGB_RX_WTHRESH << 16;
	wr32(E1000_RXDCTL(reg_idx), rxdctl);
}
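/*
 * Unit note for the SRRCTL math above (illustrative): the BSIZEPKT field
 * is expressed in 1 KB units, so a 2048-byte rx_buffer_len right-shifted by
 * E1000_SRRCTL_BSIZEPKT_SHIFT programs a packet buffer size of 2, while the
 * header-split path expresses the header size in 64-byte units via the
 * BSIZEHDRSIZE shift.
 */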
/**
 * igb_configure_rx - Configure receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void igb_configure_rx(struct igb_adapter *adapter)
{
	int i;

	/* set UTA to appropriate mode */
	igb_set_uta(adapter);

	/* set the correct pool for the PF default MAC address in entry 0 */
	igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
	                 adapter->vfs_allocated_count);

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	for (i = 0; i < adapter->num_rx_queues; i++)
		igb_configure_rx_ring(adapter, &adapter->rx_ring[i]);
}
/**
 * igb_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void igb_free_tx_resources(struct igb_ring *tx_ring)
{
	igb_clean_tx_ring(tx_ring);

	vfree(tx_ring->buffer_info);
	tx_ring->buffer_info = NULL;

	/* if not set, then don't free */
	if (!tx_ring->desc)
		return;

	pci_free_consistent(tx_ring->pdev, tx_ring->size,
	                    tx_ring->desc, tx_ring->dma);

	tx_ring->desc = NULL;
}
/**
 * igb_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
static void igb_free_all_tx_resources(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igb_free_tx_resources(&adapter->tx_ring[i]);
}
void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring,
				    struct igb_buffer *buffer_info)
{
	if (buffer_info->dma) {
		if (buffer_info->mapped_as_page)
			pci_unmap_page(tx_ring->pdev,
					buffer_info->dma,
					buffer_info->length,
					PCI_DMA_TODEVICE);
		else
			pci_unmap_single(tx_ring->pdev,
					buffer_info->dma,
					buffer_info->length,
					PCI_DMA_TODEVICE);
		buffer_info->dma = 0;
	}
	if (buffer_info->skb) {
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
	buffer_info->time_stamp = 0;
	buffer_info->length = 0;
	buffer_info->next_to_watch = 0;
	buffer_info->mapped_as_page = false;
}
/**
 * igb_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/
static void igb_clean_tx_ring(struct igb_ring *tx_ring)
{
	struct igb_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	if (!tx_ring->buffer_info)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
	}

	size = sizeof(struct igb_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
}
/**
 * igb_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igb_clean_tx_ring(&adapter->tx_ring[i]);
}
/**
 * igb_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void igb_free_rx_resources(struct igb_ring *rx_ring)
{
	igb_clean_rx_ring(rx_ring);

	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;

	/* if not set, then don't free */
	if (!rx_ring->desc)
		return;

	pci_free_consistent(rx_ring->pdev, rx_ring->size,
	                    rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}
/**
 * igb_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
static void igb_free_all_rx_resources(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		igb_free_rx_resources(&adapter->rx_ring[i]);
}
/**
 * igb_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 **/
static void igb_clean_rx_ring(struct igb_ring *rx_ring)
{
	struct igb_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	if (!rx_ring->buffer_info)
		return;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if (buffer_info->dma) {
			pci_unmap_single(rx_ring->pdev,
			                 buffer_info->dma,
					 rx_ring->rx_buffer_len,
					 PCI_DMA_FROMDEVICE);
			buffer_info->dma = 0;
		}

		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}
		if (buffer_info->page_dma) {
			pci_unmap_page(rx_ring->pdev,
			               buffer_info->page_dma,
				       PAGE_SIZE / 2,
				       PCI_DMA_FROMDEVICE);
			buffer_info->page_dma = 0;
		}
		if (buffer_info->page) {
			put_page(buffer_info->page);
			buffer_info->page = NULL;
			buffer_info->page_offset = 0;
		}
	}

	size = sizeof(struct igb_buffer) * rx_ring->count;
	memset(rx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}
/**
 * igb_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		igb_clean_rx_ring(&adapter->rx_ring[i]);
}
/**
 * igb_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int igb_set_mac(struct net_device *netdev, void *p)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	/* set the correct pool for the new PF MAC address in entry 0 */
	igb_rar_set_qsel(adapter, hw->mac.addr, 0,
	                 adapter->vfs_allocated_count);

	return 0;
}
/**
 * igb_write_mc_addr_list - write multicast addresses to MTA
 * @netdev: network interface device structure
 *
 * Writes multicast address list to the MTA hash table.
 * Returns: -ENOMEM on failure
 *                0 on no addresses written
 *                X on writing X addresses to MTA
 **/
static int igb_write_mc_addr_list(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct dev_mc_list *mc_ptr = netdev->mc_list;
	u8  *mta_list;
	u32 vmolr = 0;
	int i;

	if (!netdev->mc_count) {
		/* nothing to program, so clear mc list */
		igb_update_mc_addr_list(hw, NULL, 0);
		igb_restore_vf_multicasts(adapter);
		return 0;
	}

	mta_list = kzalloc(netdev->mc_count * 6, GFP_ATOMIC);
	if (!mta_list)
		return -ENOMEM;

	/* set vmolr receive overflow multicast bit */
	vmolr |= E1000_VMOLR_ROMPE;

	/* The shared function expects a packed array of only addresses. */
	mc_ptr = netdev->mc_list;

	for (i = 0; i < netdev->mc_count; i++) {
		if (!mc_ptr)
			break;
		memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
		mc_ptr = mc_ptr->next;
	}
	igb_update_mc_addr_list(hw, mta_list, i);
	kfree(mta_list);

	return netdev->mc_count;
}
/**
 * igb_write_uc_addr_list - write unicast addresses to RAR table
 * @netdev: network interface device structure
 *
 * Writes unicast address list to the RAR table.
 * Returns: -ENOMEM on failure/insufficient address space
 *                0 on no addresses written
 *                X on writing X addresses to the RAR table
 **/
static int igb_write_uc_addr_list(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	unsigned int vfn = adapter->vfs_allocated_count;
	unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
	int count = 0;

	/* return ENOMEM indicating insufficient memory for addresses */
	if (netdev->uc.count > rar_entries)
		return -ENOMEM;

	if (netdev->uc.count && rar_entries) {
		struct netdev_hw_addr *ha;
		list_for_each_entry(ha, &netdev->uc.list, list) {
			if (!rar_entries)
				break;
			igb_rar_set_qsel(adapter, ha->addr,
			                 rar_entries--,
			                 vfn);
			count++;
		}
	}
	/* write the addresses in reverse order to avoid write combining */
	for (; rar_entries > 0 ; rar_entries--) {
		wr32(E1000_RAH(rar_entries), 0);
		wr32(E1000_RAL(rar_entries), 0);
	}
	wrfl();

	return count;
}
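/*
 * Budget example for the function above (illustrative, hypothetical
 * numbers): if the MAC exposed 24 RAR entries and 7 VFs were enabled,
 * rar_entries would be 24 - (7 + 1) = 16, i.e. the PF could install at most
 * 16 secondary unicast filters before igb_set_rx_mode() has to fall back to
 * unicast promiscuous mode.
 */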
/**
 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_rx_mode entry point is called whenever the unicast or multicast
 * address lists or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper unicast, multicast,
 * promiscuous mode, and all-multi behavior.
 **/
static void igb_set_rx_mode(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	unsigned int vfn = adapter->vfs_allocated_count;
	u32 rctl, vmolr = 0;
	int count;

	/* Check for Promiscuous and All Multicast modes */
	rctl = rd32(E1000_RCTL);

	/* clear the affected bits */
	rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);

	if (netdev->flags & IFF_PROMISC) {
		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
		vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			rctl |= E1000_RCTL_MPE;
			vmolr |= E1000_VMOLR_MPME;
		} else {
			/*
			 * Write addresses to the MTA, if the attempt fails
			 * then we should just turn on promiscuous mode so
			 * that we can at least receive multicast traffic
			 */
			count = igb_write_mc_addr_list(netdev);
			if (count < 0) {
				rctl |= E1000_RCTL_MPE;
				vmolr |= E1000_VMOLR_MPME;
			} else if (count) {
				vmolr |= E1000_VMOLR_ROMPE;
			}
		}
		/*
		 * Write addresses to available RAR registers, if there is not
		 * sufficient space to store all the addresses then enable
		 * unicast promiscuous mode
		 */
		count = igb_write_uc_addr_list(netdev);
		if (count < 0) {
			rctl |= E1000_RCTL_UPE;
			vmolr |= E1000_VMOLR_ROPE;
		}
		rctl |= E1000_RCTL_VFE;
	}
	wr32(E1000_RCTL, rctl);

	/*
	 * In order to support SR-IOV and eventually VMDq it is necessary to set
	 * the VMOLR to enable the appropriate modes.  Without this workaround
	 * we will have issues with VLAN tag stripping not being done for frames
	 * that are only arriving because we are the default pool
	 */
	if (hw->mac.type < e1000_82576)
		return;

	vmolr |= rd32(E1000_VMOLR(vfn)) &
	         ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
	wr32(E1000_VMOLR(vfn), vmolr);
	igb_restore_vf_multicasts(adapter);
}
/* Need to wait a few seconds after link up to get diagnostic information from
 * the phy */
static void igb_update_phy_info(unsigned long data)
{
	struct igb_adapter *adapter = (struct igb_adapter *) data;
	igb_get_phy_info(&adapter->hw);
}
/**
 * igb_has_link - check shared code for link and determine up/down
 * @adapter: pointer to driver private info
 **/
static bool igb_has_link(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	bool link_active = false;
	s32 ret_val = 0;

	/* get_link_status is set on LSC (link status) interrupt or
	 * rx sequence error interrupt.  get_link_status will stay
	 * false until the e1000_check_for_link establishes link
	 * for copper adapters ONLY
	 */
	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		if (hw->mac.get_link_status) {
			ret_val = hw->mac.ops.check_for_link(hw);
			link_active = !hw->mac.get_link_status;
		} else {
			link_active = true;
		}
		break;
	case e1000_media_type_internal_serdes:
		ret_val = hw->mac.ops.check_for_link(hw);
		link_active = hw->mac.serdes_has_link;
		break;
	default:
	case e1000_media_type_unknown:
		break;
	}

	return link_active;
}
/**
 * igb_watchdog - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void igb_watchdog(unsigned long data)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	/* Do the rest outside of interrupt context */
	schedule_work(&adapter->watchdog_task);
}

static void igb_watchdog_task(struct work_struct *work)
{
	struct igb_adapter *adapter = container_of(work,
	                                           struct igb_adapter,
	                                           watchdog_task);
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 link;
	int i;

	link = igb_has_link(adapter);
	if (link) {
		if (!netif_carrier_ok(netdev)) {
			u32 ctrl;
			hw->mac.ops.get_speed_and_duplex(hw,
			                                 &adapter->link_speed,
			                                 &adapter->link_duplex);

			ctrl = rd32(E1000_CTRL);
			/* Links status message must follow this format */
			printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, "
				 "Flow Control: %s\n",
			       netdev->name,
			       adapter->link_speed,
			       adapter->link_duplex == FULL_DUPLEX ?
				 "Full Duplex" : "Half Duplex",
			       ((ctrl & E1000_CTRL_TFCE) &&
			        (ctrl & E1000_CTRL_RFCE)) ? "RX/TX" :
			       ((ctrl & E1000_CTRL_RFCE) ?  "RX" :
			       ((ctrl & E1000_CTRL_TFCE) ?  "TX" : "None")));

			/* tweak tx_queue_len according to speed/duplex and
			 * adjust the timeout factor */
			netdev->tx_queue_len = adapter->tx_queue_len;
			adapter->tx_timeout_factor = 1;
			switch (adapter->link_speed) {
			case SPEED_10:
				netdev->tx_queue_len = 10;
				adapter->tx_timeout_factor = 14;
				break;
			case SPEED_100:
				netdev->tx_queue_len = 100;
				/* maybe add some timeout factor ? */
				break;
			}

			netif_carrier_on(netdev);

			igb_ping_all_vfs(adapter);

			/* link state has changed, schedule phy info update */
			if (!test_bit(__IGB_DOWN, &adapter->state))
				mod_timer(&adapter->phy_info_timer,
					  round_jiffies(jiffies + 2 * HZ));
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			adapter->link_speed = 0;
			adapter->link_duplex = 0;
			/* Links status message must follow this format */
			printk(KERN_INFO "igb: %s NIC Link is Down\n",
			       netdev->name);
			netif_carrier_off(netdev);

			igb_ping_all_vfs(adapter);

			/* link state has changed, schedule phy info update */
			if (!test_bit(__IGB_DOWN, &adapter->state))
				mod_timer(&adapter->phy_info_timer,
					  round_jiffies(jiffies + 2 * HZ));
		}
	}

	igb_update_stats(adapter);
	igb_update_adaptive(hw);

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *tx_ring = &adapter->tx_ring[i];
		if (!netif_carrier_ok(netdev)) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context). */
			if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
				adapter->tx_timeout_count++;
				schedule_work(&adapter->reset_task);
				/* return immediately since reset is imminent */
				return;
			}
		}

		/* Force detection of hung controller every watchdog period */
		tx_ring->detect_tx_hung = true;
	}

	/* Cause software interrupt to ensure rx ring is cleaned */
	if (adapter->msix_entries) {
		u32 eics = 0;
		for (i = 0; i < adapter->num_q_vectors; i++) {
			struct igb_q_vector *q_vector = adapter->q_vector[i];
			eics |= q_vector->eims_value;
		}
		wr32(E1000_EICS, eics);
	} else {
		wr32(E1000_ICS, E1000_ICS_RXDMT0);
	}

	/* Reset the timer */
	if (!test_bit(__IGB_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer,
			  round_jiffies(jiffies + 2 * HZ));
}
enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};
/**
 * igb_update_ring_itr - update the dynamic ITR value based on packet size
 *
 *      Stores a new ITR value based on strictly on packet size.  This
 *      algorithm is less sophisticated than that used in igb_update_itr,
 *      due to the difficulty of synchronizing statistics across multiple
 *      receive rings.  The divisors and thresholds used by this function
 *      were determined based on theoretical maximum wire speed and testing
 *      data, in order to minimize response time while increasing bulk
 *      throughput.
 *      This functionality is controlled by the InterruptThrottleRate module
 *      parameter (see igb_param.c)
 *      NOTE:  This function is called only when operating in a multiqueue
 *             receive environment.
 * @q_vector: pointer to q_vector
 **/
static void igb_update_ring_itr(struct igb_q_vector *q_vector)
{
	int new_val = q_vector->itr_val;
	int avg_wire_size = 0;
	struct igb_adapter *adapter = q_vector->adapter;

	/* For non-gigabit speeds, just fix the interrupt rate at 4000
	 * ints/sec - ITR timer value of 120 ticks.
	 */
	if (adapter->link_speed != SPEED_1000) {
		new_val = 976;
		goto set_itr_val;
	}

	if (q_vector->rx_ring && q_vector->rx_ring->total_packets) {
		struct igb_ring *ring = q_vector->rx_ring;
		avg_wire_size = ring->total_bytes / ring->total_packets;
	}

	if (q_vector->tx_ring && q_vector->tx_ring->total_packets) {
		struct igb_ring *ring = q_vector->tx_ring;
		avg_wire_size = max_t(u32, avg_wire_size,
		                      (ring->total_bytes /
		                       ring->total_packets));
	}

	/* if avg_wire_size isn't set no work was done */
	if (!avg_wire_size)
		goto clear_counts;

	/* Add 24 bytes to size to account for CRC, preamble, and gap */
	avg_wire_size += 24;

	/* Don't starve jumbo frames */
	avg_wire_size = min(avg_wire_size, 3000);

	/* Give a little boost to mid-size frames */
	if ((avg_wire_size > 300) && (avg_wire_size < 1200))
		new_val = avg_wire_size / 3;
	else
		new_val = avg_wire_size / 2;

set_itr_val:
	if (new_val != q_vector->itr_val) {
		q_vector->itr_val = new_val;
		q_vector->set_itr = 1;
	}
clear_counts:
	if (q_vector->rx_ring) {
		q_vector->rx_ring->total_bytes = 0;
		q_vector->rx_ring->total_packets = 0;
	}
	if (q_vector->tx_ring) {
		q_vector->tx_ring->total_bytes = 0;
		q_vector->tx_ring->total_packets = 0;
	}
}
/**
 * igb_update_itr - update the dynamic ITR value based on statistics
 *      Stores a new ITR value based on packets and byte
 *      counts during the last interrupt.  The advantage of per interrupt
 *      computation is faster updates and more accurate ITR for the current
 *      traffic pattern.  Constants in this function were computed
 *      based on theoretical maximum wire speed and thresholds were set based
 *      on testing data as well as attempting to minimize response time
 *      while increasing bulk throughput.
 *      this functionality is controlled by the InterruptThrottleRate module
 *      parameter (see igb_param.c)
 *      NOTE:  These calculations are only valid when operating in a single-
 *             queue environment.
 * @adapter: pointer to adapter
 * @itr_setting: current q_vector->itr_val
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 **/
static unsigned int igb_update_itr(struct igb_adapter *adapter, u16 itr_setting,
				   int packets, int bytes)
{
	unsigned int retval = itr_setting;

	if (packets == 0)
		goto update_itr_done;

	switch (itr_setting) {
	case lowest_latency:
		/* handle TSO and jumbo frames */
		if (bytes/packets > 8000)
			retval = bulk_latency;
		else if ((packets < 5) && (bytes > 512))
			retval = low_latency;
		break;
	case low_latency:  /* 50 usec aka 20000 ints/s */
		if (bytes > 10000) {
			/* this if handles the TSO accounting */
			if (bytes/packets > 8000) {
				retval = bulk_latency;
			} else if ((packets < 10) || ((bytes/packets) > 1200)) {
				retval = bulk_latency;
			} else if ((packets > 35)) {
				retval = lowest_latency;
			}
		} else if (bytes/packets > 2000) {
			retval = bulk_latency;
		} else if (packets <= 2 && bytes < 512) {
			retval = lowest_latency;
		}
		break;
	case bulk_latency: /* 250 usec aka 4000 ints/s */
		if (bytes > 25000) {
			if (packets > 35)
				retval = low_latency;
		} else if (bytes < 1500) {
			retval = low_latency;
		}
		break;
	}

update_itr_done:
	return retval;
}
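/*
 * Unit note (illustrative, not from the original source): the ITR interval
 * appears to be programmed in 256 ns increments, which is consistent with
 * how the register values used below map onto the rates quoted in the
 * comments: 980 * 256 ns ~ 250 us (~4,000 ints/s), 196 * 256 ns ~ 50 us
 * (~20,000 ints/s), and 56 * 256 ns ~ 14 us (~70,000 ints/s).
 */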
static void igb_set_itr(struct igb_adapter *adapter)
{
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	u16 current_itr;
	u32 new_itr = q_vector->itr_val;

	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
	if (adapter->link_speed != SPEED_1000) {
		current_itr = 0;
		new_itr = 4000;
		goto set_itr_now;
	}

	adapter->rx_itr = igb_update_itr(adapter,
				    adapter->rx_itr,
				    adapter->rx_ring->total_packets,
				    adapter->rx_ring->total_bytes);

	adapter->tx_itr = igb_update_itr(adapter,
				    adapter->tx_itr,
				    adapter->tx_ring->total_packets,
				    adapter->tx_ring->total_bytes);
	current_itr = max(adapter->rx_itr, adapter->tx_itr);

	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (adapter->rx_itr_setting == 3 && current_itr == lowest_latency)
		current_itr = low_latency;

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = 56;  /* aka 70,000 ints/sec */
		break;
	case low_latency:
		new_itr = 196; /* aka 20,000 ints/sec */
		break;
	case bulk_latency:
		new_itr = 980; /* aka 4,000 ints/sec */
		break;
	default:
		break;
	}

set_itr_now:
	adapter->rx_ring->total_bytes = 0;
	adapter->rx_ring->total_packets = 0;
	adapter->tx_ring->total_bytes = 0;
	adapter->tx_ring->total_packets = 0;

	if (new_itr != q_vector->itr_val) {
		/* this attempts to bias the interrupt rate towards Bulk
		 * by adding intermediate steps when interrupt rate is
		 * increasing */
		new_itr = new_itr > q_vector->itr_val ?
		             max((new_itr * q_vector->itr_val) /
		                 (new_itr + (q_vector->itr_val >> 2)),
		                 new_itr) :
			     new_itr;
		/* Don't write the value here; it resets the adapter's
		 * internal timer, and causes us to delay far longer than
		 * we should between interrupts.  Instead, we write the ITR
		 * value at the beginning of the next interrupt so the timing
		 * ends up being correct.
		 */
		q_vector->itr_val = new_itr;
		q_vector->set_itr = 1;
	}

	return;
}
#define IGB_TX_FLAGS_CSUM		0x00000001
#define IGB_TX_FLAGS_VLAN		0x00000002
#define IGB_TX_FLAGS_TSO		0x00000004
#define IGB_TX_FLAGS_IPV4		0x00000008
#define IGB_TX_FLAGS_TSTAMP		0x00000010
#define IGB_TX_FLAGS_VLAN_MASK		0xffff0000
#define IGB_TX_FLAGS_VLAN_SHIFT		16
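/*
 * Flag layout example (illustrative): the low bits carry booleans while the
 * top 16 bits carry the VLAN tag, so a packet on VLAN id 100 ends up with
 * tx_flags = IGB_TX_FLAGS_VLAN | (100 << IGB_TX_FLAGS_VLAN_SHIFT).
 */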
static inline int igb_tso_adv(struct igb_ring *tx_ring,
			      struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
{
	struct e1000_adv_tx_context_desc *context_desc;
	unsigned int i;
	int err;
	struct igb_buffer *buffer_info;
	u32 info = 0, tu_cmd = 0;
	u32 mss_l4len_idx, l4len;

	if (skb_header_cloned(skb)) {
		err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (err)
			return err;
	}

	l4len = tcp_hdrlen(skb);
	*hdr_len += l4len;

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);
		iph->tot_len = 0;
		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
							 iph->daddr, 0,
							 IPPROTO_TCP,
							 0);
	} else if (skb_is_gso_v6(skb)) {
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						       &ipv6_hdr(skb)->daddr,
						       0, IPPROTO_TCP, 0);
	}

	i = tx_ring->next_to_use;

	buffer_info = &tx_ring->buffer_info[i];
	context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
	/* VLAN MACLEN IPLEN */
	if (tx_flags & IGB_TX_FLAGS_VLAN)
		info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
	info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
	*hdr_len += skb_network_offset(skb);
	info |= skb_network_header_len(skb);
	*hdr_len += skb_network_header_len(skb);
	context_desc->vlan_macip_lens = cpu_to_le32(info);

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);

	if (skb->protocol == htons(ETH_P_IP))
		tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
	tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;

	context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);

	/* MSS L4LEN IDX */
	mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
	mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);

	/* For 82575, context index must be unique per ring. */
	if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
		mss_l4len_idx |= tx_ring->reg_idx << 4;

	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
	context_desc->seqnum_seed = 0;

	buffer_info->time_stamp = jiffies;
	buffer_info->next_to_watch = i;
	buffer_info->dma = 0;
	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	return true;
}
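/*
 * hdr_len example for igb_tso_adv() (illustrative): for an untagged TCPv4
 * frame with no IP options, *hdr_len accumulates 20 (TCP, l4len) + 14 (MAC,
 * skb_network_offset) + 20 (IP, skb_network_header_len) = 54 bytes, the
 * amount later excluded from the payload length reported in the descriptor.
 */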
static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring,
				   struct sk_buff *skb, u32 tx_flags)
{
	struct e1000_adv_tx_context_desc *context_desc;
	struct pci_dev *pdev = tx_ring->pdev;
	struct igb_buffer *buffer_info;
	u32 info = 0, tu_cmd = 0;
	unsigned int i;

	if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
	    (tx_flags & IGB_TX_FLAGS_VLAN)) {
		i = tx_ring->next_to_use;
		buffer_info = &tx_ring->buffer_info[i];
		context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);

		if (tx_flags & IGB_TX_FLAGS_VLAN)
			info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);

		info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			info |= skb_network_header_len(skb);

		context_desc->vlan_macip_lens = cpu_to_le32(info);

		tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			__be16 protocol;

			if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
				const struct vlan_ethhdr *vhdr =
				          (const struct vlan_ethhdr *)skb->data;

				protocol = vhdr->h_vlan_encapsulated_proto;
			} else {
				protocol = skb->protocol;
			}

			switch (protocol) {
			case cpu_to_be16(ETH_P_IP):
				tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
				else if (ip_hdr(skb)->protocol == IPPROTO_SCTP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
				break;
			case cpu_to_be16(ETH_P_IPV6):
				/* XXX what about other V6 headers?? */
				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
				else if (ipv6_hdr(skb)->nexthdr == IPPROTO_SCTP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
				break;
			default:
				if (unlikely(net_ratelimit()))
					dev_warn(&pdev->dev,
					    "partial checksum but proto=%x!\n",
					    skb->protocol);
				break;
			}
		}

		context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
		context_desc->seqnum_seed = 0;
		if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
			context_desc->mss_l4len_idx =
				cpu_to_le32(tx_ring->reg_idx << 4);

		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = i;
		buffer_info->dma = 0;

		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}
	return false;
}
#define IGB_MAX_TXD_PWR	16
#define IGB_MAX_DATA_PER_TXD	(1<<IGB_MAX_TXD_PWR)

static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
				 unsigned int first)
{
	struct igb_buffer *buffer_info;
	struct pci_dev *pdev = tx_ring->pdev;
	unsigned int len = skb_headlen(skb);
	unsigned int count = 0, i;
	unsigned int f;

	i = tx_ring->next_to_use;

	buffer_info = &tx_ring->buffer_info[i];
	BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
	buffer_info->length = len;
	/* set time_stamp *before* dma to help avoid a possible race */
	buffer_info->time_stamp = jiffies;
	buffer_info->next_to_watch = i;
	buffer_info->dma = pci_map_single(pdev, skb->data, len,
					  PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(pdev, buffer_info->dma))
		goto dma_error;

	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
		struct skb_frag_struct *frag;

		count++;
		i++;
		if (i == tx_ring->count)
			i = 0;

		frag = &skb_shinfo(skb)->frags[f];
		len = frag->size;

		buffer_info = &tx_ring->buffer_info[i];
		BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
		buffer_info->length = len;
		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = i;
		buffer_info->mapped_as_page = true;
		buffer_info->dma = pci_map_page(pdev,
						frag->page,
						frag->page_offset,
						len,
						PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(pdev, buffer_info->dma))
			goto dma_error;

	}

	tx_ring->buffer_info[i].skb = skb;
	tx_ring->buffer_info[first].next_to_watch = i;

	return ++count;

dma_error:
	dev_err(&pdev->dev, "TX DMA map failed\n");

	/* clear timestamp and dma mappings for failed buffer_info mapping */
	buffer_info->dma = 0;
	buffer_info->time_stamp = 0;
	buffer_info->length = 0;
	buffer_info->next_to_watch = 0;
	buffer_info->mapped_as_page = false;
	count--;

	/* clear timestamp and dma mappings for remaining portion of packet */
	while (count >= 0) {
		count--;
		i--;
		if (i < 0)
			i += tx_ring->count;
		buffer_info = &tx_ring->buffer_info[i];
		igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
	}

	return 0;
}
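/*
 * Return-value note for igb_tx_map_adv() (illustrative): it returns the
 * number of descriptors consumed (the head buffer plus one per page
 * fragment), so a skb with 3 fragments yields 4; a return of 0 signals a
 * DMA mapping error, which the caller treats as "rewind next_to_use and
 * drop the skb".
 */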
static inline void igb_tx_queue_adv(struct igb_ring *tx_ring,
				    int tx_flags, int count, u32 paylen,
				    u8 hdr_len)
{
	union e1000_adv_tx_desc *tx_desc;
	struct igb_buffer *buffer_info;
	u32 olinfo_status = 0, cmd_type_len;
	unsigned int i = tx_ring->next_to_use;

	cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
			E1000_ADVTXD_DCMD_DEXT);

	if (tx_flags & IGB_TX_FLAGS_VLAN)
		cmd_type_len |= E1000_ADVTXD_DCMD_VLE;

	if (tx_flags & IGB_TX_FLAGS_TSTAMP)
		cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;

	if (tx_flags & IGB_TX_FLAGS_TSO) {
		cmd_type_len |= E1000_ADVTXD_DCMD_TSE;

		/* insert tcp checksum */
		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;

		/* insert ip checksum */
		if (tx_flags & IGB_TX_FLAGS_IPV4)
			olinfo_status |= E1000_TXD_POPTS_IXSM << 8;

	} else if (tx_flags & IGB_TX_FLAGS_CSUM) {
		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
	}

	if ((tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX) &&
	    (tx_flags & (IGB_TX_FLAGS_CSUM |
	                 IGB_TX_FLAGS_TSO |
			 IGB_TX_FLAGS_VLAN)))
		olinfo_status |= tx_ring->reg_idx << 4;

	olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);

	do {
		buffer_info = &tx_ring->buffer_info[i];
		tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
		tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
		tx_desc->read.cmd_type_len =
			cpu_to_le32(cmd_type_len | buffer_info->length);
		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
		count--;
		i++;
		if (i == tx_ring->count)
			i = 0;
	} while (count > 0);

	tx_desc->read.cmd_type_len |= cpu_to_le32(IGB_ADVTXD_DCMD);
	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64). */
	wmb();

	tx_ring->next_to_use = i;
	writel(i, tx_ring->tail);
	/* we need this if more than one processor can write to our tail
	 * at a time, it synchronizes IO on IA64/Altix systems */
	mmiowb();
}
static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
{
	struct net_device *netdev = tx_ring->netdev;

	netif_stop_subqueue(netdev, tx_ring->queue_index);

	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available. */
	if (igb_desc_unused(tx_ring) < size)
		return -EBUSY;

	/* A reprieve! */
	netif_wake_subqueue(netdev, tx_ring->queue_index);
	tx_ring->tx_stats.restart_queue++;
	return 0;
}

static int igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
{
	if (igb_desc_unused(tx_ring) >= size)
		return 0;
	return __igb_maybe_stop_tx(tx_ring, size);
}
netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
				    struct igb_ring *tx_ring)
{
	struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
	unsigned int first;
	unsigned int tx_flags = 0;
	u8 hdr_len = 0;
	int tso = 0;
	int count;
	union skb_shared_tx *shtx = skb_tx(skb);

	/* need: 1 descriptor per page,
	 *       + 2 desc gap to keep tail from touching head,
	 *       + 1 desc for skb->data,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time */
	if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
		/* this is a hard error */
		return NETDEV_TX_BUSY;
	}

	if (unlikely(shtx->hardware)) {
		shtx->in_progress = 1;
		tx_flags |= IGB_TX_FLAGS_TSTAMP;
	}

	if (vlan_tx_tag_present(skb) && adapter->vlgrp) {
		tx_flags |= IGB_TX_FLAGS_VLAN;
		tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
	}

	if (skb->protocol == htons(ETH_P_IP))
		tx_flags |= IGB_TX_FLAGS_IPV4;

	first = tx_ring->next_to_use;
	if (skb_is_gso(skb)) {
		tso = igb_tso_adv(tx_ring, skb, tx_flags, &hdr_len);

		if (tso < 0) {
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}

	if (tso)
		tx_flags |= IGB_TX_FLAGS_TSO;
	else if (igb_tx_csum_adv(tx_ring, skb, tx_flags) &&
	         (skb->ip_summed == CHECKSUM_PARTIAL))
		tx_flags |= IGB_TX_FLAGS_CSUM;

	/*
	 * count reflects descriptors mapped, if 0 or less then mapping error
	 * has occurred and we need to rewind the descriptor queue
	 */
	count = igb_tx_map_adv(tx_ring, skb, first);
	if (!count) {
		dev_kfree_skb_any(skb);
		tx_ring->buffer_info[first].time_stamp = 0;
		tx_ring->next_to_use = first;
		return NETDEV_TX_OK;
	}

	igb_tx_queue_adv(tx_ring, tx_flags, count, skb->len, hdr_len);

	/* Make sure there is space in the ring for the next send. */
	igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);

	return NETDEV_TX_OK;
}
static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
				      struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct igb_ring *tx_ring;
	int r_idx = 0;

	if (test_bit(__IGB_DOWN, &adapter->state)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	r_idx = skb->queue_mapping & (IGB_ABS_MAX_TX_QUEUES - 1);
	tx_ring = adapter->multi_tx_table[r_idx];

	/* This goes back to the question of how to logically map a tx queue
	 * to a flow.  Right now, performance is impacted slightly negatively
	 * if using multiple tx queues.  If the stack breaks away from a
	 * single qdisc implementation, we can look at this again. */
	return igb_xmit_frame_ring_adv(skb, tx_ring);
}
/**
 * igb_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void igb_tx_timeout(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/* Do the reset outside of interrupt context */
	adapter->tx_timeout_count++;

	if (hw->mac.type == e1000_82580)
		hw->dev_spec._82575.global_device_reset = true;

	schedule_work(&adapter->reset_task);
	wr32(E1000_EICS,
	     (adapter->eims_enable_mask & ~adapter->eims_other));
}
static void igb_reset_task(struct work_struct *work)
{
	struct igb_adapter *adapter;
	adapter = container_of(work, struct igb_adapter, reset_task);

	igb_reinit_locked(adapter);
}
/**
 * igb_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the timer callback.
 **/
static struct net_device_stats *igb_get_stats(struct net_device *netdev)
{
	/* only return the current stats */
	return &netdev->stats;
}
/**
 * igb_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int igb_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = adapter->pdev;
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
	u32 rx_buffer_len, i;

	if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
		dev_err(&pdev->dev, "Invalid MTU setting\n");
		return -EINVAL;
	}

	if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
		dev_err(&pdev->dev, "MTU > 9216 not supported.\n");
		return -EINVAL;
	}

	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		msleep(1);

	/* igb_down has a dependency on max_frame_size */
	adapter->max_frame_size = max_frame;

	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
	 * means we reserve 2 more, this pushes us to allocate from the next
	 * larger slab size.
	 * i.e. RXBUFFER_2048 --> size-4096 slab
	 */

	if (max_frame <= IGB_RXBUFFER_1024)
		rx_buffer_len = IGB_RXBUFFER_1024;
	else if (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)
		rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
	else
		rx_buffer_len = IGB_RXBUFFER_128;

	if (netif_running(netdev))
		igb_down(adapter);

	dev_info(&pdev->dev, "changing MTU from %d to %d\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

	for (i = 0; i < adapter->num_rx_queues; i++)
		adapter->rx_ring[i].rx_buffer_len = rx_buffer_len;

	if (netif_running(netdev))
		igb_up(adapter);
	else
		igb_reset(adapter);

	clear_bit(__IGB_RESETTING, &adapter->state);

	return 0;
}
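/*
 * Buffer selection example for igb_change_mtu() (illustrative): changing
 * the MTU to 1500 gives max_frame = 1518, which is above IGB_RXBUFFER_1024
 * but within MAXIMUM_ETHERNET_VLAN_SIZE, so every Rx ring is switched to
 * 1522-byte buffers before the interface is brought back up.
 */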
3927 * igb_update_stats - Update the board statistics counters
3928 * @adapter: board private structure
3931 void igb_update_stats(struct igb_adapter
*adapter
)
3933 struct net_device_stats
*net_stats
= igb_get_stats(adapter
->netdev
);
3934 struct e1000_hw
*hw
= &adapter
->hw
;
3935 struct pci_dev
*pdev
= adapter
->pdev
;
3941 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
3944 * Prevent stats update while adapter is being reset, or if the pci
3945 * connection is down.
3947 if (adapter
->link_speed
== 0)
3949 if (pci_channel_offline(pdev
))
3954 for (i
= 0; i
< adapter
->num_rx_queues
; i
++) {
3955 u32 rqdpc_tmp
= rd32(E1000_RQDPC(i
)) & 0x0FFF;
3956 adapter
->rx_ring
[i
].rx_stats
.drops
+= rqdpc_tmp
;
		net_stats->rx_fifo_errors += rqdpc_tmp;
		bytes += adapter->rx_ring[i].rx_stats.bytes;
		packets += adapter->rx_ring[i].rx_stats.packets;
	}

	net_stats->rx_bytes = bytes;
	net_stats->rx_packets = packets;

	bytes = 0;
	packets = 0;
	for (i = 0; i < adapter->num_tx_queues; i++) {
		bytes += adapter->tx_ring[i].tx_stats.bytes;
		packets += adapter->tx_ring[i].tx_stats.packets;
	}
	net_stats->tx_bytes = bytes;
	net_stats->tx_packets = packets;

	/* read stats registers */
	adapter->stats.crcerrs += rd32(E1000_CRCERRS);
	adapter->stats.gprc += rd32(E1000_GPRC);
	adapter->stats.gorc += rd32(E1000_GORCL);
	rd32(E1000_GORCH); /* clear GORCL */
	adapter->stats.bprc += rd32(E1000_BPRC);
	adapter->stats.mprc += rd32(E1000_MPRC);
	adapter->stats.roc += rd32(E1000_ROC);

	adapter->stats.prc64 += rd32(E1000_PRC64);
	adapter->stats.prc127 += rd32(E1000_PRC127);
	adapter->stats.prc255 += rd32(E1000_PRC255);
	adapter->stats.prc511 += rd32(E1000_PRC511);
	adapter->stats.prc1023 += rd32(E1000_PRC1023);
	adapter->stats.prc1522 += rd32(E1000_PRC1522);
	adapter->stats.symerrs += rd32(E1000_SYMERRS);
	adapter->stats.sec += rd32(E1000_SEC);

	adapter->stats.mpc += rd32(E1000_MPC);
	adapter->stats.scc += rd32(E1000_SCC);
	adapter->stats.ecol += rd32(E1000_ECOL);
	adapter->stats.mcc += rd32(E1000_MCC);
	adapter->stats.latecol += rd32(E1000_LATECOL);
	adapter->stats.dc += rd32(E1000_DC);
	adapter->stats.rlec += rd32(E1000_RLEC);
	adapter->stats.xonrxc += rd32(E1000_XONRXC);
	adapter->stats.xontxc += rd32(E1000_XONTXC);
	adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
	adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
	adapter->stats.fcruc += rd32(E1000_FCRUC);
	adapter->stats.gptc += rd32(E1000_GPTC);
	adapter->stats.gotc += rd32(E1000_GOTCL);
	rd32(E1000_GOTCH); /* clear GOTCL */
	rnbc = rd32(E1000_RNBC);
	adapter->stats.rnbc += rnbc;
	net_stats->rx_fifo_errors += rnbc;
	adapter->stats.ruc += rd32(E1000_RUC);
	adapter->stats.rfc += rd32(E1000_RFC);
	adapter->stats.rjc += rd32(E1000_RJC);
	adapter->stats.tor += rd32(E1000_TORH);
	adapter->stats.tot += rd32(E1000_TOTH);
	adapter->stats.tpr += rd32(E1000_TPR);

	adapter->stats.ptc64 += rd32(E1000_PTC64);
	adapter->stats.ptc127 += rd32(E1000_PTC127);
	adapter->stats.ptc255 += rd32(E1000_PTC255);
	adapter->stats.ptc511 += rd32(E1000_PTC511);
	adapter->stats.ptc1023 += rd32(E1000_PTC1023);
	adapter->stats.ptc1522 += rd32(E1000_PTC1522);

	adapter->stats.mptc += rd32(E1000_MPTC);
	adapter->stats.bptc += rd32(E1000_BPTC);

	/* used for adaptive IFS */
	hw->mac.tx_packet_delta = rd32(E1000_TPT);
	adapter->stats.tpt += hw->mac.tx_packet_delta;
	hw->mac.collision_delta = rd32(E1000_COLC);
	adapter->stats.colc += hw->mac.collision_delta;

	adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
	adapter->stats.rxerrc += rd32(E1000_RXERRC);
	adapter->stats.tncrs += rd32(E1000_TNCRS);
	adapter->stats.tsctc += rd32(E1000_TSCTC);
	adapter->stats.tsctfc += rd32(E1000_TSCTFC);

	adapter->stats.iac += rd32(E1000_IAC);
	adapter->stats.icrxoc += rd32(E1000_ICRXOC);
	adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
	adapter->stats.icrxatc += rd32(E1000_ICRXATC);
	adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
	adapter->stats.ictxatc += rd32(E1000_ICTXATC);
	adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
	adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
	adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);

	/* Fill out the OS statistics structure */
	net_stats->multicast = adapter->stats.mprc;
	net_stats->collisions = adapter->stats.colc;

	/* Rx Errors */

	/* RLEC on some newer hardware can be incorrect so build
	 * our own version based on RUC and ROC */
	net_stats->rx_errors = adapter->stats.rxerrc +
		adapter->stats.crcerrs + adapter->stats.algnerrc +
		adapter->stats.ruc + adapter->stats.roc +
		adapter->stats.cexterr;
	net_stats->rx_length_errors = adapter->stats.ruc +
				      adapter->stats.roc;
	net_stats->rx_crc_errors = adapter->stats.crcerrs;
	net_stats->rx_frame_errors = adapter->stats.algnerrc;
	net_stats->rx_missed_errors = adapter->stats.mpc;

	/* Tx Errors */
	net_stats->tx_errors = adapter->stats.ecol +
			       adapter->stats.latecol;
	net_stats->tx_aborted_errors = adapter->stats.ecol;
	net_stats->tx_window_errors = adapter->stats.latecol;
	net_stats->tx_carrier_errors = adapter->stats.tncrs;

	/* Tx Dropped needs to be maintained elsewhere */

	/* Phy Stats */
	if (hw->phy.media_type == e1000_media_type_copper) {
		if ((adapter->link_speed == SPEED_1000) &&
		   (!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
			phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
			adapter->phy_stats.idle_errors += phy_tmp;
		}
	}

	/* Management Stats */
	adapter->stats.mgptc += rd32(E1000_MGTPTC);
	adapter->stats.mgprc += rd32(E1000_MGTPRC);
	adapter->stats.mgpdc += rd32(E1000_MGTPDC);
}

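/**
 * igb_msix_other - MSI-X "other" interrupt handler
 * @irq: interrupt number
 * @data: pointer to our adapter structure
 *
 * Handles the interrupt causes that are not tied to a queue: DMA
 * out-of-sync reports, VF mailbox events and link status changes.
 **/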
static irqreturn_t igb_msix_other(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = rd32(E1000_ICR);
	/* reading ICR causes bit 31 of EICR to be cleared */

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	/* Check for a mailbox event */
	if (icr & E1000_ICR_VMMB)
		igb_msg_task(adapter);

	if (icr & E1000_ICR_LSC) {
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	if (adapter->vfs_allocated_count)
		wr32(E1000_IMS, E1000_IMS_LSC |
				E1000_IMS_VMMB |
				E1000_IMS_DOUTSYNC);
	else
		wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC);
	wr32(E1000_EIMS, adapter->eims_other);

	return IRQ_HANDLED;
}

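/**
 * igb_write_itr - write a q_vector's pending ITR value to hardware
 * @q_vector: structure containing the interrupt throttle rate to apply
 **/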
static void igb_write_itr(struct igb_q_vector *q_vector)
{
	u32 itr_val = q_vector->itr_val & 0x7FFC;

	if (!q_vector->set_itr)
		return;

	if (!itr_val)
		itr_val = 0x4;

	if (q_vector->itr_shift)
		itr_val |= itr_val << q_vector->itr_shift;
	else
		itr_val |= 0x8000000;

	writel(itr_val, q_vector->itr_register);
	q_vector->set_itr = 0;
}

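/**
 * igb_msix_ring - MSI-X queue interrupt handler
 * @irq: interrupt number
 * @data: pointer to the q_vector that raised the interrupt
 **/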
static irqreturn_t igb_msix_ring(int irq, void *data)
{
	struct igb_q_vector *q_vector = data;

	/* Write the ITR value calculated from the previous interrupt. */
	igb_write_itr(q_vector);

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

#ifdef CONFIG_IGB_DCA
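/**
 * igb_update_dca - retarget a q_vector's DCA tags at the CPU it now runs on
 * @q_vector: structure containing the Tx/Rx rings to update
 **/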
static void igb_update_dca(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	int cpu = get_cpu();

	if (q_vector->cpu == cpu)
		goto out_no_update;

	if (q_vector->tx_ring) {
		int q = q_vector->tx_ring->reg_idx;
		u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
		if (hw->mac.type == e1000_82575) {
			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
			dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		} else {
			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
			dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
			              E1000_DCA_TXCTRL_CPUID_SHIFT;
		}
		dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
		wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
	}
	if (q_vector->rx_ring) {
		int q = q_vector->rx_ring->reg_idx;
		u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
		if (hw->mac.type == e1000_82575) {
			dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
			dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		} else {
			dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
			dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
			              E1000_DCA_RXCTRL_CPUID_SHIFT;
		}
		dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
		dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
		dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
		wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
	}
	q_vector->cpu = cpu;
out_no_update:
	put_cpu();
}

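/**
 * igb_setup_dca - enable DCA mode and program it for all queue vectors
 * @adapter: board private structure
 **/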
static void igb_setup_dca(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
		return;

	/* Always use CB2 mode, difference is masked in the CB driver. */
	wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		q_vector->cpu = -1;
		igb_update_dca(q_vector);
	}
}

static int __igb_notify_dca(struct device *dev, void *data)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	unsigned long event = *(unsigned long *)data;

	switch (event) {
	case DCA_PROVIDER_ADD:
		/* if already enabled, don't do it again */
		if (adapter->flags & IGB_FLAG_DCA_ENABLED)
			break;
		if (dca_add_requester(dev) == 0) {
			adapter->flags |= IGB_FLAG_DCA_ENABLED;
			dev_info(&pdev->dev, "DCA enabled\n");
			igb_setup_dca(adapter);
			break;
		}
		/* Fall Through since DCA is disabled. */
	case DCA_PROVIDER_REMOVE:
		if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
			/* without this a class_device is left
			 * hanging around in the sysfs model */
			dca_remove_requester(dev);
			dev_info(&pdev->dev, "DCA disabled\n");
			adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
			wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
		}
		break;
	}

	return 0;
}

static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
                          void *p)
{
	int ret_val;

	ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
	                                 __igb_notify_dca);

	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}
#endif /* CONFIG_IGB_DCA */

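/**
 * igb_ping_all_vfs - send a control message to every allocated VF
 * @adapter: board private structure
 **/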
static void igb_ping_all_vfs(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ping;
	int i;

	for (i = 0 ; i < adapter->vfs_allocated_count; i++) {
		ping = E1000_PF_CONTROL_MSG;
		if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
			ping |= E1000_VT_MSGTYPE_CTS;
		igb_write_mbx(hw, &ping, 1, i);
	}
}

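/**
 * igb_set_vf_promisc - handle a VF request to change its promiscuous mode
 * @adapter: board private structure
 * @msgbuf: mailbox message received from the VF
 * @vf: VF number
 **/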
static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vmolr = rd32(E1000_VMOLR(vf));
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];

	vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
	                    IGB_VF_FLAG_MULTI_PROMISC);
	vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);

	if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
		vmolr |= E1000_VMOLR_MPME;
		vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
		*msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
	} else {
		/*
		 * if we have hashes and we are clearing a multicast promisc
		 * flag we need to write the hashes to the MTA as this step
		 * was previously skipped
		 */
		if (vf_data->num_vf_mc_hashes > 30) {
			vmolr |= E1000_VMOLR_MPME;
		} else if (vf_data->num_vf_mc_hashes) {
			int j;
			vmolr |= E1000_VMOLR_ROMPE;
			for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
				igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
		}
	}

	wr32(E1000_VMOLR(vf), vmolr);

	/* there are flags left unprocessed, likely not supported */
	if (*msgbuf & E1000_VT_MSGINFO_MASK)
		return -EINVAL;

	return 0;
}

static int igb_set_vf_multicasts(struct igb_adapter *adapter,
                                 u32 *msgbuf, u32 vf)
{
	int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
	u16 *hash_list = (u16 *)&msgbuf[1];
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	int i;

	/* salt away the number of multicast addresses assigned
	 * to this VF for later use to restore when the PF multi cast
	 * list changes
	 */
	vf_data->num_vf_mc_hashes = n;

	/* only up to 30 hash values supported */
	if (n > 30)
		n = 30;

	/* store the hashes for later use */
	for (i = 0; i < n; i++)
		vf_data->vf_mc_hashes[i] = hash_list[i];

	/* Flush and reset the mta with the new values */
	igb_set_rx_mode(adapter->netdev);

	return 0;
}

static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data;
	int i, j;

	for (i = 0; i < adapter->vfs_allocated_count; i++) {
		u32 vmolr = rd32(E1000_VMOLR(i));
		vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);

		vf_data = &adapter->vf_data[i];

		if ((vf_data->num_vf_mc_hashes > 30) ||
		    (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
			vmolr |= E1000_VMOLR_MPME;
		} else if (vf_data->num_vf_mc_hashes) {
			vmolr |= E1000_VMOLR_ROMPE;
			for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
				igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
		}
		wr32(E1000_VMOLR(i), vmolr);
	}
}

static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 pool_mask, reg, vid;
	int i;

	pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);

	/* Find the vlan filter for this id */
	for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
		reg = rd32(E1000_VLVF(i));

		/* remove the vf from the pool */
		reg &= ~pool_mask;

		/* if pool is empty then remove entry from vfta */
		if (!(reg & E1000_VLVF_POOLSEL_MASK) &&
		    (reg & E1000_VLVF_VLANID_ENABLE)) {
			vid = reg & E1000_VLVF_VLANID_MASK;
			igb_vfta_set(hw, vid, false);
			reg = 0;
		}

		wr32(E1000_VLVF(i), reg);
	}

	adapter->vf_data[vf].vlans_enabled = 0;
}

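/**
 * igb_vlvf_set - add or remove a VF pool from a VLAN VLVF filter entry
 * @adapter: board private structure
 * @vid: VLAN id to add or remove
 * @add: true to add the filter, false to remove it
 * @vf: pool/VF whose bit in the pool select field is updated
 **/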
static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 reg, i;

	/* The vlvf table only exists on 82576 hardware and newer */
	if (hw->mac.type < e1000_82576)
		return -1;

	/* we only need to do this if VMDq is enabled */
	if (!adapter->vfs_allocated_count)
		return -1;

	/* Find the vlan filter for this id */
	for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
		reg = rd32(E1000_VLVF(i));
		if ((reg & E1000_VLVF_VLANID_ENABLE) &&
		    vid == (reg & E1000_VLVF_VLANID_MASK))
			break;
	}

	if (add) {
		if (i == E1000_VLVF_ARRAY_SIZE) {
			/* Did not find a matching VLAN ID entry that was
			 * enabled.  Search for a free filter entry, i.e.
			 * one without the enable bit set
			 */
			for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
				reg = rd32(E1000_VLVF(i));
				if (!(reg & E1000_VLVF_VLANID_ENABLE))
					break;
			}
		}
		if (i < E1000_VLVF_ARRAY_SIZE) {
			/* Found an enabled/available entry */
			reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);

			/* if !enabled we need to set this up in vfta */
			if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
				/* add VID to filter table */
				igb_vfta_set(hw, vid, true);
				reg |= E1000_VLVF_VLANID_ENABLE;
			}
			reg &= ~E1000_VLVF_VLANID_MASK;
			reg |= vid;
			wr32(E1000_VLVF(i), reg);

			/* do not modify RLPML for PF devices */
			if (vf >= adapter->vfs_allocated_count)
				return 0;

			if (!adapter->vf_data[vf].vlans_enabled) {
				u32 size;
				reg = rd32(E1000_VMOLR(vf));
				size = reg & E1000_VMOLR_RLPML_MASK;
				size += 4;
				reg &= ~E1000_VMOLR_RLPML_MASK;
				reg |= size;
				wr32(E1000_VMOLR(vf), reg);
			}

			adapter->vf_data[vf].vlans_enabled++;
			return 0;
		}
	} else {
		if (i < E1000_VLVF_ARRAY_SIZE) {
			/* remove vf from the pool */
			reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf));
			/* if pool is empty then remove entry from vfta */
			if (!(reg & E1000_VLVF_POOLSEL_MASK)) {
				reg = 0;
				igb_vfta_set(hw, vid, false);
			}
			wr32(E1000_VLVF(i), reg);

			/* do not modify RLPML for PF devices */
			if (vf >= adapter->vfs_allocated_count)
				return 0;

			adapter->vf_data[vf].vlans_enabled--;
			if (!adapter->vf_data[vf].vlans_enabled) {
				u32 size;
				reg = rd32(E1000_VMOLR(vf));
				size = reg & E1000_VMOLR_RLPML_MASK;
				size -= 4;
				reg &= ~E1000_VMOLR_RLPML_MASK;
				reg |= size;
				wr32(E1000_VMOLR(vf), reg);
			}
		}
	}
	return 0;
}

static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
{
	int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
	int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);

	return igb_vlvf_set(adapter, vid, add, vf);
}

static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
{
	/* clear all flags */
	adapter->vf_data[vf].flags = 0;
	adapter->vf_data[vf].last_nack = jiffies;

	/* reset offloads to defaults */
	igb_set_vmolr(adapter, vf);

	/* reset vlans for device */
	igb_clear_vf_vfta(adapter, vf);

	/* reset multicast table array for vf */
	adapter->vf_data[vf].num_vf_mc_hashes = 0;

	/* Flush and reset the mta with the new values */
	igb_set_rx_mode(adapter->netdev);
}

static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
{
	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;

	/* generate a new mac address as we were hotplug removed/added */
	random_ether_addr(vf_mac);

	/* process remaining reset events */
	igb_vf_reset(adapter, vf);
}

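/**
 * igb_vf_reset_msg - reset a VF, re-enable its queues and reply with its MAC
 * @adapter: board private structure
 * @vf: VF that requested the reset
 **/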
static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
	int rar_entry = hw->mac.rar_entry_count - (vf + 1);
	u32 reg, msgbuf[3];
	u8 *addr = (u8 *)(&msgbuf[1]);

	/* process all the same items cleared in a function level reset */
	igb_vf_reset(adapter, vf);

	/* set vf mac address */
	igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);

	/* enable transmit and receive for vf */
	reg = rd32(E1000_VFTE);
	wr32(E1000_VFTE, reg | (1 << vf));
	reg = rd32(E1000_VFRE);
	wr32(E1000_VFRE, reg | (1 << vf));

	adapter->vf_data[vf].flags = IGB_VF_FLAG_CTS;

	/* reply to reset with ack and vf mac address */
	msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
	memcpy(addr, vf_mac, 6);
	igb_write_mbx(hw, msgbuf, 3, vf);
}

static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
{
	unsigned char *addr = (char *)&msg[1];
	int err = -1;

	if (is_valid_ether_addr(addr))
		err = igb_set_vf_mac(adapter, vf, addr);

	return err;
}

static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	u32 msg = E1000_VT_MSGTYPE_NACK;

	/* if device isn't clear to send it shouldn't be reading either */
	if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
	    time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
		igb_write_mbx(hw, &msg, 1, vf);
		vf_data->last_nack = jiffies;
	}
}

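/**
 * igb_rcv_msg_from_vf - read a mailbox message from a VF and dispatch it
 * @adapter: board private structure
 * @vf: VF the message was received from
 **/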
static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 msgbuf[E1000_VFMAILBOX_SIZE];
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	s32 retval;

	retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf);

	if (retval) {
		/* if receive failed revoke VF CTS stats and restart init */
		dev_err(&pdev->dev, "Error receiving message from VF\n");
		vf_data->flags &= ~IGB_VF_FLAG_CTS;
		if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
			return;
		goto out;
	}

	/* this is a message we already processed, do nothing */
	if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
		return;

	/*
	 * until the vf completes a reset it should not be
	 * allowed to start any configuration.
	 */

	if (msgbuf[0] == E1000_VF_RESET) {
		igb_vf_reset_msg(adapter, vf);
		return;
	}

	if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
		if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
			return;
		retval = -1;
		goto out;
	}

	switch ((msgbuf[0] & 0xFFFF)) {
	case E1000_VF_SET_MAC_ADDR:
		retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_PROMISC:
		retval = igb_set_vf_promisc(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_MULTICAST:
		retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_LPE:
		retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
		break;
	case E1000_VF_SET_VLAN:
		retval = igb_set_vf_vlan(adapter, msgbuf, vf);
		break;
	default:
		dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
		retval = -1;
		break;
	}

	msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
out:
	/* notify the VF of the results of what it sent us */
	if (retval)
		msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
	else
		msgbuf[0] |= E1000_VT_MSGTYPE_ACK;

	igb_write_mbx(hw, msgbuf, 1, vf);
}

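/**
 * igb_msg_task - process pending reset requests, messages and acks from VFs
 * @adapter: board private structure
 **/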
static void igb_msg_task(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vf;

	for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
		/* process any reset requests */
		if (!igb_check_for_rst(hw, vf))
			igb_vf_reset_event(adapter, vf);

		/* process any messages pending */
		if (!igb_check_for_msg(hw, vf))
			igb_rcv_msg_from_vf(adapter, vf);

		/* process any acks */
		if (!igb_check_for_ack(hw, vf))
			igb_rcv_ack_from_vf(adapter, vf);
	}
}

/**
 *  igb_set_uta - Set unicast filter table address
 *  @adapter: board private structure
 *
 *  The unicast table address is a register array of 32-bit registers.
 *  The table is meant to be used in a way similar to how the MTA is used
 *  however due to certain limitations in the hardware it is necessary to
 *  set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
 *  enable bit to allow vlan tag stripping when promiscuous mode is enabled
 **/
static void igb_set_uta(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	/* The UTA table only exists on 82576 hardware and newer */
	if (hw->mac.type < e1000_82576)
		return;

	/* we only need to do this if VMDq is enabled */
	if (!adapter->vfs_allocated_count)
		return;

	for (i = 0; i < hw->mac.uta_reg_count; i++)
		array_wr32(E1000_UTA, i, ~0);
}

/**
 * igb_intr_msi - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t igb_intr_msi(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	struct e1000_hw *hw = &adapter->hw;
	/* read ICR disables interrupts using IAM */
	u32 icr = rd32(E1000_ICR);

	igb_write_itr(q_vector);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * igb_intr - Legacy Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t igb_intr(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	struct e1000_hw *hw = &adapter->hw;
	/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked.  No
	 * need for the IMC write */
	u32 icr = rd32(E1000_ICR);
	if (!icr)
		return IRQ_NONE;  /* Not our interrupt */

	igb_write_itr(q_vector);

	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
	 * not set, then the adapter didn't send an interrupt */
	if (!(icr & E1000_ICR_INT_ASSERTED))
		return IRQ_NONE;

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

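/**
 * igb_ring_irq_enable - update ITR if requested, then re-enable the interrupt
 * @q_vector: structure containing interrupt and ring information
 **/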
static inline void igb_ring_irq_enable(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;

	if ((q_vector->rx_ring && (adapter->rx_itr_setting & 3)) ||
	    (!q_vector->rx_ring && (adapter->tx_itr_setting & 3))) {
		if (!adapter->msix_entries)
			igb_set_itr(adapter);
		else
			igb_update_ring_itr(q_vector);
	}

	if (!test_bit(__IGB_DOWN, &adapter->state)) {
		if (adapter->msix_entries)
			wr32(E1000_EIMS, q_vector->eims_value);
		else
			igb_irq_enable(adapter);
	}
}

/**
 * igb_poll - NAPI Rx polling callback
 * @napi: napi polling structure
 * @budget: count of how many packets we should handle
 **/
static int igb_poll(struct napi_struct *napi, int budget)
{
	struct igb_q_vector *q_vector = container_of(napi,
	                                             struct igb_q_vector,
	                                             napi);
	int tx_clean_complete = 1, work_done = 0;

#ifdef CONFIG_IGB_DCA
	if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
		igb_update_dca(q_vector);
#endif
	if (q_vector->tx_ring)
		tx_clean_complete = igb_clean_tx_irq(q_vector);

	if (q_vector->rx_ring)
		igb_clean_rx_irq_adv(q_vector, &work_done, budget);

	if (!tx_clean_complete)
		work_done = budget;

	/* If not enough Rx work done, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		igb_ring_irq_enable(q_vector);
	}

	return work_done;
}

/**
 * igb_systim_to_hwtstamp - convert system time value to hw timestamp
 * @adapter: board private structure
 * @shhwtstamps: timestamp structure to update
 * @regval: unsigned 64bit system time value.
 *
 * We need to convert the system time value stored in the RX/TXSTMP registers
 * into a hwtstamp which can be used by the upper level timestamping functions
 */
static void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
                                   struct skb_shared_hwtstamps *shhwtstamps,
                                   u64 regval)
{
	u64 ns;

	/*
	 * The 82580 starts with 1ns at bit 0 in RX/TXSTMPL, shift this up to
	 * 24 to match clock shift we setup earlier.
	 */
	if (adapter->hw.mac.type == e1000_82580)
		regval <<= IGB_82580_TSYNC_SHIFT;

	ns = timecounter_cyc2time(&adapter->clock, regval);
	timecompare_update(&adapter->compare, ns);
	memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
	shhwtstamps->hwtstamp = ns_to_ktime(ns);
	shhwtstamps->syststamp = timecompare_transform(&adapter->compare, ns);
}

/**
 * igb_tx_hwtstamp - utility function which checks for TX time stamp
 * @q_vector: pointer to q_vector containing needed info
 * @skb: packet that was just sent
 *
 * If we were asked to do hardware stamping and such a time stamp is
 * available, then it must have been for this skb here because we
 * allow only one such packet into the queue.
 */
static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb)
{
	struct igb_adapter *adapter = q_vector->adapter;
	union skb_shared_tx *shtx = skb_tx(skb);
	struct e1000_hw *hw = &adapter->hw;
	struct skb_shared_hwtstamps shhwtstamps;
	u64 regval;

	/* if skb does not support hw timestamp or TX stamp not valid exit */
	if (likely(!shtx->hardware) ||
	    !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
		return;

	regval = rd32(E1000_TXSTMPL);
	regval |= (u64)rd32(E1000_TXSTMPH) << 32;

	igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
	skb_tstamp_tx(skb, &shhwtstamps);
}

/**
 * igb_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: pointer to q_vector containing needed info
 * returns true if ring is completely cleaned
 **/
static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct igb_ring *tx_ring = q_vector->tx_ring;
	struct net_device *netdev = tx_ring->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct igb_buffer *buffer_info;
	struct sk_buff *skb;
	union e1000_adv_tx_desc *tx_desc, *eop_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int i, eop, count = 0;
	bool cleaned = false;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);

	while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
	       (count < tx_ring->count)) {
		for (cleaned = false; !cleaned; count++) {
			tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			cleaned = (i == eop);
			skb = buffer_info->skb;

			if (skb) {
				unsigned int segs, bytecount;
				/* gso_segs is currently only valid for tcp */
				segs = skb_shinfo(skb)->gso_segs ?: 1;
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * skb_headlen(skb)) +
				            skb->len;
				total_packets += segs;
				total_bytes += bytecount;

				igb_tx_hwtstamp(q_vector, skb);
			}

			igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
			tx_desc->wb.status = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}
		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

	if (unlikely(count &&
	             netif_carrier_ok(netdev) &&
	             igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
		    !(test_bit(__IGB_DOWN, &adapter->state))) {
			netif_wake_subqueue(netdev, tx_ring->queue_index);
			tx_ring->tx_stats.restart_queue++;
		}
	}

	if (tx_ring->detect_tx_hung) {
		/* Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i */
		tx_ring->detect_tx_hung = false;
		if (tx_ring->buffer_info[i].time_stamp &&
		    time_after(jiffies, tx_ring->buffer_info[i].time_stamp +
		               (adapter->tx_timeout_factor * HZ)) &&
		    !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {

			/* detected Tx unit hang */
			dev_err(&tx_ring->pdev->dev,
				"Detected Tx Unit Hang\n"
				"  Tx Queue             <%d>\n"
				"  TDH                  <%x>\n"
				"  TDT                  <%x>\n"
				"  next_to_use          <%x>\n"
				"  next_to_clean        <%x>\n"
				"buffer_info[next_to_clean]\n"
				"  time_stamp           <%lx>\n"
				"  next_to_watch        <%x>\n"
				"  jiffies              <%lx>\n"
				"  desc.status          <%x>\n",
				tx_ring->queue_index,
				readl(tx_ring->head),
				readl(tx_ring->tail),
				tx_ring->next_to_use,
				tx_ring->next_to_clean,
				tx_ring->buffer_info[eop].time_stamp,
				eop,
				jiffies,
				eop_desc->wb.status);
			netif_stop_subqueue(netdev, tx_ring->queue_index);
		}
	}
	tx_ring->total_bytes += total_bytes;
	tx_ring->total_packets += total_packets;
	tx_ring->tx_stats.bytes += total_bytes;
	tx_ring->tx_stats.packets += total_packets;
	return (count < tx_ring->count);
}

/**
 * igb_receive_skb - helper function to handle rx indications
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 * @vlan_tag: vlan tag for packet
 **/
static void igb_receive_skb(struct igb_q_vector *q_vector,
                            struct sk_buff *skb,
                            u16 vlan_tag)
{
	struct igb_adapter *adapter = q_vector->adapter;

	if (vlan_tag)
		vlan_gro_receive(&q_vector->napi, adapter->vlgrp,
		                 vlan_tag, skb);
	else
		napi_gro_receive(&q_vector->napi, skb);
}

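/**
 * igb_rx_checksum_adv - set the skb checksum status from descriptor bits
 * @ring: ring the packet was received on
 * @status_err: status/error bits from the receive descriptor
 * @skb: packet being marked up
 **/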
static inline void igb_rx_checksum_adv(struct igb_ring *ring,
                                       u32 status_err, struct sk_buff *skb)
{
	skb->ip_summed = CHECKSUM_NONE;

	/* Ignore Checksum bit is set or checksum is disabled through ethtool */
	if (!(ring->flags & IGB_RING_FLAG_RX_CSUM) ||
	     (status_err & E1000_RXD_STAT_IXSM))
		return;

	/* TCP/UDP checksum error bit is set */
	if (status_err &
	    (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
		/*
		 * work around errata with sctp packets where the TCPE aka
		 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
		 * packets, (aka let the stack check the crc32c)
		 */
		if (!((skb->len == 60) &&
		      (ring->flags & IGB_RING_FLAG_RX_SCTP_CSUM)))
			ring->rx_stats.csum_err++;

		/* let the stack verify checksum errors */
		return;
	}
	/* It must be a TCP or UDP packet with a valid checksum */
	if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	dev_dbg(&ring->pdev->dev, "cksum success: bits %08X\n", status_err);
}

static inline void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
                                   struct sk_buff *skb)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	u64 regval;

	/*
	 * If this bit is set, then the RX registers contain the time stamp. No
	 * other packet will be time stamped until we read these registers, so
	 * read the registers to make them available again. Because only one
	 * packet can be time stamped at a time, we know that the register
	 * values must belong to this one here and therefore we don't need to
	 * compare any of the additional attributes stored for it.
	 *
	 * If nothing went wrong, then it should have a skb_shared_tx that we
	 * can turn into a skb_shared_hwtstamps.
	 */
	if (likely(!(staterr & E1000_RXDADV_STAT_TS)))
		return;
	if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
		return;

	regval = rd32(E1000_RXSTMPL);
	regval |= (u64)rd32(E1000_RXSTMPH) << 32;

	igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
}

static inline u16 igb_get_hlen(struct igb_ring *rx_ring,
                               union e1000_adv_rx_desc *rx_desc)
{
	/* HW will not DMA in data larger than the given buffer, even if it
	 * parses the (NFS, of course) header to be larger.  In that case, it
	 * fills the header buffer and spills the rest into the page.
	 */
	u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
	           E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
	if (hlen > rx_ring->rx_buffer_len)
		hlen = rx_ring->rx_buffer_len;
	return hlen;
}

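/**
 * igb_clean_rx_irq_adv - clean completed descriptors and send packets up
 * @q_vector: structure containing interrupt and ring information
 * @work_done: incremented for every packet processed
 * @budget: NAPI budget; cleaning stops once it is reached
 **/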
static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
                                 int *work_done, int budget)
{
	struct igb_ring *rx_ring = q_vector->rx_ring;
	struct net_device *netdev = rx_ring->netdev;
	struct pci_dev *pdev = rx_ring->pdev;
	union e1000_adv_rx_desc *rx_desc , *next_rxd;
	struct igb_buffer *buffer_info , *next_buffer;
	struct sk_buff *skb;
	bool cleaned = false;
	int cleaned_count = 0;
	int current_node = numa_node_id();
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int i;
	u32 staterr;
	u16 length;
	u16 vlan_tag;

	i = rx_ring->next_to_clean;
	buffer_info = &rx_ring->buffer_info[i];
	rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

	while (staterr & E1000_RXD_STAT_DD) {
		if (*work_done >= budget)
			break;
		(*work_done)++;

		skb = buffer_info->skb;
		prefetch(skb->data - NET_IP_ALIGN);
		buffer_info->skb = NULL;

		i++;
		if (i == rx_ring->count)
			i = 0;

		next_rxd = E1000_RX_DESC_ADV(*rx_ring, i);
		prefetch(next_rxd);
		next_buffer = &rx_ring->buffer_info[i];

		length = le16_to_cpu(rx_desc->wb.upper.length);
		cleaned = true;
		cleaned_count++;

		if (buffer_info->dma) {
			pci_unmap_single(pdev, buffer_info->dma,
			                 rx_ring->rx_buffer_len,
			                 PCI_DMA_FROMDEVICE);
			buffer_info->dma = 0;
			if (rx_ring->rx_buffer_len >= IGB_RXBUFFER_1024) {
				skb_put(skb, length);
				goto send_up;
			}
			skb_put(skb, igb_get_hlen(rx_ring, rx_desc));
		}

		if (length) {
			pci_unmap_page(pdev, buffer_info->page_dma,
			               PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
			buffer_info->page_dma = 0;

			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
			                   buffer_info->page,
			                   buffer_info->page_offset,
			                   length);

			if ((page_count(buffer_info->page) != 1) ||
			    (page_to_nid(buffer_info->page) != current_node))
				buffer_info->page = NULL;
			else
				get_page(buffer_info->page);

			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
		}

		if (!(staterr & E1000_RXD_STAT_EOP)) {
			buffer_info->skb = next_buffer->skb;
			buffer_info->dma = next_buffer->dma;
			next_buffer->skb = skb;
			next_buffer->dma = 0;
			goto next_desc;
		}
send_up:
		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		igb_rx_hwtstamp(q_vector, staterr, skb);
		total_bytes += skb->len;
		total_packets++;

		igb_rx_checksum_adv(rx_ring, staterr, skb);

		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, rx_ring->queue_index);

		vlan_tag = ((staterr & E1000_RXD_STAT_VP) ?
		            le16_to_cpu(rx_desc->wb.upper.vlan) : 0);

		igb_receive_skb(q_vector, skb, vlan_tag);

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
			igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = igb_desc_unused(rx_ring);

	if (cleaned_count)
		igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);

	rx_ring->total_packets += total_packets;
	rx_ring->total_bytes += total_bytes;
	rx_ring->rx_stats.packets += total_packets;
	rx_ring->rx_stats.bytes += total_bytes;
	return cleaned;
}

/**
 * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 **/
void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
{
	struct net_device *netdev = rx_ring->netdev;
	union e1000_adv_rx_desc *rx_desc;
	struct igb_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	int bufsz;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	bufsz = rx_ring->rx_buffer_len;

	while (cleaned_count--) {
		rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);

		if ((bufsz < IGB_RXBUFFER_1024) && !buffer_info->page_dma) {
			if (!buffer_info->page) {
				buffer_info->page = netdev_alloc_page(netdev);
				if (!buffer_info->page) {
					rx_ring->rx_stats.alloc_failed++;
					goto no_buffers;
				}
				buffer_info->page_offset = 0;
			} else {
				buffer_info->page_offset ^= PAGE_SIZE / 2;
			}
			buffer_info->page_dma =
				pci_map_page(rx_ring->pdev, buffer_info->page,
				             buffer_info->page_offset,
				             PAGE_SIZE / 2,
				             PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(rx_ring->pdev,
			                          buffer_info->page_dma)) {
				buffer_info->page_dma = 0;
				rx_ring->rx_stats.alloc_failed++;
				goto no_buffers;
			}
		}

		skb = buffer_info->skb;
		if (!skb) {
			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
			if (!skb) {
				rx_ring->rx_stats.alloc_failed++;
				goto no_buffers;
			}

			buffer_info->skb = skb;
		}
		if (!buffer_info->dma) {
			buffer_info->dma = pci_map_single(rx_ring->pdev,
			                                  skb->data,
			                                  bufsz,
			                                  PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(rx_ring->pdev,
			                          buffer_info->dma)) {
				buffer_info->dma = 0;
				rx_ring->rx_stats.alloc_failed++;
				goto no_buffers;
			}
		}
		/* Refresh the desc even if buffer_addrs didn't change because
		 * each write-back erases this info. */
		if (bufsz < IGB_RXBUFFER_1024) {
			rx_desc->read.pkt_addr =
			     cpu_to_le64(buffer_info->page_dma);
			rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
		} else {
			rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma);
			rx_desc->read.hdr_addr = 0;
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		if (i == 0)
			i = (rx_ring->count - 1);
		else
			i--;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64). */
		wmb();
		writel(i, rx_ring->tail);
	}
}

static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct mii_ioctl_data *data = if_mii(ifr);

	if (adapter->hw.phy.media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = adapter->hw.phy.addr;
		break;
	case SIOCGMIIREG:
		if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
		                     &data->val_out))
			return -EIO;
		break;
	case SIOCSMIIREG:
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

/**
 * igb_hwtstamp_ioctl - control hardware time stamping
 * @netdev:
 * @ifr:
 * @cmd:
 *
 * Outgoing time stamping can be enabled and disabled. Play nice and
 * disable it when requested, although it shouldn't cause any overhead
 * when no packet needs it. At most one packet in the queue may be
 * marked for time stamping, otherwise it would be impossible to tell
 * for sure to which packet the hardware time stamp belongs.
 *
 * Incoming time stamping has to be configured via the hardware
 * filters. Not all combinations are supported, in particular event
 * type has to be specified. Matching the kind of event packet is
 * not supported, with the exception of "all V2 events regardless of
 * level 2 or 4".
 *
 **/
static int igb_hwtstamp_ioctl(struct net_device *netdev,
                              struct ifreq *ifr, int cmd)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct hwtstamp_config config;
	u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
	u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
	u32 tsync_rx_cfg = 0;
	bool is_l4 = false;
	bool is_l2 = false;
	u32 regval;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		tsync_tx_ctl = 0;
	case HWTSTAMP_TX_ON:
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		tsync_rx_ctl = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_ALL:
		/*
		 * register TSYNCRXCFG must be set, therefore it is not
		 * possible to time stamp both Sync and Delay_Req messages
		 * => fall back to time stamping all packets
		 */
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
		is_l4 = true;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
		is_l4 = true;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
		is_l2 = true;
		is_l4 = true;
		config.rx_filter = HWTSTAMP_FILTER_SOME;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
		is_l2 = true;
		is_l4 = true;
		config.rx_filter = HWTSTAMP_FILTER_SOME;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		is_l2 = true;
		break;
	default:
		return -ERANGE;
	}

	if (hw->mac.type == e1000_82575) {
		if (tsync_rx_ctl | tsync_tx_ctl)
			return -EINVAL;
		return 0;
	}

	/* enable/disable TX */
	regval = rd32(E1000_TSYNCTXCTL);
	regval &= ~E1000_TSYNCTXCTL_ENABLED;
	regval |= tsync_tx_ctl;
	wr32(E1000_TSYNCTXCTL, regval);

	/* enable/disable RX */
	regval = rd32(E1000_TSYNCRXCTL);
	regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
	regval |= tsync_rx_ctl;
	wr32(E1000_TSYNCRXCTL, regval);

	/* define which PTP packets are time stamped */
	wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);

	/* define ethertype filter for timestamped packets */
	if (is_l2)
		wr32(E1000_ETQF(3),
		     (E1000_ETQF_FILTER_ENABLE | /* enable filter */
		      E1000_ETQF_1588 | /* enable timestamping */
		      ETH_P_1588));     /* 1588 eth protocol type */
	else
		wr32(E1000_ETQF(3), 0);

#define PTP_PORT 319
	/* L4 Queue Filter[3]: filter by destination port and protocol */
	if (is_l4) {
		u32 ftqf = (IPPROTO_UDP /* UDP */
			| E1000_FTQF_VF_BP /* VF not compared */
			| E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */
			| E1000_FTQF_MASK); /* mask all inputs */
		ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */

		wr32(E1000_IMIR(3), htons(PTP_PORT));
		wr32(E1000_IMIREXT(3),
		     (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
		if (hw->mac.type == e1000_82576) {
			/* enable source port check */
			wr32(E1000_SPQF(3), htons(PTP_PORT));
			ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
		}
		wr32(E1000_FTQF(3), ftqf);
	} else {
		wr32(E1000_FTQF(3), E1000_FTQF_MASK);
	}
	wrfl();

	adapter->hwtstamp_config = config;

	/* clear TX/RX time stamp registers, just to be sure */
	regval = rd32(E1000_TXSTMPH);
	regval = rd32(E1000_RXSTMPH);

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}

static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return igb_mii_ioctl(netdev, ifr, cmd);
	case SIOCSHWTSTAMP:
		return igb_hwtstamp_ioctl(netdev, ifr, cmd);
	default:
		return -EOPNOTSUPP;
	}
}

s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
	struct igb_adapter *adapter = hw->back;
	u16 cap_offset;

	cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
	if (!cap_offset)
		return -E1000_ERR_CONFIG;

	pci_read_config_word(adapter->pdev, cap_offset + reg, value);

	return 0;
}

s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
	struct igb_adapter *adapter = hw->back;
	u16 cap_offset;

	cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
	if (!cap_offset)
		return -E1000_ERR_CONFIG;

	pci_write_config_word(adapter->pdev, cap_offset + reg, *value);

	return 0;
}

static void igb_vlan_rx_register(struct net_device *netdev,
                                 struct vlan_group *grp)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl;

	igb_irq_disable(adapter);
	adapter->vlgrp = grp;

	if (grp) {
		/* enable VLAN tag insert/strip */
		ctrl = rd32(E1000_CTRL);
		ctrl |= E1000_CTRL_VME;
		wr32(E1000_CTRL, ctrl);

		/* Disable CFI check */
		rctl = rd32(E1000_RCTL);
		rctl &= ~E1000_RCTL_CFIEN;
		wr32(E1000_RCTL, rctl);
	} else {
		/* disable VLAN tag insert/strip */
		ctrl = rd32(E1000_CTRL);
		ctrl &= ~E1000_CTRL_VME;
		wr32(E1000_CTRL, ctrl);
	}

	igb_rlpml_set(adapter);

	if (!test_bit(__IGB_DOWN, &adapter->state))
		igb_irq_enable(adapter);
}

static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int pf_id = adapter->vfs_allocated_count;

	/* attempt to add filter to vlvf array */
	igb_vlvf_set(adapter, vid, true, pf_id);

	/* add the filter since PF can receive vlans w/o entry in vlvf */
	igb_vfta_set(hw, vid, true);
}

static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int pf_id = adapter->vfs_allocated_count;
	s32 err;

	igb_irq_disable(adapter);
	vlan_group_set_device(adapter->vlgrp, vid, NULL);

	if (!test_bit(__IGB_DOWN, &adapter->state))
		igb_irq_enable(adapter);

	/* remove vlan from VLVF table array */
	err = igb_vlvf_set(adapter, vid, false, pf_id);

	/* if vid was not present in VLVF just remove it from table */
	if (err)
		igb_vfta_set(hw, vid, false);
}

static void igb_restore_vlan(struct igb_adapter *adapter)
{
	igb_vlan_rx_register(adapter->netdev, adapter->vlgrp);

	if (adapter->vlgrp) {
		u16 vid;
		for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
			if (!vlan_group_get_device(adapter->vlgrp, vid))
				continue;
			igb_vlan_rx_add_vid(adapter->netdev, vid);
		}
	}
}

int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
{
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_mac_info *mac = &adapter->hw.mac;

	mac->autoneg = 0;

	switch (spddplx) {
	case SPEED_10 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_10_HALF;
		break;
	case SPEED_10 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_10_FULL;
		break;
	case SPEED_100 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_100_HALF;
		break;
	case SPEED_100 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_100_FULL;
		break;
	case SPEED_1000 + DUPLEX_FULL:
		mac->autoneg = 1;
		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case SPEED_1000 + DUPLEX_HALF: /* not supported */
	default:
		dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
		return -EINVAL;
	}
	return 0;
}

static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl, status;
	u32 wufc = adapter->wol;
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev))
		igb_close(netdev);

	igb_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;
#endif

	status = rd32(E1000_STATUS);
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;

	if (wufc) {
		igb_setup_rctl(adapter);
		igb_set_rx_mode(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & E1000_WUFC_MC) {
			rctl = rd32(E1000_RCTL);
			rctl |= E1000_RCTL_MPE;
			wr32(E1000_RCTL, rctl);
		}

		ctrl = rd32(E1000_CTRL);
		/* advertise wake from D3Cold */
		#define E1000_CTRL_ADVD3WUC 0x00100000
		/* phy power management enable */
		#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
		ctrl |= E1000_CTRL_ADVD3WUC;
		wr32(E1000_CTRL, ctrl);

		/* Allow time for pending master requests to run */
		igb_disable_pcie_master(hw);

		wr32(E1000_WUC, E1000_WUC_PME_EN);
		wr32(E1000_WUFC, wufc);
	} else {
		wr32(E1000_WUC, 0);
		wr32(E1000_WUFC, 0);
	}

	*enable_wake = wufc || adapter->en_mng_pt;
	if (!*enable_wake)
		igb_shutdown_serdes_link_82575(hw);

	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
	 * would have already happened in close and is redundant. */
	igb_release_hw_control(adapter);

	pci_disable_device(pdev);

	return 0;
}

#ifdef CONFIG_PM
static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int retval;
	bool wake;

	retval = __igb_shutdown(pdev, &wake);
	if (retval)
		return retval;

	if (wake) {
		pci_prepare_to_sleep(pdev);
	} else {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}

static int igb_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"igb: Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (igb_init_interrupt_scheme(adapter)) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	/* e1000_power_up_phy(adapter); */

	igb_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);

	wr32(E1000_WUS, ~0);

	if (netif_running(netdev)) {
		err = igb_open(netdev);
		if (err)
			return err;
	}

	netif_device_attach(netdev);

	return 0;
}
#endif /* CONFIG_PM */

static void igb_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__igb_shutdown(pdev, &wake);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void igb_netpoll(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int i;

	if (!adapter->msix_entries) {
		struct igb_q_vector *q_vector = adapter->q_vector[0];
		igb_irq_disable(adapter);
		napi_schedule(&q_vector->napi);
		return;
	}

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		wr32(E1000_EIMC, q_vector->eims_value);
		napi_schedule(&q_vector->napi);
	}
}
#endif /* CONFIG_NET_POLL_CONTROLLER */

/**
 * igb_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
                                              pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		igb_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * igb_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the igb_resume routine.
 */
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	pci_ers_result_t result;
	int err;

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);

		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);

		igb_reset(adapter);
		wr32(E1000_WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status "
		        "failed 0x%0x\n", err);
		/* non-fatal, continue */
	}

	return result;
}

/**
 * igb_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the igb_resume routine.
 */
static void igb_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (igb_up(adapter)) {
			dev_err(&pdev->dev, "igb_up failed after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);
}

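/**
 * igb_rar_set_qsel - program a receive address register with a pool select
 * @adapter: board private structure
 * @addr: MAC address, in network byte order
 * @index: receive address register to program
 * @qsel: pool/queue the address is valid for
 **/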
static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
                             u8 qsel)
{
	u32 rar_low, rar_high;
	struct e1000_hw *hw = &adapter->hw;

	/* HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
	          ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));

	/* Indicate to hardware the Address is Valid. */
	rar_high |= E1000_RAH_AV;

	/* if set, indicate to hardware which VF this MAC can be used by */
	if (hw->mac.type == e1000_82575)
		rar_high |= E1000_RAH_POOL_1 * qsel;
	else
		rar_high |= E1000_RAH_POOL_1 << qsel;

	wr32(E1000_RAL(index), rar_low);
	wrfl();
	wr32(E1000_RAH(index), rar_high);
	wrfl();
}

static int igb_set_vf_mac(struct igb_adapter *adapter,
                          int vf, unsigned char *mac_addr)
{
	struct e1000_hw *hw = &adapter->hw;
	/* VF MAC addresses start at end of receive addresses and moves
	 * towards the first, as a result a collision should not be possible */
	int rar_entry = hw->mac.rar_entry_count - (vf + 1);

	memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);

	igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf);

	return 0;
}

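/**
 * igb_vmm_control - configure loopback, replication and vlan stripping for VMDq
 * @adapter: board private structure
 **/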
static void igb_vmm_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 reg;

	/* replication is not supported for 82575 */
	if (hw->mac.type == e1000_82575)
		return;

	/* enable replication vlan tag stripping */
	reg = rd32(E1000_RPLOLR);
	reg |= E1000_RPLOLR_STRVLAN;
	wr32(E1000_RPLOLR, reg);

	/* notify HW that the MAC is adding vlan tags */
	reg = rd32(E1000_DTXCTL);
	reg |= E1000_DTXCTL_VLAN_ADDED;
	wr32(E1000_DTXCTL, reg);

	if (adapter->vfs_allocated_count) {
		igb_vmdq_set_loopback_pf(hw, true);
		igb_vmdq_set_replication_pf(hw, true);
	} else {
		igb_vmdq_set_loopback_pf(hw, false);
		igb_vmdq_set_replication_pf(hw, false);
	}
}